| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses 1 value) | license (stringclasses 13 values) | repos (stringlengths 5-43.2k) |
|---|---|---|---|---|---|---|---|---|---|
e6afac74a504515e0f4151fc96fdc4752cb9b890
|
CodeFights/electionsWinners.py
|
CodeFights/electionsWinners.py
|
#!/usr/local/bin/python
# Code Fights Election Winners Problem
def electionsWinners(votes, k):
max_v = max(votes)
count_max = votes.count(max_v)
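# with k == 0 there are no undecided voters left, so only a unique current leader can win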
if k == 0:
if count_max == 1:
return 1
else:
return 0
return sum([1 for v in votes if v + k > max_v])
# Alternative solution:
# max_v = max(votes)
# return (int(votes.count(max_v) == 1) if k == 0 else
# len([n for n in votes if max_v < n + k]))
def main():
tests = [
[[2, 3, 5, 2], 3, 2],
[[1, 3, 3, 1, 1], 0, 0],
[[5, 1, 3, 4, 1], 0, 1],
[[1, 1, 1, 1], 1, 4],
[[1, 1, 1, 1], 0, 0],
[[3, 1, 1, 3, 1], 2, 2]
]
for t in tests:
res = electionsWinners(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: electionsWinners({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: electionsWinners({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights elections winners problem
|
Solve Code Fights elections winners problem
|
Python
|
mit
|
HKuz/Test_Code
|
92f074c50fdf98ad89723e13e7f0de1f5e7f30b7
|
yunity/migrations/0003_auto_20151117_1506.py
|
yunity/migrations/0003_auto_20151117_1506.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9b1 on 2015-11-17 15:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import yunity.utils.models.field
class Migration(migrations.Migration):
dependencies = [
('yunity', '0002_item'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('name', yunity.utils.models.field.MaxLengthCharField(max_length=255)),
('description', models.TextField(null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GroupMembership',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='yunity.Group')),
],
options={
'abstract': False,
},
),
migrations.RenameField(
model_name='user',
old_name='date_joined',
new_name='created_at',
),
migrations.AddField(
model_name='conversation',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='item',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='conversationmessage',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='groupmembership',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='group',
name='members',
field=models.ManyToManyField(through='yunity.GroupMembership', to=settings.AUTH_USER_MODEL),
),
]
|
Add group and creation date field migration
|
Add group and creation date field migration
|
Python
|
agpl-3.0
|
yunity/foodsaving-backend,yunity/foodsaving-backend,yunity/yunity-core,yunity/yunity-core,yunity/foodsaving-backend
|
1e6ac739fc7a6ec153d0a4d96d80fb1957c413ae
|
pypeline/common/system.py
|
pypeline/common/system.py
|
#!/usr/bin/python
#
# Copyright (c) 2013 Mikkel Schubert <MSchubert@snm.ku.dk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import ctypes
def set_procname(name = os.path.basename(sys.argv[0])):
"""Attempts to set the current process-name to the
given name. Fails silently if this is not possible.
Currently only works for Linux systems."""
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
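# 15 is PR_SET_NAME from <linux/prctl.h>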
libc.prctl(15, name, 0, 0, 0)
except StandardError:
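# StandardError is Python 2 only; the call also fails harmlessly where libc/prctl is unavailable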
pass
|
Add module/function to set proc name.
|
Add module/function to set proc name.
|
Python
|
mit
|
MikkelSchubert/paleomix,MikkelSchubert/paleomix,MikkelSchubert/paleomix
|
e0c88839c4646e2d619f27399a0e30845e98735d
|
examples/validation_example.py
|
examples/validation_example.py
|
"""Usage: prog.py --count=N OUTDIR FILE
Arguments:
FILE input file
OUTDIR out directory
Options:
--count NUM number of operations [default: 1]
"""
import docopt
try:
import voluptuous as v
except ImportError:
exit('This example assumes that `voluptuous` data-validation library\n'
'is installed: pip install voluptuous\n'
'https://github.com/alecthomas/voluptuous')
if __name__ == '__main__':
args = docopt.docopt(__doc__)
schema = v.Schema({
'FILE': v.isfile('FILE does not exist.'),
'OUTDIR': v.isdir('OUTDIR directory does not exist.'),
'--count': v.all(v.coerce(int, '--count should be integer.'),
v.clamp(min=1, max=5))})
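# clamp() keeps the coerced --count value within the range 1..5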
try:
args = schema(args)
except v.Invalid as ex:
exit('\n'.join(['error: ' + e.msg.split('.')[0] for e in ex.errors]))
print(args)
|
Add example that uses voluptuous.
|
Add example that uses voluptuous.
|
Python
|
mit
|
docopt/docopt,crcsmnky/docopt,snowsnail/docopt,wkentaro/docopt,devonjones/docopt,kenwilcox/docopt,benthomasson/docopt,Zearin/docopt,jagguli/docopt
|
ac70be9e200b34a4509b47d0db5ed65f3a8f072a
|
tests/integration/run_all_tests.py
|
tests/integration/run_all_tests.py
|
import nose
import os
from tests.integration import SUBSCRIPTION, ONDEMAND
testfiles = [file for file in os.listdir('.') if file.startswith("test") and file.endswith(".py")]
try:
for service in (SUBSCRIPTION, ONDEMAND):
for test in testfiles:
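# '-x' stops on the first failure; '--tc=<service>:' presumably comes from the nose-testconfig plugin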
result = nose.run(argv=['-x', '-v', '-s', '--tc={0}:'.format(service), test])
if not result:
raise RuntimeError("Test failed")
except RuntimeError as e:
print e
|
Add script for executing all tests in both services.
|
Add script for executing all tests in both services.
|
Python
|
apache-2.0
|
kemiz/tosca-vcloud-plugin,nmishkin/tosca-vcloud-plugin,denismakogon/tosca-vcloud-plugin,cloudify-cosmo/tosca-vcloud-plugin,vmware/tosca-vcloud-plugin
|
06df8e5729e95f74b974a8b38f6b16044fe2aa8a
|
py/find-the-difference.py
|
py/find-the-difference.py
|
from collections import Counter
class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
sc, tc = Counter(s), Counter(t)
for c, v in tc.iteritems():
if v != sc[c]:
return c
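# Counter.iteritems() is Python 2 only; use tc.items() on Python 3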
|
Add py solution for 389. Find the Difference
|
Add py solution for 389. Find the Difference
389. Find the Difference: https://leetcode.com/problems/find-the-difference/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
4574fe87c6efa5b1b9431546f787fcf30ad0d6b6
|
examples/training/train_parser.py
|
examples/training/train_parser.py
|
from __future__ import unicode_literals, print_function
import json
import pathlib
import random
import spacy
from spacy.pipeline import DependencyParser
from spacy.gold import GoldParse
from spacy.tokens import Doc
def train_parser(nlp, train_data, left_labels, right_labels):
parser = DependencyParser.blank(
nlp.vocab,
left_labels=left_labels,
right_labels=right_labels,
features=nlp.defaults.parser_features)
for itn in range(1000):
random.shuffle(train_data)
loss = 0
for words, heads, deps in train_data:
doc = nlp.make_doc(words)
gold = GoldParse(doc, heads=heads, deps=deps)
loss += parser.update(doc, gold)
parser.model.end_training()
return parser
def main(model_dir=None):
if model_dir is not None:
model_dir = pathlib.Path(model_dir)
if not model_dir.exists():
model_dir.mkdir()
assert model_dir.is_dir()
nlp = spacy.load('en', tagger=False, parser=False, entity=False, vectors=False)
nlp.make_doc = lambda words: Doc(nlp.vocab, zip(words, [True]*len(words)))
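# the (word, True) pairs presumably tell this older Doc API that each token is followed by a space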
train_data = [
(
['They', 'trade', 'mortgage', '-', 'backed', 'securities', '.'],
[1, 1, 4, 4, 5, 1, 1],
['nsubj', 'ROOT', 'compound', 'punct', 'nmod', 'dobj', 'punct']
),
(
['I', 'like', 'London', 'and', 'Berlin', '.'],
[1, 1, 1, 2, 2, 1],
['nsubj', 'ROOT', 'dobj', 'cc', 'conj', 'punct']
)
]
left_labels = set()
right_labels = set()
for _, heads, deps in train_data:
for i, (head, dep) in enumerate(zip(heads, deps)):
if i < head:
left_labels.add(dep)
elif i > head:
right_labels.add(dep)
parser = train_parser(nlp, train_data, sorted(left_labels), sorted(right_labels))
doc = nlp.make_doc(['I', 'like', 'securities', '.'])
with parser.step_through(doc) as state:
while not state.is_final:
action = state.predict()
state.transition(action)
#parser(doc)
for word in doc:
print(word.text, word.dep_, word.head.text)
if model_dir is not None:
with (model_dir / 'config.json').open('wb') as file_:
json.dump(parser.cfg, file_)
parser.model.dump(str(model_dir / 'model'))
if __name__ == '__main__':
main()
# I nsubj like
# like ROOT like
# securities dobj like
# . cc securities
|
Add example for training parser
|
Add example for training parser
|
Python
|
mit
|
raphael0202/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,banglakit/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,explosion/spaCy,raphael0202/spaCy,recognai/spaCy,aikramer2/spaCy,recognai/spaCy,oroszgy/spaCy.hu,honnibal/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,recognai/spaCy,banglakit/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,oroszgy/spaCy.hu,recognai/spaCy,spacy-io/spaCy,raphael0202/spaCy,honnibal/spaCy,explosion/spaCy,raphael0202/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,explosion/spaCy,banglakit/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,honnibal/spaCy,banglakit/spaCy,recognai/spaCy,explosion/spaCy,aikramer2/spaCy,raphael0202/spaCy,banglakit/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,banglakit/spaCy
|
39940ba7b5cfd2a62f5168a58efbd03eab1b8728
|
Examples/Infovis/Python/create_tree.py
|
Examples/Infovis/Python/create_tree.py
|
from vtk import *
graph = vtkMutableDirectedGraph()
a = graph.AddVertex()
b = graph.AddChild(a)
c = graph.AddChild(a)
d = graph.AddChild(b)
e = graph.AddChild(c)
f = graph.AddChild(c)
tree = vtkTree()
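# CheckedShallowCopy only succeeds if the graph structure is a valid tree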
tree.CheckedShallowCopy(graph)
view = vtkGraphLayoutView()
view.AddRepresentationFromInput(tree)
window = vtkRenderWindow()
window.SetSize(600, 600)
view.SetupRenderWindow(window)
window.GetInteractor().Start()
|
Add a Python example of creating a tree.
|
ENH: Add a Python example of creating a tree.
|
Python
|
bsd-3-clause
|
jeffbaumes/jeffbaumes-vtk,candy7393/VTK,candy7393/VTK,naucoin/VTKSlicerWidgets,biddisco/VTK,gram526/VTK,naucoin/VTKSlicerWidgets,sumedhasingla/VTK,sankhesh/VTK,msmolens/VTK,jeffbaumes/jeffbaumes-vtk,biddisco/VTK,johnkit/vtk-dev,SimVascular/VTK,keithroe/vtkoptix,sumedhasingla/VTK,mspark93/VTK,cjh1/VTK,spthaolt/VTK,spthaolt/VTK,gram526/VTK,johnkit/vtk-dev,ashray/VTK-EVM,johnkit/vtk-dev,SimVascular/VTK,candy7393/VTK,naucoin/VTKSlicerWidgets,berendkleinhaneveld/VTK,biddisco/VTK,berendkleinhaneveld/VTK,arnaudgelas/VTK,aashish24/VTK-old,Wuteyan/VTK,candy7393/VTK,msmolens/VTK,ashray/VTK-EVM,arnaudgelas/VTK,berendkleinhaneveld/VTK,aashish24/VTK-old,spthaolt/VTK,demarle/VTK,ashray/VTK-EVM,jmerkow/VTK,collects/VTK,berendkleinhaneveld/VTK,sankhesh/VTK,demarle/VTK,collects/VTK,mspark93/VTK,Wuteyan/VTK,demarle/VTK,collects/VTK,arnaudgelas/VTK,gram526/VTK,msmolens/VTK,msmolens/VTK,cjh1/VTK,sumedhasingla/VTK,jmerkow/VTK,daviddoria/PointGraphsPhase1,sumedhasingla/VTK,demarle/VTK,cjh1/VTK,SimVascular/VTK,keithroe/vtkoptix,berendkleinhaneveld/VTK,collects/VTK,keithroe/vtkoptix,gram526/VTK,gram526/VTK,Wuteyan/VTK,berendkleinhaneveld/VTK,daviddoria/PointGraphsPhase1,sumedhasingla/VTK,sankhesh/VTK,cjh1/VTK,hendradarwin/VTK,demarle/VTK,collects/VTK,sankhesh/VTK,sumedhasingla/VTK,jmerkow/VTK,cjh1/VTK,biddisco/VTK,spthaolt/VTK,aashish24/VTK-old,demarle/VTK,gram526/VTK,berendkleinhaneveld/VTK,msmolens/VTK,biddisco/VTK,sankhesh/VTK,spthaolt/VTK,SimVascular/VTK,naucoin/VTKSlicerWidgets,sankhesh/VTK,daviddoria/PointGraphsPhase1,johnkit/vtk-dev,jeffbaumes/jeffbaumes-vtk,sumedhasingla/VTK,aashish24/VTK-old,sumedhasingla/VTK,biddisco/VTK,hendradarwin/VTK,biddisco/VTK,SimVascular/VTK,daviddoria/PointGraphsPhase1,ashray/VTK-EVM,jeffbaumes/jeffbaumes-vtk,hendradarwin/VTK,mspark93/VTK,sankhesh/VTK,mspark93/VTK,SimVascular/VTK,spthaolt/VTK,msmolens/VTK,ashray/VTK-EVM,jmerkow/VTK,gram526/VTK,naucoin/VTKSlicerWidgets,mspark93/VTK,keithroe/vtkoptix,candy7393/VTK,sankhesh/VTK,candy7393/VTK,demarle/VTK,ashray/VTK-EVM,ashray/VTK-EVM,mspark93/VTK,Wuteyan/VTK,jmerkow/VTK,msmolens/VTK,jeffbaumes/jeffbaumes-vtk,aashish24/VTK-old,daviddoria/PointGraphsPhase1,SimVascular/VTK,msmolens/VTK,Wuteyan/VTK,mspark93/VTK,jeffbaumes/jeffbaumes-vtk,keithroe/vtkoptix,hendradarwin/VTK,arnaudgelas/VTK,hendradarwin/VTK,keithroe/vtkoptix,jmerkow/VTK,johnkit/vtk-dev,spthaolt/VTK,cjh1/VTK,arnaudgelas/VTK,keithroe/vtkoptix,naucoin/VTKSlicerWidgets,hendradarwin/VTK,gram526/VTK,ashray/VTK-EVM,candy7393/VTK,collects/VTK,mspark93/VTK,Wuteyan/VTK,aashish24/VTK-old,demarle/VTK,johnkit/vtk-dev,SimVascular/VTK,daviddoria/PointGraphsPhase1,jmerkow/VTK,jmerkow/VTK,arnaudgelas/VTK,candy7393/VTK,Wuteyan/VTK,johnkit/vtk-dev,keithroe/vtkoptix,hendradarwin/VTK
|
61f1c3c8492bbed80dc5e217708f4ebfb2201413
|
scripts/verify_schema.py
|
scripts/verify_schema.py
|
#!/usr/bin/env python
#
# verify_schema.py: simple LXML wrapper for checking XML against
# a RelaxNG schema.
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import sys
from lxml import etree as ET
if len(sys.argv) < 3:
print("Usage: {0} <RelaxNG schema file> <XML file>".format(sys.argv[0]))
sys.exit(1)
schema = sys.argv[1]
xml_source = sys.argv[2]
xml_tree = ET.parse(xml_source)
relaxng_xml = ET.parse(schema)
validator = ET.RelaxNG(relaxng_xml)
if not validator.validate(xml_tree):
print(validator.error_log)
print("File {0} does not match the schema!".format(xml_source))
sys.exit(1)
|
Add a simple schema validation script based on LXML.
|
Add a simple schema validation script based on LXML.
Some systems don't have rnv or any other ready to use
validators in repos/ports/whatever.
|
Python
|
lgpl-2.1
|
vyos-legacy/vyconfd,vyos-legacy/vyconfd
|
46fa53acba8b00c5f7e78d1502aab72838cbccb6
|
example-get-data-bulb.py
|
example-get-data-bulb.py
|
"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import pymystrom
bulb = pymystrom.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
# Get the details of the bulb
print("Current color:", bulb.get_color())
print("Brightness:", bulb.get_brightness())
print("Transition time:", bulb.get_transition_time())
print("Firmware version:", bulb.get_firmware())
|
Add simple bulb example (getting the details of a bulb)
|
Add simple bulb example (getting the details of a bulb)
|
Python
|
mit
|
fabaff/python-mystrom
|
Add simple bulb example (getting the details of a bulb)
|
"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import pymystrom
bulb = pymystrom.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
# Get the details of the bulb
print("Current color:", bulb.get_color())
print("Brightness:", bulb.get_brightness())
print("Transition time:", bulb.get_transition_time())
print("Firmware version:", bulb.get_firmware())
|
<commit_before><commit_msg>Add simple bulb example (getting the details of a bulb)<commit_after>
|
"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import pymystrom
bulb = pymystrom.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
# Get the details of the bulb
print("Current color:", bulb.get_color())
print("Brightness:", bulb.get_brightness())
print("Transition time:", bulb.get_transition_time())
print("Firmware version:", bulb.get_firmware())
|
Add simple bulb example (getting the details of a bulb)"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import pymystrom
bulb = pymystrom.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
# Get the details of the bulb
print("Current color:", bulb.get_color())
print("Brightness:", bulb.get_brightness())
print("Transition time:", bulb.get_transition_time())
print("Firmware version:", bulb.get_firmware())
|
<commit_before><commit_msg>Add simple bulb example (getting the details of a bulb)<commit_after>"""
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import pymystrom
bulb = pymystrom.MyStromBulb('192.168.0.51', '5CCF7FA0AFB0')
# Get the details of the bulb
print("Current color:", bulb.get_color())
print("Brightness:", bulb.get_brightness())
print("Transition time:", bulb.get_transition_time())
print("Firmware version:", bulb.get_firmware())
|
|
32a02c58a67813417eaa49c80edbdd77f8b2569f
|
py/minimum-time-difference.py
|
py/minimum-time-difference.py
|
class Solution(object):
def findMinDifference(self, timePoints):
"""
:type timePoints: List[str]
:rtype: int
"""
timePoints = map(lambda x:int(x.split(':')[0]) * 60 + int(x.split(':')[1]), timePoints)
MINUTES_IN_A_DAY = 24 * 60
timePoints.sort()
m = timePoints[0] + MINUTES_IN_A_DAY - timePoints[-1]
it1, it2 = iter(timePoints), iter(timePoints)
it2.next()
return min(m, min(t2 - t1 for (t1, t2) in zip(it1, it2)))
|
Add py solution for 539. Minimum Time Difference
|
Add py solution for 539. Minimum Time Difference
539. Minimum Time Difference: https://leetcode.com/problems/minimum-time-difference/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 539. Minimum Time Difference
539. Minimum Time Difference: https://leetcode.com/problems/minimum-time-difference/
|
class Solution(object):
def findMinDifference(self, timePoints):
"""
:type timePoints: List[str]
:rtype: int
"""
timePoints = map(lambda x:int(x.split(':')[0]) * 60 + int(x.split(':')[1]), timePoints)
MINUTES_IN_A_DAY = 24 * 60
timePoints.sort()
m = timePoints[0] + MINUTES_IN_A_DAY - timePoints[-1]
it1, it2 = iter(timePoints), iter(timePoints)
it2.next()
return min(m, min(t2 - t1 for (t1, t2) in zip(it1, it2)))
|
<commit_before><commit_msg>Add py solution for 539. Minimum Time Difference
539. Minimum Time Difference: https://leetcode.com/problems/minimum-time-difference/<commit_after>
|
class Solution(object):
def findMinDifference(self, timePoints):
"""
:type timePoints: List[str]
:rtype: int
"""
timePoints = map(lambda x:int(x.split(':')[0]) * 60 + int(x.split(':')[1]), timePoints)
MINUTES_IN_A_DAY = 24 * 60
timePoints.sort()
m = timePoints[0] + MINUTES_IN_A_DAY - timePoints[-1]
it1, it2 = iter(timePoints), iter(timePoints)
it2.next()
return min(m, min(t2 - t1 for (t1, t2) in zip(it1, it2)))
|
Add py solution for 539. Minimum Time Difference
539. Minimum Time Difference: https://leetcode.com/problems/minimum-time-difference/class Solution(object):
def findMinDifference(self, timePoints):
"""
:type timePoints: List[str]
:rtype: int
"""
timePoints = map(lambda x:int(x.split(':')[0]) * 60 + int(x.split(':')[1]), timePoints)
MINUTES_IN_A_DAY = 24 * 60
timePoints.sort()
m = timePoints[0] + MINUTES_IN_A_DAY - timePoints[-1]
it1, it2 = iter(timePoints), iter(timePoints)
it2.next()
return min(m, min(t2 - t1 for (t1, t2) in zip(it1, it2)))
|
<commit_before><commit_msg>Add py solution for 539. Minimum Time Difference
539. Minimum Time Difference: https://leetcode.com/problems/minimum-time-difference/<commit_after>class Solution(object):
def findMinDifference(self, timePoints):
"""
:type timePoints: List[str]
:rtype: int
"""
timePoints = map(lambda x:int(x.split(':')[0]) * 60 + int(x.split(':')[1]), timePoints)
MINUTES_IN_A_DAY = 24 * 60
timePoints.sort()
m = timePoints[0] + MINUTES_IN_A_DAY - timePoints[-1]
it1, it2 = iter(timePoints), iter(timePoints)
it2.next()
return min(m, min(t2 - t1 for (t1, t2) in zip(it1, it2)))
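The solution above is Python 2 code (it2.next(), a list-returning map). A hedged Python 3 port of the same sort-then-scan idea, for illustration only:
class Solution(object):
    def findMinDifference(self, timePoints):
        # "HH:MM" -> minutes since midnight, then sort
        minutes = sorted(int(t[:2]) * 60 + int(t[3:]) for t in timePoints)
        wraparound = minutes[0] + 24 * 60 - minutes[-1]  # gap across midnight
        return min(wraparound, min(b - a for a, b in zip(minutes, minutes[1:])))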
|
|
efc89a6858782da6e27f1f5abf32b4acc390e2ac
|
scraper.py
|
scraper.py
|
from bs4 import BeautifulSoup
import requests
soup = BeautifulSoup()
INSPECTION_DOMAIN = 'http://info.kingcounty.gov'
INSPECTION_PATH = '/health/ehs/foodsafety/inspections/Results.aspx'
INSPECTION_PARAMS = {
'Output': 'W',
'Business_Name': '',
'Business_Address': '',
'Longitude': '',
'Latitude': '',
'City': '',
'Zip_Code': '',
'Inspection_Type': 'All',
'Inspection_Start': '',
'Inspection_End': '',
'Inspection_Closed_Business': 'A',
'Violation_Points': '',
'Violation_Red_Points': '',
'Violation_Descr': '',
'Fuzzy_Search': 'N',
'Sort': 'H'
}
def get_inspection_page(**kwargs):
url = INSPECTION_DOMAIN + INSPECTION_PATH
params = INSPECTION_PARAMS.copy()
for key, value in kwargs.items():
if key in INSPECTION_PARAMS:
params[key] = value
resp = requests.get(url, params=params)
resp.raise_for_status()
return resp.content, resp.encoding
|
Add get_inspection_page method to scrape entire listing.
|
Add get_inspection_page method to scrape entire listing.
|
Python
|
mit
|
bm5w/souptests,bm5w/souptests
|
Add get_inspection_page method to scrape entire listing.
|
from bs4 import BeautifulSoup
import requests
soup = BeautifulSoup()
INSPECTION_DOMAIN = 'http://info.kingcounty.gov'
INSPECTION_PATH = '/health/ehs/foodsafety/inspections/Results.aspx'
INSPECTION_PARAMS = {
'Output': 'W',
'Business_Name': '',
'Business_Address': '',
'Longitude': '',
'Latitude': '',
'City': '',
'Zip_Code': '',
'Inspection_Type': 'All',
'Inspection_Start': '',
'Inspection_End': '',
'Inspection_Closed_Business': 'A',
'Violation_Points': '',
'Violation_Red_Points': '',
'Violation_Descr': '',
'Fuzzy_Search': 'N',
'Sort': 'H'
}
def get_inspection_page(**kwargs):
url = INSPECTION_DOMAIN + INSPECTION_PATH
params = INSPECTION_PARAMS.copy()
for key, value in kwargs.items():
if key in INSPECTION_PARAMS:
params[key] = value
resp = requests.get(url, params=params)
resp.raise_for_status()
return resp.content, resp.encoding
|
<commit_before><commit_msg>Add get_inspection_page method to scrape entire listing.<commit_after>
|
from bs4 import BeautifulSoup
import requests
soup = BeautifulSoup()
INSPECTION_DOMAIN = 'http://info.kingcounty.gov'
INSPECTION_PATH = '/health/ehs/foodsafety/inspections/Results.aspx'
INSPECTION_PARAMS = {
'Output': 'W',
'Business_Name': '',
'Business_Address': '',
'Longitude': '',
'Latitude': '',
'City': '',
'Zip_Code': '',
'Inspection_Type': 'All',
'Inspection_Start': '',
'Inspection_End': '',
'Inspection_Closed_Business': 'A',
'Violation_Points': '',
'Violation_Red_Points': '',
'Violation_Descr': '',
'Fuzzy_Search': 'N',
'Sort': 'H'
}
def get_inspection_page(**kwargs):
url = INSPECTION_DOMAIN + INSPECTION_PATH
params = INSPECTION_PARAMS.copy()
for key, value in kwargs.items():
if key in INSPECTION_PARAMS:
params[key] = value
resp = requests.get(url, params=params)
resp.raise_for_status()
return resp.content, resp.encoding
|
Add get_inspection_page method to scrape entire listing.from bs4 import BeautifulSoup
import requests
soup = BeautifulSoup()
INSPECTION_DOMAIN = 'http://info.kingcounty.gov'
INSPECTION_PATH = '/health/ehs/foodsafety/inspections/Results.aspx'
INSPECTION_PARAMS = {
'Output': 'W',
'Business_Name': '',
'Business_Address': '',
'Longitude': '',
'Latitude': '',
'City': '',
'Zip_Code': '',
'Inspection_Type': 'All',
'Inspection_Start': '',
'Inspection_End': '',
'Inspection_Closed_Business': 'A',
'Violation_Points': '',
'Violation_Red_Points': '',
'Violation_Descr': '',
'Fuzzy_Search': 'N',
'Sort': 'H'
}
def get_inspection_page(**kwargs):
url = INSPECTION_DOMAIN + INSPECTION_PATH
params = INSPECTION_PARAMS.copy()
for key, value in kwargs.items():
if key in INSPECTION_PARAMS:
params[key] = value
resp = requests.get(url, params=params)
resp.raise_for_status()
return resp.content, resp.encoding
|
<commit_before><commit_msg>Add get_inspection_page method to scrape entire listing.<commit_after>from bs4 import BeautifulSoup
import requests
soup = BeautifulSoup()
INSPECTION_DOMAIN = 'http://info.kingcounty.gov'
INSPECTION_PATH = '/health/ehs/foodsafety/inspections/Results.aspx'
INSPECTION_PARAMS = {
'Output': 'W',
'Business_Name': '',
'Business_Address': '',
'Longitude': '',
'Latitude': '',
'City': '',
'Zip_Code': '',
'Inspection_Type': 'All',
'Inspection_Start': '',
'Inspection_End': '',
'Inspection_Closed_Business': 'A',
'Violation_Points': '',
'Violation_Red_Points': '',
'Violation_Descr': '',
'Fuzzy_Search': 'N',
'Sort': 'H'
}
def get_inspection_page(**kwargs):
url = INSPECTION_DOMAIN + INSPECTION_PATH
params = INSPECTION_PARAMS.copy()
for key, value in kwargs.items():
if key in INSPECTION_PARAMS:
params[key] = value
resp = requests.get(url, params=params)
resp.raise_for_status()
return resp.content, resp.encoding
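A usage sketch for get_inspection_page; the query values are hypothetical and the date format is an assumption about the endpoint:
content, encoding = get_inspection_page(
    City='Seattle',               # hypothetical filter values
    Zip_Code='98101',
    Inspection_Start='1/1/2015',  # assumed MM/DD/YYYY format
    Inspection_End='12/31/2015',
)
listing = BeautifulSoup(content, 'html.parser')  # parse the returned listing page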
|
|
d4114f47b36d1ecbcf06f7bda2c0dd6c9a6a1b01
|
holidays/countries/malaysia.py
|
holidays/countries/malaysia.py
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, timedelta
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, SA, FR, MO
from holidays.constants import (
JAN,
FEB,
MAR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
from holidays.utils import get_gre_date
class Malaysia(HolidayBase):
STATES = [
"Johor",
"Kedah",
"Kelantan",
"Malacca",
"Negeri",
"Sembilan",
"Pahang",
"Perak",
"Perlis",
"Sabah",
"Sarawak",
"Selangor",
"Terengganu",
"Kuala Lumpur",
]
def __init__(self, **kwargs):
self.country = "MY"
HolidayBase.__init__(self, **kwargs)
class MY(Malaysia):
pass
class MYS(Malaysia):
pass
|
Add Malaysian states and ISO Codes
|
Add Malaysian states and ISO Codes
|
Python
|
mit
|
ryanss/holidays.py,dr-prodigy/python-holidays
|
Add Malaysian states and ISO Codes
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, timedelta
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, SA, FR, MO
from holidays.constants import (
JAN,
FEB,
MAR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
from holidays.utils import get_gre_date
class Malaysia(HolidayBase):
STATES = [
"Johor",
"Kedah",
"Kelantan",
"Malacca",
"Negeri",
"Sembilan",
"Pahang",
"Perak",
"Perlis",
"Sabah",
"Sarawak",
"Selangor",
"Terengganu",
"Kuala Lumpur",
]
def __init__(self, **kwargs):
self.country = "MY"
HolidayBase.__init__(self, **kwargs)
class MY(Malaysia):
pass
class MYS(Malaysia):
pass
|
<commit_before><commit_msg>Add Malaysian states and ISO Codes<commit_after>
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, timedelta
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, SA, FR, MO
from holidays.constants import (
JAN,
FEB,
MAR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
from holidays.utils import get_gre_date
class Malaysia(HolidayBase):
STATES = [
"Johor",
"Kedah",
"Kelantan",
"Malacca",
"Negeri",
"Sembilan",
"Pahang",
"Perak",
"Perlis",
"Sabah",
"Sarawak",
"Selangor",
"Terengganu",
"Kuala Lumpur",
]
def __init__(self, **kwargs):
self.country = "MY"
HolidayBase.__init__(self, **kwargs)
class MY(Malaysia):
pass
class MYS(Malaysia):
pass
|
Add Malaysian states and ISO Codes# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, timedelta
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, SA, FR, MO
from holidays.constants import (
JAN,
FEB,
MAR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
from holidays.utils import get_gre_date
class Malaysia(HolidayBase):
STATES = [
"Johor",
"Kedah",
"Kelantan",
"Malacca",
"Negeri",
"Sembilan",
"Pahang",
"Perak",
"Perlis",
"Sabah",
"Sarawak",
"Selangor",
"Terengganu",
"Kuala Lumpur",
]
def __init__(self, **kwargs):
self.country = "MY"
HolidayBase.__init__(self, **kwargs)
class MY(Malaysia):
pass
class MYS(Malaysia):
pass
|
<commit_before><commit_msg>Add Malaysian states and ISO Codes<commit_after># -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, timedelta
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, SA, FR, MO
from holidays.constants import (
JAN,
FEB,
MAR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
from holidays.utils import get_gre_date
class Malaysia(HolidayBase):
STATES = [
"Johor",
"Kedah",
"Kelantan",
"Malacca",
"Negeri",
"Sembilan",
"Pahang",
"Perak",
"Perlis",
"Sabah",
"Sarawak",
"Selangor",
"Terengganu",
"Kuala Lumpur",
]
def __init__(self, **kwargs):
self.country = "MY"
HolidayBase.__init__(self, **kwargs)
class MY(Malaysia):
pass
class MYS(Malaysia):
pass
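No _populate() is implemented yet, so instances carry no dates, but the scaffolding is already usable. A minimal sketch:
my = Malaysia()
print(my.country)            # 'MY'
print(len(Malaysia.STATES))  # number of entries in the STATES list above
assert MY().country == MYS().country == 'MY'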
|
|
145532e77d2bf10860df3dfb13ce0ef1a4e57772
|
spark/pca_preparation.py
|
spark/pca_preparation.py
|
import cPickle as pkl
import base64
import numpy as np
from lopq.model import eigenvalue_allocation
def main(args):
params = pkl.load(open(args.pca_params))
P = params['P']
E = params['E']
mu = params['mu']
# Reduce dimension
E = E[-args.D:]
P = P[:,-args.D:]
# Balance variance across halves
permuted_inds = eigenvalue_allocation(2, E)
P = P[:, permuted_inds]
# Save new params
pkl.dump({ 'P': P, 'mu': mu }, open(args.output, 'w'))
def apply_PCA(x, mu, P):
"""
Example of applying PCA.
"""
return np.dot(x - mu, P)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--pca_params', dest='pca_params', type=str, required=True, help='path to pickle file of PCA parameters')
parser.add_argument('--D', dest='D', type=int, default=128, help='desired final feature dimension')
parser.add_argument('--output', dest='output', type=str, required=True, help='path to pickle file of new PCA parameters')
args = parser.parse_args()
main(args)
|
Add script demonstrating preparing PCA parameters computed by train_pca.py
|
Add script demonstrating preparing PCA parameters computed by train_pca.py
|
Python
|
apache-2.0
|
yahoo/lopq,yahoo/lopq
|
Add script demonstrating preparing PCA parameters computed by train_pca.py
|
import cPickle as pkl
import base64
import numpy as np
from lopq.model import eigenvalue_allocation
def main(args):
params = pkl.load(open(args.pca_params))
P = params['P']
E = params['E']
mu = params['mu']
# Reduce dimension
E = E[-args.D:]
P = P[:,-args.D:]
# Balance variance across halves
permuted_inds = eigenvalue_allocation(2, E)
P = P[:, permuted_inds]
# Save new params
pkl.dump({ 'P': P, 'mu': mu }, open(args.output, 'w'))
def apply_PCA(x, mu, P):
"""
Example of applying PCA.
"""
return np.dot(x - mu, P)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--pca_params', dest='pca_params', type=str, required=True, help='path to pickle file of PCA parameters')
parser.add_argument('--D', dest='D', type=int, default=128, help='desired final feature dimension')
parser.add_argument('--output', dest='output', type=str, required=True, help='path to pickle file of new PCA parameters')
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add script demonstrating preparing PCA parameters computed by train_pca.py<commit_after>
|
import cPickle as pkl
import base64
import numpy as np
from lopq.model import eigenvalue_allocation
def main(args):
params = pkl.load(open(args.pca_params))
P = params['P']
E = params['E']
mu = params['mu']
# Reduce dimension
E = E[-args.D:]
P = P[:,-args.D:]
# Balance variance across halves
permuted_inds = eigenvalue_allocation(2, E)
P = P[:, permuted_inds]
# Save new params
pkl.dump({ 'P': P, 'mu': mu }, open(args.output, 'w'))
def apply_PCA(x, mu, P):
"""
Example of applying PCA.
"""
return np.dot(x - mu, P)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--pca_params', dest='pca_params', type=str, required=True, help='path to pickle file of PCA parameters')
parser.add_argument('--D', dest='D', type=int, default=128, help='desired final feature dimension')
parser.add_argument('--output', dest='output', type=str, required=True, help='path to pickle file of new PCA parameters')
args = parser.parse_args()
main(args)
|
Add script demonstrating preparing PCA parameters computed by train_pca.pyimport cPickle as pkl
import base64
import numpy as np
from lopq.model import eigenvalue_allocation
def main(args):
params = pkl.load(open(args.pca_params))
P = params['P']
E = params['E']
mu = params['mu']
# Reduce dimension
E = E[-args.D:]
P = P[:,-args.D:]
# Balance variance across halves
permuted_inds = eigenvalue_allocation(2, E)
P = P[:, permuted_inds]
# Save new params
pkl.dump({ 'P': P, 'mu': mu }, open(args.output, 'w'))
def apply_PCA(x, mu, P):
"""
Example of applying PCA.
"""
return np.dot(x - mu, P)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--pca_params', dest='pca_params', type=str, required=True, help='path to pickle file of PCA parameters')
parser.add_argument('--D', dest='D', type=int, default=128, help='desired final feature dimension')
parser.add_argument('--output', dest='output', type=str, required=True, help='path to pickle file of new PCA parameters')
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add script demonstrating preparing PCA parameters computed by train_pca.py<commit_after>import cPickle as pkl
import base64
import numpy as np
from lopq.model import eigenvalue_allocation
def main(args):
params = pkl.load(open(args.pca_params))
P = params['P']
E = params['E']
mu = params['mu']
# Reduce dimension
E = E[-args.D:]
P = P[:,-args.D:]
# Balance variance across halves
permuted_inds = eigenvalue_allocation(2, E)
P = P[:, permuted_inds]
# Save new params
pkl.dump({ 'P': P, 'mu': mu }, open(args.output, 'w'))
def apply_PCA(x, mu, P):
"""
Example of applying PCA.
"""
return np.dot(x - mu, P)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--pca_params', dest='pca_params', type=str, required=True, help='path to pickle file of PCA parameters')
parser.add_argument('--D', dest='D', type=int, default=128, help='desired final feature dimension')
parser.add_argument('--output', dest='output', type=str, required=True, help='path to pickle file of new PCA parameters')
args = parser.parse_args()
main(args)
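A quick sanity check of apply_PCA on synthetic data; the 512-to-128 shapes are illustrative, not taken from any particular pipeline:
X = np.random.randn(10, 512)                    # 10 raw feature vectors
mu = X.mean(axis=0)
P = np.linalg.qr(np.random.randn(512, 128))[0]  # orthonormal 512x128 projection
Y = apply_PCA(X, mu, P)
assert Y.shape == (10, 128)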
|
|
da547f5533f338cc0c65877b44ca40adf31754f7
|
support/appveyor-build.py
|
support/appveyor-build.py
|
#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
path = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
|
#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
os.environ['PATH'] = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
|
Fix mingw build, take 2
|
Fix mingw build, take 2
|
Python
|
bsd-2-clause
|
cppformat/cppformat,mojoBrendan/fmt,mojoBrendan/fmt,alabuzhev/fmt,nelson4722/cppformat,seungrye/cppformat,lightslife/cppformat,blaquee/cppformat,mojoBrendan/fmt,Jopie64/cppformat,lightslife/cppformat,Jopie64/cppformat,alabuzhev/fmt,wangshijin/cppformat,dean0x7d/cppformat,seungrye/cppformat,cppformat/cppformat,nelson4722/cppformat,cppformat/cppformat,dean0x7d/cppformat,Jopie64/cppformat,dean0x7d/cppformat,blaquee/cppformat,wangshijin/cppformat,blaquee/cppformat,alabuzhev/fmt,nelson4722/cppformat,wangshijin/cppformat,seungrye/cppformat,lightslife/cppformat
|
#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
path = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
Fix mingw build, take 2
|
#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
os.environ['PATH'] = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
|
<commit_before>#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
path = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
<commit_msg>Fix mingw build, take 2<commit_after>
|
#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
os.environ['PATH'] = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
|
#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
path = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
Fix mingw build, take 2#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
os.environ['PATH'] = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
|
<commit_before>#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
path = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
<commit_msg>Fix mingw build, take 2<commit_after>#!/usr/bin/env python
# Build the project on AppVeyor.
import os
from subprocess import check_call
build = os.environ['BUILD']
config = os.environ['CONFIG']
cmake_command = ['cmake', '-DFMT_EXTRA_TESTS=ON', '-DCMAKE_BUILD_TYPE=' + config]
if build == 'mingw':
cmake_command.append('-GMinGW Makefiles')
build_command = ['mingw32-make', '-j4']
test_command = ['mingw32-make', 'test']
# Remove the path to Git bin directory from $PATH because it breaks MinGW config.
os.environ['PATH'] = os.environ['PATH'].replace(r'C:\Program Files (x86)\Git\bin', '')
else:
build_command = ['msbuild', '/m:4', '/p:Config=' + config, 'FORMAT.sln']
test_command = ['msbuild', 'RUN_TESTS.vcxproj']
check_call(cmake_command)
check_call(build_command)
check_call(test_command)
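The one-line fix matters because check_call spawns children with the parent's current environment, and rebinding a local name never touches it. A minimal illustration:
import os
path = os.environ['PATH'] + ';C:\\extra'   # local rebinding: os.environ is unchanged
os.environ['PATH'] += ';C:\\extra'         # mutates what child processes inherit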
|
0c6f90ded8b7d5a209f648417510aadc0a017425
|
kovot/stream/mastodon.py
|
kovot/stream/mastodon.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections.abc
from mastodon import StreamListener, MastodonError, Mastodon as MastodonAPI
from queue import Queue
from kovot.response import Response
from logging import Logger
from typing import Iterator, Optional
__all__ = ['MastodonResponse', 'Mastodon']
_TOOT_LIMIT = 500
class MastodonResponse(Response):
def __init__(self, text, score,
in_reply_to_id: Optional[int]=None,
sensitive: bool=False,
visibility: Optional[str]=None,
spoiler_text: Optional[str]=None,
*args, **kwargs
):
super(MastodonResponse, self).__init__(text, score, *args, **kwargs)
self.in_reply_to_id = in_reply_to_id
self.sensitive = sensitive
self.visibility = visibility
self.spoiler_text = spoiler_text
def post(self, api: MastodonAPI) -> str:
return api.status_post(
self.text, in_reply_to_id=self.in_reply_to_id, sensitive=self.sensitive,
visibility=self.visibility, spoiler_text=self.spoiler_text)
class Mastodon(collections.abc.Iterable):
def __init__(
self,
logger: Logger,
client_id: str,
client_secret: str,
access_token: str,
api_base_url: str
):
self.logger = logger
self.api = MastodonAPI(
client_id,
client_secret,
access_token,
api_base_url
)
def __iter__(self) -> Iterator:
class Listener(collections.abc.Iterable, StreamListener):
def __init__(self):
self.queue = Queue()
def __iter__(self) -> Iterator:
while True:
yield self.queue.get()
def on_notification(self, notification) -> None:
if notification['type'] == 'mention':
self.queue.put(notification['status'])
listener = Listener()
self.api.stream_user(listener, run_async=True)
return iter(listener)
def post(self, response: MastodonResponse) -> bool:
self.logger.info("Trying to toot: " + response.text)
if len(response.text) > _TOOT_LIMIT:
self.logger.error('Length of given status has exceeded the limit: %d' % len(response.text))
return False
try:
result = response.post(self.api)
self.logger.info('Updated: ' + str(result))
except MastodonError:
            self.logger.error('An API error has occurred.')
return False
return True
|
Add a kovot stream for Mastodon
|
Add a kovot stream for Mastodon
|
Python
|
mit
|
kenkov/kovot,kenkov/kovot
|
Add a kovot stream for Mastodon
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections.abc
from mastodon import StreamListener, MastodonError, Mastodon as MastodonAPI
from queue import Queue
from kovot.response import Response
from logging import Logger
from typing import Iterator, Optional
__all__ = ['MastodonResponse', 'Mastodon']
_TOOT_LIMIT = 500
class MastodonResponse(Response):
def __init__(self, text, score,
in_reply_to_id: Optional[int]=None,
sensitive: bool=False,
visibility: Optional[str]=None,
spoiler_text: Optional[str]=None,
*args, **kwargs
):
super(MastodonResponse, self).__init__(text, score, *args, **kwargs)
self.in_reply_to_id = in_reply_to_id
self.sensitive = sensitive
self.visibility = visibility
self.spoiler_text = spoiler_text
def post(self, api: MastodonAPI) -> str:
return api.status_post(
self.text, in_reply_to_id=self.in_reply_to_id, sensitive=self.sensitive,
visibility=self.visibility, spoiler_text=self.spoiler_text)
class Mastodon(collections.abc.Iterable):
def __init__(
self,
logger: Logger,
client_id: str,
client_secret: str,
access_token: str,
api_base_url: str
):
self.logger = logger
self.api = MastodonAPI(
client_id,
client_secret,
access_token,
api_base_url
)
def __iter__(self) -> Iterator:
class Listener(collections.abc.Iterable, StreamListener):
def __init__(self):
self.queue = Queue()
def __iter__(self) -> Iterator:
while True:
yield self.queue.get()
def on_notification(self, notification) -> None:
if notification['type'] == 'mention':
self.queue.put(notification['status'])
listener = Listener()
self.api.stream_user(listener, run_async=True)
return iter(listener)
def post(self, response: MastodonResponse) -> bool:
self.logger.info("Trying to toot: " + response.text)
if len(response.text) > _TOOT_LIMIT:
self.logger.error('Length of given status has exceeded the limit: %d' % len(response.text))
return False
try:
result = response.post(self.api)
self.logger.info('Updated: ' + str(result))
except MastodonError:
            self.logger.error('An API error has occurred.')
return False
return True
|
<commit_before><commit_msg>Add a kovot stream for Mastodon<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections.abc
from mastodon import StreamListener, MastodonError, Mastodon as MastodonAPI
from queue import Queue
from kovot.response import Response
from logging import Logger
from typing import Iterator, Optional
__all__ = ['MastodonResponse', 'Mastodon']
_TOOT_LIMIT = 500
class MastodonResponse(Response):
def __init__(self, text, score,
in_reply_to_id: Optional[int]=None,
sensitive: bool=False,
visibility: Optional[str]=None,
spoiler_text: Optional[str]=None,
*args, **kwargs
):
super(MastodonResponse, self).__init__(text, score, *args, **kwargs)
self.in_reply_to_id = in_reply_to_id
self.sensitive = sensitive
self.visibility = visibility
self.spoiler_text = spoiler_text
def post(self, api: MastodonAPI) -> str:
return api.status_post(
self.text, in_reply_to_id=self.in_reply_to_id, sensitive=self.sensitive,
visibility=self.visibility, spoiler_text=self.spoiler_text)
class Mastodon(collections.abc.Iterable):
def __init__(
self,
logger: Logger,
client_id: str,
client_secret: str,
access_token: str,
api_base_url: str
):
self.logger = logger
self.api = MastodonAPI(
client_id,
client_secret,
access_token,
api_base_url
)
def __iter__(self) -> Iterator:
class Listener(collections.abc.Iterable, StreamListener):
def __init__(self):
self.queue = Queue()
def __iter__(self) -> Iterator:
while True:
yield self.queue.get()
def on_notification(self, notification) -> None:
if notification['type'] == 'mention':
self.queue.put(notification['status'])
listener = Listener()
self.api.stream_user(listener, run_async=True)
return iter(listener)
def post(self, response: MastodonResponse) -> bool:
self.logger.info("Trying to toot: " + response.text)
if len(response.text) > _TOOT_LIMIT:
self.logger.error('Length of given status has exceeded the limit: %d' % len(response.text))
return False
try:
result = response.post(self.api)
self.logger.info('Updated: ' + str(result))
except MastodonError:
            self.logger.error('An API error has occurred.')
return False
return True
|
Add a kovot stream for Mastodon#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections.abc
from mastodon import StreamListener, MastodonError, Mastodon as MastodonAPI
from queue import Queue
from kovot.response import Response
from logging import Logger
from typing import Iterator, Optional
__all__ = ['MastodonResponse', 'Mastodon']
_TOOT_LIMIT = 500
class MastodonResponse(Response):
def __init__(self, text, score,
in_reply_to_id: Optional[int]=None,
sensitive: bool=False,
visibility: Optional[str]=None,
spoiler_text: Optional[str]=None,
*args, **kwargs
):
super(MastodonResponse, self).__init__(text, score, *args, **kwargs)
self.in_reply_to_id = in_reply_to_id
self.sensitive = sensitive
self.visibility = visibility
self.spoiler_text = spoiler_text
def post(self, api: MastodonAPI) -> str:
return api.status_post(
self.text, in_reply_to_id=self.in_reply_to_id, sensitive=self.sensitive,
visibility=self.visibility, spoiler_text=self.spoiler_text)
class Mastodon(collections.abc.Iterable):
def __init__(
self,
logger: Logger,
client_id: str,
client_secret: str,
access_token: str,
api_base_url: str
):
self.logger = logger
self.api = MastodonAPI(
client_id,
client_secret,
access_token,
api_base_url
)
def __iter__(self) -> Iterator:
class Listener(collections.abc.Iterable, StreamListener):
def __init__(self):
self.queue = Queue()
def __iter__(self) -> Iterator:
while True:
yield self.queue.get()
def on_notification(self, notification) -> None:
if notification['type'] == 'mention':
self.queue.put(notification['status'])
listener = Listener()
self.api.stream_user(listener, run_async=True)
return iter(listener)
def post(self, response: MastodonResponse) -> bool:
self.logger.info("Trying to toot: " + response.text)
if len(response.text) > _TOOT_LIMIT:
self.logger.error('Length of given status has exceeded the limit: %d' % len(response.text))
return False
try:
result = response.post(self.api)
self.logger.info('Updated: ' + str(result))
except MastodonError:
            self.logger.error('An API error has occurred.')
return False
return True
|
<commit_before><commit_msg>Add a kovot stream for Mastodon<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections.abc
from mastodon import StreamListener, MastodonError, Mastodon as MastodonAPI
from queue import Queue
from kovot.response import Response
from logging import Logger
from typing import Iterator, Optional
__all__ = ['MastodonResponse', 'Mastodon']
_TOOT_LIMIT = 500
class MastodonResponse(Response):
def __init__(self, text, score,
in_reply_to_id: Optional[int]=None,
sensitive: bool=False,
visibility: Optional[str]=None,
spoiler_text: Optional[str]=None,
*args, **kwargs
):
super(MastodonResponse, self).__init__(text, score, *args, **kwargs)
self.in_reply_to_id = in_reply_to_id
self.sensitive = sensitive
self.visibility = visibility
self.spoiler_text = spoiler_text
def post(self, api: MastodonAPI) -> str:
return api.status_post(
self.text, in_reply_to_id=self.in_reply_to_id, sensitive=self.sensitive,
visibility=self.visibility, spoiler_text=self.spoiler_text)
class Mastodon(collections.abc.Iterable):
def __init__(
self,
logger: Logger,
client_id: str,
client_secret: str,
access_token: str,
api_base_url: str
):
self.logger = logger
self.api = MastodonAPI(
client_id,
client_secret,
access_token,
api_base_url
)
def __iter__(self) -> Iterator:
class Listener(collections.abc.Iterable, StreamListener):
def __init__(self):
self.queue = Queue()
def __iter__(self) -> Iterator:
while True:
yield self.queue.get()
def on_notification(self, notification) -> None:
if notification['type'] == 'mention':
self.queue.put(notification['status'])
listener = Listener()
self.api.stream_user(listener, run_async=True)
return iter(listener)
def post(self, response: MastodonResponse) -> bool:
self.logger.info("Trying to toot: " + response.text)
if len(response.text) > _TOOT_LIMIT:
self.logger.error('Length of given status has exceeded the limit: %d' % len(response.text))
return False
try:
result = response.post(self.api)
self.logger.info('Updated: ' + str(result))
except MastodonError:
            self.logger.error('An API error has occurred.')
return False
return True
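Wiring the pieces together: the credentials and instance URL below are placeholders, and the reply text is purely illustrative:
import logging
logging.basicConfig(level=logging.INFO)
stream = Mastodon(logging.getLogger('kovot'),
                  '<client_id>', '<client_secret>', '<access_token>',
                  'https://example.social')
for status in stream:  # blocks, yielding statuses that mention the bot
    reply = MastodonResponse('hello!', score=1.0, in_reply_to_id=status['id'])
    stream.post(reply)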
|
|
bc693c94fcc7ccd06e283c08012937c5732c0da3
|
sympy/__init__.py
|
sympy/__init__.py
|
__version__ = "0.4.2"
from sympy.core import *
from sympy.modules import *
|
__version__ = "0.5.0"
from sympy.core import *
from sympy.modules import *
|
Use the version 0.5.0 when we merge it.
|
Use the version 0.5.0 when we merge it.
|
Python
|
bsd-3-clause
|
yukoba/sympy,jamesblunt/sympy,garvitr/sympy,abhiii5459/sympy,shikil/sympy,madan96/sympy,amitjamadagni/sympy,VaibhavAgarwalVA/sympy,kaushik94/sympy,hrashk/sympy,asm666/sympy,AkademieOlympia/sympy,Designist/sympy,jaimahajan1997/sympy,sampadsaha5/sympy,jbaayen/sympy,sunny94/temp,garvitr/sympy,AkademieOlympia/sympy,Shaswat27/sympy,shikil/sympy,tovrstra/sympy,Mitchkoens/sympy,kaushik94/sympy,Curious72/sympy,VaibhavAgarwalVA/sympy,ga7g08/sympy,madan96/sympy,Titan-C/sympy,maniteja123/sympy,hrashk/sympy,diofant/diofant,drufat/sympy,mcdaniel67/sympy,kevalds51/sympy,Mitchkoens/sympy,yukoba/sympy,sampadsaha5/sympy,atsao72/sympy,jerli/sympy,Sumith1896/sympy,maniteja123/sympy,farhaanbukhsh/sympy,mafiya69/sympy,sunny94/temp,atreyv/sympy,bukzor/sympy,bukzor/sympy,kaichogami/sympy,pandeyadarsh/sympy,rahuldan/sympy,meghana1995/sympy,kevalds51/sympy,ahhda/sympy,amitjamadagni/sympy,vipulroxx/sympy,mcdaniel67/sympy,grevutiu-gabriel/sympy,Arafatk/sympy,shipci/sympy,Vishluck/sympy,sahmed95/sympy,cccfran/sympy,shipci/sympy,bukzor/sympy,MridulS/sympy,beni55/sympy,cswiercz/sympy,sahilshekhawat/sympy,sampadsaha5/sympy,Gadal/sympy,Shaswat27/sympy,lidavidm/sympy,sahmed95/sympy,mcdaniel67/sympy,moble/sympy,mattpap/sympy-polys,aktech/sympy,vipulroxx/sympy,wanglongqi/sympy,shipci/sympy,Curious72/sympy,wyom/sympy,lidavidm/sympy,saurabhjn76/sympy,Vishluck/sympy,dqnykamp/sympy,Mitchkoens/sympy,sahilshekhawat/sympy,Gadal/sympy,MechCoder/sympy,ahhda/sympy,oliverlee/sympy,pbrady/sympy,kmacinnis/sympy,kaushik94/sympy,toolforger/sympy,Sumith1896/sympy,souravsingh/sympy,farhaanbukhsh/sympy,kmacinnis/sympy,saurabhjn76/sympy,liangjiaxing/sympy,jerli/sympy,souravsingh/sympy,aktech/sympy,drufat/sympy,kaichogami/sympy,abloomston/sympy,atreyv/sympy,ChristinaZografou/sympy,rahuldan/sympy,atreyv/sympy,hargup/sympy,abhiii5459/sympy,asm666/sympy,wyom/sympy,oliverlee/sympy,meghana1995/sympy,MridulS/sympy,pandeyadarsh/sympy,atsao72/sympy,Designist/sympy,jerli/sympy,beni55/sympy,ChristinaZografou/sympy,iamutkarshtiwari/sympy,ga7g08/sympy,moble/sympy,AunShiLord/sympy,iamutkarshtiwari/sympy,Designist/sympy,lindsayad/sympy,Davidjohnwilson/sympy,Arafatk/sympy,lindsayad/sympy,jbbskinny/sympy,kumarkrishna/sympy,aktech/sympy,wanglongqi/sympy,saurabhjn76/sympy,iamutkarshtiwari/sympy,liangjiaxing/sympy,hrashk/sympy,Titan-C/sympy,lidavidm/sympy,postvakje/sympy,pandeyadarsh/sympy,kmacinnis/sympy,sahilshekhawat/sympy,kaichogami/sympy,ahhda/sympy,Vishluck/sympy,pernici/sympy,liangjiaxing/sympy,emon10005/sympy,farhaanbukhsh/sympy,minrk/sympy,hazelnusse/sympy-old,Gadal/sympy,madan96/sympy,abhiii5459/sympy,Sumith1896/sympy,cccfran/sympy,Shaswat27/sympy,cccfran/sympy,yashsharan/sympy,AunShiLord/sympy,jaimahajan1997/sympy,srjoglekar246/sympy,yashsharan/sympy,skirpichev/omg,toolforger/sympy,Davidjohnwilson/sympy,postvakje/sympy,Arafatk/sympy,chaffra/sympy,maniteja123/sympy,abloomston/sympy,sunny94/temp,garvitr/sympy,cswiercz/sympy,AunShiLord/sympy,skidzo/sympy,VaibhavAgarwalVA/sympy,pbrady/sympy,grevutiu-gabriel/sympy,grevutiu-gabriel/sympy,wyom/sympy,debugger22/sympy,skidzo/sympy,moble/sympy,chaffra/sympy,ga7g08/sympy,kumarkrishna/sympy,MechCoder/sympy,sahmed95/sympy,beni55/sympy,meghana1995/sympy,flacjacket/sympy,cswiercz/sympy,Curious72/sympy,skidzo/sympy,Titan-C/sympy,wanglongqi/sympy,yashsharan/sympy,dqnykamp/sympy,Davidjohnwilson/sympy,ChristinaZografou/sympy,souravsingh/sympy,shikil/sympy,MechCoder/sympy,mafiya69/sympy,asm666/sympy,abloomston/sympy,jbbskinny/sympy,mafiya69/sympy,toolforger/sympy,emon10005/sympy,jbbskinny/sympy,vipulroxx/sympy,atsao72/sym
py,pbrady/sympy,yukoba/sympy,debugger22/sympy,kevalds51/sympy,drufat/sympy,jamesblunt/sympy,fperez/sympy,MridulS/sympy,hargup/sympy,dqnykamp/sympy,emon10005/sympy,kumarkrishna/sympy,lindsayad/sympy,ryanGT/sympy,KevinGoodsell/sympy,minrk/sympy,AkademieOlympia/sympy,hargup/sympy,postvakje/sympy,debugger22/sympy,oliverlee/sympy,hazelnusse/sympy-old,jaimahajan1997/sympy,chaffra/sympy,jamesblunt/sympy,rahuldan/sympy
|
__version__ = "0.4.2"
from sympy.core import *
from sympy.modules import *
Use the version 0.5.0 when we merge it.
|
__version__ = "0.5.0"
from sympy.core import *
from sympy.modules import *
|
<commit_before>
__version__ = "0.4.2"
from sympy.core import *
from sympy.modules import *
<commit_msg>Use the version 0.5.0 when we merge it.<commit_after>
|
__version__ = "0.5.0"
from sympy.core import *
from sympy.modules import *
|
__version__ = "0.4.2"
from sympy.core import *
from sympy.modules import *
Use the version 0.5.0 when we merge it.
__version__ = "0.5.0"
from sympy.core import *
from sympy.modules import *
|
<commit_before>
__version__ = "0.4.2"
from sympy.core import *
from sympy.modules import *
<commit_msg>Use the version 0.5.0 when we merge it.<commit_after>
__version__ = "0.5.0"
from sympy.core import *
from sympy.modules import *
|
063d662561264888a53195a677c36e524a5b465b
|
test/test_gref.py
|
test/test_gref.py
|
import store_fixture
import groundstation.store
from groundstation.gref import Gref
class TestGitGref(store_fixture.StoreTestCase):
storeClass = groundstation.store.git_store.GitStore
def test_write_tip(self):
gref = Gref(self.repo, "testchannel", "test_write_tip")
gref.write_tip("foobarbaz", "")
self.assertEqual(list(gref), ["foobarbaz"])
|
Add test for gref writing
|
Add test for gref writing
|
Python
|
mit
|
richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation
|
Add test for gref writing
|
import store_fixture
import groundstation.store
from groundstation.gref import Gref
class TestGitGref(store_fixture.StoreTestCase):
storeClass = groundstation.store.git_store.GitStore
def test_write_tip(self):
gref = Gref(self.repo, "testchannel", "test_write_tip")
gref.write_tip("foobarbaz", "")
self.assertEqual(list(gref), ["foobarbaz"])
|
<commit_before><commit_msg>Add test for gref writing<commit_after>
|
import store_fixture
import groundstation.store
from groundstation.gref import Gref
class TestGitGref(store_fixture.StoreTestCase):
storeClass = groundstation.store.git_store.GitStore
def test_write_tip(self):
gref = Gref(self.repo, "testchannel", "test_write_tip")
gref.write_tip("foobarbaz", "")
self.assertEqual(list(gref), ["foobarbaz"])
|
Add test for gref writingimport store_fixture
import groundstation.store
from groundstation.gref import Gref
class TestGitGref(store_fixture.StoreTestCase):
storeClass = groundstation.store.git_store.GitStore
def test_write_tip(self):
gref = Gref(self.repo, "testchannel", "test_write_tip")
gref.write_tip("foobarbaz", "")
self.assertEqual(list(gref), ["foobarbaz"])
|
<commit_before><commit_msg>Add test for gref writing<commit_after>import store_fixture
import groundstation.store
from groundstation.gref import Gref
class TestGitGref(store_fixture.StoreTestCase):
storeClass = groundstation.store.git_store.GitStore
def test_write_tip(self):
gref = Gref(self.repo, "testchannel", "test_write_tip")
gref.write_tip("foobarbaz", "")
self.assertEqual(list(gref), ["foobarbaz"])
|
|
ff975c1bea7d0ea1e88e2b0667bc6e31676f66a7
|
osf/management/commands/vacuum.py
|
osf/management/commands/vacuum.py
|
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from django.apps import apps
from django.db import connection
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Runs VACUUM [ANALYZE] on tables.
Examples:
python manage.py vacuum --dry osf.OSFUser
python manage.py vacuum --analyze osf.OSFUser
python manage.py vacuum osf.OSFUser osf.Node
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('models', nargs='+', type=str)
parser.add_argument(
'--analyze',
action='store_true',
dest='analyze',
help='Whether to run VACUUM ANALYZE'
)
parser.add_argument(
'--dry',
action='store_true',
dest='dry',
help='If true, no SQL commands will be executed'
)
def handle(self, *args, **options):
analyze = options.get('analyze', False)
model_names = options.get('models', [])
dry = options.get('dry', False)
models = [
apps.get_model(each)
for each in model_names
]
table_names = [
each._meta.db_table
for each in models
]
statement_format = 'VACUUM ANALYZE {table};' if analyze else 'VACUUM {table};'
statements = [
statement_format.format(table=table)
for table in table_names
]
if dry:
for statement in statements:
logger.info('[DRY]: {}'.format(statement))
else:
with connection.cursor() as cursor:
for table in table_names:
statement = statement_format.format(table=table)
logger.info(statement)
cursor.execute(statement)
|
Add management command for running VACUUM
|
Add management command for running VACUUM [ANALYZE]
[skip ci]
|
Python
|
apache-2.0
|
saradbowman/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,felliott/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,adlius/osf.io,sloria/osf.io,mattclark/osf.io,adlius/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,binoculars/osf.io,cslzchen/osf.io,icereval/osf.io,felliott/osf.io,mfraezz/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,mattclark/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,sloria/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,icereval/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,caseyrollins/osf.io,mattclark/osf.io,icereval/osf.io,adlius/osf.io,binoculars/osf.io,felliott/osf.io,cslzchen/osf.io,erinspace/osf.io,erinspace/osf.io,erinspace/osf.io,saradbowman/osf.io,aaxelb/osf.io,adlius/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,sloria/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,binoculars/osf.io
|
Add management command for running VACUUM [ANALYZE]
[skip ci]
|
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from django.apps import apps
from django.db import connection
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Runs VACUUM [ANALYZE] on tables.
Examples:
python manage.py vacuum --dry osf.OSFUser
python manage.py vacuum --analyze osf.OSFUser
python manage.py vacuum osf.OSFUser osf.Node
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('models', nargs='+', type=str)
parser.add_argument(
'--analyze',
action='store_true',
dest='analyze',
help='Whether to run VACUUM ANALYZE'
)
parser.add_argument(
'--dry',
action='store_true',
dest='dry',
help='If true, no SQL commands will be executed'
)
def handle(self, *args, **options):
analyze = options.get('analyze', False)
model_names = options.get('models', [])
dry = options.get('dry', False)
models = [
apps.get_model(each)
for each in model_names
]
table_names = [
each._meta.db_table
for each in models
]
statement_format = 'VACUUM ANALYZE {table};' if analyze else 'VACUUM {table};'
statements = [
statement_format.format(table=table)
for table in table_names
]
if dry:
for statement in statements:
logger.info('[DRY]: {}'.format(statement))
else:
with connection.cursor() as cursor:
for table in table_names:
statement = statement_format.format(table=table)
logger.info(statement)
cursor.execute(statement)
|
<commit_before><commit_msg>Add management command for running VACUUM [ANALYZE]
[skip ci]<commit_after>
|
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from django.apps import apps
from django.db import connection
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Runs VACUUM [ANALYZE] on tables.
Examples:
python manage.py vacuum --dry osf.OSFUser
python manage.py vacuum --analyze osf.OSFUser
python manage.py vacuum osf.OSFUser osf.Node
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('models', nargs='+', type=str)
parser.add_argument(
'--analyze',
action='store_true',
dest='analyze',
help='Whether to run VACUUM ANALYZE'
)
parser.add_argument(
'--dry',
action='store_true',
dest='dry',
help='If true, no SQL commands will be executed'
)
def handle(self, *args, **options):
analyze = options.get('analyze', False)
model_names = options.get('models', [])
dry = options.get('dry', False)
models = [
apps.get_model(each)
for each in model_names
]
table_names = [
each._meta.db_table
for each in models
]
statement_format = 'VACUUM ANALYZE {table};' if analyze else 'VACUUM {table};'
statements = [
statement_format.format(table=table)
for table in table_names
]
if dry:
for statement in statements:
logger.info('[DRY]: {}'.format(statement))
else:
with connection.cursor() as cursor:
for table in table_names:
statement = statement_format.format(table=table)
logger.info(statement)
cursor.execute(statement)
|
Add management command for running VACUUM [ANALYZE]
[skip ci]from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from django.apps import apps
from django.db import connection
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Runs VACUUM [ANALYZE] on tables.
Examples:
python manage.py vacuum --dry osf.OSFUser
python manage.py vacuum --analyze osf.OSFUser
python manage.py vacuum osf.OSFUser osf.Node
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('models', nargs='+', type=str)
parser.add_argument(
'--analyze',
action='store_true',
dest='analyze',
help='Whether to run VACUUM ANALYZE'
)
parser.add_argument(
'--dry',
action='store_true',
dest='dry',
help='If true, no SQL commands will be executed'
)
def handle(self, *args, **options):
analyze = options.get('analyze', False)
model_names = options.get('models', [])
dry = options.get('dry', False)
models = [
apps.get_model(each)
for each in model_names
]
table_names = [
each._meta.db_table
for each in models
]
statement_format = 'VACUUM ANALYZE {table};' if analyze else 'VACUUM {table};'
statements = [
statement_format.format(table=table)
for table in table_names
]
if dry:
for statement in statements:
logger.info('[DRY]: {}'.format(statement))
else:
with connection.cursor() as cursor:
for table in table_names:
statement = statement_format.format(table=table)
logger.info(statement)
cursor.execute(statement)
|
<commit_before><commit_msg>Add management command for running VACUUM [ANALYZE]
[skip ci]<commit_after>from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand
from django.apps import apps
from django.db import connection
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""Runs VACUUM [ANALYZE] on tables.
Examples:
python manage.py vacuum --dry osf.OSFUser
python manage.py vacuum --analyze osf.OSFUser
python manage.py vacuum osf.OSFUser osf.Node
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('models', nargs='+', type=str)
parser.add_argument(
'--analyze',
action='store_true',
dest='analyze',
help='Whether to run VACUUM ANALYZE'
)
parser.add_argument(
'--dry',
action='store_true',
dest='dry',
help='If true, no SQL commands will be executed'
)
def handle(self, *args, **options):
analyze = options.get('analyze', False)
model_names = options.get('models', [])
dry = options.get('dry', False)
models = [
apps.get_model(each)
for each in model_names
]
table_names = [
each._meta.db_table
for each in models
]
statement_format = 'VACUUM ANALYZE {table};' if analyze else 'VACUUM {table};'
statements = [
statement_format.format(table=table)
for table in table_names
]
if dry:
for statement in statements:
logger.info('[DRY]: {}'.format(statement))
else:
with connection.cursor() as cursor:
for table in table_names:
statement = statement_format.format(table=table)
logger.info(statement)
cursor.execute(statement)
|
|
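A minimal sketch, not part of the commit above, of driving this management command programmatically through Django's standard call_command entry point; the model labels are the ones from the command's docstring. One practical caveat: PostgreSQL refuses to run VACUUM inside a transaction block, so a real (non-dry) run only works outside atomic()/transaction wrappers (Django's default autocommit mode is fine).
from django.core.management import call_command
# Dry run: logs the VACUUM statements without executing any SQL.
call_command('vacuum', 'osf.OSFUser', 'osf.Node', dry=True)
# Real run with ANALYZE; the connection must not be inside a transaction block.
call_command('vacuum', 'osf.OSFUser', analyze=True)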
07cdfe31046c717bade6f61b025792478238982c
|
code/tax2rank.py
|
code/tax2rank.py
|
"""
Convert all the taxonomy IDs to rank so that we are always talking
about the same thing
"""
import sys
import taxon
try:
rank = sys.argv[1]
except:
sys.exit(sys.argv[0] + " rank to use ... eg. species or genus")
taxa = taxon.readNodes()
with open("all_host_taxid.txt", 'r') as rin:
for l in rin:
p=l.strip().split("\t")
ori = p[1]
while taxa[p[1]].rank != rank and p[1] != '1':
p[1] = taxa[p[1]].parent
if p[1] == '1':
sys.exit("Did not find " + rank + " for " + ori)
print(p[0] + "\t" + p[1])
|
Convert all the taxonomy IDs to rank so that we are always talking about the same thing
|
Convert all the taxonomy IDs to rank so that we are always talking about the same thing
|
Python
|
mit
|
linsalrob/PhageHosts,linsalrob/PhageHosts,linsalrob/PhageHosts
|
Convert all the taxonomy IDs to rank so that we are always talking about the same thing
|
"""
Convert all the taxonomy IDs to rank so that we are always talking
about the same thing
"""
import sys
import taxon
try:
rank = sys.argv[1]
except:
sys.exit(sys.argv[0] + " rank to use ... eg. species or genus")
taxa = taxon.readNodes()
with open("all_host_taxid.txt", 'r') as rin:
for l in rin:
p=l.strip().split("\t")
ori = p[1]
while taxa[p[1]].rank != rank and p[1] != '1':
p[1] = taxa[p[1]].parent
if p[1] == '1':
sys.exit("Did not find " + rank + " for " + ori)
print(p[0] + "\t" + p[1])
|
<commit_before><commit_msg>Convert all the taxonomy IDs to rank so that we are always talking about the same thing<commit_after>
|
"""
Convert all the taxonomy IDs to rank so that we are always talking
about the same thing
"""
import sys
import taxon
try:
rank = sys.argv[1]
except:
sys.exit(sys.argv[0] + " rank to use ... eg. species or genus")
taxa = taxon.readNodes()
with open("all_host_taxid.txt", 'r') as rin:
for l in rin:
p=l.strip().split("\t")
ori = p[1]
while taxa[p[1]].rank != rank and p[1] != '1':
p[1] = taxa[p[1]].parent
if p[1] == '1':
sys.exit("Did not find " + rank + " for " + ori)
print(p[0] + "\t" + p[1])
|
Convert all the taxonomy IDs to rank so that we are always talking about the same thing"""
Convert all the taxonomy IDs to rank so that we are always talking
about the same thing
"""
import sys
import taxon
try:
rank = sys.argv[1]
except:
sys.exit(sys.argv[0] + " rank to use ... eg. species or genus")
taxa = taxon.readNodes()
with open("all_host_taxid.txt", 'r') as rin:
for l in rin:
p=l.strip().split("\t")
ori = p[1]
while taxa[p[1]].rank != rank and p[1] != '1':
p[1] = taxa[p[1]].parent
if p[1] == '1':
sys.exit("Did not find " + rank + " for " + ori)
print(p[0] + "\t" + p[1])
|
<commit_before><commit_msg>Convert all the taxonomy IDs to rank so that we are always talking about the same thing<commit_after>"""
Convert all the taxonomy IDs to rank so that we are always talking
about the same thing
"""
import sys
import taxon
try:
rank = sys.argv[1]
except:
sys.exit(sys.argv[0] + " rank to use ... eg. species or genus")
taxa = taxon.readNodes()
with open("all_host_taxid.txt", 'r') as rin:
for l in rin:
p=l.strip().split("\t")
ori = p[1]
while taxa[p[1]].rank != rank and p[1] != '1':
p[1] = taxa[p[1]].parent
if p[1] == '1':
sys.exit("Did not find " + rank + " for " + ori)
print(p[0] + "\t" + p[1])
|
|
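The taxon module used above is project-specific, so here is a self-contained sketch of the same climb-to-rank idea over a plain dict; the node table is a tiny excerpt loosely modeled on NCBI taxonomy IDs and is illustrative only.
from collections import namedtuple
Node = namedtuple('Node', ['parent', 'rank'])
nodes = {
    '1': Node('1', 'no rank'),       # root
    '2': Node('1', 'superkingdom'),  # Bacteria
    '543': Node('2', 'family'),      # Enterobacteriaceae
    '561': Node('543', 'genus'),     # Escherichia
    '562': Node('561', 'species'),   # Escherichia coli
}
def to_rank(taxid, rank):
    # Follow parent pointers upward until the requested rank or the root ('1').
    while nodes[taxid].rank != rank and taxid != '1':
        taxid = nodes[taxid].parent
    return None if taxid == '1' else taxid
assert to_rank('562', 'genus') == '561'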
17857a746484dd49bf5805f596e6efcd23035afb
|
my_own_exercises/web_server/flaskapp.py
|
my_own_exercises/web_server/flaskapp.py
|
from flask import Flask
from flask import Response
flask_app = Flask('flaskapp')
@flask_app.route('/hello')
def hello_world():
return Response(
'Hello world from Flask!\n',
mimetype='text/plain'
)
app = flask_app.wsgi_app
|
Add configuration for Flask 'hello world' app
|
Add configuration for Flask 'hello world' app
|
Python
|
apache-2.0
|
RagBillySandstone/google-python-exercises
|
Add configuration for Flask 'hello world' app
|
from flask import Flask
from flask import Response
flask_app = Flask('flaskapp')
@flask_app.route('/hello')
def hello_world():
return Response(
'Hello world from Flask!\n',
mimetype='text/plain'
)
app = flask_app.wsgi_app
|
<commit_before><commit_msg>Add configuration for Flask 'hello world' app<commit_after>
|
from flask import Flask
from flask import Response
flask_app = Flask('flaskapp')
@flask_app.route('/hello')
def hello_world():
return Response(
'Hello world from Flask!\n',
mimetype='text/plain'
)
app = flask_app.wsgi_app
|
Add configuration for Flask 'hello world' appfrom flask import Flask
from flask import Response
flask_app = Flask('flaskapp')
@flask_app.route('/hello')
def hello_world():
return Response(
'Hello world from Flask!\n',
mimetype='text/plain'
)
app = flask_app.wsgi_app
|
<commit_before><commit_msg>Add configuration for Flask 'hello world' app<commit_after>from flask import Flask
from flask import Response
flask_app = Flask('flaskapp')
@flask_app.route('/hello')
def hello_world():
return Response(
'Hello world from Flask!\n',
mimetype='text/plain'
)
app = flask_app.wsgi_app
|
|
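A quick way to exercise the exposed WSGI callable without standing up a real server is Flask's built-in test client; a sketch, assuming the module above is importable as flaskapp:
from flaskapp import flask_app
client = flask_app.test_client()
resp = client.get('/hello')
assert resp.status_code == 200
assert resp.data == b'Hello world from Flask!\n'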
4a33e88cfa33ce74a870986d6514fce49c58552a
|
create_sample.py
|
create_sample.py
|
# importing modules/ libraries
import pandas as pd
import random
# create sample of order products train data
n = 1384617
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__train_sample_df = pd.read_csv('Data/order_products__train.csv',
skiprows = skip)
order_products__train_sample_df.to_csv('Data/order_products__train_sample.csv',
index = False)
# create sample of order products prior data
n = 32434489
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__prior_sample_df = pd.read_csv('Data/order_products__prior.csv',
skiprows = skip)
order_products__prior_sample_df.to_csv('Data/order_products__prior_sample.csv',
index = False)
# create sample of orders data
n = 3421083
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_sample_df = pd.read_csv('Data/orders.csv',
skiprows = skip)
order_sample_df.to_csv('Data/orders_sample.csv',
index = False)
|
Add code for creating samples
|
feat: Add code for creating samples
|
Python
|
mit
|
rjegankumar/instacart_prediction_model
|
feat: Add code for creating samples
|
# importing modules/ libraries
import pandas as pd
import random
# create sample of order products train data
n = 1384617
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__train_sample_df = pd.read_csv('Data/order_products__train.csv',
skiprows = skip)
order_products__train_sample_df.to_csv('Data/order_products__train_sample.csv',
index = False)
# create sample of order products prior data
n = 32434489
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__prior_sample_df = pd.read_csv('Data/order_products__prior.csv',
skiprows = skip)
order_products__prior_sample_df.to_csv('Data/order_products__prior_sample.csv',
index = False)
# create sample of orders data
n = 3421083
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_sample_df = pd.read_csv('Data/orders.csv',
skiprows = skip)
order_sample_df.to_csv('Data/orders_sample.csv',
index = False)
|
<commit_before><commit_msg>feat: Add code for creating samples<commit_after>
|
# importing modules/ libraries
import pandas as pd
import random
# create sample of order products train data
n = 1384617
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__train_sample_df = pd.read_csv('Data/order_products__train.csv',
skiprows = skip)
order_products__train_sample_df.to_csv('Data/order_products__train_sample.csv',
index = False)
# create sample of order products prior data
n = 32434489
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__prior_sample_df = pd.read_csv('Data/order_products__prior.csv',
skiprows = skip)
order_products__prior_sample_df.to_csv('Data/order_products__prior_sample.csv',
index = False)
# create sample of orders data
n = 3421083
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_sample_df = pd.read_csv('Data/orders.csv',
skiprows = skip)
order_sample_df.to_csv('Data/orders_sample.csv',
index = False)
|
feat: Add code for creating samples# importing modules/ libraries
import pandas as pd
import random
# create sample of order products train data
n = 1384617
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__train_sample_df = pd.read_csv('Data/order_products__train.csv',
skiprows = skip)
order_products__train_sample_df.to_csv('Data/order_products__train_sample.csv',
index = False)
# create sample of order products prior data
n = 32434489
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__prior_sample_df = pd.read_csv('Data/order_products__prior.csv',
skiprows = skip)
order_products__prior_sample_df.to_csv('Data/order_products__prior_sample.csv',
index = False)
# create sample of orders data
n = 3421083
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_sample_df = pd.read_csv('Data/orders.csv',
skiprows = skip)
order_sample_df.to_csv('Data/orders_sample.csv',
index = False)
|
<commit_before><commit_msg>feat: Add code for creating samples<commit_after># importing modules/ libraries
import pandas as pd
import random
# create sample of order products train data
n = 1384617
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__train_sample_df = pd.read_csv('Data/order_products__train.csv',
skiprows = skip)
order_products__train_sample_df.to_csv('Data/order_products__train_sample.csv',
index = False)
# create sample of order products prior data
n = 32434489
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_products__prior_sample_df = pd.read_csv('Data/order_products__prior.csv',
skiprows = skip)
order_products__prior_sample_df.to_csv('Data/order_products__prior_sample.csv',
index = False)
# create sample of orders data
n = 3421083
s = round(0.1 * n)
skip = sorted(random.sample(range(1,n), n-s))
order_sample_df = pd.read_csv('Data/orders.csv',
skiprows = skip)
order_sample_df.to_csv('Data/orders_sample.csv',
index = False)
|
|
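The three blocks above repeat one pattern with different row counts and paths; a refactor into a helper might look like the following sketch (not part of the commit). Starting the skip range at 1 is what keeps row 0, the CSV header, out of the skip list.
import random
import pandas as pd
def sample_csv(path, out_path, n_rows, frac=0.1, seed=None):
    # Randomly skip (1 - frac) of the data rows; the header row is never skipped.
    keep = round(frac * n_rows)
    skip = sorted(random.Random(seed).sample(range(1, n_rows), n_rows - keep))
    pd.read_csv(path, skiprows=skip).to_csv(out_path, index=False)
sample_csv('Data/orders.csv', 'Data/orders_sample.csv', n_rows=3421083, seed=42)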
222830648ff174700a5e40d23afa0c721086f594
|
templates/kapsi-convert-csv.py
|
templates/kapsi-convert-csv.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script is used to convert email address and home directory location of
pyfiscan result files to Kapsi's format.
"""
try:
import sys
import csv
import time
import logging  # needed by csv_add's exception handler
from admsikteeri import *
except ImportError, e:
sys.exit(e)
home_location = '/mnt/users2/'
def lookup_member_email(alias):
"""Fetches member email using member Unix-account ID. Return email."""
print('Looking alias: %s' % alias)
id = lookup_alias(alias)
m = get_member_details(id)
return m.email
def removePrefix(str, prefix):
"""Removes prefix of str."""
return str[len(prefix):] if str.startswith(prefix) else str
def csv_add(member_email, timestamp, appname, version_file, file_version, secure_version, cve):
# ISO 8601 with hours:minutes:seconds
name_of_logfile = 'kapsi-vulnerabilities-' + time.strftime("%Y-%m-%d") + '.csv'
try:
writer = csv.writer(open(name_of_logfile, "a"), delimiter='|', quotechar='|')
logged_data = member_email, timestamp, appname, version_file, file_version, secure_version, cve
writer.writerow(logged_data)
except Exception, error:
logging.debug('Exception in csv_add: %s' % error)
def read_csv(csv_file):
"""Reads data in from CSV-file."""
with open(csv_file[0], 'rb') as f:
reader = csv.reader(f, delimiter='|', quotechar='|')
for row in reader:
# row two is version file location
version_file_stripped = removePrefix(str(row[2]), str(home_location))
version_file_realpath = "~" + version_file_stripped
alias = version_file_stripped.split('/')[0]
"""Data to new CSV"""
member_email = lookup_member_email(alias)
print('Returned: %s' % member_email)
member_email = alias + '@kapsi.fi,' + member_email
timestamp = row[0]
appname = row[1]
# version_file_realpath
file_version = row[3]
secure_version = row[4]
cve = row[5]
if member_email:
csv_add(member_email, timestamp, appname, version_file_realpath, file_version, secure_version, cve)
read_csv(sys.argv[1:])
|
Add Kapsi result file converter.
|
Add Kapsi result file converter.
|
Python
|
bsd-3-clause
|
cyberintruder/pyfiscan
|
Add Kapsi result file converter.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script is used to convert email address and home directory location of
pyfiscan result files to Kapsi's format.
"""
try:
import sys
import csv
import time
import logging  # needed by csv_add's exception handler
from admsikteeri import *
except ImportError, e:
sys.exit(e)
home_location = '/mnt/users2/'
def lookup_member_email(alias):
"""Fetches member email using member Unix-account ID. Return email."""
print('Looking alias: %s' % alias)
id = lookup_alias(alias)
m = get_member_details(id)
return m.email
def removePrefix(str, prefix):
"""Removes prefix of str."""
return str[len(prefix):] if str.startswith(prefix) else str
def csv_add(member_email, timestamp, appname, version_file, file_version, secure_version, cve):
# ISO 8601 with hours:minutes:seconds
name_of_logfile = 'kapsi-vulnerabilities-' + time.strftime("%Y-%m-%d") + '.csv'
try:
writer = csv.writer(open(name_of_logfile, "a"), delimiter='|', quotechar='|')
logged_data = member_email, timestamp, appname, version_file, file_version, secure_version, cve
writer.writerow(logged_data)
except Exception, error:
logging.debug('Exception in csv_add: %s' % error)
def read_csv(csv_file):
"""Reads data in from CSV-file."""
with open(csv_file[0], 'rb') as f:
reader = csv.reader(f, delimiter='|', quotechar='|')
for row in reader:
# row two is version file location
version_file_stripped = removePrefix(str(row[2]), str(home_location))
version_file_realpath = "~" + version_file_stripped
alias = version_file_stripped.split('/')[0]
"""Data to new CSV"""
member_email = lookup_member_email(alias)
print('Returned: %s' % member_email)
member_email = alias + '@kapsi.fi,' + member_email
timestamp = row[0]
appname = row[1]
# version_file_realpath
file_version = row[3]
secure_version = row[4]
cve = row[5]
if member_email:
csv_add(member_email, timestamp, appname, version_file_realpath, file_version, secure_version, cve)
read_csv(sys.argv[1:])
|
<commit_before><commit_msg>Add Kapsi result file converter.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script is used to convert email address and home directory location of
pyfiscan result files to Kapsi's format.
"""
try:
import sys
import csv
import time
import logging  # needed by csv_add's exception handler
from admsikteeri import *
except ImportError, e:
sys.exit(e)
home_location = '/mnt/users2/'
def lookup_member_email(alias):
"""Fetches member email using member Unix-account ID. Return email."""
print('Looking alias: %s' % alias)
id = lookup_alias(alias)
m = get_member_details(id)
return m.email
def removePrefix(str, prefix):
"""Removes prefix of str."""
return str[len(prefix):] if str.startswith(prefix) else str
def csv_add(member_email, timestamp, appname, version_file, file_version, secure_version, cve):
# ISO 8601 with hours:minutes:seconds
name_of_logfile = 'kapsi-vulnerabilities-' + time.strftime("%Y-%m-%d") + '.csv'
try:
writer = csv.writer(open(name_of_logfile, "a"), delimiter='|', quotechar='|')
logged_data = member_email, timestamp, appname, version_file, file_version, secure_version, cve
writer.writerow(logged_data)
except Exception, error:
logging.debug('Exception in csv_add: %s' % error)
def read_csv(csv_file):
"""Reads data in from CSV-file."""
with open(csv_file[0], 'rb') as f:
reader = csv.reader(f, delimiter='|', quotechar='|')
for row in reader:
# row two is version file location
version_file_stripped = removePrefix(str(row[2]), str(home_location))
version_file_realpath = "~" + version_file_stripped
alias = version_file_stripped.split('/')[0]
"""Data to new CSV"""
member_email = lookup_member_email(alias)
print('Returned: %s' % member_email)
member_email = alias + '@kapsi.fi,' + member_email
timestamp = row[0]
appname = row[1]
# version_file_realpath
file_version = row[3]
secure_version = row[4]
cve = row[5]
if member_email:
csv_add(member_email, timestamp, appname, version_file_realpath, file_version, secure_version, cve)
read_csv(sys.argv[1:])
|
Add Kapsi result file converter.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script is used to convert email address and home directory location of
pyfiscan result files to Kapsi's format.
"""
try:
import sys
import csv
import time
import logging  # needed by csv_add's exception handler
from admsikteeri import *
except ImportError, e:
sys.exit(e)
home_location = '/mnt/users2/'
def lookup_member_email(alias):
"""Fetches member email using member Unix-account ID. Return email."""
print('Looking alias: %s' % alias)
id = lookup_alias(alias)
m = get_member_details(id)
return m.email
def removePrefix(str, prefix):
"""Removes prefix of str."""
return str[len(prefix):] if str.startswith(prefix) else str
def csv_add(member_email, timestamp, appname, version_file, file_version, secure_version, cve):
# ISO 8601 with hours:minutes:seconds
name_of_logfile = 'kapsi-vulnerabilities-' + time.strftime("%Y-%m-%d") + '.csv'
try:
writer = csv.writer(open(name_of_logfile, "a"), delimiter='|', quotechar='|')
logged_data = member_email, timestamp, appname, version_file, file_version, secure_version, cve
writer.writerow(logged_data)
except Exception, error:
logging.debug('Exception in csv_add: %s' % error)
def read_csv(csv_file):
"""Reads data in from CSV-file."""
with open(csv_file[0], 'rb') as f:
reader = csv.reader(f, delimiter='|', quotechar='|')
for row in reader:
# row two is version file location
version_file_stripped = removePrefix(str(row[2]), str(home_location))
version_file_realpath = "~" + version_file_stripped
alias = version_file_stripped.split('/')[0]
"""Data to new CSV"""
member_email = lookup_member_email(alias)
print('Returned: %s' % member_email)
member_email = alias + '@kapsi.fi,' + member_email
timestamp = row[0]
appname = row[1]
# version_file_realpath
file_version = row[3]
secure_version = row[4]
cve = row[5]
if member_email:
csv_add(member_email, timestamp, appname, version_file_realpath, file_version, secure_version, cve)
read_csv(sys.argv[1:])
|
<commit_before><commit_msg>Add Kapsi result file converter.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script is used to convert email address and home directory location of
pyfiscan result files to Kapsi's format.
"""
try:
import sys
import csv
import time
import logging  # needed by csv_add's exception handler
from admsikteeri import *
except ImportError, e:
sys.exit(e)
home_location = '/mnt/users2/'
def lookup_member_email(alias):
"""Fetches member email using member Unix-account ID. Return email."""
print('Looking alias: %s' % alias)
id = lookup_alias(alias)
m = get_member_details(id)
return m.email
def removePrefix(str, prefix):
"""Removes prefix of str."""
return str[len(prefix):] if str.startswith(prefix) else str
def csv_add(member_email, timestamp, appname, version_file, file_version, secure_version, cve):
# ISO 8601 with hours:minutes:seconds
name_of_logfile = 'kapsi-vulnerabilities-' + time.strftime("%Y-%m-%d") + '.csv'
try:
writer = csv.writer(open(name_of_logfile, "a"), delimiter='|', quotechar='|')
logged_data = member_email, timestamp, appname, version_file, file_version, secure_version, cve
writer.writerow(logged_data)
except Exception, error:
logging.debug('Exception in csv_add: %s' % error)
def read_csv(csv_file):
"""Reads data in from CSV-file."""
with open(csv_file[0], 'rb') as f:
reader = csv.reader(f, delimiter='|', quotechar='|')
for row in reader:
# row two is version file location
version_file_stripped = removePrefix(str(row[2]), str(home_location))
version_file_realpath = "~" + version_file_stripped
alias = version_file_stripped.split('/')[0]
"""Data to new CSV"""
member_email = lookup_member_email(alias)
print('Returned: %s' % member_email)
member_email = alias + '@kapsi.fi,' + member_email
timestamp = row[0]
appname = row[1]
# version_file_realpath
file_version = row[3]
secure_version = row[4]
cve = row[5]
if member_email:
csv_add(member_email, timestamp, appname, version_file_realpath, file_version, secure_version, cve)
read_csv(sys.argv[1:])
|
|
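Two side notes on the converter above, with a small sketch: removePrefix shadows the built-in name str in its parameter list, and later Python versions (3.9+) added an equivalent str.removeprefix. The path value below is hypothetical.
home_location = '/mnt/users2/'
def strip_prefix(s, prefix):
    # Same behaviour as the record's removePrefix, without shadowing str.
    return s[len(prefix):] if s.startswith(prefix) else s
version_file = '/mnt/users2/alice/public_html/wp-includes/version.php'
stripped = strip_prefix(version_file, home_location)
assert ('~' + stripped).startswith('~alice/')
assert stripped.split('/')[0] == 'alice'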
7ed869d5713e936ff1b71dab62ee3599ca23c884
|
egopowerflow/tools/pypsa_io.py
|
egopowerflow/tools/pypsa_io.py
|
import pypsa
import pandas as pd
from sqlalchemy.orm import sessionmaker, load_only
from sqlalchemy import create_engine
from pypsa import io
from oemof import db
from egoio.db_tables.calc_ego_mv_powerflow import Bus, Line, Generator, Load, \
Transformer, TempResolution, BusVMagSet, GeneratorPqSet, LoadPqSet
def oedb_session():
"""Get SQLAlchemy session object with valid connection to OEDB"""
# get session object by oemof.db tools (requires .oemof/config.ini)
try:
conn = db.connection(section='oedb')
except:
print('Please provide connection parameters to database:')
host = input('host (default 127.0.0.1): ') or '127.0.0.1'
port = input('port (default 5432): ') or '5432'
user = input('user (default postgres): ') or 'postgres'
database = input('database name: ')
password = input('password: ')
conn = create_engine(
'postgresql://' + '%s:%s@%s:%s/%s' % (user,
password,
host,
port,
database))
Session = sessionmaker(bind=conn)
session = Session()
return session
def init_pypsa_network(time_range_lim):
"""
Instantiate PyPSA network
Parameters
----------
time_range_lim:
Returns
-------
network: PyPSA network object
Contains powerflow problem
snapshots: iterable
Contains snapshots to be analyzed by powerflow calculation
"""
network = pypsa.Network()
network.set_snapshots(time_range_lim)
snapshots = network.snapshots
return network, snapshots
def get_pq_sets(session, table, columns=None, index_col=None, slicer=None):
"""
Parameters
----------
session: SQLAlchemy session object
table: SQLAlchemy orm table object
Specified pq-sets table
columns: list of strings
Columns to be selected from pq-sets table (default None)
index_col: string
Column to set index on (default None)
slicer: list of ints
Slices array of time-dependent p/q-values to apply in PF (default None)
Returns
-------
pq_set: pandas DataFrame
Table with pq-Values to be applied in PF analysis
"""
# retrieve table
if columns is not None:
pq_query = session.query(table).options(load_only(*columns))
else:
pq_query = session.query(table)
pq_set = pd.read_sql_query(pq_query.statement,
session.bind,
index_col=index_col)
# slice relevant part by given slicer
#TODO: implement slicing of p,q-array
return pq_set
if __name__ == '__main__':
session = oedb_session()
gen_cols = ['temp_id', 'p_set', 'q_set']
gen_pq_set = get_pq_sets(session, GeneratorPqSet, index_col='generator_id',
columns=gen_cols)
print(gen_pq_set)
|
Add pypsa-db-io file with first functions
|
Add pypsa-db-io file with first functions
|
Python
|
agpl-3.0
|
openego/ego.powerflow
|
Add pypsa-db-io file with first functions
|
import pypsa
import pandas as pd
from sqlalchemy.orm import sessionmaker, load_only
from sqlalchemy import create_engine
from pypsa import io
from oemof import db
from egoio.db_tables.calc_ego_mv_powerflow import Bus, Line, Generator, Load, \
Transformer, TempResolution, BusVMagSet, GeneratorPqSet, LoadPqSet
def oedb_session():
"""Get SQLAlchemy session object with valid connection to OEDB"""
# get session object by oemof.db tools (requires .oemof/config.ini)
try:
conn = db.connection(section='oedb')
except:
print('Please provide connection parameters to database:')
host = input('host (default 127.0.0.1): ') or '127.0.0.1'
port = input('port (default 5432): ') or '5432'
user = input('user (default postgres): ') or 'postgres'
database = input('database name: ')
password = input('password: ')
conn = create_engine(
'postgresql://' + '%s:%s@%s:%s/%s' % (user,
password,
host,
port,
database))
Session = sessionmaker(bind=conn)
session = Session()
return session
def init_pypsa_network(time_range_lim):
"""
Instantiate PyPSA network
Parameters
----------
time_range_lim:
Returns
-------
network: PyPSA network object
Contains powerflow problem
snapshots: iterable
Contains snapshots to be analyzed by powerflow calculation
"""
network = pypsa.Network()
network.set_snapshots(time_range_lim)
snapshots = network.snapshots
return network, snapshots
def get_pq_sets(session, table, columns=None, index_col=None, slicer=None):
"""
Parameters
----------
session: SQLAlchemy session object
table: SQLAlchemy orm table object
Specified pq-sets table
columns: list of strings
Columns to be selected from pq-sets table (default None)
index_col: string
Column to set index on (default None)
slicer: list of ints
Slices array of time-dependent p/q-values to apply in PF (default None)
Returns
-------
pq_set: pandas DataFrame
Table with pq-Values to be applied in PF analysis
"""
# retrieve table
if columns is not None:
pq_query = session.query(table).options(load_only(*columns))
else:
pq_query = session.query(table)
pq_set = pd.read_sql_query(pq_query.statement,
session.bind,
index_col=index_col)
# slice relevant part by given slicer
#TODO: implement slicing of p,q-array
return pq_set
if __name__ == '__main__':
session = oedb_session()
gen_cols = ['temp_id', 'p_set', 'q_set']
gen_pq_set = get_pq_sets(session, GeneratorPqSet, index_col='generator_id',
columns=gen_cols)
print(gen_pq_set)
|
<commit_before><commit_msg>Add pypsa-db-io file with first functions<commit_after>
|
import pypsa
import pandas as pd
from sqlalchemy.orm import sessionmaker, load_only
from sqlalchemy import create_engine
from pypsa import io
from oemof import db
from egoio.db_tables.calc_ego_mv_powerflow import Bus, Line, Generator, Load, \
Transformer, TempResolution, BusVMagSet, GeneratorPqSet, LoadPqSet
def oedb_session():
"""Get SQLAlchemy session object with valid connection to OEDB"""
# get session object by oemof.db tools (requires .oemof/config.ini)
try:
conn = db.connection(section='oedb')
except:
print('Please provide connection parameters to database:')
host = input('host (default 127.0.0.1): ') or '127.0.0.1'
port = input('port (default 5432): ') or '5432'
user = input('user (default postgres): ') or 'postgres'
database = input('database name: ')
password = input('password: ')
conn = create_engine(
'postgresql://' + '%s:%s@%s:%s/%s' % (user,
password,
host,
port,
database))
Session = sessionmaker(bind=conn)
session = Session()
return session
def init_pypsa_network(time_range_lim):
"""
Instantiate PyPSA network
Parameters
----------
time_range_lim:
Returns
-------
network: PyPSA network object
Contains powerflow problem
snapshots: iterable
Contains snapshots to be analyzed by powerflow calculation
"""
network = pypsa.Network()
network.set_snapshots(time_range_lim)
snapshots = network.snapshots
return network, snapshots
def get_pq_sets(session, table, columns=None, index_col=None, slicer=None):
"""
Parameters
----------
session: SQLAlchemy session object
table: SQLAlchemy orm table object
Specified pq-sets table
columns: list of strings
Columns to be selected from pq-sets table (default None)
index_col: string
Column to set index on (default None)
slicer: list of ints
Slices array of time-dependent p/q-values to apply in PF (default None)
Returns
-------
pq_set: pandas DataFrame
Table with pq-Values to be applied in PF analysis
"""
# retrieve table
if columns is not None:
pq_query = session.query(table).options(load_only(*columns))
else:
pq_query = session.query(table)
pq_set = pd.read_sql_query(pq_query.statement,
session.bind,
index_col=index_col)
# slice relevant part by given slicer
#TODO: implement slicing of p,q-array
return pq_set
if __name__ == '__main__':
session = oedb_session()
gen_cols = ['temp_id', 'p_set', 'q_set']
gen_pq_set = get_pq_sets(session, GeneratorPqSet, index_col='generator_id',
columns=gen_cols)
print(gen_pq_set)
|
Add pypsa-db-io file with first functionsimport pypsa
import pandas as pd
from sqlalchemy.orm import sessionmaker, load_only
from sqlalchemy import create_engine
from pypsa import io
from oemof import db
from egoio.db_tables.calc_ego_mv_powerflow import Bus, Line, Generator, Load, \
Transformer, TempResolution, BusVMagSet, GeneratorPqSet, LoadPqSet
def oedb_session():
"""Get SQLAlchemy session object with valid connection to OEDB"""
# get session object by oemof.db tools (requires .oemof/config.ini)
try:
conn = db.connection(section='oedb')
except:
print('Please provide connection parameters to database:')
host = input('host (default 127.0.0.1): ') or '127.0.0.1'
port = input('port (default 5432): ') or '5432'
user = input('user (default postgres): ') or 'postgres'
database = input('database name: ')
password = input('password: ')
conn = create_engine(
'postgresql://' + '%s:%s@%s:%s/%s' % (user,
password,
host,
port,
database))
Session = sessionmaker(bind=conn)
session = Session()
return session
def init_pypsa_network(time_range_lim):
"""
Instantiate PyPSA network
Parameters
----------
time_range_lim:
Returns
-------
network: PyPSA network object
Contains powerflow problem
snapshots: iterable
Contains snapshots to be analyzed by powerflow calculation
"""
network = pypsa.Network()
network.set_snapshots(time_range_lim)
snapshots = network.snapshots
return network, snapshots
def get_pq_sets(session, table, columns=None, index_col=None, slicer=None):
"""
Parameters
----------
session: SQLAlchemy session object
table: SQLAlchemy orm table object
Specified pq-sets table
columns: list of strings
Columns to be selected from pq-sets table (default None)
index_col: string
Column to set index on (default None)
slicer: list of ints
Slices array of time-dependent p/q-values to apply in PF (default None)
Returns
-------
pq_set: pandas DataFrame
Table with pq-Values to be applied in PF analysis
"""
# retrieve table
if columns is not None:
pq_query = session.query(table).options(load_only(*columns))
else:
pq_query = session.query(table)
pq_set = pd.read_sql_query(pq_query.statement,
session.bind,
index_col=index_col)
# slice relevant part by given slicer
#TODO: implement slicing of p,q-array
return pq_set
if __name__ == '__main__':
session = oedb_session()
gen_cols = ['temp_id', 'p_set', 'q_set']
gen_pq_set = get_pq_sets(session, GeneratorPqSet, index_col='generator_id',
columns=gen_cols)
print(gen_pq_set)
|
<commit_before><commit_msg>Add pypsa-db-io file with first functions<commit_after>import pypsa
import pandas as pd
from sqlalchemy.orm import sessionmaker, load_only
from sqlalchemy import create_engine
from pypsa import io
from oemof import db
from egoio.db_tables.calc_ego_mv_powerflow import Bus, Line, Generator, Load, \
Transformer, TempResolution, BusVMagSet, GeneratorPqSet, LoadPqSet
def oedb_session():
"""Get SQLAlchemy session object with valid connection to OEDB"""
# get session object by oemof.db tools (requires .oemof/config.ini)
try:
conn = db.connection(section='oedb')
except:
print('Please provide connection parameters to database:')
host = input('host (default 127.0.0.1): ') or '127.0.0.1'
port = input('port (default 5432): ') or '5432'
user = input('user (default postgres): ') or 'postgres'
database = input('database name: ')
password = input('password: ')
conn = create_engine(
'postgresql://' + '%s:%s@%s:%s/%s' % (user,
password,
host,
port,
database))
Session = sessionmaker(bind=conn)
session = Session()
return session
def init_pypsa_network(time_range_lim):
"""
Instantiate PyPSA network
Parameters
----------
time_range_lim:
Returns
-------
network: PyPSA network object
Contains powerflow problem
snapshots: iterable
Contains snapshots to be analyzed by powerflow calculation
"""
network = pypsa.Network()
network.set_snapshots(time_range_lim)
snapshots = network.snapshots
return network, snapshots
def get_pq_sets(session, table, columns=None, index_col=None, slicer=None):
"""
Parameters
----------
session: SQLAlchemy session object
table: SQLAlchemy orm table object
Specified pq-sets table
columns: list of strings
Columns to be selected from pq-sets table (default None)
index_col: string
Column to set index on (default None)
slicer: list of ints
Slices array of time-dependent p/q-values to apply in PF (default None)
Returns
-------
pq_set: pandas DataFrame
Table with pq-Values to be applied in PF analysis
"""
# retrieve table
if columns is not None:
pq_query = session.query(table).options(load_only(*columns))
else:
pq_query = session.query(table)
pq_set = pd.read_sql_query(pq_query.statement,
session.bind,
index_col=index_col)
# slice relevant part by given slicer
#TODO: implement slicing of p,q-array
return pq_set
if __name__ == '__main__':
session = oedb_session()
gen_cols = ['temp_id', 'p_set', 'q_set']
gen_pq_set = get_pq_sets(session, GeneratorPqSet, index_col='generator_id',
columns=gen_cols)
print(gen_pq_set)
|
|
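The TODO in get_pq_sets leaves the slicer parameter unused. One plausible reading, given that each p_set/q_set row holds a whole time series as an array, is to cut every array down to the requested snapshot positions; the sketch below is that interpretation only, not the project's actual implementation.
def slice_pq_columns(pq_set, slicer, columns=('p_set', 'q_set')):
    # Keep only the time steps listed in `slicer` in each array-valued cell.
    for col in columns:
        if col in pq_set.columns:
            pq_set[col] = pq_set[col].apply(lambda arr: [arr[i] for i in slicer])
    return pq_set
# e.g. restrict the pq-sets to the first three snapshots:
# gen_pq_set = slice_pq_columns(gen_pq_set, slicer=[0, 1, 2])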
40149ad648fa84f4597fcb3f3182b83ee62de035
|
tests/test_fuzzy_completion.py
|
tests/test_fuzzy_completion.py
|
from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
@pytest.fixture
def completer():
import pgcli.pgcompleter as pgcompleter
return pgcompleter.PGCompleter()
def test_ranking_ignores_identifier_quotes(completer):
"""When calculating result rank, identifier quotes should be ignored.
The result ranking algorithm ignores identifier quotes. Without this
correction, the match "user", which Postgres requires to be quoted
since it is also a reserved word, would incorrectly fall below the
match user_action because the literal quotation marks in "user"
alter the position of the match.
This test checks that the fuzzy ranking algorithm correctly ignores
quotation marks when computing match ranks.
"""
text = 'user'
collection = ['user_action', '"user"']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['"user"', 'user_action']
def test_ranking_based_on_shortest_match(completer):
"""Fuzzy result rank should be based on shortest match.
Result ranking in fuzzy searching is partially based on the length
of matches: shorter matches are considered more relevant than
longer ones. When searching for the text 'user', the length
component of the match 'user_group' could be either 4 ('user') or
7 ('user_gr').
This test checks that the fuzzy ranking algorithm uses the shorter
match when calculating result rank.
"""
text = 'user'
collection = ['api_user', 'user_group']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['user_group', 'api_user']
|
Add tests for fuzzy ranking adjustments.
|
Add tests for fuzzy ranking adjustments.
|
Python
|
bsd-3-clause
|
n-someya/pgcli,TamasNo1/pgcli,janusnic/pgcli,w4ngyi/pgcli,j-bennet/pgcli,n-someya/pgcli,lk1ngaa7/pgcli,darikg/pgcli,thedrow/pgcli,dbcli/pgcli,d33tah/pgcli,darikg/pgcli,bitemyapp/pgcli,koljonen/pgcli,MattOates/pgcli,d33tah/pgcli,MattOates/pgcli,zhiyuanshi/pgcli,bitmonk/pgcli,johshoff/pgcli,bitmonk/pgcli,zhiyuanshi/pgcli,j-bennet/pgcli,lk1ngaa7/pgcli,joewalnes/pgcli,johshoff/pgcli,nosun/pgcli,dbcli/vcli,w4ngyi/pgcli,janusnic/pgcli,suzukaze/pgcli,bitemyapp/pgcli,dbcli/vcli,nosun/pgcli,joewalnes/pgcli,yx91490/pgcli,suzukaze/pgcli,TamasNo1/pgcli,dbcli/pgcli,thedrow/pgcli,koljonen/pgcli,yx91490/pgcli
|
Add tests for fuzzy ranking adjustments.
|
from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
@pytest.fixture
def completer():
import pgcli.pgcompleter as pgcompleter
return pgcompleter.PGCompleter()
def test_ranking_ignores_identifier_quotes(completer):
"""When calculating result rank, identifier quotes should be ignored.
The result ranking algorithm ignores identifier quotes. Without this
correction, the match "user", which Postgres requires to be quoted
since it is also a reserved word, would incorrectly fall below the
match user_action because the literal quotation marks in "user"
alter the position of the match.
This test checks that the fuzzy ranking algorithm correctly ignores
quotation marks when computing match ranks.
"""
text = 'user'
collection = ['user_action', '"user"']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['"user"', 'user_action']
def test_ranking_based_on_shortest_match(completer):
"""Fuzzy result rank should be based on shortest match.
Result ranking in fuzzy searching is partially based on the length
of matches: shorter matches are considered more relevant than
longer ones. When searching for the text 'user', the length
component of the match 'user_group' could be either 4 ('user') or
7 ('user_gr').
This test checks that the fuzzy ranking algorithm uses the shorter
match when calculating result rank.
"""
text = 'user'
collection = ['api_user', 'user_group']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['user_group', 'api_user']
|
<commit_before><commit_msg>Add tests for fuzzy ranking adjustments.<commit_after>
|
from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
@pytest.fixture
def completer():
import pgcli.pgcompleter as pgcompleter
return pgcompleter.PGCompleter()
def test_ranking_ignores_identifier_quotes(completer):
"""When calculating result rank, identifier quotes should be ignored.
The result ranking algorithm ignores identifier quotes. Without this
correction, the match "user", which Postgres requires to be quoted
since it is also a reserved word, would incorrectly fall below the
match user_action because the literal quotation marks in "user"
alter the position of the match.
This test checks that the fuzzy ranking algorithm correctly ignores
quotation marks when computing match ranks.
"""
text = 'user'
collection = ['user_action', '"user"']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['"user"', 'user_action']
def test_ranking_based_on_shortest_match(completer):
"""Fuzzy result rank should be based on shortest match.
Result ranking in fuzzy searching is partially based on the length
of matches: shorter matches are considered more relevant than
longer ones. When searching for the text 'user', the length
component of the match 'user_group' could be either 4 ('user') or
7 ('user_gr').
This test checks that the fuzzy ranking algorithm uses the shorter
match when calculating result rank.
"""
text = 'user'
collection = ['api_user', 'user_group']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['user_group', 'api_user']
|
Add tests for fuzzy ranking adjustments.from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
@pytest.fixture
def completer():
import pgcli.pgcompleter as pgcompleter
return pgcompleter.PGCompleter()
def test_ranking_ignores_identifier_quotes(completer):
"""When calculating result rank, identifier quotes should be ignored.
The result ranking algorithm ignores identifier quotes. Without this
correction, the match "user", which Postgres requires to be quoted
since it is also a reserved word, would incorrectly fall below the
match user_action because the literal quotation marks in "user"
alter the position of the match.
This test checks that the fuzzy ranking algorithm correctly ignores
quotation marks when computing match ranks.
"""
text = 'user'
collection = ['user_action', '"user"']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['"user"', 'user_action']
def test_ranking_based_on_shortest_match(completer):
"""Fuzzy result rank should be based on shortest match.
Result ranking in fuzzy searching is partially based on the length
of matches: shorter matches are considered more relevant than
longer ones. When searching for the text 'user', the length
component of the match 'user_group' could be either 4 ('user') or
7 ('user_gr').
This test checks that the fuzzy ranking algorithm uses the shorter
match when calculating result rank.
"""
text = 'user'
collection = ['api_user', 'user_group']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['user_group', 'api_user']
|
<commit_before><commit_msg>Add tests for fuzzy ranking adjustments.<commit_after>from __future__ import unicode_literals
import pytest
from prompt_toolkit.completion import Completion
from prompt_toolkit.document import Document
@pytest.fixture
def completer():
import pgcli.pgcompleter as pgcompleter
return pgcompleter.PGCompleter()
def test_ranking_ignores_identifier_quotes(completer):
"""When calculating result rank, identifier quotes should be ignored.
The result ranking algorithm ignores identifier quotes. Without this
correction, the match "user", which Postgres requires to be quoted
since it is also a reserved word, would incorrectly fall below the
match user_action because the literal quotation marks in "user"
alter the position of the match.
This test checks that the fuzzy ranking algorithm correctly ignores
quotation marks when computing match ranks.
"""
text = 'user'
collection = ['user_action', '"user"']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['"user"', 'user_action']
def test_ranking_based_on_shortest_match(completer):
"""Fuzzy result rank should be based on shortest match.
Result ranking in fuzzy searching is partially based on the length
of matches: shorter matches are considered more relevant than
longer ones. When searching for the text 'user', the length
component of the match 'user_group' could be either 4 ('user') or
7 ('user_gr').
This test checks that the fuzzy ranking algorithm uses the shorter
match when calculating result rank.
"""
text = 'user'
collection = ['api_user', 'user_group']
result = [match.text for match in completer.find_matches(text, collection)]
assert result == ['user_group', 'api_user']
|
|
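The tests above pin down two properties of the ranking without showing the matcher itself. As a toy illustration only, unrelated to pgcli's real pgcompleter code, a scorer that respects both ideas strips identifier quotes first and scores by the shortest in-order match window:
import re
def match_length(text, candidate):
    # Ignore identifier quotes, then find the shortest window containing
    # the search characters in order; lower is better, None means no match.
    stripped = candidate.replace('"', '')
    pattern = '.*?'.join(re.escape(ch) for ch in text)
    best = None
    for start in range(len(stripped)):
        m = re.match(pattern, stripped[start:])
        if m and (best is None or len(m.group()) < best):
            best = len(m.group())
    return best
assert match_length('user', '"user"') == 4      # quotes do not hurt the rank
assert match_length('user', 'user_group') == 4  # shortest match, not 'user_gr'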
e0695ca25c4f9f51233ee006c2a3e00bee473203
|
all-domains/algorithms/sorting/insertion-sort-part-1/solution.py
|
all-domains/algorithms/sorting/insertion-sort-part-1/solution.py
|
# https://www.hackerrank.com/challenges/insertionsort1
# Python 3
def formatted_print(items):
formatted = ' '.join([str(item) for item in items])
print(formatted)
def insertionSort(items):
# The value to insert is the rightmost element
length = len(items)-1
value_to_insert = items[length]
start = length-1 # we start at the second last item
for index in range(start, -1, -1):
item = items[index]
items[index+1] = item
if item < value_to_insert:
items[index+1] = value_to_insert
formatted_print(items)
return
formatted_print(items)
# If all the elements are greater than the value to insert,
# insert value at the start of the list
items[0] = value_to_insert
formatted_print(items)
n = input()
x = input()
# x = '2 4 6 8 3'
# x = '2 3 4 5 6 7 8 9 10 1'
items = [int(item) for item in x.split(' ')]
insertionSort(items)
|
Implement the beginning of insertion sort
|
Implement the beginning of insertion sort
https://www.hackerrank.com/challenges/insertionsort1
|
Python
|
mit
|
arvinsim/hackerrank-solutions
|
Implement the beginning of insertion sort
https://www.hackerrank.com/challenges/insertionsort1
|
# https://www.hackerrank.com/challenges/insertionsort1
# Python 3
def formatted_print(items):
formatted = ' '.join([str(item) for item in items])
print(formatted)
def insertionSort(items):
# The value to insert is the rightmost element
length = len(items)-1
value_to_insert = items[length]
start = length-1 # we start at the second last item
for index in range(start, -1, -1):
item = items[index]
items[index+1] = item
if item < value_to_insert:
items[index+1] = value_to_insert
formatted_print(items)
return
formatted_print(items)
# If all the elements are greater than the value to insert,
# insert value at the start of the list
items[0] = value_to_insert
formatted_print(items)
n = input()
x = input()
# x = '2 4 6 8 3'
# x = '2 3 4 5 6 7 8 9 10 1'
items = [int(item) for item in x.split(' ')]
insertionSort(items)
|
<commit_before><commit_msg>Implement the beginning of insertion sort
https://www.hackerrank.com/challenges/insertionsort1<commit_after>
|
# https://www.hackerrank.com/challenges/insertionsort1
# Python 3
def formatted_print(items):
formatted = ' '.join([str(item) for item in items])
print(formatted)
def insertionSort(items):
# The value to insert is the rightmost element
length = len(items)-1
value_to_insert = items[length]
start = length-1 # we start at the second last item
for index in range(start, -1, -1):
item = items[index]
items[index+1] = item
if item < value_to_insert:
items[index+1] = value_to_insert
formatted_print(items)
return
formatted_print(items)
# If all the elements are greater than the value to insert,
# insert value at the start of the list
items[0] = value_to_insert
formatted_print(items)
n = input()
x = input()
# x = '2 4 6 8 3'
# x = '2 3 4 5 6 7 8 9 10 1'
items = [int(item) for item in x.split(' ')]
insertionSort(items)
|
Implement the beginning of insertion sort
https://www.hackerrank.com/challenges/insertionsort1# https://www.hackerrank.com/challenges/insertionsort1
# Python 3
def formatted_print(items):
formatted = ' '.join([str(item) for item in items])
print(formatted)
def insertionSort(items):
# The value to insert is the rightmost element
length = len(items)-1
value_to_insert = items[length]
start = length-1 # we start at the second last item
for index in range(start, -1, -1):
item = items[index]
items[index+1] = item
if item < value_to_insert:
items[index+1] = value_to_insert
formatted_print(items)
return
formatted_print(items)
# If all the elements are greater than the value to insert,
# insert value at the start of the list
items[0] = value_to_insert
formatted_print(items)
n = input()
x = input()
# x = '2 4 6 8 3'
# x = '2 3 4 5 6 7 8 9 10 1'
items = [int(item) for item in x.split(' ')]
insertionSort(items)
|
<commit_before><commit_msg>Implement the beginning of insertion sort
https://www.hackerrank.com/challenges/insertionsort1<commit_after># https://www.hackerrank.com/challenges/insertionsort1
# Python 3
def formatted_print(items):
formatted = ' '.join([str(item) for item in items])
print(formatted)
def insertionSort(items):
# The value to insert is the rightmost element
length = len(items)-1
value_to_insert = items[length]
start = length-1 # we start at the second last item
for index in range(start, -1, -1):
item = items[index]
items[index+1] = item
if item < value_to_insert:
items[index+1] = value_to_insert
formatted_print(items)
return
formatted_print(items)
# If all the elements are greater than the value to insert,
# insert value at the start of the list
items[0] = value_to_insert
formatted_print(items)
n = input()
x = input()
# x = '2 4 6 8 3'
# x = '2 3 4 5 6 7 8 9 10 1'
items = [int(item) for item in x.split(' ')]
insertionSort(items)
|
|
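The loop above shifts unconditionally and then repairs the slot once the insertion point is found, which works but reads oddly; a sketch of the more conventional phrasing of the same single-insertion step, which prints the same four lines for the sample input 2 4 6 8 3:
def insert_last(items):
    value = items[-1]
    i = len(items) - 2
    while i >= 0 and items[i] > value:
        items[i + 1] = items[i]  # shift the larger element one slot right
        print(' '.join(map(str, items)))
        i -= 1
    items[i + 1] = value  # also covers the insert-at-front case (i == -1)
    print(' '.join(map(str, items)))
insert_last([2, 4, 6, 8, 3])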
0c669b20b021b07fde251b7b005f2877da1b7202
|
test/integration/ggrc/models/test_cad.py
|
test/integration/ggrc/models/test_cad.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for custom attribute definitions model."""
from sqlalchemy.exc import IntegrityError
from ggrc import db
from ggrc import models
from integration.ggrc import TestCase
class TestCAD(TestCase):
"""Tests for basic functionality of cad model."""
def test_setting_reserved_words(self):
"""Test setting any of the existing attribute names."""
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.definition_type = "Section"
cad.title = "title"
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.title = "title"
cad.definition_type = "Section"
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="title",
definition_type="Assessment",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="TITLE",
definition_type="Program",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="Secondary CONTACT",
definition_type="Program",
)
cad = models.CustomAttributeDefinition(
title="non existing title",
definition_type="Program",
)
self.assertEqual(cad.title, "non existing title")
def test_setting_global_cad_names(self):
"""Test duplicates with global attribute names."""
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=1,
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
with self.assertRaises(IntegrityError):
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
db.session.rollback()
with self.assertRaises(ValueError):
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
def test_different_models(self):
"""Test unique names over on different models."""
db.session.add(models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Section",
attribute_type="Text",
))
db.session.commit()
cad = models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Program",
attribute_type="Text",
)
self.assertEqual(cad.title, "my custom attribute title")
|
Add unique cad name tests
|
Add unique cad name tests
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core
|
Add unique cad name tests
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for custom attribute definitions model."""
from sqlalchemy.exc import IntegrityError
from ggrc import db
from ggrc import models
from integration.ggrc import TestCase
class TestCAD(TestCase):
"""Tests for basic functionality of cad model."""
def test_setting_reserved_words(self):
"""Test setting any of the existing attribute names."""
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.definition_type = "Section"
cad.title = "title"
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.title = "title"
cad.definition_type = "Section"
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="title",
definition_type="Assessment",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="TITLE",
definition_type="Program",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="Secondary CONTACT",
definition_type="Program",
)
cad = models.CustomAttributeDefinition(
title="non existing title",
definition_type="Program",
)
self.assertEqual(cad.title, "non existing title")
def test_setting_global_cad_names(self):
"""Test duplicates with global attribute names."""
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=1,
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
with self.assertRaises(IntegrityError):
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
db.session.rollback()
with self.assertRaises(ValueError):
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
def test_different_models(self):
"""Test unique names over on different models."""
db.session.add(models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Section",
attribute_type="Text",
))
db.session.commit()
cad = models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Program",
attribute_type="Text",
)
self.assertEqual(cad.title, "my custom attribute title")
|
<commit_before><commit_msg>Add unique cad name tests<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for custom attribute definitions model."""
from sqlalchemy.exc import IntegrityError
from ggrc import db
from ggrc import models
from integration.ggrc import TestCase
class TestCAD(TestCase):
"""Tests for basic functionality of cad model."""
def test_setting_reserved_words(self):
"""Test setting any of the existing attribute names."""
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.definition_type = "Section"
cad.title = "title"
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.title = "title"
cad.definition_type = "Section"
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="title",
definition_type="Assessment",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="TITLE",
definition_type="Program",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="Secondary CONTACT",
definition_type="Program",
)
cad = models.CustomAttributeDefinition(
title="non existing title",
definition_type="Program",
)
self.assertEqual(cad.title, "non existing title")
def test_setting_global_cad_names(self):
"""Test duplicates with global attribute names."""
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=1,
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
with self.assertRaises(IntegrityError):
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
db.session.rollback()
with self.assertRaises(ValueError):
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
def test_different_models(self):
"""Test unique names over on different models."""
db.session.add(models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Section",
attribute_type="Text",
))
db.session.commit()
cad = models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Program",
attribute_type="Text",
)
self.assertEqual(cad.title, "my custom attribute title")
|
Add unique cad name tests# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for custom attribute definitions model."""
from sqlalchemy.exc import IntegrityError
from ggrc import db
from ggrc import models
from integration.ggrc import TestCase
class TestCAD(TestCase):
"""Tests for basic functionality of cad model."""
def test_setting_reserved_words(self):
"""Test setting any of the existing attribute names."""
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.definition_type = "Section"
cad.title = "title"
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.title = "title"
cad.definition_type = "Section"
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="title",
definition_type="Assessment",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="TITLE",
definition_type="Program",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="Secondary CONTACT",
definition_type="Program",
)
cad = models.CustomAttributeDefinition(
title="non existing title",
definition_type="Program",
)
self.assertEqual(cad.title, "non existing title")
def test_setting_global_cad_names(self):
"""Test duplicates with global attribute names."""
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=1,
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
with self.assertRaises(IntegrityError):
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
db.session.rollback()
with self.assertRaises(ValueError):
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
def test_different_models(self):
"""Test unique names over on different models."""
db.session.add(models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Section",
attribute_type="Text",
))
db.session.commit()
cad = models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Program",
attribute_type="Text",
)
self.assertEqual(cad.title, "my custom attribute title")
|
<commit_before><commit_msg>Add unique cad name tests<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for custom attribute definitions model."""
from sqlalchemy.exc import IntegrityError
from ggrc import db
from ggrc import models
from integration.ggrc import TestCase
class TestCAD(TestCase):
"""Tests for basic functionality of cad model."""
def test_setting_reserved_words(self):
"""Test setting any of the existing attribute names."""
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.definition_type = "Section"
cad.title = "title"
with self.assertRaises(ValueError):
cad = models.CustomAttributeDefinition()
cad.title = "title"
cad.definition_type = "Section"
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="title",
definition_type="Assessment",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="TITLE",
definition_type="Program",
)
with self.assertRaises(ValueError):
models.CustomAttributeDefinition(
title="Secondary CONTACT",
definition_type="Program",
)
cad = models.CustomAttributeDefinition(
title="non existing title",
definition_type="Program",
)
self.assertEqual(cad.title, "non existing title")
def test_setting_global_cad_names(self):
"""Test duplicates with global attribute names."""
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=1,
attribute_type="Text",
))
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
with self.assertRaises(IntegrityError):
db.session.add(models.CustomAttributeDefinition(
title="non existing title",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
db.session.rollback()
with self.assertRaises(ValueError):
db.session.add(models.CustomAttributeDefinition(
title="global cad",
definition_type="Section",
definition_id=2,
attribute_type="Text",
))
db.session.commit()
def test_different_models(self):
"""Test unique names over on different models."""
db.session.add(models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Section",
attribute_type="Text",
))
db.session.commit()
cad = models.CustomAttributeDefinition(
title="my custom attribute title",
definition_type="Program",
attribute_type="Text",
)
self.assertEqual(cad.title, "my custom attribute title")
|
|
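The reserved-word and global-uniqueness behavior these tests exercise boils down to a case-insensitive title check. A minimal standalone sketch of that validation logic, assuming an illustrative RESERVED set and a caller-supplied list of existing global titles (neither is ggrc's actual implementation):
# Minimal sketch of the case-insensitive title validation the tests expect.
RESERVED = {"title", "secondary contact"}  # illustrative subset, not the real list

def validate_title(title, existing_global_titles=()):
    """Raise ValueError for reserved names or duplicates of a global CAD."""
    lowered = title.lower()
    if lowered in RESERVED:
        raise ValueError("%r is a reserved attribute name" % title)
    if lowered in {t.lower() for t in existing_global_titles}:
        raise ValueError("%r duplicates a global definition" % title)
    return title

validate_title("non existing title")                 # passes
# validate_title("Secondary CONTACT")                # would raise ValueError
# validate_title("global cad", ["Global CAD"])       # would raise ValueError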
09c42c54816659a19b33c4e0a746f586749b2f93
|
sweettooth/extensions/management/commands/searchextensions.py
|
sweettooth/extensions/management/commands/searchextensions.py
|
from optparse import make_option
from django.core.management.base import BaseCommand
from extensions.models import Extension
from extensions.search import enquire
def append_version(option, opt_str, value, parser):
values = parser.values
if getattr(values, "versions", None) is None:
values.versions = []
values.versions.append(value)
class Command(BaseCommand):
args = '<query>'
help = 'Test the search engine'
option_list = BaseCommand.option_list + (
make_option('-V', action="callback", callback=append_version, type="string"),
)
def handle(self, *args, **kwargs):
query = ' '.join(args)
versions = kwargs.get('versions')
db, enquiry = enquire(query, versions)
mset = enquiry.get_mset(0, db.get_doccount())
pks = [match.document.get_data() for match in mset]
# filter doesn't guarantee an order, so we need to get all the
# possible models then look them up to get the ordering
# returned by xapian. This hits the database all at once, rather
# than pagesize times.
extension_lookup = {}
for extension in Extension.objects.filter(pk__in=pks):
extension_lookup[str(extension.pk)] = extension
extensions = [extension_lookup[pk] for pk in pks]
for ext in extensions:
print ext.name
|
Add a simple command to test search
|
search: Add a simple command to test search
|
Python
|
agpl-3.0
|
GNOME/extensions-web,GNOME/extensions-web,GNOME/extensions-web,magcius/sweettooth,magcius/sweettooth,GNOME/extensions-web
|
search: Add a simple command to test search
|
from optparse import make_option
from django.core.management.base import BaseCommand
from extensions.models import Extension
from extensions.search import enquire
def append_version(option, opt_str, value, parser):
values = parser.values
if getattr(values, "versions", None) is None:
values.versions = []
values.versions.append(value)
class Command(BaseCommand):
args = '<query>'
help = 'Test the search engine'
option_list = BaseCommand.option_list + (
make_option('-V', action="callback", callback=append_version, type="string"),
)
def handle(self, *args, **kwargs):
query = ' '.join(args)
versions = kwargs.get('versions')
db, enquiry = enquire(query, versions)
mset = enquiry.get_mset(0, db.get_doccount())
pks = [match.document.get_data() for match in mset]
# filter doesn't guarantee an order, so we need to get all the
# possible models then look them up to get the ordering
# returned by xapian. This hits the database all at once, rather
# than pagesize times.
extension_lookup = {}
for extension in Extension.objects.filter(pk__in=pks):
extension_lookup[str(extension.pk)] = extension
extensions = [extension_lookup[pk] for pk in pks]
for ext in extensions:
print ext.name
|
<commit_before><commit_msg>search: Add a simple command to test search<commit_after>
|
from optparse import make_option
from django.core.management.base import BaseCommand
from extensions.models import Extension
from extensions.search import enquire
def append_version(option, opt_str, value, parser):
values = parser.values
if getattr(values, "versions", None) is None:
values.versions = []
values.versions.append(value)
class Command(BaseCommand):
args = '<query>'
help = 'Test the search engine'
option_list = BaseCommand.option_list + (
make_option('-V', action="callback", callback=append_version, type="string"),
)
def handle(self, *args, **kwargs):
query = ' '.join(args)
versions = kwargs.get('versions')
db, enquiry = enquire(query, versions)
mset = enquiry.get_mset(0, db.get_doccount())
pks = [match.document.get_data() for match in mset]
# filter doesn't guarantee an order, so we need to get all the
# possible models then look them up to get the ordering
# returned by xapian. This hits the database all at once, rather
# than pagesize times.
extension_lookup = {}
for extension in Extension.objects.filter(pk__in=pks):
extension_lookup[str(extension.pk)] = extension
extensions = [extension_lookup[pk] for pk in pks]
for ext in extensions:
print ext.name
|
search: Add a simple command to test search
from optparse import make_option
from django.core.management.base import BaseCommand
from extensions.models import Extension
from extensions.search import enquire
def append_version(option, opt_str, value, parser):
values = parser.values
if getattr(values, "versions", None) is None:
values.versions = []
values.versions.append(value)
class Command(BaseCommand):
args = '<query>'
help = 'Test the search engine'
option_list = BaseCommand.option_list + (
make_option('-V', action="callback", callback=append_version, type="string"),
)
def handle(self, *args, **kwargs):
query = ' '.join(args)
versions = kwargs.get('versions')
db, enquiry = enquire(query, versions)
mset = enquiry.get_mset(0, db.get_doccount())
pks = [match.document.get_data() for match in mset]
# filter doesn't guarantee an order, so we need to get all the
# possible models then look them up to get the ordering
# returned by xapian. This hits the database all at once, rather
# than pagesize times.
extension_lookup = {}
for extension in Extension.objects.filter(pk__in=pks):
extension_lookup[str(extension.pk)] = extension
extensions = [extension_lookup[pk] for pk in pks]
for ext in extensions:
print ext.name
|
<commit_before><commit_msg>search: Add a simple command to test search<commit_after>
from optparse import make_option
from django.core.management.base import BaseCommand
from extensions.models import Extension
from extensions.search import enquire
def append_version(option, opt_str, value, parser):
values = parser.values
if getattr(values, "versions", None) is None:
values.versions = []
values.versions.append(value)
class Command(BaseCommand):
args = '<query>'
help = 'Test the search engine'
option_list = BaseCommand.option_list + (
make_option('-V', action="callback", callback=append_version, type="string"),
)
def handle(self, *args, **kwargs):
query = ' '.join(args)
versions = kwargs.get('versions')
db, enquiry = enquire(query, versions)
mset = enquiry.get_mset(0, db.get_doccount())
pks = [match.document.get_data() for match in mset]
# filter doesn't guarantee an order, so we need to get all the
# possible models then look them up to get the ordering
# returned by xapian. This hits the database all at once, rather
# than pagesize times.
extension_lookup = {}
for extension in Extension.objects.filter(pk__in=pks):
extension_lookup[str(extension.pk)] = extension
extensions = [extension_lookup[pk] for pk in pks]
for ext in extensions:
print ext.name
|
|
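The lookup-dict trick in handle() is a general pattern worth isolating: do one unordered bulk fetch, then restore the search engine's ranking in memory. A self-contained sketch of just that step (FakeExtension stands in for a Django model instance):
# Restore an externally-computed ranking after an unordered bulk fetch.
class FakeExtension(object):
    def __init__(self, pk, name):
        self.pk, self.name = pk, name

ranked_pks = ["3", "1", "2"]  # order the search engine returned

# Unordered "bulk query" result, like Extension.objects.filter(pk__in=pks)
fetched = [FakeExtension(1, "alpha"), FakeExtension(2, "beta"),
           FakeExtension(3, "gamma")]

lookup = {str(ext.pk): ext for ext in fetched}  # index by pk in one pass
ordered = [lookup[pk] for pk in ranked_pks]     # engine's order restored
print([ext.name for ext in ordered])            # ['gamma', 'alpha', 'beta']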
7531ed0c9ae25f04884250c84b39a630ae7ef34b
|
raiden/storage/migrations/v20_to_v21.py
|
raiden/storage/migrations/v20_to_v21.py
|
import json
from raiden.storage.sqlite import SQLiteStorage
SOURCE_VERSION = 20
TARGET_VERSION = 21
def _transform_snapshot(raw_snapshot: str) -> str:
snapshot = json.loads(raw_snapshot)
for task in snapshot['payment_mapping']['secrethashes_to_task'].values():
if 'raiden.transfer.state.InitiatorTask' in task['_type']:
for initiator in task['manager_task']['initiator_transfers'].values():
initiator['transfer_description']['allocated_fee'] = 0
ids_to_addrs = dict()
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
ids_to_addrs[payment_network['address']] = token_network['token_address']
snapshot['tokennetworkaddresses_to_paymentnetworkaddresses'] = ids_to_addrs
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
for channel_state in token_network['channelidentifiers_to_channels'].values():
channel_state['mediation_fee'] = 0
return json.dumps(snapshot)
def _update_snapshots(storage: SQLiteStorage):
updated_snapshots_data = []
for snapshot in storage.get_snapshots():
new_snapshot = _transform_snapshot(snapshot.data)
updated_snapshots_data.append((new_snapshot, snapshot.identifier))
storage.update_snapshots(updated_snapshots_data)
def _update_statechanges(storage: SQLiteStorage):
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceiveChannelNew'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['channel_state']['mediation_fee'] = 0
storage.update_state_changes(state_changes_batch)
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['transfer']['allocated_fee'] = 0
storage.update_state_changes(state_changes_batch)
def upgrade_v20_to_v21(
storage: SQLiteStorage,
old_version: int,
**kwargs, # pylint: disable=unused-argument
) -> int:
if old_version == SOURCE_VERSION:
_update_snapshots(storage)
_update_statechanges(storage)
return TARGET_VERSION
|
Move migration 21 to its proper file
|
Move migration 21 to its proper file
|
Python
|
mit
|
hackaugusto/raiden,hackaugusto/raiden
|
Move migration 21 to its proper file
|
import json
from raiden.storage.sqlite import SQLiteStorage
SOURCE_VERSION = 20
TARGET_VERSION = 21
def _transform_snapshot(raw_snapshot: str) -> str:
snapshot = json.loads(raw_snapshot)
for task in snapshot['payment_mapping']['secrethashes_to_task'].values():
if 'raiden.transfer.state.InitiatorTask' in task['_type']:
for initiator in task['manager_task']['initiator_transfers'].values():
initiator['transfer_description']['allocated_fee'] = 0
ids_to_addrs = dict()
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
ids_to_addrs[payment_network['address']] = token_network['token_address']
snapshot['tokennetworkaddresses_to_paymentnetworkaddresses'] = ids_to_addrs
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
for channel_state in token_network['channelidentifiers_to_channels'].values():
channel_state['mediation_fee'] = 0
return json.dumps(snapshot)
def _update_snapshots(storage: SQLiteStorage):
updated_snapshots_data = []
for snapshot in storage.get_snapshots():
new_snapshot = _transform_snapshot(snapshot.data)
updated_snapshots_data.append((new_snapshot, snapshot.identifier))
storage.update_snapshots(updated_snapshots_data)
def _update_statechanges(storage: SQLiteStorage):
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceiveChannelNew'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['channel_state']['mediation_fee'] = 0
storage.update_state_changes(state_changes_batch)
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['transfer']['allocated_fee'] = 0
storage.update_state_changes(state_changes_batch)
def upgrade_v20_to_v21(
storage: SQLiteStorage,
old_version: int,
**kwargs, # pylint: disable=unused-argument
) -> int:
if old_version == SOURCE_VERSION:
_update_snapshots(storage)
_update_statechanges(storage)
return TARGET_VERSION
|
<commit_before><commit_msg>Move migration 21 to its proper file<commit_after>
|
import json
from raiden.storage.sqlite import SQLiteStorage
SOURCE_VERSION = 20
TARGET_VERSION = 21
def _transform_snapshot(raw_snapshot: str) -> str:
snapshot = json.loads(raw_snapshot)
for task in snapshot['payment_mapping']['secrethashes_to_task'].values():
if 'raiden.transfer.state.InitiatorTask' in task['_type']:
for initiator in task['manager_task']['initiator_transfers'].values():
initiator['transfer_description']['allocated_fee'] = 0
ids_to_addrs = dict()
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
ids_to_addrs[payment_network['address']] = token_network['token_address']
snapshot['tokennetworkaddresses_to_paymentnetworkaddresses'] = ids_to_addrs
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
for channel_state in token_network['channelidentifiers_to_channels'].values():
channel_state['mediation_fee'] = 0
return json.dumps(snapshot)
def _update_snapshots(storage: SQLiteStorage):
updated_snapshots_data = []
for snapshot in storage.get_snapshots():
new_snapshot = _transform_snapshot(snapshot.data)
updated_snapshots_data.append((new_snapshot, snapshot.identifier))
storage.update_snapshots(updated_snapshots_data)
def _update_statechanges(storage: SQLiteStorage):
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceiveChannelNew'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['channel_state']['mediation_fee'] = 0
storage.update_state_changes(state_changes_batch)
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['transfer']['allocated_fee'] = 0
storage.update_state_changes(state_changes_batch)
def upgrade_v20_to_v21(
storage: SQLiteStorage,
old_version: int,
**kwargs, # pylint: disable=unused-argument
) -> int:
if old_version == SOURCE_VERSION:
_update_snapshots(storage)
_update_statechanges(storage)
return TARGET_VERSION
|
Move migration 21 to its proper fileimport json
from raiden.storage.sqlite import SQLiteStorage
SOURCE_VERSION = 20
TARGET_VERSION = 21
def _transform_snapshot(raw_snapshot: str) -> str:
snapshot = json.loads(raw_snapshot)
for task in snapshot['payment_mapping']['secrethashes_to_task'].values():
if 'raiden.transfer.state.InitiatorTask' in task['_type']:
for initiator in task['manager_task']['initiator_transfers'].values():
initiator['transfer_description']['allocated_fee'] = 0
ids_to_addrs = dict()
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
ids_to_addrs[payment_network['address']] = token_network['token_address']
snapshot['tokennetworkaddresses_to_paymentnetworkaddresses'] = ids_to_addrs
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
for channel_state in token_network['channelidentifiers_to_channels'].values():
channel_state['mediation_fee'] = 0
return json.dumps(snapshot)
def _update_snapshots(storage: SQLiteStorage):
updated_snapshots_data = []
for snapshot in storage.get_snapshots():
new_snapshot = _transform_snapshot(snapshot.data)
updated_snapshots_data.append((new_snapshot, snapshot.identifier))
storage.update_snapshots(updated_snapshots_data)
def _update_statechanges(storage: SQLiteStorage):
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceiveChannelNew'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['channel_state']['mediation_fee'] = 0
storage.update_state_changes(state_changes_batch)
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['transfer']['allocated_fee'] = 0
storage.update_state_changes(state_changes_batch)
def upgrade_v20_to_v21(
storage: SQLiteStorage,
old_version: int,
**kwargs, # pylint: disable=unused-argument
) -> int:
if old_version == SOURCE_VERSION:
_update_snapshots(storage)
_update_statechanges(storage)
return TARGET_VERSION
|
<commit_before><commit_msg>Move migration 21 to its proper file<commit_after>import json
from raiden.storage.sqlite import SQLiteStorage
SOURCE_VERSION = 20
TARGET_VERSION = 21
def _transform_snapshot(raw_snapshot: str) -> str:
snapshot = json.loads(raw_snapshot)
for task in snapshot['payment_mapping']['secrethashes_to_task'].values():
if 'raiden.transfer.state.InitiatorTask' in task['_type']:
for initiator in task['manager_task']['initiator_transfers'].values():
initiator['transfer_description']['allocated_fee'] = 0
ids_to_addrs = dict()
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
ids_to_addrs[payment_network['address']] = token_network['token_address']
snapshot['tokennetworkaddresses_to_paymentnetworkaddresses'] = ids_to_addrs
for payment_network in snapshot['identifiers_to_paymentnetworks'].values():
for token_network in payment_network['tokenidentifiers_to_tokennetworks'].values():
for channel_state in token_network['channelidentifiers_to_channels'].values():
channel_state['mediation_fee'] = 0
return json.dumps(snapshot)
def _update_snapshots(storage: SQLiteStorage):
updated_snapshots_data = []
for snapshot in storage.get_snapshots():
new_snapshot = _transform_snapshot(snapshot.data)
updated_snapshots_data.append((new_snapshot, snapshot.identifier))
storage.update_snapshots(updated_snapshots_data)
def _update_statechanges(storage: SQLiteStorage):
batch_size = 50
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.state_change.ContractReceiveChannelNew'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['channel_state']['mediation_fee'] = 0
storage.update_state_changes(state_changes_batch)
batch_query = storage.batch_query_state_changes(
batch_size=batch_size,
filters=[
('_type', 'raiden.transfer.mediated_transfer.state_change.ActionInitInitiator'),
],
)
for state_changes_batch in batch_query:
for state_change in state_changes_batch:
state_change['transfer']['allocated_fee'] = 0
storage.update_state_changes(state_changes_batch)
def upgrade_v20_to_v21(
storage: SQLiteStorage,
old_version: int,
**kwargs, # pylint: disable=unused-argument
) -> int:
if old_version == SOURCE_VERSION:
_update_snapshots(storage)
_update_statechanges(storage)
return TARGET_VERSION
|
|
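Both _update_* helpers share one shape: pull records in batches, mutate the deserialized dicts in place, write each batch back. A generic sketch of that loop under toy data; the callables here are stand-ins, not raiden's real SQLiteStorage API:
# Generic batch-rewrite loop in the style of _update_statechanges.
def rewrite_in_batches(batches, mutate, write_back):
    """batches yields lists of dicts; mutate edits one dict in place."""
    for batch in batches:
        for record in batch:
            mutate(record)
        write_back(batch)

def add_default_fee(record):
    record["transfer"]["allocated_fee"] = 0  # default the newly added field

toy_batches = iter([[{"transfer": {}}, {"transfer": {}}]])
rewrite_in_batches(toy_batches, add_default_fee, write_back=print)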
d3fdefb173b0fc3ab6a6479883a77049a4b8af16
|
trypython/stdlib/sys_/sys03.py
|
trypython/stdlib/sys_/sys03.py
|
"""
Samples for the sys module.
About the values of
- sys.prefix
- sys.exec_prefix
- sys.base_prefix
- sys.base_exec_prefix
in a venv environment.
REFERENCES:: http://bit.ly/2Vun6U9
http://bit.ly/2Vuvqn6
"""
import sys
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr, hr
class Sample(SampleBase):
def exec(self):
# --------------------------------------------
# When a virtual environment is created with venv and activated,
# the sys module's prefix and base_prefix hold different values.
# Without a virtual environment, the two are identical.
# --------------------------------------------
pr('prefix', sys.prefix)
pr('exec_prefix', sys.exec_prefix)
hr()
pr('base_prefix', sys.base_prefix)
pr('base_exec_prefix', sys.base_exec_prefix)
pr('using venv?', sys.prefix != sys.base_prefix)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
|
Add a sample about the difference in the values of sys.prefix, sys.base_prefix, sys.exec_prefix, and sys.base_exec_prefix when using venv
|
Add a sample about the difference in the values of sys.prefix, sys.base_prefix, sys.exec_prefix, and sys.base_exec_prefix when using venv
|
Python
|
mit
|
devlights/try-python
|
Add a sample about the difference in the values of sys.prefix, sys.base_prefix, sys.exec_prefix, and sys.base_exec_prefix when using venv
|
"""
Samples for the sys module.
About the values of
- sys.prefix
- sys.exec_prefix
- sys.base_prefix
- sys.base_exec_prefix
in a venv environment.
REFERENCES:: http://bit.ly/2Vun6U9
http://bit.ly/2Vuvqn6
"""
import sys
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr, hr
class Sample(SampleBase):
def exec(self):
# --------------------------------------------
# When a virtual environment is created with venv and activated,
# the sys module's prefix and base_prefix hold different values.
# Without a virtual environment, the two are identical.
# --------------------------------------------
pr('prefix', sys.prefix)
pr('exec_prefix', sys.exec_prefix)
hr()
pr('base_prefix', sys.base_prefix)
pr('base_exec_prefix', sys.base_exec_prefix)
pr('using venv?', sys.prefix != sys.base_prefix)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
|
<commit_before><commit_msg>Add a sample about the difference in the values of sys.prefix, sys.base_prefix, sys.exec_prefix, and sys.base_exec_prefix when using venv<commit_after>
|
"""
Samples for the sys module.
About the values of
- sys.prefix
- sys.exec_prefix
- sys.base_prefix
- sys.base_exec_prefix
in a venv environment.
REFERENCES:: http://bit.ly/2Vun6U9
http://bit.ly/2Vuvqn6
"""
import sys
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr, hr
class Sample(SampleBase):
def exec(self):
# --------------------------------------------
# When a virtual environment is created with venv and activated,
# the sys module's prefix and base_prefix hold different values.
# Without a virtual environment, the two are identical.
# --------------------------------------------
pr('prefix', sys.prefix)
pr('exec_prefix', sys.exec_prefix)
hr()
pr('base_prefix', sys.base_prefix)
pr('base_exec_prefix', sys.base_exec_prefix)
pr('using venv?', sys.prefix != sys.base_prefix)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
|
Add a sample about the difference in the values of sys.prefix, sys.base_prefix, sys.exec_prefix, and sys.base_exec_prefix when using venv"""
Samples for the sys module.
About the values of
- sys.prefix
- sys.exec_prefix
- sys.base_prefix
- sys.base_exec_prefix
in a venv environment.
REFERENCES:: http://bit.ly/2Vun6U9
http://bit.ly/2Vuvqn6
"""
import sys
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr, hr
class Sample(SampleBase):
def exec(self):
# --------------------------------------------
# When a virtual environment is created with venv and activated,
# the sys module's prefix and base_prefix hold different values.
# Without a virtual environment, the two are identical.
# --------------------------------------------
pr('prefix', sys.prefix)
pr('exec_prefix', sys.exec_prefix)
hr()
pr('base_prefix', sys.base_prefix)
pr('base_exec_prefix', sys.base_exec_prefix)
pr('using venv?', sys.prefix != sys.base_prefix)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
|
<commit_before><commit_msg>Add a sample about the difference in the values of sys.prefix, sys.base_prefix, sys.exec_prefix, and sys.base_exec_prefix when using venv<commit_after>"""
Samples for the sys module.
About the values of
- sys.prefix
- sys.exec_prefix
- sys.base_prefix
- sys.base_exec_prefix
in a venv environment.
REFERENCES:: http://bit.ly/2Vun6U9
http://bit.ly/2Vuvqn6
"""
import sys
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr, hr
class Sample(SampleBase):
def exec(self):
# --------------------------------------------
# When a virtual environment is created with venv and activated,
# the sys module's prefix and base_prefix hold different values.
# Without a virtual environment, the two are identical.
# --------------------------------------------
pr('prefix', sys.prefix)
pr('exec_prefix', sys.exec_prefix)
hr()
pr('base_prefix', sys.base_prefix)
pr('base_exec_prefix', sys.base_exec_prefix)
pr('using venv?', sys.prefix != sys.base_prefix)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
|
|
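The sys.prefix != sys.base_prefix comparison at the end of exec() is the standard venv check on Python 3.3+; stripped of the sample scaffolding it reduces to a few lines:
import sys

def in_virtualenv():
    # Under venv, sys.prefix points at the environment while base_prefix
    # keeps pointing at the base interpreter; equal values mean no venv.
    return sys.prefix != getattr(sys, "base_prefix", sys.prefix)

print(in_virtualenv())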
2fc3767b42b556afcd5d0f34b6cb86d71a79aa51
|
sandbox/to-casava-1.8-fastq.py
|
sandbox/to-casava-1.8-fastq.py
|
#!/usr/bin/env python
import functools
import re
import argparse
from khmer import ReadParser
resub_read_1 = functools.partial( re.sub, r"^(.*)/1$", r"\1 1:N:0:NNNNN" )
resub_read_2 = functools.partial( re.sub, r"^(.*)/2$", r"\1 2:N:0:NNNNN" )
def setup_cl_parser( ):
parser = \
argparse.ArgumentParser(
description = \
"Convert the older FASTQ format to the Casava >= 1.8 FASTQ format."
)
parser.add_argument( "input_filename" )
parser.add_argument( "output_filename" )
return parser
def main( ):
cl_parser = setup_cl_parser( )
cl_args = cl_parser.parse_args( )
# Note: Only use 1 thread to ensure same ordering of reads.
rparser = ReadParser( cl_args.input_filename, 1 )
with open( cl_args.output_filename, "w" ) as output_file:
for read in rparser:
new_name = resub_read_1( read.name )
new_name = resub_read_2( new_name )
output_file.write(
"@{name}\n{sequence}\n+\n{accuracy}\n".format(
name = new_name,
sequence = read.sequence,
accuracy = read.accuracy,
)
)
if "__main__" == __name__:
main( )
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
Create script to convert from old FAST{A,Q} name format to new one (as of Casava 1.8).
|
Create script to convert from old FAST{A,Q} name format to new one (as of Casava 1.8).
|
Python
|
bsd-3-clause
|
kdmurray91/khmer,jas14/khmer,F1000Research/khmer,souravsingh/khmer,F1000Research/khmer,souravsingh/khmer,souravsingh/khmer,jas14/khmer,Winterflower/khmer,ged-lab/khmer,ged-lab/khmer,jas14/khmer,Winterflower/khmer,F1000Research/khmer,Winterflower/khmer,kdmurray91/khmer,ged-lab/khmer,kdmurray91/khmer
|
Create script to convert from old FAST{A,Q} name format to new one (as of Casava 1.8).
|
#!/usr/bin/env python
import functools
import re
import argparse
from khmer import ReadParser
resub_read_1 = functools.partial( re.sub, r"^(.*)/1$", r"\1 1:N:0:NNNNN" )
resub_read_2 = functools.partial( re.sub, r"^(.*)/2$", r"\1 2:N:0:NNNNN" )
def setup_cl_parser( ):
parser = \
argparse.ArgumentParser(
description = \
"Convert the older FASTQ format to the Casava >= 1.8 FASTQ format."
)
parser.add_argument( "input_filename" )
parser.add_argument( "output_filename" )
return parser
def main( ):
cl_parser = setup_cl_parser( )
cl_args = cl_parser.parse_args( )
# Note: Only use 1 thread to ensure same ordering of reads.
rparser = ReadParser( cl_args.input_filename, 1 )
with open( cl_args.output_filename, "w" ) as output_file:
for read in rparser:
new_name = resub_read_1( read.name )
new_name = resub_read_2( new_name )
output_file.write(
"@{name}\n{sequence}\n+\n{accuracy}\n".format(
name = new_name,
sequence = read.sequence,
accuracy = read.accuracy,
)
)
if "__main__" == __name__:
main( )
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
<commit_before><commit_msg>Create script to convert from old FAST{A,Q} name format to new one (as of Casava 1.8).<commit_after>
|
#!/usr/bin/env python
import functools
import re
import argparse
from khmer import ReadParser
resub_read_1 = functools.partial( re.sub, r"^(.*)/1$", r"\1 1:N:0:NNNNN" )
resub_read_2 = functools.partial( re.sub, r"^(.*)/2$", r"\1 2:N:0:NNNNN" )
def setup_cl_parser( ):
parser = \
argparse.ArgumentParser(
description = \
"Convert the older FASTQ format to the Casava >= 1.8 FASTQ format."
)
parser.add_argument( "input_filename" )
parser.add_argument( "output_filename" )
return parser
def main( ):
cl_parser = setup_cl_parser( )
cl_args = cl_parser.parse_args( )
# Note: Only use 1 thread to ensure same ordering of reads.
rparser = ReadParser( cl_args.input_filename, 1 )
with open( cl_args.output_filename, "w" ) as output_file:
for read in rparser:
new_name = resub_read_1( read.name )
new_name = resub_read_2( new_name )
output_file.write(
"@{name}\n{sequence}\n+\n{accuracy}\n".format(
name = new_name,
sequence = read.sequence,
accuracy = read.accuracy,
)
)
if "__main__" == __name__:
main( )
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
Create script to convert from old FAST{A,Q} name format to new one (as of Casava 1.8).#!/usr/bin/env python
import functools
import re
import argparse
from khmer import ReadParser
resub_read_1 = functools.partial( re.sub, r"^(.*)/1$", r"\1 1:N:0:NNNNN" )
resub_read_2 = functools.partial( re.sub, r"^(.*)/2$", r"\1 2:N:0:NNNNN" )
def setup_cl_parser( ):
parser = \
argparse.ArgumentParser(
description = \
"Convert the older FASTQ format to the Casava >= 1.8 FASTQ format."
)
parser.add_argument( "input_filename" )
parser.add_argument( "output_filename" )
return parser
def main( ):
cl_parser = setup_cl_parser( )
cl_args = cl_parser.parse_args( )
# Note: Only use 1 thread to ensure same ordering of reads.
rparser = ReadParser( cl_args.input_filename, 1 )
with open( cl_args.output_filename, "w" ) as output_file:
for read in rparser:
new_name = resub_read_1( read.name )
new_name = resub_read_2( new_name )
output_file.write(
"@{name}\n{sequence}\n+\n{accuracy}\n".format(
name = new_name,
sequence = read.sequence,
accuracy = read.accuracy,
)
)
if "__main__" == __name__:
main( )
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
<commit_before><commit_msg>Create script to convert from old FAST{A,Q} name format to new one (as of Casava 1.8).<commit_after>#!/usr/bin/env python
import functools
import re
import argparse
from khmer import ReadParser
resub_read_1 = functools.partial( re.sub, r"^(.*)/1$", r"\1 1:N:0:NNNNN" )
resub_read_2 = functools.partial( re.sub, r"^(.*)/2$", r"\1 2:N:0:NNNNN" )
def setup_cl_parser( ):
parser = \
argparse.ArgumentParser(
description = \
"Convert the older FASTQ format to the Casava >= 1.8 FASTQ format."
)
parser.add_argument( "input_filename" )
parser.add_argument( "output_filename" )
return parser
def main( ):
cl_parser = setup_cl_parser( )
cl_args = cl_parser.parse_args( )
# Note: Only use 1 thread to ensure same ordering of reads.
rparser = ReadParser( cl_args.input_filename, 1 )
with open( cl_args.output_filename, "w" ) as output_file:
for read in rparser:
new_name = resub_read_1( read.name )
new_name = resub_read_2( new_name )
output_file.write(
"@{name}\n{sequence}\n+\n{accuracy}\n".format(
name = new_name,
sequence = read.sequence,
accuracy = read.accuracy,
)
)
if "__main__" == __name__:
main( )
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
|
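The two functools.partial(re.sub, ...) renamers can be sanity-checked without khmer or a real FASTQ file; a quick standalone check of the name rewriting (the read name below is made up):
import functools
import re

resub_read_1 = functools.partial(re.sub, r"^(.*)/1$", r"\1 1:N:0:NNNNN")
resub_read_2 = functools.partial(re.sub, r"^(.*)/2$", r"\1 2:N:0:NNNNN")

name = "HWI-ST123:42:C0ABC:1:1101/1"     # made-up old-style read-1 name
name = resub_read_2(resub_read_1(name))  # at most one pattern matches
print(name)  # HWI-ST123:42:C0ABC:1:1101 1:N:0:NNNNN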
15db0f300b23693160872754a57cb3afc1944a07
|
setup.py
|
setup.py
|
#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description='Zoomba',
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell, Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
|
#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("docs/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description=long_description,
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell,'
' Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
|
Use readme as the long description in pypi
|
Use readme as the long description in pypi
|
Python
|
apache-2.0
|
Accruent/zoomba,Accruent/zoomba
|
#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description='Zoomba',
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell, Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
Use readme as the long description in pypi
|
#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("docs/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description=long_description,
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell,'
' Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
|
<commit_before>#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description='Zoomba',
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell, Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
<commit_msg>Use readme as the long description in pypi<commit_after>
|
#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("docs/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description=long_description,
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell,'
' Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
|
#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description='Zoomba',
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell, Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
Use readme as the long description in pypi#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("docs/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description=long_description,
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell,'
' Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
|
<commit_before>#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description='Zoomba',
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell, Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
<commit_msg>Use readme as the long description in pypi<commit_after>#!/usr/bin/env python3
import version
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("docs/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(name='robotframework-zoomba',
version=version.VERSION,
description='Robot Framework mini-framework.',
long_description=long_description,
url='https://github.com/Accruent/zoomba',
maintainer='Alex Calandra, Michael Hintz, Keith Smoland, Matthew Giardina, Brandon Wolfe, Neil Howell,'
' Tommy Hoang',
maintainer_email='robosquad@accruent.com',
license='GPL-3.0',
keywords='Robot Framework robot-framework selenium requests appium soap winappdriver appium robotframework'
'desktop windows zoomba python robotframework-library appium-windows appiumlibrary api-rest api '
'soap-api',
platforms='any',
install_requires=requirements,
extras_require={
'testing': [
'Appium-Python-Client'
]
},
classifiers="""
Development Status :: 5 - Production/Stable
Operating System :: OS Independent
Programming Language :: Python :: 3
Topic :: Software Development :: Testing
Framework :: Robot Framework :: Library
""".strip().splitlines(),
package_dir={'': 'src'},
packages=['Zoomba']
)
|
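One caveat with a Markdown README as the long description: PyPI renders it as plain text unless the content type is declared. A minimal sketch of a setup() call that opts into Markdown rendering (the package name and version are placeholders, and this is not part of the commit above):
from setuptools import setup

setup(
    name="example-package",    # placeholder
    version="0.0.0",           # placeholder
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",  # lets PyPI render Markdown
)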
30ceb2772b3f6084a613a625160b8fc0da4890b0
|
tests/test_writelatex.py
|
tests/test_writelatex.py
|
"""
unit tests of the latex writer
"""
import unittest
import subprocess
import tempfile
import os
import sys
import BeautifulSoup
from pyth.plugins.latex.writer import LatexWriter
from pyth.plugins.python.reader import *
class TestWriteLatex(unittest.TestCase):
def test_basic(self):
"""
Try to create an empty latex document
"""
doc = PythonReader.read([])
latex = LatexWriter.write(doc).getvalue()
def test_paragraph(self):
"""
Try a single paragraph document
"""
doc = PythonReader.read(P[u"the text"])
latex = LatexWriter.write(doc).getvalue()
assert "the text" in latex
def test_bold(self):
doc = PythonReader.read([P[T(BOLD)[u"bold text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\textbf{bold text}" in latex, latex
def test_italic(self):
doc = PythonReader.read([P[T(ITALIC)[u"italic text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\emph{italic text}" in latex, latex
|
Add unit tests for latex output
|
Add unit tests for latex output
|
Python
|
mit
|
kippr/pyth,pombredanne/pyth,kippr/pyth,prechelt/pyth,brendonh/pyth,prechelt/pyth,eriol/pyth,sheepeatingtaz/pyth
|
Add unit tests for latex output
|
"""
unit tests of the latex writer
"""
import unittest
import subprocess
import tempfile
import os
import sys
import BeautifulSoup
from pyth.plugins.latex.writer import LatexWriter
from pyth.plugins.python.reader import *
class TestWriteLatex(unittest.TestCase):
def test_basic(self):
"""
Try to create an empty latex document
"""
doc = PythonReader.read([])
latex = LatexWriter.write(doc).getvalue()
def test_paragraph(self):
"""
Try a single paragraph document
"""
doc = PythonReader.read(P[u"the text"])
latex = LatexWriter.write(doc).getvalue()
assert "the text" in latex
def test_bold(self):
doc = PythonReader.read([P[T(BOLD)[u"bold text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\textbf{bold text}" in latex, latex
def test_italic(self):
doc = PythonReader.read([P[T(ITALIC)[u"italic text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\emph{italic text}" in latex, latex
|
<commit_before><commit_msg>Add unit tests for latex output<commit_after>
|
"""
unit tests of the latex writer
"""
import unittest
import subprocess
import tempfile
import os
import sys
import BeautifulSoup
from pyth.plugins.latex.writer import LatexWriter
from pyth.plugins.python.reader import *
class TestWriteLatex(unittest.TestCase):
def test_basic(self):
"""
Try to create an empty latex document
"""
doc = PythonReader.read([])
latex = LatexWriter.write(doc).getvalue()
def test_paragraph(self):
"""
Try a single paragraph document
"""
doc = PythonReader.read(P[u"the text"])
latex = LatexWriter.write(doc).getvalue()
assert "the text" in latex
def test_bold(self):
doc = PythonReader.read([P[T(BOLD)[u"bold text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\textbf{bold text}" in latex, latex
def test_italic(self):
doc = PythonReader.read([P[T(ITALIC)[u"italic text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\emph{italic text}" in latex, latex
|
Add unit tests for latex output"""
unit tests of the latex writer
"""
import unittest
import subprocess
import tempfile
import os
import sys
import BeautifulSoup
from pyth.plugins.latex.writer import LatexWriter
from pyth.plugins.python.reader import *
class TestWriteLatex(unittest.TestCase):
def test_basic(self):
"""
Try to create an empty latex document
"""
doc = PythonReader.read([])
latex = LatexWriter.write(doc).getvalue()
def test_paragraph(self):
"""
Try a single paragraph document
"""
doc = PythonReader.read(P[u"the text"])
latex = LatexWriter.write(doc).getvalue()
assert "the text" in latex
def test_bold(self):
doc = PythonReader.read([P[T(BOLD)[u"bold text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\textbf{bold text}" in latex, latex
def test_italic(self):
doc = PythonReader.read([P[T(ITALIC)[u"italic text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\emph{italic text}" in latex, latex
|
<commit_before><commit_msg>Add unit tests for latex output<commit_after>"""
unit tests of the latex writer
"""
import unittest
import subprocess
import tempfile
import os
import sys
import BeautifulSoup
from pyth.plugins.latex.writer import LatexWriter
from pyth.plugins.python.reader import *
class TestWriteLatex(unittest.TestCase):
def test_basic(self):
"""
Try to create an empty latex document
"""
doc = PythonReader.read([])
latex = LatexWriter.write(doc).getvalue()
def test_paragraph(self):
"""
Try a single paragraph document
"""
doc = PythonReader.read(P[u"the text"])
latex = LatexWriter.write(doc).getvalue()
assert "the text" in latex
def test_bold(self):
doc = PythonReader.read([P[T(BOLD)[u"bold text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\textbf{bold text}" in latex, latex
def test_italic(self):
doc = PythonReader.read([P[T(ITALIC)[u"italic text"]]])
latex = LatexWriter.write(doc).getvalue()
assert r"\emph{italic text}" in latex, latex
|
|
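The bold and italic cases differ only in the style constant and the expected macro, so they collapse naturally into one parametrized test; a sketch assuming the same pyth names the test module's star import provides:
import pytest

from pyth.plugins.latex.writer import LatexWriter
from pyth.plugins.python.reader import BOLD, ITALIC, P, T, PythonReader

@pytest.mark.parametrize(("style", "macro"), [
    (BOLD, r"\textbf{%s}"),
    (ITALIC, r"\emph{%s}"),
])
def test_inline_style(style, macro):
    doc = PythonReader.read([P[T(style)[u"styled text"]]])
    latex = LatexWriter.write(doc).getvalue()
    assert macro % u"styled text" in latex, latex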
0cf6a1ecf280132f8051c48468eddf2886dfc581
|
tests/test_table.py
|
tests/test_table.py
|
from cycli.table import isnumeric, col_widths
import pytest
@pytest.mark.parametrize(("number", "answer"), [
("12", True),
("12.3", True),
("0", True),
("1.1.0", False),
("Hell0", False),
("Hello", False)
])
def test_isnumeric(number, answer):
assert isnumeric(number) == answer
@pytest.mark.parametrize(("headers", "rows", "answer"), [
(["a", "bc"], [["a", "b"], ["a", "b"]], [1, 2]),
(["ab", "c"], [["a", "b"], ["a", "b"]], [2, 1]),
(["a", "b"], [["a", "bc"], ["a", "b"]], [1, 2]),
(["a", "b"], [["ab", "c"], ["a", "b"]], [2, 1]),
(["a", "b"], [["", ""], ["a", "a"]], [1, 1])
])
def test_col_widths(headers, rows, answer):
assert col_widths(headers, rows) == answer
|
Add tests for functions in table.py :construction:
|
Add tests for functions in table.py :construction:
|
Python
|
mit
|
nicolewhite/cycli,nicolewhite/cycli
|
Add tests for functions in table.py :construction:
|
from cycli.table import isnumeric, col_widths
import pytest
@pytest.mark.parametrize(("number", "answer"), [
("12", True),
("12.3", True),
("0", True),
("1.1.0", False),
("Hell0", False),
("Hello", False)
])
def test_isnumeric(number, answer):
assert isnumeric(number) == answer
@pytest.mark.parametrize(("headers", "rows", "answer"), [
(["a", "bc"], [["a", "b"], ["a", "b"]], [1, 2]),
(["ab", "c"], [["a", "b"], ["a", "b"]], [2, 1]),
(["a", "b"], [["a", "bc"], ["a", "b"]], [1, 2]),
(["a", "b"], [["ab", "c"], ["a", "b"]], [2, 1]),
(["a", "b"], [["", ""], ["a", "a"]], [1, 1])
])
def test_col_widths(headers, rows, answer):
assert col_widths(headers, rows) == answer
|
<commit_before><commit_msg>Add tests for functions in table.py :construction:<commit_after>
|
from cycli.table import isnumeric, col_widths
import pytest
@pytest.mark.parametrize(("number", "answer"), [
("12", True),
("12.3", True),
("0", True),
("1.1.0", False),
("Hell0", False),
("Hello", False)
])
def test_isnumeric(number, answer):
assert isnumeric(number) == answer
@pytest.mark.parametrize(("headers", "rows", "answer"), [
(["a", "bc"], [["a", "b"], ["a", "b"]], [1, 2]),
(["ab", "c"], [["a", "b"], ["a", "b"]], [2, 1]),
(["a", "b"], [["a", "bc"], ["a", "b"]], [1, 2]),
(["a", "b"], [["ab", "c"], ["a", "b"]], [2, 1]),
(["a", "b"], [["", ""], ["a", "a"]], [1, 1])
])
def test_col_widths(headers, rows, answer):
assert col_widths(headers, rows) == answer
|
Add tests for functions in table.py :construction:from cycli.table import isnumeric, col_widths
import pytest
@pytest.mark.parametrize(("number", "answer"), [
("12", True),
("12.3", True),
("0", True),
("1.1.0", False),
("Hell0", False),
("Hello", False)
])
def test_isnumeric(number, answer):
assert isnumeric(number) == answer
@pytest.mark.parametrize(("headers", "rows", "answer"), [
(["a", "bc"], [["a", "b"], ["a", "b"]], [1, 2]),
(["ab", "c"], [["a", "b"], ["a", "b"]], [2, 1]),
(["a", "b"], [["a", "bc"], ["a", "b"]], [1, 2]),
(["a", "b"], [["ab", "c"], ["a", "b"]], [2, 1]),
(["a", "b"], [["", ""], ["a", "a"]], [1, 1])
])
def test_col_widths(headers, rows, answer):
assert col_widths(headers, rows) == answer
|
<commit_before><commit_msg>Add tests for functions in table.py :construction:<commit_after>from cycli.table import isnumeric, col_widths
import pytest
@pytest.mark.parametrize(("number", "answer"), [
("12", True),
("12.3", True),
("0", True),
("1.1.0", False),
("Hell0", False),
("Hello", False)
])
def test_isnumeric(number, answer):
assert isnumeric(number) == answer
@pytest.mark.parametrize(("headers", "rows", "answer"), [
(["a", "bc"], [["a", "b"], ["a", "b"]], [1, 2]),
(["ab", "c"], [["a", "b"], ["a", "b"]], [2, 1]),
(["a", "b"], [["a", "bc"], ["a", "b"]], [1, 2]),
(["a", "b"], [["ab", "c"], ["a", "b"]], [2, 1]),
(["a", "b"], [["", ""], ["a", "a"]], [1, 1])
])
def test_col_widths(headers, rows, answer):
assert col_widths(headers, rows) == answer
|
|
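The parametrized cases pin down both contracts precisely; implementations consistent with every case above fit in a few lines (a sketch, not necessarily cycli's actual code):
def isnumeric(value):
    # int() alone would reject "12.3"; float() accepts both test forms.
    try:
        float(value)
        return True
    except ValueError:
        return False

def col_widths(headers, rows):
    # Widest cell per column, counting the header row as well.
    return [max(len(cell) for cell in column)
            for column in zip(headers, *rows)]

assert isnumeric("12.3") and not isnumeric("1.1.0")
assert col_widths(["a", "bc"], [["a", "b"], ["a", "b"]]) == [1, 2]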
bf621217d9ccb605ec16afb20852485b1708ce79
|
tests/test_utils.py
|
tests/test_utils.py
|
from bugsnag.utils import sanitize_object
def test_sanitize_object():
filters = ["password", "credit_card"]
crazy_dict = {
"password": "123456",
"metadata": {
"another_password": "123456",
"regular": "text"
},
"bad_utf8": "a test of \xe9 char"
}
# Sanitize our object
sane_dict = sanitize_object(crazy_dict, filters=filters)
# Check the values have been sanitized
assert(sane_dict["password"] == "[FILTERED]")
assert(sane_dict["metadata"]["another_password"] == "[FILTERED]")
assert(sane_dict["metadata"]["regular"] == "text")
|
Add a couple of basic tests for sanitize_object
|
Add a couple of basic tests for sanitize_object
|
Python
|
mit
|
overplumbum/bugsnag-python,overplumbum/bugsnag-python,bugsnag/bugsnag-python,bugsnag/bugsnag-python
|
Add a couple of basic tests for sanitize_object
|
from bugsnag.utils import sanitize_object
def test_sanitize_object():
filters = ["password", "credit_card"]
crazy_dict = {
"password": "123456",
"metadata": {
"another_password": "123456",
"regular": "text"
},
"bad_utf8": "a test of \xe9 char"
}
# Sanitize our object
sane_dict = sanitize_object(crazy_dict, filters=filters)
# Check the values have been sanitized
assert(sane_dict["password"] == "[FILTERED]")
assert(sane_dict["metadata"]["another_password"] == "[FILTERED]")
assert(sane_dict["metadata"]["regular"] == "text")
|
<commit_before><commit_msg>Add a couple of basic tests for sanitize_object<commit_after>
|
from bugsnag.utils import sanitize_object
def test_sanitize_object():
filters = ["password", "credit_card"]
crazy_dict = {
"password": "123456",
"metadata": {
"another_password": "123456",
"regular": "text"
},
"bad_utf8": "a test of \xe9 char"
}
# Sanitize our object
sane_dict = sanitize_object(crazy_dict, filters=filters)
# Check the values have been sanitized
assert(sane_dict["password"] == "[FILTERED]")
assert(sane_dict["metadata"]["another_password"] == "[FILTERED]")
assert(sane_dict["metadata"]["regular"] == "text")
|
Add a couple of basic tests for sanitize_objectfrom bugsnag.utils import sanitize_object
def test_sanitize_object():
filters = ["password", "credit_card"]
crazy_dict = {
"password": "123456",
"metadata": {
"another_password": "123456",
"regular": "text"
},
"bad_utf8": "a test of \xe9 char"
}
# Sanitize our object
sane_dict = sanitize_object(crazy_dict, filters=filters)
# Check the values have been sanitized
assert(sane_dict["password"] == "[FILTERED]")
assert(sane_dict["metadata"]["another_password"] == "[FILTERED]")
assert(sane_dict["metadata"]["regular"] == "text")
|
<commit_before><commit_msg>Add a couple of basic tests for sanitize_object<commit_after>from bugsnag.utils import sanitize_object
def test_sanitize_object():
filters = ["password", "credit_card"]
crazy_dict = {
"password": "123456",
"metadata": {
"another_password": "123456",
"regular": "text"
},
"bad_utf8": "a test of \xe9 char"
}
# Sanitize our object
sane_dict = sanitize_object(crazy_dict, filters=filters)
# Check the values have been sanitized
assert(sane_dict["password"] == "[FILTERED]")
assert(sane_dict["metadata"]["another_password"] == "[FILTERED]")
assert(sane_dict["metadata"]["regular"] == "text")
|
|
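The tests in the record above exercise sanitize_object without showing its implementation. A minimal recursive sanitizer consistent with those assertions could look like the sketch below; the function name and the "[FILTERED]" placeholder come from the tests, while the body is an assumption rather than Bugsnag's actual code.

def sanitize_object(obj, filters=()):
    # Recursively copy dicts, masking any key whose name contains a filtered word.
    if isinstance(obj, dict):
        clean = {}
        for key, value in obj.items():
            if any(f in str(key) for f in filters):
                clean[key] = "[FILTERED]"  # hide the value entirely
            else:
                clean[key] = sanitize_object(value, filters=filters)
        return clean
    return obj

This matches the test's expectation that "another_password" is filtered too, since the key merely has to contain a filtered word.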
fef896afa8a763fdd93bc52e6d47a69f7dda5f9b
|
tests/test_format.py
|
tests/test_format.py
|
from unittest.mock import MagicMock, patch
import pytest
from hypothesis_auto import auto_pytest_magic
import isort.format
auto_pytest_magic(isort.format.show_unified_diff)
def test_ask_whether_to_apply_changes_to_file():
with patch("isort.format.input", MagicMock(return_value="y")):
assert isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="n")):
assert not isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="q")):
with pytest.raises(SystemExit):
assert isort.format.ask_whether_to_apply_changes_to_file("")
|
Add test cases for format module
|
Add test cases for format module
|
Python
|
mit
|
PyCQA/isort,PyCQA/isort
|
Add test cases for format module
|
from unittest.mock import MagicMock, patch
import pytest
from hypothesis_auto import auto_pytest_magic
import isort.format
auto_pytest_magic(isort.format.show_unified_diff)
def test_ask_whether_to_apply_changes_to_file():
with patch("isort.format.input", MagicMock(return_value="y")):
assert isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="n")):
assert not isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="q")):
with pytest.raises(SystemExit):
assert isort.format.ask_whether_to_apply_changes_to_file("")
|
<commit_before><commit_msg>Add test cases for format module<commit_after>
|
from unittest.mock import MagicMock, patch
import pytest
from hypothesis_auto import auto_pytest_magic
import isort.format
auto_pytest_magic(isort.format.show_unified_diff)
def test_ask_whether_to_apply_changes_to_file():
with patch("isort.format.input", MagicMock(return_value="y")):
assert isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="n")):
assert not isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="q")):
with pytest.raises(SystemExit):
assert isort.format.ask_whether_to_apply_changes_to_file("")
|
Add test cases for format modulefrom unittest.mock import MagicMock, patch
import pytest
from hypothesis_auto import auto_pytest_magic
import isort.format
auto_pytest_magic(isort.format.show_unified_diff)
def test_ask_whether_to_apply_changes_to_file():
with patch("isort.format.input", MagicMock(return_value="y")):
assert isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="n")):
assert not isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="q")):
with pytest.raises(SystemExit):
assert isort.format.ask_whether_to_apply_changes_to_file("")
|
<commit_before><commit_msg>Add test cases for format module<commit_after>from unittest.mock import MagicMock, patch
import pytest
from hypothesis_auto import auto_pytest_magic
import isort.format
auto_pytest_magic(isort.format.show_unified_diff)
def test_ask_whether_to_apply_changes_to_file():
with patch("isort.format.input", MagicMock(return_value="y")):
assert isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="n")):
assert not isort.format.ask_whether_to_apply_changes_to_file("")
with patch("isort.format.input", MagicMock(return_value="q")):
with pytest.raises(SystemExit):
assert isort.format.ask_whether_to_apply_changes_to_file("")
|
|
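The test above patches isort.format.input and expects "y" to return a truthy value, "n" a falsy one, and "q" to exit the process. A sketch of a prompt function satisfying exactly those three cases (an illustrative assumption, not isort's real implementation):

import sys

def ask_whether_to_apply_changes_to_file(file_path):
    # Prompt once; "q" aborts the whole run, anything but "y" skips the file.
    answer = input("Apply suggested changes to '{0}'? (y/n/q) ".format(file_path)).lower()
    if answer == "q":
        sys.exit(1)  # raises SystemExit, as the test expects
    return answer == "y"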
4f793a31f238adce95fd8e2830a7d5b85724bdd2
|
elections/st_paul_municipal_2015/settings.py
|
elections/st_paul_municipal_2015/settings.py
|
# -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'Burkina Open Data Initiative (BODI)'
COPYRIGHT_HOLDER = 'Burkina Open Data Initiative (BODI)'
|
# -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'DataMade'
COPYRIGHT_HOLDER = 'DataMade'
|
Make the SITE_OWNER and COPYRIGHT_HOLDER for St Paul (more) correct
|
Make the SITE_OWNER and COPYRIGHT_HOLDER for St Paul (more) correct
|
Python
|
agpl-3.0
|
mysociety/yournextrepresentative,DemocracyClub/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,datamade/yournextmp-popit
|
# -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'Burkina Open Data Initiative (BODI)'
COPYRIGHT_HOLDER = 'Burkina Open Data Initiative (BODI)'
Make the SITE_OWNER and COPYRIGHT_HOLDER for St Paul (more) correct
|
# -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'DataMade'
COPYRIGHT_HOLDER = 'DataMade'
|
<commit_before># -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'Burkina Open Data Initiative (BODI)'
COPYRIGHT_HOLDER = 'Burkina Open Data Initiative (BODI)'
<commit_msg>Make the SITE_OWNER and COPYRIGHT_HOLDER for St Paul (more) correct<commit_after>
|
# -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'DataMade'
COPYRIGHT_HOLDER = 'DataMade'
|
# -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'Burkina Open Data Initiative (BODI)'
COPYRIGHT_HOLDER = 'Burkina Open Data Initiative (BODI)'
Make the SITE_OWNER and COPYRIGHT_HOLDER for St Paul (more) correct# -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'DataMade'
COPYRIGHT_HOLDER = 'DataMade'
|
<commit_before># -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'Burkina Open Data Initiative (BODI)'
COPYRIGHT_HOLDER = 'Burkina Open Data Initiative (BODI)'
<commit_msg>Make the SITE_OWNER and COPYRIGHT_HOLDER for St Paul (more) correct<commit_after># -*- coding: utf-8 -*-
from datetime import date
MAPIT_BASE_URL = 'http://international.mapit.mysociety.org/'
ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
# OCD_BOUNDARIES_URL = 'http://127.0.0.1:8001'
OCD_BOUNDARIES_URL = 'http://ocd.datamade.us'
SITE_OWNER = 'DataMade'
COPYRIGHT_HOLDER = 'DataMade'
|
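ELECTION_RE above is a URL-pattern fragment whose named group captures the election slug; a standalone demonstration of how such a pattern matches (the URL path here is a made-up example):

import re

ELECTION_RE = '(?P<election>council-member-2015|school-board-2015)'
m = re.search(ELECTION_RE, '/election/school-board-2015/post/ward-1')
print(m.group('election'))  # school-board-2015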
c84bd98971aa5839cb6aa999889a92071890f579
|
test/requests/test_login_local.py
|
test/requests/test_login_local.py
|
import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
Add test for local login
|
Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.
|
Python
|
agpl-3.0
|
DannyArends/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2,zsloan/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2
|
Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.
|
import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.<commit_after>
|
import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add test for local login
* Add an integration test to test that the login process for users
registered locally to genenetwork2 works as expected.<commit_after>import requests
from wqflask import user_manager
from parametrized_test import ParametrizedTest
class TestLoginLocal(ParametrizedTest):
def setUp(self):
super(TestLoginLocal, self).setUp()
self.login_url = self.gn2_url +"/n/login"
data = {
"es_connection": self.es,
"email_address": "test@user.com",
"full_name": "Test User",
"organization": "Test Organisation",
"password": "test_password",
"password_confirm": "test_password"
}
user_manager.basic_info = lambda : { "basic_info": "basic" }
user_manager.RegisterUser(data)
def testLoginNonRegisteredUser(self):
data = {
"email_address": "non@existent.email",
"password": "doesitmatter?"
}
result = requests.post(self.login_url, data=data)
self.assertEqual(result.url, self.login_url, "")
def testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse(self):
data = {
"email_address": "test@user.com",
"password": "test_password"
}
result = requests.post(self.login_url, data=data)
print("THE COOKIES? ", result.cookies)
self.assertEqual(
result.url
, self.gn2_url+"/?import_collections=false"
, "Login should have been successful")
def main(gn2, es):
import unittest
suite = unittest.TestSuite()
suite.addTest(TestLoginLocal(methodName="testLoginNonRegisteredUser", gn2_url=gn2, es_url=es))
suite.addTest(TestLoginLocal(methodName="testLoginWithRegisteredUserBothRememberMeAndImportCollectionsFalse", gn2_url=gn2, es_url=es))
runner = unittest.TextTestRunner()
runner.run(suite)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
raise Exception("Required arguments missing")
else:
main(sys.argv[1], sys.argv[2])
|
|
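TestLoginLocal above relies on a ParametrizedTest base class that is not shown; from the way the suite is built, it must accept methodName, gn2_url and es_url and expose self.es. A plausible minimal base (the constructor signature is inferred from the tests, the rest is an assumption):

import unittest

class ParametrizedTest(unittest.TestCase):
    def __init__(self, methodName="runTest", gn2_url="", es_url=""):
        super(ParametrizedTest, self).__init__(methodName=methodName)
        self.gn2_url = gn2_url
        self.es_url = es_url

    def setUp(self):
        # The real helper presumably opens an Elasticsearch connection
        # at es_url here and stores it as self.es.
        self.es = None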
52a483fd26fee44e387067f7efa270ad1c8df571
|
toolset/mogreltk.py
|
toolset/mogreltk.py
|
""" This module offers a number tools for processing modern Greek text. """
def normalize(text):
""" Remove intonation from Greek text.
Args:
text (str): The text that will be normalized.
Returns:
text (str): The original text in lowercase without intonation.
"""
vowel_dict = {
u'ά': u'α',
u'έ': u'ε',
u'ή': u'η',
u'ί': u'ι',
u'ό': u'ο',
u'ύ': u'υ',
u'ώ': u'ω',
u'ϊ': u'ι',
u'ϋ': u'υ'
}
text = text.lower()
for key, value in vowel_dict.items():
text = text.replace(key, value)
return text
|
Add modern Greek language toolkit module.
|
Add modern Greek language toolkit module.
|
Python
|
mit
|
theovasi/browsewiki,theovasi/browsewiki,theovasi/browsewiki
|
Add modern Greek language toolkit module.
|
""" This module offers a number tools for processing modern Greek text. """
def normalize(text):
""" Remove intonation from Greek text.
Args:
text (str): The text that will be normalized.
Returns:
text (str): The original text in lowercase without intonation.
"""
vowel_dict = {
u'ά': u'α',
u'έ': u'ε',
u'ή': u'η',
u'ί': u'ι',
u'ό': u'ο',
u'ύ': u'υ',
u'ώ': u'ω',
u'ϊ': u'ι',
u'ϋ': u'υ'
}
text = text.lower()
for key, value in vowel_dict.items():
text = text.replace(key, value)
return text
|
<commit_before><commit_msg>Add modern Greek language toolkit module.<commit_after>
|
""" This module offers a number tools for processing modern Greek text. """
def normalize(text):
""" Remove intonation from Greek text.
Args:
text (str): The text that will be normalized.
Returns:
text (str): The original text in lowercase without intonation.
"""
vowel_dict = {
u'ά': u'α',
u'έ': u'ε',
u'ή': u'η',
u'ί': u'ι',
u'ό': u'ο',
u'ύ': u'υ',
u'ώ': u'ω',
u'ϊ': u'ι',
u'ϋ': u'υ'
}
text = text.lower()
for key, value in vowel_dict.items():
text = text.replace(key, value)
return text
|
Add modern Greek language toolkit module.""" This module offers a number of tools for processing modern Greek text. """
def normalize(text):
""" Remove intonation from Greek text.
Args:
text (str): The text that will be normalized.
Returns:
text (str): The original text in lowercase without intonation.
"""
vowel_dict = {
u'ά': u'α',
u'έ': u'ε',
u'ή': u'η',
u'ί': u'ι',
u'ό': u'ο',
u'ύ': u'υ',
u'ώ': u'ω',
u'ϊ': u'ι',
u'ϋ': u'υ'
}
text = text.lower()
for key, value in vowel_dict.items():
text = text.replace(key, value)
return text
|
<commit_before><commit_msg>Add modern Greek language toolkit module.<commit_after>""" This module offers a number of tools for processing modern Greek text. """
def normalize(text):
""" Remove intonation from Greek text.
Args:
text (str): The text that will be normalized.
Returns:
text (str): The original text in lowercase without intonation.
"""
vowel_dict = {
u'ά': u'α',
u'έ': u'ε',
u'ή': u'η',
u'ί': u'ι',
u'ό': u'ο',
u'ύ': u'υ',
u'ώ': u'ω',
u'ϊ': u'ι',
u'ϋ': u'υ'
}
text = text.lower()
for key, value in vowel_dict.items():
text = text.replace(key, value)
return text
|
|
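A quick usage check of normalize, assuming the module is importable as toolset.mogreltk per the file path above: accented, mixed-case input comes back lowercase and unaccented.

from toolset.mogreltk import normalize

assert normalize(u'Καλημέρα') == u'καλημερα'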
cc283447dc2c7d70387a8d11f1cc7fdc06bf9dbe
|
chapter02/evenNumbersBetweenTwoNumbers.py
|
chapter02/evenNumbersBetweenTwoNumbers.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
first = input("Write a number: ")
second = input("Write another number: ")
for x in range(first, second):
if (x % 2 == 0):
print x
|
Add even numbers between two numbers exercise
|
Add even numbers between two numbers exercise
|
Python
|
apache-2.0
|
MindCookin/python-exercises
|
Add even numbers between two numbers exercise
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
first = input("Write a number: ")
second = input("Write another number: ")
for x in range(first, second):
if (x % 2 == 0):
print x
|
<commit_before><commit_msg>Add even numbers between two numbers exercise<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
first = input("Write a number: ")
second = input("Write another number: ")
for x in range(first, second):
if (x % 2 == 0):
print x
|
Add even numbers between two numbers exercise#!/usr/bin/env python
# -*- coding: utf-8 -*-
first = input("Write a number: ")
second = input("Write another number: ")
for x in range(first, second):
if (x % 2 == 0):
print x
|
<commit_before><commit_msg>Add even numbers between two numbers exercise<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
first = input("Write a number: ")
second = input("Write another number: ")
for x in range(first, second):
if (x % 2 == 0):
print x
|
|
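The exercise above is Python 2 (input() returns an evaluated int and print is a statement). Under Python 3 the same logic needs an explicit int() conversion and print as a function; a sketch:

first = int(input("Write a number: "))
second = int(input("Write another number: "))
for x in range(first, second):
    if x % 2 == 0:
        print(x)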
3a64972a6220764582b31a6ad3f354e9b1259264
|
numpy/core/tests/test_scalarinherit.py
|
numpy/core/tests/test_scalarinherit.py
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, run_module_suite
class A(object): pass
class B(A, np.float64): pass
class C(B): pass
class D(C, B): pass
class B0(np.float64, A): pass
class C0(B0): pass
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
assert str(x) == '1.0'
y = C(2.0)
assert str(y) == '2.0'
z = D(3.0)
assert str(z) == '3.0'
def test_init2(self):
x = B0(1.0)
assert str(x) == '1.0'
y = C0(2.0)
assert str(y) == '2.0'
if __name__ == "__main__":
run_module_suite()
|
Add test-case for multiple-inheritance on an array-scalar.
|
Add test-case for multiple-inheritance on an array-scalar.
|
Python
|
bsd-3-clause
|
leifdenby/numpy,gmcastil/numpy,BabeNovelty/numpy,kiwifb/numpy,groutr/numpy,tynn/numpy,njase/numpy,mhvk/numpy,Dapid/numpy,rgommers/numpy,sinhrks/numpy,WarrenWeckesser/numpy,rudimeier/numpy,larsmans/numpy,mhvk/numpy,GrimDerp/numpy,gfyoung/numpy,SiccarPoint/numpy,AustereCuriosity/numpy,Eric89GXL/numpy,WarrenWeckesser/numpy,ChanderG/numpy,mhvk/numpy,KaelChen/numpy,AustereCuriosity/numpy,jschueller/numpy,grlee77/numpy,rajathkumarmp/numpy,larsmans/numpy,rmcgibbo/numpy,brandon-rhodes/numpy,jschueller/numpy,grlee77/numpy,CMartelLML/numpy,naritta/numpy,madphysicist/numpy,empeeu/numpy,abalkin/numpy,MSeifert04/numpy,SunghanKim/numpy,andsor/numpy,naritta/numpy,hainm/numpy,MSeifert04/numpy,ajdawson/numpy,sinhrks/numpy,shoyer/numpy,ssanderson/numpy,immerrr/numpy,trankmichael/numpy,Dapid/numpy,mingwpy/numpy,BMJHayward/numpy,brandon-rhodes/numpy,pizzathief/numpy,ekalosak/numpy,skymanaditya1/numpy,jakirkham/numpy,anntzer/numpy,brandon-rhodes/numpy,hainm/numpy,GrimDerp/numpy,joferkington/numpy,bertrand-l/numpy,rhythmsosad/numpy,BabeNovelty/numpy,drasmuss/numpy,cjermain/numpy,mattip/numpy,chiffa/numpy,naritta/numpy,WillieMaddox/numpy,gfyoung/numpy,anntzer/numpy,tacaswell/numpy,Linkid/numpy,dato-code/numpy,shoyer/numpy,utke1/numpy,argriffing/numpy,stuarteberg/numpy,andsor/numpy,rgommers/numpy,yiakwy/numpy,tynn/numpy,chiffa/numpy,sigma-random/numpy,immerrr/numpy,jankoslavic/numpy,Dapid/numpy,GaZ3ll3/numpy,rudimeier/numpy,jorisvandenbossche/numpy,b-carter/numpy,SiccarPoint/numpy,cowlicks/numpy,ahaldane/numpy,jschueller/numpy,seberg/numpy,CMartelLML/numpy,SiccarPoint/numpy,mortada/numpy,drasmuss/numpy,mindw/numpy,moreati/numpy,sonnyhu/numpy,musically-ut/numpy,CMartelLML/numpy,dwillmer/numpy,gmcastil/numpy,mattip/numpy,solarjoe/numpy,musically-ut/numpy,Anwesh43/numpy,WillieMaddox/numpy,bmorris3/numpy,mindw/numpy,dch312/numpy,behzadnouri/numpy,jorisvandenbossche/numpy,cjermain/numpy,chatcannon/numpy,rgommers/numpy,nguyentu1602/numpy,has2k1/numpy,githubmlai/numpy,skymanaditya1/numpy,solarjoe/numpy,dimasad/numpy,njase/numpy,pyparallel/numpy,Anwesh43/numpy,pdebuyl/numpy,GaZ3ll3/numpy,charris/numpy,jorisvandenbossche/numpy,behzadnouri/numpy,musically-ut/numpy,ahaldane/numpy,nguyentu1602/numpy,dimasad/numpy,mattip/numpy,trankmichael/numpy,githubmlai/numpy,tdsmith/numpy,charris/numpy,skymanaditya1/numpy,ekalosak/numpy,mhvk/numpy,maniteja123/numpy,mingwpy/numpy,maniteja123/numpy,felipebetancur/numpy,Linkid/numpy,MichaelAquilina/numpy,charris/numpy,ahaldane/numpy,rajathkumarmp/numpy,pdebuyl/numpy,moreati/numpy,Anwesh43/numpy,rudimeier/numpy,felipebetancur/numpy,jakirkham/numpy,simongibbons/numpy,abalkin/numpy,b-carter/numpy,jankoslavic/numpy,MSeifert04/numpy,pyparallel/numpy,immerrr/numpy,ContinuumIO/numpy,BabeNovelty/numpy,groutr/numpy,joferkington/numpy,gmcastil/numpy,tynn/numpy,jakirkham/numpy,Srisai85/numpy,hainm/numpy,ChanderG/numpy,mwiebe/numpy,AustereCuriosity/numpy,Eric89GXL/numpy,BMJHayward/numpy,bmorris3/numpy,endolith/numpy,pbrod/numpy,leifdenby/numpy,ajdawson/numpy,rherault-insa/numpy,BabeNovelty/numpy,ssanderson/numpy,dch312/numpy,rhythmsosad/numpy,njase/numpy,immerrr/numpy,MSeifert04/numpy,mathdd/numpy,kirillzhuravlev/numpy,charris/numpy,simongibbons/numpy,BMJHayward/numpy,dch312/numpy,ddasilva/numpy,pdebuyl/numpy,jorisvandenbossche/numpy,Yusa95/numpy,KaelChen/numpy,mortada/numpy,madphysicist/numpy,bringingheavendown/numpy,githubmlai/numpy,has2k1/numpy,ahaldane/numpy,MaPePeR/numpy,endolith/numpy,tacaswell/numpy,jonathanunderwood/numpy,bertrand-l/numpy,jonathanunderwood/numpy,embray/numpy,embray/numpy,ssanders
on/numpy,numpy/numpy,ekalosak/numpy,KaelChen/numpy,yiakwy/numpy,ESSS/numpy,sinhrks/numpy,larsmans/numpy,rhythmsosad/numpy,brandon-rhodes/numpy,rudimeier/numpy,moreati/numpy,NextThought/pypy-numpy,sinhrks/numpy,mattip/numpy,maniteja123/numpy,hainm/numpy,kiwifb/numpy,stuarteberg/numpy,pyparallel/numpy,dimasad/numpy,pizzathief/numpy,dwillmer/numpy,joferkington/numpy,naritta/numpy,ContinuumIO/numpy,pbrod/numpy,argriffing/numpy,argriffing/numpy,madphysicist/numpy,anntzer/numpy,andsor/numpy,ajdawson/numpy,simongibbons/numpy,seberg/numpy,solarjoe/numpy,bmorris3/numpy,simongibbons/numpy,pbrod/numpy,mathdd/numpy,ChristopherHogan/numpy,sigma-random/numpy,KaelChen/numpy,ekalosak/numpy,felipebetancur/numpy,tacaswell/numpy,has2k1/numpy,rmcgibbo/numpy,ViralLeadership/numpy,empeeu/numpy,rgommers/numpy,kirillzhuravlev/numpy,nguyentu1602/numpy,MaPePeR/numpy,seberg/numpy,ESSS/numpy,CMartelLML/numpy,Eric89GXL/numpy,Anwesh43/numpy,jschueller/numpy,leifdenby/numpy,chatcannon/numpy,kirillzhuravlev/numpy,numpy/numpy,dato-code/numpy,MichaelAquilina/numpy,anntzer/numpy,tdsmith/numpy,Srisai85/numpy,ajdawson/numpy,Yusa95/numpy,GrimDerp/numpy,pbrod/numpy,empeeu/numpy,skymanaditya1/numpy,madphysicist/numpy,nbeaver/numpy,dwillmer/numpy,b-carter/numpy,rajathkumarmp/numpy,rhythmsosad/numpy,ChristopherHogan/numpy,Linkid/numpy,WarrenWeckesser/numpy,embray/numpy,kirillzhuravlev/numpy,mwiebe/numpy,simongibbons/numpy,bertrand-l/numpy,NextThought/pypy-numpy,grlee77/numpy,MichaelAquilina/numpy,numpy/numpy,Linkid/numpy,sonnyhu/numpy,mwiebe/numpy,shoyer/numpy,pdebuyl/numpy,embray/numpy,pizzathief/numpy,bringingheavendown/numpy,rherault-insa/numpy,jonathanunderwood/numpy,ChristopherHogan/numpy,mhvk/numpy,Yusa95/numpy,ChristopherHogan/numpy,GaZ3ll3/numpy,kiwifb/numpy,sigma-random/numpy,pizzathief/numpy,utke1/numpy,rajathkumarmp/numpy,andsor/numpy,sonnyhu/numpy,jakirkham/numpy,has2k1/numpy,MichaelAquilina/numpy,githubmlai/numpy,stuarteberg/numpy,MaPePeR/numpy,musically-ut/numpy,nguyentu1602/numpy,trankmichael/numpy,dimasad/numpy,cowlicks/numpy,mortada/numpy,Yusa95/numpy,SunghanKim/numpy,rmcgibbo/numpy,nbeaver/numpy,bringingheavendown/numpy,jankoslavic/numpy,stuarteberg/numpy,mathdd/numpy,dato-code/numpy,skwbc/numpy,shoyer/numpy,ViralLeadership/numpy,cowlicks/numpy,WillieMaddox/numpy,WarrenWeckesser/numpy,mingwpy/numpy,sonnyhu/numpy,Eric89GXL/numpy,sigma-random/numpy,Srisai85/numpy,Srisai85/numpy,cjermain/numpy,mortada/numpy,jankoslavic/numpy,SunghanKim/numpy,empeeu/numpy,skwbc/numpy,ChanderG/numpy,bmorris3/numpy,jorisvandenbossche/numpy,ContinuumIO/numpy,pbrod/numpy,ahaldane/numpy,grlee77/numpy,cowlicks/numpy,ddasilva/numpy,yiakwy/numpy,skwbc/numpy,dato-code/numpy,mindw/numpy,shoyer/numpy,ddasilva/numpy,joferkington/numpy,SiccarPoint/numpy,tdsmith/numpy,NextThought/pypy-numpy,drasmuss/numpy,NextThought/pypy-numpy,mingwpy/numpy,trankmichael/numpy,cjermain/numpy,pizzathief/numpy,embray/numpy,rherault-insa/numpy,ESSS/numpy,SunghanKim/numpy,ChanderG/numpy,gfyoung/numpy,madphysicist/numpy,groutr/numpy,rmcgibbo/numpy,larsmans/numpy,numpy/numpy,mathdd/numpy,dwillmer/numpy,chiffa/numpy,ViralLeadership/numpy,MaPePeR/numpy,GaZ3ll3/numpy,endolith/numpy,abalkin/numpy,MSeifert04/numpy,tdsmith/numpy,jakirkham/numpy,seberg/numpy,chatcannon/numpy,yiakwy/numpy,GrimDerp/numpy,BMJHayward/numpy,mindw/numpy,WarrenWeckesser/numpy,behzadnouri/numpy,grlee77/numpy,nbeaver/numpy,felipebetancur/numpy,dch312/numpy,endolith/numpy,utke1/numpy
|
Add test-case for multiple-inheritance on an array-scalar.
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, run_module_suite
class A(object): pass
class B(A, np.float64): pass
class C(B): pass
class D(C, B): pass
class B0(np.float64, A): pass
class C0(B0): pass
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
assert str(x) == '1.0'
y = C(2.0)
assert str(y) == '2.0'
z = D(3.0)
assert str(z) == '3.0'
def test_init2(self):
x = B0(1.0)
assert str(x) == '1.0'
y = C0(2.0)
assert str(y) == '2.0'
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add test-case for multiple-inheritance on an array-scalar.<commit_after>
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, run_module_suite
class A(object): pass
class B(A, np.float64): pass
class C(B): pass
class D(C, B): pass
class B0(np.float64, A): pass
class C0(B0): pass
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
assert str(x) == '1.0'
y = C(2.0)
assert str(y) == '2.0'
z = D(3.0)
assert str(z) == '3.0'
def test_init2(self):
x = B0(1.0)
assert str(x) == '1.0'
y = C0(2.0)
assert str(y) == '2.0'
if __name__ == "__main__":
run_module_suite()
|
Add test-case for multiple-inheritance on an array-scalar.# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, run_module_suite
class A(object): pass
class B(A, np.float64): pass
class C(B): pass
class D(C, B): pass
class B0(np.float64, A): pass
class C0(B0): pass
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
assert str(x) == '1.0'
y = C(2.0)
assert str(y) == '2.0'
z = D(3.0)
assert str(z) == '3.0'
def test_init2(self):
x = B0(1.0)
assert str(x) == '1.0'
y = C0(2.0)
assert str(y) == '2.0'
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add test-case for multiple-inheritance on an array-scalar.<commit_after># -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, run_module_suite
class A(object): pass
class B(A, np.float64): pass
class C(B): pass
class D(C, B): pass
class B0(np.float64, A): pass
class C0(B0): pass
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
assert str(x) == '1.0'
y = C(2.0)
assert str(y) == '2.0'
z = D(3.0)
assert str(z) == '3.0'
def test_init2(self):
x = B0(1.0)
assert str(x) == '1.0'
y = C0(2.0)
assert str(y) == '2.0'
if __name__ == "__main__":
run_module_suite()
|
|
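The two class trees in the record differ only in whether np.float64 comes before or after the plain mixin A, which flips the method resolution order; a standalone way to see the difference (illustration only, not part of the test file):

import numpy as np

class A(object): pass
class B(A, np.float64): pass      # mixin first
class B0(np.float64, A): pass     # scalar type first

print([c.__name__ for c in B.__mro__][:3])   # ['B', 'A', 'float64']
print([c.__name__ for c in B0.__mro__][:3])  # ['B0', 'float64', 'floating']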
821911ae6e46bf8e180c9f582b5112dc1942777f
|
test/unit/test_id_iterators.py
|
test/unit/test_id_iterators.py
|
from unittest import TestCase, main
import re
from uuid import UUID
from jsonrpcclient.id_iterators import hex_iterator, uuid_iterator, \
random_iterator
class TestHexIterator(TestCase):
def test(self):
i = hex_iterator()
self.assertEqual('1', next(i))
i = hex_iterator(9)
self.assertEqual('9', next(i))
self.assertEqual('a', next(i))
class TestUUIDIterator(TestCase):
def test(self):
i = uuid_iterator()
# Raise ValueError if badly formed hexadecimal UUID string
UUID(next(i), version=4)
class TestRandomIterator(TestCase):
def test(self):
i = random_iterator()
self.assertTrue(re.match('^[0-9,a-z]{8}$', next(i)))
|
Move iterator tests into separate file
|
Move iterator tests into separate file
|
Python
|
mit
|
bcb/jsonrpcclient
|
Move iterator tests into separate file
|
from unittest import TestCase, main
import re
from uuid import UUID
from jsonrpcclient.id_iterators import hex_iterator, uuid_iterator, \
random_iterator
class TestHexIterator(TestCase):
def test(self):
i = hex_iterator()
self.assertEqual('1', next(i))
i = hex_iterator(9)
self.assertEqual('9', next(i))
self.assertEqual('a', next(i))
class TestUUIDIterator(TestCase):
def test(self):
i = uuid_iterator()
# Raise ValueError if badly formed hexadecimal UUID string
UUID(next(i), version=4)
class TestRandomIterator(TestCase):
def test(self):
i = random_iterator()
self.assertTrue(re.match('^[0-9,a-z]{8}$', next(i)))
|
<commit_before><commit_msg>Move iterator tests into separate file<commit_after>
|
from unittest import TestCase, main
import re
from uuid import UUID
from jsonrpcclient.id_iterators import hex_iterator, uuid_iterator, \
random_iterator
class TestHexIterator(TestCase):
def test(self):
i = hex_iterator()
self.assertEqual('1', next(i))
i = hex_iterator(9)
self.assertEqual('9', next(i))
self.assertEqual('a', next(i))
class TestUUIDIterator(TestCase):
def test(self):
i = uuid_iterator()
# Raise ValueError if badly formed hexadecimal UUID string
UUID(next(i), version=4)
class TestRandomIterator(TestCase):
def test(self):
i = random_iterator()
self.assertTrue(re.match('^[0-9,a-z]{8}$', next(i)))
|
Move iterator tests into separate filefrom unittest import TestCase, main
import re
from uuid import UUID
from jsonrpcclient.id_iterators import hex_iterator, uuid_iterator, \
random_iterator
class TestHexIterator(TestCase):
def test(self):
i = hex_iterator()
self.assertEqual('1', next(i))
i = hex_iterator(9)
self.assertEqual('9', next(i))
self.assertEqual('a', next(i))
class TestUUIDIterator(TestCase):
def test(self):
i = uuid_iterator()
# Raise ValueError if badly formed hexadecimal UUID string
UUID(next(i), version=4)
class TestRandomIterator(TestCase):
def test(self):
i = random_iterator()
self.assertTrue(re.match('^[0-9,a-z]{8}$', next(i)))
|
<commit_before><commit_msg>Move iterator tests into separate file<commit_after>from unittest import TestCase, main
import re
from uuid import UUID
from jsonrpcclient.id_iterators import hex_iterator, uuid_iterator, \
random_iterator
class TestHexIterator(TestCase):
def test(self):
i = hex_iterator()
self.assertEqual('1', next(i))
i = hex_iterator(9)
self.assertEqual('9', next(i))
self.assertEqual('a', next(i))
class TestUUIDIterator(TestCase):
def test(self):
i = uuid_iterator()
# Raise ValueError if badly formed hexadecimal UUID string
UUID(next(i), version=4)
class TestRandomIterator(TestCase):
def test(self):
i = random_iterator()
self.assertTrue(re.match('^[0-9,a-z]{8}$', next(i)))
|
|
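Of the three iterators under test, only the observable behaviour is shown: hex_iterator() starts at '1' and counts upward in lowercase hex, so hex_iterator(9) yields '9' then 'a'. A generator consistent with that (an assumption, not necessarily jsonrpcclient's code):

def hex_iterator(start=1):
    # Yield an endless sequence of request ids as lowercase hex strings.
    count = start
    while True:
        yield '%x' % count
        count += 1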
74e08aa31375c0d368d9cb4719e8c3766ea20b78
|
repomgmt/management/commands/add-key-to-user.py
|
repomgmt/management/commands/add-key-to-user.py
|
#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.core.management.base import BaseCommand
from repomgmt.models import UploaderKey
from django.contrib.auth.models import User
class Command(BaseCommand):
args = '<username> <key id>'
help = 'Associates key with user'
def handle(self, username, key_id, **options):
user = User.objects.get(username=username)
key = UploaderKey(uploader=user, key_id=key_id)
key.save()
|
Fix User object attribute reference
|
Fix User object attribute reference
|
Python
|
apache-2.0
|
sorenh/python-django-repomgmt,sorenh/python-django-repomgmt
|
Fix User object attribute reference
|
#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.core.management.base import BaseCommand
from repomgmt.models import UploaderKey
from django.contrib.auth.models import User
class Command(BaseCommand):
args = '<username> <key id>'
help = 'Associates key with user'
def handle(self, username, key_id, **options):
user = User.objects.get(username=username)
key = UploaderKey(uploader=user, key_id=key_id)
key.save()
|
<commit_before><commit_msg>Fix User object attribute reference<commit_after>
|
#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.core.management.base import BaseCommand
from repomgmt.models import UploaderKey
from django.contrib.auth.models import User
class Command(BaseCommand):
args = '<username> <key id>'
help = 'Associates key with user'
def handle(self, username, key_id, **options):
user = User.objects.get(username=username)
key = UploaderKey(uploader=user, key_id=key_id)
key.save()
|
Fix User object attribute reference#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.core.management.base import BaseCommand
from repomgmt.models import UploaderKey
from django.contrib.auth.models import User
class Command(BaseCommand):
args = '<username> <key id>'
help = 'Associates key with user'
def handle(self, username, key_id, **options):
user = User.objects.get(username=username)
key = UploaderKey(uploader=user, key_id=key_id)
key.save()
|
<commit_before><commit_msg>Fix User object attribute reference<commit_after>#
# Copyright 2012 Cisco Systems, Inc.
#
# Author: Soren Hansen <sorhanse@cisco.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.core.management.base import BaseCommand
from repomgmt.models import UploaderKey
from django.contrib.auth.models import User
class Command(BaseCommand):
args = '<username> <key id>'
help = 'Associates key with user'
def handle(self, username, key_id, **options):
user = User.objects.get(username=username)
key = UploaderKey(uploader=user, key_id=key_id)
key.save()
|
|
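Given the args declared above, the command is invoked through manage.py with a username and a key id (the values here are hypothetical):

python manage.py add-key-to-user alice 0123ABCD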
7eaef25771840ceb9974814a1e68916017016082
|
CodeFights/swapAdjacentBits.py
|
CodeFights/swapAdjacentBits.py
|
#!/usr/local/bin/python
# Code Fights Swap Adjacent Bits (Core) Problem
def swapAdjacentBits(n):
# even = n & 0xaaaaaaaa # mask to keep only even bits with 10 16x
# odd = n & 0x55555555 # mask to keep only odd bits with 01 16x
# even = even >> 1 # shift even bits right by one
# odd = odd << 1 # shift odd bits left by one
# return even | odd # combine
return ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)
def main():
tests = [
[13, 14],
[74, 133],
[1073741823, 1073741823],
[0, 0],
[1, 2],
[83748, 166680]
]
for t in tests:
res = swapAdjacentBits(t[0])
# print("Input: {}\nBits: {}".format(t[0], bin(t[0])[2:]))
# print("Answer: {}\nBits: {}".format(t[1], bin(t[1])[2:]))
# print("Output: {}\nBits: {}".format(res, bin(res)[2:]))
if t[1] == res:
print("PASSED: swapAdjacentBits({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: swapAdjacentBits({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights swap adjacent bits problem
|
Solve Code Fights swap adjacent bits problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights swap adjacent bits problem
|
#!/usr/local/bin/python
# Code Fights Swap Adjacent Bits (Core) Problem
def swapAdjacentBits(n):
# even = n & 0xaaaaaaaa # mask to keep only even bits with 10 16x
# odd = n & 0x55555555 # mask to keep only odd bits with 01 16x
# even = even >> 1 # shift even bits right by one
# odd = odd << 1 # shift odd bits left by one
# return even | odd # combine
return ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)
def main():
tests = [
[13, 14],
[74, 133],
[1073741823, 1073741823],
[0, 0],
[1, 2],
[83748, 166680]
]
for t in tests:
res = swapAdjacentBits(t[0])
# print("Input: {}\nBits: {}".format(t[0], bin(t[0])[2:]))
# print("Answer: {}\nBits: {}".format(t[1], bin(t[1])[2:]))
# print("Output: {}\nBits: {}".format(res, bin(res)[2:]))
if t[1] == res:
print("PASSED: swapAdjacentBits({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: swapAdjacentBits({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights swap adjacent bits problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Swap Adjacent Bits (Core) Problem
def swapAdjacentBits(n):
# even = n & 0xaaaaaaaa # mask to keep only even bits with 10 16x
# odd = n & 0x55555555 # mask to keep only odd bits with 01 16x
# even = even >> 1 # shift even bits right by one
# odd = odd << 1 # shift odd bits left by one
# return even | odd # combine
return ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)
def main():
tests = [
[13, 14],
[74, 133],
[1073741823, 1073741823],
[0, 0],
[1, 2],
[83748, 166680]
]
for t in tests:
res = swapAdjacentBits(t[0])
# print("Input: {}\nBits: {}".format(t[0], bin(t[0])[2:]))
# print("Answer: {}\nBits: {}".format(t[1], bin(t[1])[2:]))
# print("Output: {}\nBits: {}".format(res, bin(res)[2:]))
if t[1] == res:
print("PASSED: swapAdjacentBits({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: swapAdjacentBits({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights swap adjacent bits problem#!/usr/local/bin/python
# Code Fights Swap Adjacent Bits (Core) Problem
def swapAdjacentBits(n):
# even = n & 0xaaaaaaaa # mask to keep only even bits with 10 16x
# odd = n & 0x55555555 # mask to keep only odd bits with 01 16x
# even = even >> 1 # shift even bits right by one
# odd = odd << 1 # shift odd bits left by one
# return even | odd # combine
return ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)
def main():
tests = [
[13, 14],
[74, 133],
[1073741823, 1073741823],
[0, 0],
[1, 2],
[83748, 166680]
]
for t in tests:
res = swapAdjacentBits(t[0])
# print("Input: {}\nBits: {}".format(t[0], bin(t[0])[2:]))
# print("Answer: {}\nBits: {}".format(t[1], bin(t[1])[2:]))
# print("Output: {}\nBits: {}".format(res, bin(res)[2:]))
if t[1] == res:
print("PASSED: swapAdjacentBits({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: swapAdjacentBits({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights swap adjacent bits problem<commit_after>#!/usr/local/bin/python
# Code Fights Swap Adjacent Bits (Core) Problem
def swapAdjacentBits(n):
# even = n & 0xaaaaaaaa # mask to keep only even bits with 10 16x
# odd = n & 0x55555555 # mask to keep only odd bits with 01 16x
# even = even >> 1 # shift even bits right by one
# odd = odd << 1 # shift odd bits left by one
# return even | odd # combine
return ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)
def main():
tests = [
[13, 14],
[74, 133],
[1073741823, 1073741823],
[0, 0],
[1, 2],
[83748, 166680]
]
for t in tests:
res = swapAdjacentBits(t[0])
# print("Input: {}\nBits: {}".format(t[0], bin(t[0])[2:]))
# print("Answer: {}\nBits: {}".format(t[1], bin(t[1])[2:]))
# print("Output: {}\nBits: {}".format(res, bin(res)[2:]))
if t[1] == res:
print("PASSED: swapAdjacentBits({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: swapAdjacentBits({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
|
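The masks in the record are easiest to follow on a concrete value; a standalone walk-through of the first test case, n = 13 (0b1101):

n = 13                                # 0b1101
even = n & 0xaaaaaaaa                 # 0b1000, the even-position bits
odd = n & 0x55555555                  # 0b0101, the odd-position bits
print(bin((even >> 1) | (odd << 1)))  # 0b1110, i.e. 14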
d324d9dd8d738c59cc27e4b429107a0218310d6c
|
bitket/wsgi.py
|
bitket/wsgi.py
|
"""
WSGI config for Bitket project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
from django.core.wsgi import get_wsgi_application
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
|
Revert "Removed unneeded WSGI file"
|
Revert "Removed unneeded WSGI file"
This reverts commit 078f9ecdba5eedd957fa88af8bc45ed1ad8a3e6e.
|
Python
|
mit
|
ovidner/bitket,ovidner/bitket,ovidner/bitket,ovidner/bitket
|
Revert "Removed unneeded WSGI file"
This reverts commit 078f9ecdba5eedd957fa88af8bc45ed1ad8a3e6e.
|
"""
WSGI config for Bitket project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
from django.core.wsgi import get_wsgi_application
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
|
<commit_before><commit_msg>Revert "Removed unneeded WSGI file"
This reverts commit 078f9ecdba5eedd957fa88af8bc45ed1ad8a3e6e.<commit_after>
|
"""
WSGI config for Bitket project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
from django.core.wsgi import get_wsgi_application
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
|
Revert "Removed unneeded WSGI file"
This reverts commit 078f9ecdba5eedd957fa88af8bc45ed1ad8a3e6e."""
WSGI config for Bitket project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
from django.core.wsgi import get_wsgi_application
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
|
<commit_before><commit_msg>Revert "Removed unneeded WSGI file"
This reverts commit 078f9ecdba5eedd957fa88af8bc45ed1ad8a3e6e.<commit_after>"""
WSGI config for Bitket project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
from django.core.wsgi import get_wsgi_application
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
|
|
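Any WSGI server can serve the module-level application object above; for example, with gunicorn (a hypothetical deployment command, not part of the record):

gunicorn bitket.wsgi:application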
e36e1fb1358123d70d1d580fdd9c00a0a6712f59
|
bcbio/project-summary-to-csv.py
|
bcbio/project-summary-to-csv.py
|
from __future__ import print_function
from argparse import ArgumentParser
import yaml
if __name__ == "__main__":
parser = ArgumentParser(description="Convert metrics from a bcbio run to CSV.")
parser.add_argument("yamlfile",
help="project-summary.yaml from a bcbio project")
args = parser.parse_args()
with open(args.yamlfile) as in_handle:
dat = yaml.load(in_handle)
summaries = [x["summary"] for x in dat["samples"]]
metrics = [x["metrics"] for x in summaries]
samples = [x["description"] for x in dat["samples"]]
header = ["sample"] + metrics[0].keys()
print(",".join(header))
for i, sample in enumerate(samples):
print(",".join(map(str, [sample] + metrics[i].values())))
|
Add bcbio project summary metrics -> CSV converter.
|
Add bcbio project summary metrics -> CSV converter.
|
Python
|
mit
|
roryk/junkdrawer,roryk/junkdrawer
|
Add bcbio project summary metrics -> CSV converter.
|
from __future__ import print_function
from argparse import ArgumentParser
import yaml
if __name__ == "__main__":
parser = ArgumentParser(description="Convert metrics from a bcbio run to CSV.")
parser.add_argument("yamlfile",
help="project-summary.yaml from a bcbio project")
args = parser.parse_args()
with open(args.yamlfile) as in_handle:
dat = yaml.load(in_handle)
summaries = [x["summary"] for x in dat["samples"]]
metrics = [x["metrics"] for x in summaries]
samples = [x["description"] for x in dat["samples"]]
header = ["sample"] + metrics[0].keys()
print(",".join(header))
for i, sample in enumerate(samples):
print(",".join(map(str, [sample] + metrics[i].values())))
|
<commit_before><commit_msg>Add bcbio project summary metrics -> CSV converter.<commit_after>
|
from __future__ import print_function
from argparse import ArgumentParser
import yaml
if __name__ == "__main__":
parser = ArgumentParser(description="Convert metrics from a bcbio run to CSV.")
parser.add_argument("yamlfile",
help="project-summary.yaml from a bcbio project")
args = parser.parse_args()
with open(args.yamlfile) as in_handle:
dat = yaml.load(in_handle)
summaries = [x["summary"] for x in dat["samples"]]
metrics = [x["metrics"] for x in summaries]
samples = [x["description"] for x in dat["samples"]]
header = ["sample"] + metrics[0].keys()
print(",".join(header))
for i, sample in enumerate(samples):
print(",".join(map(str, [sample] + metrics[i].values())))
|
Add bcbio project summary metrics -> CSV converter.from __future__ import print_function
from argparse import ArgumentParser
import yaml
if __name__ == "__main__":
parser = ArgumentParser(description="Convert metrics from a bcbio run to CSV.")
parser.add_argument("yamlfile",
help="project-summary.yaml from a bcbio project")
args = parser.parse_args()
with open(args.yamlfile) as in_handle:
dat = yaml.load(in_handle)
summaries = [x["summary"] for x in dat["samples"]]
metrics = [x["metrics"] for x in summaries]
samples = [x["description"] for x in dat["samples"]]
header = ["sample"] + metrics[0].keys()
print(",".join(header))
for i, sample in enumerate(samples):
print(",".join(map(str, [sample] + metrics[i].values())))
|
<commit_before><commit_msg>Add bcbio project summary metrics -> CSV converter.<commit_after>from __future__ import print_function
from argparse import ArgumentParser
import yaml
if __name__ == "__main__":
parser = ArgumentParser(description="Convert metrics from a bcbio run to CSV.")
parser.add_argument("yamlfile",
help="project-summary.yaml from a bcbio project")
args = parser.parse_args()
with open(args.yamlfile) as in_handle:
dat = yaml.load(in_handle)
summaries = [x["summary"] for x in dat["samples"]]
metrics = [x["metrics"] for x in summaries]
samples = [x["description"] for x in dat["samples"]]
header = ["sample"] + metrics[0].keys()
print(",".join(header))
for i, sample in enumerate(samples):
print(",".join(map(str, [sample] + metrics[i].values())))
|
|
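For reference, the converter above is Python 2 code: ["sample"] + metrics[0].keys() only works while dict.keys() returns a list, and the row values lean on dictionary ordering. A minimal Python 3 sketch of the same idea, assuming the same project-summary.yaml layout as the script (field names are copied from it, not verified against bcbio):

# Hypothetical Python 3 port of the converter above.
from argparse import ArgumentParser
import csv
import sys
import yaml

if __name__ == "__main__":
    parser = ArgumentParser(description="Convert metrics from a bcbio run to CSV.")
    parser.add_argument("yamlfile", help="project-summary.yaml from a bcbio project")
    args = parser.parse_args()
    with open(args.yamlfile) as in_handle:
        dat = yaml.safe_load(in_handle)      # safe_load avoids arbitrary object construction
    metrics = [x["summary"]["metrics"] for x in dat["samples"]]
    samples = [x["description"] for x in dat["samples"]]
    writer = csv.writer(sys.stdout)
    keys = list(metrics[0].keys())           # dict views cannot be concatenated in Python 3
    writer.writerow(["sample"] + keys)
    for sample, m in zip(samples, metrics):
        # index by a fixed key order so columns stay aligned across samples
        writer.writerow([sample] + [m.get(k, "") for k in keys])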
b1ed5c94d3ba4f47202c4f4e23ff0d2837894c85
|
vumi_http_retry/tests/utils.py
|
vumi_http_retry/tests/utils.py
|
from twisted.web.server import Site
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from klein import Klein
class ToyServer(object):
@inlineCallbacks
def setup(self):
self.app = Klein()
self.server = yield reactor.listenTCP(0, Site(self.app.resource()))
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
def teardown(self):
self.server.loseConnection()
@classmethod
@inlineCallbacks
def from_test(cls, test):
server = cls()
yield server.setup()
test.addCleanup(server.teardown)
returnValue(server)
|
Add a toy server to use in tests
|
Add a toy server to use in tests
|
Python
|
bsd-3-clause
|
praekelt/vumi-http-retry-api,praekelt/vumi-http-retry-api
|
Add a toy server to use in tests
|
from twisted.web.server import Site
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from klein import Klein
class ToyServer(object):
@inlineCallbacks
def setup(self):
self.app = Klein()
self.server = yield reactor.listenTCP(0, Site(self.app.resource()))
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
def teardown(self):
self.server.loseConnection()
@classmethod
@inlineCallbacks
def from_test(cls, test):
server = cls()
yield server.setup()
test.addCleanup(server.teardown)
returnValue(server)
|
<commit_before><commit_msg>Add a toy server to use in tests<commit_after>
|
from twisted.web.server import Site
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from klein import Klein
class ToyServer(object):
@inlineCallbacks
def setup(self):
self.app = Klein()
self.server = yield reactor.listenTCP(0, Site(self.app.resource()))
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
def teardown(self):
self.server.loseConnection()
@classmethod
@inlineCallbacks
def from_test(cls, test):
server = cls()
yield server.setup()
test.addCleanup(server.teardown)
returnValue(server)
|
Add a toy server to use in testsfrom twisted.web.server import Site
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from klein import Klein
class ToyServer(object):
@inlineCallbacks
def setup(self):
self.app = Klein()
self.server = yield reactor.listenTCP(0, Site(self.app.resource()))
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
def teardown(self):
self.server.loseConnection()
@classmethod
@inlineCallbacks
def from_test(cls, test):
server = cls()
yield server.setup()
test.addCleanup(server.teardown)
returnValue(server)
|
<commit_before><commit_msg>Add a toy server to use in tests<commit_after>from twisted.web.server import Site
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from klein import Klein
class ToyServer(object):
@inlineCallbacks
def setup(self):
self.app = Klein()
self.server = yield reactor.listenTCP(0, Site(self.app.resource()))
addr = self.server.getHost()
self.url = "http://%s:%s" % (addr.host, addr.port)
def teardown(self):
self.server.loseConnection()
@classmethod
@inlineCallbacks
def from_test(cls, test):
server = cls()
yield server.setup()
test.addCleanup(server.teardown)
returnValue(server)
|
|
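For context, the helper above is designed to be driven from a trial TestCase via from_test, which wires teardown through addCleanup. A minimal sketch of a consuming test (the /ping route, the treq dependency, and the assertion are illustrative, not part of the original module):

# Illustrative trial test using ToyServer; the route and HTTP client are assumptions.
import treq
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase

from vumi_http_retry.tests.utils import ToyServer

class PingTest(TestCase):
    @inlineCallbacks
    def test_ping(self):
        server = yield ToyServer.from_test(self)  # teardown registered via addCleanup

        @server.app.route('/ping')
        def ping(request):
            return 'pong'

        resp = yield treq.get("%s/ping" % (server.url,))
        body = yield resp.content()
        self.assertEqual(body, b'pong')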
403c6ac844685bc3d3a16207d71d01384260e0e0
|
tests/unit/test_dr.py
|
tests/unit/test_dr.py
|
from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.dr
# XA
def test_xa_parse_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_parse_bad_reserved():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInvalidISO) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x01')
assert(str(excinfo.value) == 'Unused fields should be 0')
def test_xa_new_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.new()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.new()
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_record_not_initialized():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.record()
assert(str(excinfo.value) == 'This XARecord is not initialized')
|
Add in unit tests for DirectoryRecord.
|
Add in unit tests for DirectoryRecord.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com>
|
Python
|
lgpl-2.1
|
clalancette/pycdlib,clalancette/pyiso
|
Add in unit tests for DirectoryRecord.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com>
|
from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.dr
# XA
def test_xa_parse_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_parse_bad_reserved():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInvalidISO) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x01')
assert(str(excinfo.value) == 'Unused fields should be 0')
def test_xa_new_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.new()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.new()
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_record_not_initialized():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.record()
assert(str(excinfo.value) == 'This XARecord is not initialized')
|
<commit_before><commit_msg>Add in unit tests for DirectoryRecord.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com><commit_after>
|
from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.dr
# XA
def test_xa_parse_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_parse_bad_reserved():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInvalidISO) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x01')
assert(str(excinfo.value) == 'Unused fields should be 0')
def test_xa_new_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.new()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.new()
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_record_not_initialized():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.record()
assert(str(excinfo.value) == 'This XARecord is not initialized')
|
Add in unit tests for DirectoryRecord.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com>from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.dr
# XA
def test_xa_parse_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_parse_bad_reserved():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInvalidISO) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x01')
assert(str(excinfo.value) == 'Unused fields should be 0')
def test_xa_new_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.new()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.new()
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_record_not_initialized():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.record()
assert(str(excinfo.value) == 'This XARecord is not initialized')
|
<commit_before><commit_msg>Add in unit tests for DirectoryRecord.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com><commit_after>from __future__ import absolute_import
import pytest
import os
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
prefix = '.'
for i in range(0, 3):
if os.path.isdir(os.path.join(prefix, 'pycdlib')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pycdlib.dr
# XA
def test_xa_parse_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x00')
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_parse_bad_reserved():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInvalidISO) as excinfo:
xa.parse(b'\x00\x00\x00\x00\x00\x00\x58\x41\x00\x00\x00\x00\x00\x01')
assert(str(excinfo.value) == 'Unused fields should be 0')
def test_xa_new_initialized_twice():
xa = pycdlib.dr.XARecord()
xa.new()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.new()
assert(str(excinfo.value) == 'This XARecord is already initialized')
def test_xa_record_not_initialized():
xa = pycdlib.dr.XARecord()
with pytest.raises(pycdlib.pycdlibexception.PyCdlibInternalError) as excinfo:
xa.record()
assert(str(excinfo.value) == 'This XARecord is not initialized')
|
|
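One subtlety in the tests above: they check for pycdlib.pycdlibexception.* while only importing pycdlib.dr, which works because importing the submodule binds the pycdlib package and dr itself pulls in pycdlibexception as a side effect (BytesIO and struct are imported but unused here). A sketch of the more explicit form, so the lookups do not depend on that side effect:

# Explicit imports; behaviour is otherwise identical to the test module above.
import pycdlib.dr
import pycdlib.pycdlibexception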
cd2d14a1b405709c7cc06108e499fcae711e9115
|
src/nyc_trees/apps/survey/migrations/0013_auto_20150309_1712.py
|
src/nyc_trees/apps/survey/migrations/0013_auto_20150309_1712.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('survey', '0012_merge'),
]
operations = [
migrations.AlterField(
model_name='tree',
name='curb_location',
field=models.CharField(max_length=25, choices=[('OnCurb', 'Along the curb'), ('OffsetFromCurb', 'Offset from the curb')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='guards',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Not installed'), ('Helpful', 'Helpful'), ('Harmful', 'Harmful'), ('Unsure', 'Unsure')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='sidewalk_damage',
field=models.CharField(blank=True, max_length=15, choices=[('NoDamage', 'No damage'), ('Damage', 'Cracks or raised')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='status',
field=models.CharField(max_length=15, choices=[('Alive', 'Tree is alive'), ('Dead', 'Tree is dead'), ('Stump', 'Stump < 24"')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='stewardship',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Zero'), ('1or2', '1-2'), ('3or4', '3-4'), ('4orMore', '4+')]),
preserve_default=True,
),
]
|
Add migrations for changed choice labels
|
Add migrations for changed choice labels
|
Python
|
agpl-3.0
|
kdeloach/nyc-trees,RickMohr/nyc-trees,azavea/nyc-trees,RickMohr/nyc-trees,RickMohr/nyc-trees,kdeloach/nyc-trees,kdeloach/nyc-trees,kdeloach/nyc-trees,azavea/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees,RickMohr/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees,azavea/nyc-trees,kdeloach/nyc-trees,maurizi/nyc-trees,maurizi/nyc-trees
|
Add migrations for changed choice labels
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('survey', '0012_merge'),
]
operations = [
migrations.AlterField(
model_name='tree',
name='curb_location',
field=models.CharField(max_length=25, choices=[('OnCurb', 'Along the curb'), ('OffsetFromCurb', 'Offset from the curb')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='guards',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Not installed'), ('Helpful', 'Helpful'), ('Harmful', 'Harmful'), ('Unsure', 'Unsure')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='sidewalk_damage',
field=models.CharField(blank=True, max_length=15, choices=[('NoDamage', 'No damage'), ('Damage', 'Cracks or raised')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='status',
field=models.CharField(max_length=15, choices=[('Alive', 'Tree is alive'), ('Dead', 'Tree is dead'), ('Stump', 'Stump < 24"')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='stewardship',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Zero'), ('1or2', '1-2'), ('3or4', '3-4'), ('4orMore', '4+')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migrations for changed choice labels<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('survey', '0012_merge'),
]
operations = [
migrations.AlterField(
model_name='tree',
name='curb_location',
field=models.CharField(max_length=25, choices=[('OnCurb', 'Along the curb'), ('OffsetFromCurb', 'Offset from the curb')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='guards',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Not installed'), ('Helpful', 'Helpful'), ('Harmful', 'Harmful'), ('Unsure', 'Unsure')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='sidewalk_damage',
field=models.CharField(blank=True, max_length=15, choices=[('NoDamage', 'No damage'), ('Damage', 'Cracks or raised')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='status',
field=models.CharField(max_length=15, choices=[('Alive', 'Tree is alive'), ('Dead', 'Tree is dead'), ('Stump', 'Stump < 24"')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='stewardship',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Zero'), ('1or2', '1-2'), ('3or4', '3-4'), ('4orMore', '4+')]),
preserve_default=True,
),
]
|
Add migrations for changed choice labels# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('survey', '0012_merge'),
]
operations = [
migrations.AlterField(
model_name='tree',
name='curb_location',
field=models.CharField(max_length=25, choices=[('OnCurb', 'Along the curb'), ('OffsetFromCurb', 'Offset from the curb')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='guards',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Not installed'), ('Helpful', 'Helpful'), ('Harmful', 'Harmful'), ('Unsure', 'Unsure')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='sidewalk_damage',
field=models.CharField(blank=True, max_length=15, choices=[('NoDamage', 'No damage'), ('Damage', 'Cracks or raised')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='status',
field=models.CharField(max_length=15, choices=[('Alive', 'Tree is alive'), ('Dead', 'Tree is dead'), ('Stump', 'Stump < 24"')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='stewardship',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Zero'), ('1or2', '1-2'), ('3or4', '3-4'), ('4orMore', '4+')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migrations for changed choice labels<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('survey', '0012_merge'),
]
operations = [
migrations.AlterField(
model_name='tree',
name='curb_location',
field=models.CharField(max_length=25, choices=[('OnCurb', 'Along the curb'), ('OffsetFromCurb', 'Offset from the curb')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='guards',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Not installed'), ('Helpful', 'Helpful'), ('Harmful', 'Harmful'), ('Unsure', 'Unsure')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='sidewalk_damage',
field=models.CharField(blank=True, max_length=15, choices=[('NoDamage', 'No damage'), ('Damage', 'Cracks or raised')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='status',
field=models.CharField(max_length=15, choices=[('Alive', 'Tree is alive'), ('Dead', 'Tree is dead'), ('Stump', 'Stump < 24"')]),
preserve_default=True,
),
migrations.AlterField(
model_name='tree',
name='stewardship',
field=models.CharField(blank=True, max_length=15, choices=[('None', 'Zero'), ('1or2', '1-2'), ('3or4', '3-4'), ('4orMore', '4+')]),
preserve_default=True,
),
]
|
|
ee0745bee8a73b5ee579d7e34f2975295b72e51b
|
py/find-all-anagrams-in-a-string.py
|
py/find-all-anagrams-in-a-string.py
|
class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
ls, lp = len(s), len(p)
P = 100000007
inv = dict()
for o in xrange(ord('a'), ord('z') + 1):
inv[o] = pow(o, P - 2, P)
if ls < lp:
return []
pxored = 0
psubsum = 0
pproduct = 1
for c in p:
pxored ^= ord(c)
psubsum += ord(c)
pproduct = (pproduct * ord(c)) % P
ans = []
sxored = 0
ssubsum = 0
sproduct = 1
for i, c in enumerate(s, -(lp - 1)):
sxored ^= ord(c)
ssubsum += ord(c)
sproduct = (sproduct * ord(c)) % P
if i >= 0:
if (pxored, psubsum, pproduct) == (sxored, ssubsum, sproduct):
ans.append(i)
ord_head = ord(s[i])
sxored ^= ord_head
ssubsum -= ord_head
sproduct = (sproduct * inv[ord_head]) % P
return ans
|
Add py solution for 438. Find All Anagrams in a String
|
Add py solution for 438. Find All Anagrams in a String
438. Find All Anagrams in a String: https://leetcode.com/problems/find-all-anagrams-in-a-string/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 438. Find All Anagrams in a String
438. Find All Anagrams in a String: https://leetcode.com/problems/find-all-anagrams-in-a-string/
|
class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
ls, lp = len(s), len(p)
P = 100000007
inv = dict()
for o in xrange(ord('a'), ord('z') + 1):
inv[o] = pow(o, P - 2, P)
if ls < lp:
return []
pxored = 0
psubsum = 0
pproduct = 1
for c in p:
pxored ^= ord(c)
psubsum += ord(c)
pproduct = (pproduct * ord(c)) % P
ans = []
sxored = 0
ssubsum = 0
sproduct = 1
for i, c in enumerate(s, -(lp - 1)):
sxored ^= ord(c)
ssubsum += ord(c)
sproduct = (sproduct * ord(c)) % P
if i >= 0:
if (pxored, psubsum, pproduct) == (sxored, ssubsum, sproduct):
ans.append(i)
ord_head = ord(s[i])
sxored ^= ord_head
ssubsum -= ord_head
sproduct = (sproduct * inv[ord_head]) % P
return ans
|
<commit_before><commit_msg>Add py solution for 438. Find All Anagrams in a String
438. Find All Anagrams in a String: https://leetcode.com/problems/find-all-anagrams-in-a-string/<commit_after>
|
class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
ls, lp = len(s), len(p)
P = 100000007
inv = dict()
for o in xrange(ord('a'), ord('z') + 1):
inv[o] = pow(o, P - 2, P)
if ls < lp:
return []
pxored = 0
psubsum = 0
pproduct = 1
for c in p:
pxored ^= ord(c)
psubsum += ord(c)
pproduct = (pproduct * ord(c)) % P
ans = []
sxored = 0
ssubsum = 0
sproduct = 1
for i, c in enumerate(s, -(lp - 1)):
sxored ^= ord(c)
ssubsum += ord(c)
sproduct = (sproduct * ord(c)) % P
if i >= 0:
if (pxored, psubsum, pproduct) == (sxored, ssubsum, sproduct):
ans.append(i)
ord_head = ord(s[i])
sxored ^= ord_head
ssubsum -= ord_head
sproduct = (sproduct * inv[ord_head]) % P
return ans
|
Add py solution for 438. Find All Anagrams in a String
438. Find All Anagrams in a String: https://leetcode.com/problems/find-all-anagrams-in-a-string/class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
ls, lp = len(s), len(p)
P = 100000007
inv = dict()
for o in xrange(ord('a'), ord('z') + 1):
inv[o] = pow(o, P - 2, P)
if ls < lp:
return []
pxored = 0
psubsum = 0
pproduct = 1
for c in p:
pxored ^= ord(c)
psubsum += ord(c)
pproduct = (pproduct * ord(c)) % P
ans = []
sxored = 0
ssubsum = 0
sproduct = 1
for i, c in enumerate(s, -(lp - 1)):
sxored ^= ord(c)
ssubsum += ord(c)
sproduct = (sproduct * ord(c)) % P
if i >= 0:
if (pxored, psubsum, pproduct) == (sxored, ssubsum, sproduct):
ans.append(i)
ord_head = ord(s[i])
sxored ^= ord_head
ssubsum -= ord_head
sproduct = (sproduct * inv[ord_head]) % P
return ans
|
<commit_before><commit_msg>Add py solution for 438. Find All Anagrams in a String
438. Find All Anagrams in a String: https://leetcode.com/problems/find-all-anagrams-in-a-string/<commit_after>class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
ls, lp = len(s), len(p)
P = 100000007
inv = dict()
for o in xrange(ord('a'), ord('z') + 1):
inv[o] = pow(o, P - 2, P)
if ls < lp:
return []
pxored = 0
psubsum = 0
pproduct = 1
for c in p:
pxored ^= ord(c)
psubsum += ord(c)
pproduct = (pproduct * ord(c)) % P
ans = []
sxored = 0
ssubsum = 0
sproduct = 1
for i, c in enumerate(s, -(lp - 1)):
sxored ^= ord(c)
ssubsum += ord(c)
sproduct = (sproduct * ord(c)) % P
if i >= 0:
if (pxored, psubsum, pproduct) == (sxored, ssubsum, sproduct):
ans.append(i)
ord_head = ord(s[i])
sxored ^= ord_head
ssubsum -= ord_head
sproduct = (sproduct * inv[ord_head]) % P
return ans
|
|
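Worth noting about the solution above: it fingerprints each window with three aggregates of the character codes (xor, sum, and product modulo a prime), so two distinct letter multisets could in principle collide. A deterministic sliding-window variant, sketched for comparison (the function name is mine, not from the submission):

# Deterministic variant: compare letter counts of the current window to p's counts.
from collections import Counter

def find_anagrams(s, p):
    lp = len(p)
    if len(s) < lp:
        return []
    need = Counter(p)
    window = Counter(s[:lp])
    ans = [0] if window == need else []
    for i in range(lp, len(s)):
        window[s[i]] += 1            # slide the right edge in
        window[s[i - lp]] -= 1       # slide the left edge out
        if window[s[i - lp]] == 0:
            del window[s[i - lp]]    # drop zero entries so Counter equality works
        if window == need:
            ans.append(i - lp + 1)
    return ans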
c2116148b8e1d7cfbf6c260c878d199bc6d39ac5
|
blaze/tests/test_disk_dimension.py
|
blaze/tests/test_disk_dimension.py
|
import os.path
import shutil
import numpy as np
import blaze as blz
def test_perserve():
shape = (3,4)
arr = np.ones(shape)
dshape = "%s,%s, float64" % (shape[0], shape[1])
path = "p.blz"
if os.path.exists(path):
shutil.rmtree(path)
bparams = blz.params(storage=path)
barray = blz.Array(arr, dshape, params=bparams)
print "barray:", repr(barray)
barray2 = blz.open(path)
print "barray2:", repr(barray2)
assert(str(barray.datashape) == str(barray2.datashape))
|
Test dimension preservation for persistence.
|
Test dimension preservation for persistence.
|
Python
|
bsd-2-clause
|
seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core
|
Test dimension preservation for persistence.
|
import os.path
import shutil
import numpy as np
import blaze as blz
def test_perserve():
shape = (3,4)
arr = np.ones(shape)
dshape = "%s,%s, float64" % (shape[0], shape[1])
path = "p.blz"
if os.path.exists(path):
shutil.rmtree(path)
bparams = blz.params(storage=path)
barray = blz.Array(arr, dshape, params=bparams)
print "barray:", repr(barray)
barray2 = blz.open(path)
print "barray2:", repr(barray2)
assert(str(barray.datashape) == str(barray2.datashape))
|
<commit_before><commit_msg>Test dimension preservation for persistence.<commit_after>
|
import os.path
import shutil
import numpy as np
import blaze as blz
def test_perserve():
shape = (3,4)
arr = np.ones(shape)
dshape = "%s,%s, float64" % (shape[0], shape[1])
path = "p.blz"
if os.path.exists(path):
shutil.rmtree(path)
bparams = blz.params(storage=path)
barray = blz.Array(arr, dshape, params=bparams)
print "barray:", repr(barray)
barray2 = blz.open(path)
print "barray2:", repr(barray2)
assert(str(barray.datashape) == str(barray2.datashape))
|
Test dimension preservation for persistence.import os.path
import shutil
import numpy as np
import blaze as blz
def test_perserve():
shape = (3,4)
arr = np.ones(shape)
dshape = "%s,%s, float64" % (shape[0], shape[1])
path = "p.blz"
if os.path.exists(path):
shutil.rmtree(path)
bparams = blz.params(storage=path)
barray = blz.Array(arr, dshape, params=bparams)
print "barray:", repr(barray)
barray2 = blz.open(path)
print "barray2:", repr(barray2)
assert(str(barray.datashape) == str(barray2.datashape))
|
<commit_before><commit_msg>Test dimension preservation for persistence.<commit_after>import os.path
import shutil
import numpy as np
import blaze as blz
def test_perserve():
shape = (3,4)
arr = np.ones(shape)
dshape = "%s,%s, float64" % (shape[0], shape[1])
path = "p.blz"
if os.path.exists(path):
shutil.rmtree(path)
bparams = blz.params(storage=path)
barray = blz.Array(arr, dshape, params=bparams)
print "barray:", repr(barray)
barray2 = blz.open(path)
print "barray2:", repr(barray2)
assert(str(barray.datashape) == str(barray2.datashape))
|
|
96223421604171d2c98deab03de2256c20ad1c38
|
paas_manager/hadoop_modules.py
|
paas_manager/hadoop_modules.py
|
import threading, subprocess
hostname = "star@192.168.122.10"
def start_hadoop(path, args, callback):
command = ["ssh", hostname, "hadoop", "jar", path]
command.extend(args)
t = threading.Thread(target=exec_hadoop,args=(command, callback))
t.setDaemon(True)
t.start()
return t
def exec_hadoop(command, callback):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
callback(bytes.decode(out), bytes.decode(err))
|
Add module for hadoop execution
|
Add module for hadoop execution
|
Python
|
mit
|
pbl-cloud/paas-manager,pbl-cloud/paas-manager,pbl-cloud/paas-manager
|
Add module for hadoop execution
|
import threading, subprocess
hostname = "star@192.168.122.10"
def start_hadoop(path, args, callback):
command = ["ssh", hostname, "hadoop", "jar", path]
command.extend(args)
t = threading.Thread(target=exec_hadoop,args=(command, callback))
t.setDaemon(True)
t.start()
return t
def exec_hadoop(command, callback):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
callback(bytes.decode(out), bytes.decode(err))
|
<commit_before><commit_msg>Add module for hadoop execution<commit_after>
|
import threading, subprocess
hostname = "star@192.168.122.10"
def start_hadoop(path, args, callback):
command = ["ssh", hostname, "hadoop", "jar", path]
command.extend(args)
t = threading.Thread(target=exec_hadoop,args=(command, callback))
t.setDaemon(True)
t.start()
return t
def exec_hadoop(command, callback):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
callback(bytes.decode(out), bytes.decode(err))
|
Add module for hadoop executionimport threading, subprocess
hostname = "star@192.168.122.10"
def start_hadoop(path, args, callback):
command = ["ssh", hostname, "hadoop", "jar", path]
command.extend(args)
t = threading.Thread(target=exec_hadoop,args=(command, callback))
t.setDaemon(True)
t.start()
return t
def exec_hadoop(command, callback):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
callback(bytes.decode(out), bytes.decode(err))
|
<commit_before><commit_msg>Add module for hadoop execution<commit_after>import threading, subprocess
hostname = "star@192.168.122.10"
def start_hadoop(path, args, callback):
command = ["ssh", hostname, "hadoop", "jar", path]
command.extend(args)
t = threading.Thread(target=exec_hadoop,args=(command, callback))
t.setDaemon(True)
t.start()
return t
def exec_hadoop(command, callback):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
callback(bytes.decode(out), bytes.decode(err))
|
|
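A minimal sketch of how the module above might be driven; the jar path and arguments are made-up examples, and the hardcoded hostname comes from the module itself:

# Illustrative caller for start_hadoop; join() waits for the daemon worker thread.
def on_done(out, err):
    print("stdout: %s" % out)
    print("stderr: %s" % err)

t = start_hadoop("wordcount.jar", ["input/", "output/"], on_done)
t.join()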
d628837000db5cc50ff114ab376a0dc0f112ab44
|
locale-to-csv.py
|
locale-to-csv.py
|
#!/usr/bin/env python
import yaml
import yaml.constructor
try:
# included in standard lib from Python 2.7
from collections import OrderedDict
except ImportError:
# try importing the backported drop-in replacement
# it's available on PyPI
from ordereddict import OrderedDict
class OrderedDictYAMLLoader(yaml.Loader):
"""
A YAML loader that loads mappings into ordered dictionaries.
"""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
f = open('locales/app.yaml')
d = yaml.load(f.read(), OrderedDictYAMLLoader)
def print_keys(path, out, lang, data):
for k, val in data.items():
k = str(k)
k_path = path + [k]
if isinstance(val, dict):
print_keys(k_path, out, lang, val)
else:
out_key = '.'.join(k_path)
if not out_key in out:
out[out_key] = {}
out[out_key][lang] = val
out = OrderedDict()
for section in d.keys():
for lang in d[section].keys():
print_keys([section], out, lang, d[section][lang])
LANG_CODES = ['fi', 'en', 'sv']
import csv
f = open('translations.csv', 'w')
writer = csv.writer(f)
for key, langs in out.items():
row = []
row.append(key)
for l_code in LANG_CODES:
row.append(langs.get(l_code, ''))
print(row)
writer.writerow(row)
f.close()
|
Add script for outputting translations as CSV
|
Add script for outputting translations as CSV
|
Python
|
agpl-3.0
|
vaaralav/servicemap,City-of-Helsinki/servicemap,Zeukkari/servicemap,vaaralav/servicemap,City-of-Helsinki/servicemap,City-of-Helsinki/servicemap,vaaralav/servicemap,Zeukkari/servicemap
|
Add script for outputting translations as CSV
|
#!/usr/bin/env python
import yaml
import yaml.constructor
try:
# included in standard lib from Python 2.7
from collections import OrderedDict
except ImportError:
# try importing the backported drop-in replacement
# it's available on PyPI
from ordereddict import OrderedDict
class OrderedDictYAMLLoader(yaml.Loader):
"""
A YAML loader that loads mappings into ordered dictionaries.
"""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
f = open('locales/app.yaml')
d = yaml.load(f.read(), OrderedDictYAMLLoader)
def print_keys(path, out, lang, data):
for k, val in data.items():
k = str(k)
k_path = path + [k]
if isinstance(val, dict):
print_keys(k_path, out, lang, val)
else:
out_key = '.'.join(k_path)
if not out_key in out:
out[out_key] = {}
out[out_key][lang] = val
out = OrderedDict()
for section in d.keys():
for lang in d[section].keys():
print_keys([section], out, lang, d[section][lang])
LANG_CODES = ['fi', 'en', 'sv']
import csv
f = open('translations.csv', 'w')
writer = csv.writer(f)
for key, langs in out.items():
row = []
row.append(key)
for l_code in LANG_CODES:
row.append(langs.get(l_code, ''))
print(row)
writer.writerow(row)
f.close()
|
<commit_before><commit_msg>Add script for outputting translations as CSV<commit_after>
|
#!/usr/bin/env python
import yaml
import yaml.constructor
try:
# included in standard lib from Python 2.7
from collections import OrderedDict
except ImportError:
# try importing the backported drop-in replacement
# it's available on PyPI
from ordereddict import OrderedDict
class OrderedDictYAMLLoader(yaml.Loader):
"""
A YAML loader that loads mappings into ordered dictionaries.
"""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
f = open('locales/app.yaml')
d = yaml.load(f.read(), OrderedDictYAMLLoader)
def print_keys(path, out, lang, data):
for k, val in data.items():
k = str(k)
k_path = path + [k]
if isinstance(val, dict):
print_keys(k_path, out, lang, val)
else:
out_key = '.'.join(k_path)
if not out_key in out:
out[out_key] = {}
out[out_key][lang] = val
out = OrderedDict()
for section in d.keys():
for lang in d[section].keys():
print_keys([section], out, lang, d[section][lang])
LANG_CODES = ['fi', 'en', 'sv']
import csv
f = open('translations.csv', 'w')
writer = csv.writer(f)
for key, langs in out.items():
row = []
row.append(key)
for l_code in LANG_CODES:
row.append(langs.get(l_code, ''))
print(row)
writer.writerow(row)
f.close()
|
Add script for outputting translations as CSV#!/usr/bin/env python
import yaml
import yaml.constructor
try:
# included in standard lib from Python 2.7
from collections import OrderedDict
except ImportError:
# try importing the backported drop-in replacement
# it's available on PyPI
from ordereddict import OrderedDict
class OrderedDictYAMLLoader(yaml.Loader):
"""
A YAML loader that loads mappings into ordered dictionaries.
"""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
f = open('locales/app.yaml')
d = yaml.load(f.read(), OrderedDictYAMLLoader)
def print_keys(path, out, lang, data):
for k, val in data.items():
k = str(k)
k_path = path + [k]
if isinstance(val, dict):
print_keys(k_path, out, lang, val)
else:
out_key = '.'.join(k_path)
if not out_key in out:
out[out_key] = {}
out[out_key][lang] = val
out = OrderedDict()
for section in d.keys():
for lang in d[section].keys():
print_keys([section], out, lang, d[section][lang])
LANG_CODES = ['fi', 'en', 'sv']
import csv
f = open('translations.csv', 'w')
writer = csv.writer(f)
for key, langs in out.items():
row = []
row.append(key)
for l_code in LANG_CODES:
row.append(langs.get(l_code, ''))
print(row)
writer.writerow(row)
f.close()
|
<commit_before><commit_msg>Add script for outputting translations as CSV<commit_after>#!/usr/bin/env python
import yaml
import yaml.constructor
try:
# included in standard lib from Python 2.7
from collections import OrderedDict
except ImportError:
# try importing the backported drop-in replacement
# it's available on PyPI
from ordereddict import OrderedDict
class OrderedDictYAMLLoader(yaml.Loader):
"""
A YAML loader that loads mappings into ordered dictionaries.
"""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
f = open('locales/app.yaml')
d = yaml.load(f.read(), OrderedDictYAMLLoader)
def print_keys(path, out, lang, data):
for k, val in data.items():
k = str(k)
k_path = path + [k]
if isinstance(val, dict):
print_keys(k_path, out, lang, val)
else:
out_key = '.'.join(k_path)
if not out_key in out:
out[out_key] = {}
out[out_key][lang] = val
out = OrderedDict()
for section in d.keys():
for lang in d[section].keys():
print_keys([section], out, lang, d[section][lang])
LANG_CODES = ['fi', 'en', 'sv']
import csv
f = open('translations.csv', 'w')
writer = csv.writer(f)
for key, langs in out.items():
row = []
row.append(key)
for l_code in LANG_CODES:
row.append(langs.get(l_code, ''))
print(row)
writer.writerow(row)
f.close()
|
|
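To make the flattening step concrete, here is a toy run of the same key-walking logic on an in-memory dict (the section/language nesting is assumed to mirror locales/app.yaml; the sample strings are illustrative). Note that despite its name, print_keys prints nothing; it accumulates dotted keys into out:

# Toy illustration of the dotted-key flattening used above, without YAML or CSV I/O.
d = {'app': {'fi': {'title': 'Palvelukartta'}, 'en': {'title': 'Service Map'}}}
out = {}
for section in d.keys():
    for lang in d[section].keys():
        print_keys([section], out, lang, d[section][lang])
# out == {'app.title': {'fi': 'Palvelukartta', 'en': 'Service Map'}}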
90e6dd52a202c2503c1c4881f70b1c13b68147dc
|
pitchfork/setup_application.py
|
pitchfork/setup_application.py
|
from flask import Flask, g
from inspect import getmembers, isfunction
from happymongo import HapPyMongo
from config import config
from adminbp import bp as admin_bp
from manage_globals import bp as manage_bp
from engine import bp as engine_bp
import context_functions
import views
import template_filters
def create_app(testing=None):
app = Flask(__name__)
if testing:
config.TESTING = True
config.MONGO_DATABASE = '%s_test' % config.MONGO_DATABASE
config.ADMIN = 'rusty.shackelford'
app.config.from_object(config)
app.register_blueprint(admin_bp, url_prefix='/admin')
app.register_blueprint(manage_bp, url_prefix='/manage')
app.register_blueprint(engine_bp, url_prefix='/engine')
mongo, db = HapPyMongo(config)
views.ProductsView.register(app)
views.MiscView.register(app)
custom_filters = {
name: function for name, function in getmembers(template_filters)
if isfunction(function)
}
app.jinja_env.filters.update(custom_filters)
app.context_processor(context_functions.utility_processor)
@app.before_request
def before_request():
g.db = db
return app, db
|
Add in setup application file to handle the app setup so that it can be used with browser testing as well
|
Add in setup application file to handle the app setup so that it can be used with browser testing as well
|
Python
|
apache-2.0
|
rackerlabs/pitchfork,rackerlabs/pitchfork,oldarmyc/pitchfork,oldarmyc/pitchfork,rackerlabs/pitchfork,oldarmyc/pitchfork
|
Add in setup application file to handle the app setup so that it can be used with browser testing as well
|
from flask import Flask, g
from inspect import getmembers, isfunction
from happymongo import HapPyMongo
from config import config
from adminbp import bp as admin_bp
from manage_globals import bp as manage_bp
from engine import bp as engine_bp
import context_functions
import views
import template_filters
def create_app(testing=None):
app = Flask(__name__)
if testing:
config.TESTING = True
config.MONGO_DATABASE = '%s_test' % config.MONGO_DATABASE
config.ADMIN = 'rusty.shackelford'
app.config.from_object(config)
app.register_blueprint(admin_bp, url_prefix='/admin')
app.register_blueprint(manage_bp, url_prefix='/manage')
app.register_blueprint(engine_bp, url_prefix='/engine')
mongo, db = HapPyMongo(config)
views.ProductsView.register(app)
views.MiscView.register(app)
custom_filters = {
name: function for name, function in getmembers(template_filters)
if isfunction(function)
}
app.jinja_env.filters.update(custom_filters)
app.context_processor(context_functions.utility_processor)
@app.before_request
def before_request():
g.db = db
return app, db
|
<commit_before><commit_msg>Add in setup application file to handle the app setup so that it can be used with browser testing as well<commit_after>
|
from flask import Flask, g
from inspect import getmembers, isfunction
from happymongo import HapPyMongo
from config import config
from adminbp import bp as admin_bp
from manage_globals import bp as manage_bp
from engine import bp as engine_bp
import context_functions
import views
import template_filters
def create_app(testing=None):
app = Flask(__name__)
if testing:
config.TESTING = True
config.MONGO_DATABASE = '%s_test' % config.MONGO_DATABASE
config.ADMIN = 'rusty.shackelford'
app.config.from_object(config)
app.register_blueprint(admin_bp, url_prefix='/admin')
app.register_blueprint(manage_bp, url_prefix='/manage')
app.register_blueprint(engine_bp, url_prefix='/engine')
mongo, db = HapPyMongo(config)
views.ProductsView.register(app)
views.MiscView.register(app)
custom_filters = {
name: function for name, function in getmembers(template_filters)
if isfunction(function)
}
app.jinja_env.filters.update(custom_filters)
app.context_processor(context_functions.utility_processor)
@app.before_request
def before_request():
g.db = db
return app, db
|
Add in setup application file to handle the app setup so that it can be used with browser testing as wellfrom flask import Flask, g
from inspect import getmembers, isfunction
from happymongo import HapPyMongo
from config import config
from adminbp import bp as admin_bp
from manage_globals import bp as manage_bp
from engine import bp as engine_bp
import context_functions
import views
import template_filters
def create_app(testing=None):
app = Flask(__name__)
if testing:
config.TESTING = True
config.MONGO_DATABASE = '%s_test' % config.MONGO_DATABASE
config.ADMIN = 'rusty.shackelford'
app.config.from_object(config)
app.register_blueprint(admin_bp, url_prefix='/admin')
app.register_blueprint(manage_bp, url_prefix='/manage')
app.register_blueprint(engine_bp, url_prefix='/engine')
mongo, db = HapPyMongo(config)
views.ProductsView.register(app)
views.MiscView.register(app)
custom_filters = {
name: function for name, function in getmembers(template_filters)
if isfunction(function)
}
app.jinja_env.filters.update(custom_filters)
app.context_processor(context_functions.utility_processor)
@app.before_request
def before_request():
g.db = db
return app, db
|
<commit_before><commit_msg>Add in setup application file to handle the app setup so that it can be used with browser testing as well<commit_after>from flask import Flask, g
from inspect import getmembers, isfunction
from happymongo import HapPyMongo
from config import config
from adminbp import bp as admin_bp
from manage_globals import bp as manage_bp
from engine import bp as engine_bp
import context_functions
import views
import template_filters
def create_app(testing=None):
app = Flask(__name__)
if testing:
config.TESTING = True
config.MONGO_DATABASE = '%s_test' % config.MONGO_DATABASE
config.ADMIN = 'rusty.shackelford'
app.config.from_object(config)
app.register_blueprint(admin_bp, url_prefix='/admin')
app.register_blueprint(manage_bp, url_prefix='/manage')
app.register_blueprint(engine_bp, url_prefix='/engine')
mongo, db = HapPyMongo(config)
views.ProductsView.register(app)
views.MiscView.register(app)
custom_filters = {
name: function for name, function in getmembers(template_filters)
if isfunction(function)
}
app.jinja_env.filters.update(custom_filters)
app.context_processor(context_functions.utility_processor)
@app.before_request
def before_request():
g.db = db
return app, db
|
|
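A short sketch of how the factory above is typically consumed; the route queried and the run() entry point are illustrative, not taken from the project:

# Illustrative consumers of create_app.
# Production-style entry point (blocks, so shown as a comment):
#     app, db = create_app()
#     app.run()
# Test-style setup; testing=True switches to the "<name>_test" Mongo database:
app, db = create_app(testing=True)
client = app.test_client()   # Flask's built-in WSGI test client
resp = client.get('/')
print(resp.status_code)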
aedde845b3af828053e56337719579b7808d13aa
|
onetime/views.py
|
onetime/views.py
|
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, redirect_invalid_to=None):
user = auth.authenticate(key=key)
if user is None:
if redirect_invalid_to is not None:
return HttpResponseRedirect(redirect_invalid_to)
else:
return HttpResponseGone()
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
|
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, login_url=None):
next = request.GET.get('next', None)
if next is None:
next = settings.LOGIN_REDIRECT_URL
user = auth.authenticate(key=key)
if user is None:
url = settings.LOGIN_URL
if next is not None:
url = '%s?next=%s' % (url, next)
return HttpResponseRedirect(url)
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
if data.next is not None:
next = data.next
return HttpResponseRedirect(next)
|
Remove redirect_invalid_to option and use settings.LOGIN_URL instead. When redirecting the user, also forward the next parameter.
|
Remove redirect_invalid_to option and use settings.LOGIN_URL instead.
When redirecting the user, also forward the next parameter.
|
Python
|
agpl-3.0
|
ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,fajran/django-loginurl,vanschelven/cmsplugin-journal,ISIFoundation/influenzanet-website,uploadcare/django-loginurl,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website
|
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, redirect_invalid_to=None):
user = auth.authenticate(key=key)
if user is None:
if redirect_invalid_to is not None:
return HttpResponseRedirect(redirect_invalid_to)
else:
return HttpResponseGone()
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
Remove redirect_invalid_to option and use settings.LOGIN_URL instead.
When redirecting the user, also forward the next parameter.
|
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, login_url=None):
next = request.GET.get('next', None)
if next is None:
next = settings.LOGIN_REDIRECT_URL
user = auth.authenticate(key=key)
if user is None:
url = settings.LOGIN_URL
if next is not None:
url = '%s?next=%s' % (url, next)
return HttpResponseRedirect(url)
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
if data.next is not None:
next = data.next
return HttpResponseRedirect(next)
|
<commit_before>from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, redirect_invalid_to=None):
user = auth.authenticate(key=key)
if user is None:
if redirect_invalid_to is not None:
return HttpResponseRedirect(redirect_invalid_to)
else:
return HttpResponseGone()
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
<commit_msg>Remove redirect_invalid_to option and use settings.LOGIN_URL instead.
When redirecting the user, also forward the next parameter.<commit_after>
|
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, login_url=None):
next = request.GET.get('next', None)
if next is None:
next = settings.LOGIN_REDIRECT_URL
user = auth.authenticate(key=key)
if user is None:
url = settings.LOGIN_URL
if next is not None:
url = '%s?next=%s' % (url, next)
return HttpResponseRedirect(url)
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
if data.next is not None:
next = data.next
return HttpResponseRedirect(next)
|
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, redirect_invalid_to=None):
user = auth.authenticate(key=key)
if user is None:
if redirect_invalid_to is not None:
return HttpResponseRedirect(redirect_invalid_to)
else:
return HttpResponseGone()
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
Remove redirect_invalid_to option and use settings.LOGIN_URL instead.
When redirecting the user, also forward the next parameter.from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, login_url=None):
next = request.GET.get('next', None)
if next is None:
next = settings.LOGIN_REDIRECT_URL
user = auth.authenticate(key=key)
if user is None:
url = settings.LOGIN_URL
if next is not None:
url = '%s?next=%s' % (url, next)
return HttpResponseRedirect(url)
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
if data.next is not None:
next = data.next
return HttpResponseRedirect(next)
|
<commit_before>from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, redirect_invalid_to=None):
user = auth.authenticate(key=key)
if user is None:
if redirect_invalid_to is not None:
return HttpResponseRedirect(redirect_invalid_to)
else:
return HttpResponseGone()
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
<commit_msg>Remove redirect_invalid_to option and use settings.LOGIN_URL instead.
When redirecting the user, also forward the next parameter.<commit_after>from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseGone
from django.contrib import auth
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
return HttpResponse('ok', content_type='text/plain')
def login(request, key, login_url=None):
next = request.GET.get('next', None)
if next is None:
next = settings.LOGIN_REDIRECT_URL
user = auth.authenticate(key=key)
if user is None:
url = settings.LOGIN_URL
if next is not None:
url = '%s?next=%s' % (url, next)
return HttpResponseRedirect(url)
auth.login(request, user)
data = Key.objects.get(key=key)
data.update_usage()
if data.next is not None:
next = data.next
return HttpResponseRedirect(next)
|
8f34d5a5dd0b7f774d452c75065b64d4c28d4082
|
ObjectTracking/testThreading.py
|
ObjectTracking/testThreading.py
|
import threading
def affiche(nb, nom = ''):
for i in range(nb): print nom, i
a = threading.Thread(None, affiche, None, (200,), {'nom':'thread a'})
b = threading.Thread(None, affiche, None, (200,), {'nom':'thread b'})
a.start()
b.start()
|
Add a test for the threading mechanism
|
Add a test for the threading mechanism
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
Add a test for the threading mechanism
|
import threading
def affiche(nb, nom = ''):
for i in range(nb): print nom, i
a = threading.Thread(None, affiche, None, (200,), {'nom':'thread a'})
b = threading.Thread(None, affiche, None, (200,), {'nom':'thread b'})
a.start()
b.start()
|
<commit_before><commit_msg>Add a test for the threading mechanism<commit_after>
|
import threading
def affiche(nb, nom = ''):
for i in range(nb): print nom, i
a = threading.Thread(None, affiche, None, (200,), {'nom':'thread a'})
b = threading.Thread(None, affiche, None, (200,), {'nom':'thread b'})
a.start()
b.start()
|
Add a test for the threading mechanismimport threading
def affiche(nb, nom = ''):
for i in range(nb): print nom, i
a = threading.Thread(None, affiche, None, (200,), {'nom':'thread a'})
b = threading.Thread(None, affiche, None, (200,), {'nom':'thread b'})
a.start()
b.start()
|
<commit_before><commit_msg>Add a test for the threading mechanism<commit_after>import threading
def affiche(nb, nom = ''):
for i in range(nb): print nom, i
a = threading.Thread(None, affiche, None, (200,), {'nom':'thread a'})
b = threading.Thread(None, affiche, None, (200,), {'nom':'thread b'})
a.start()
b.start()
|
|
ae3150cb5c7d39da1a3b4cd70fdfb92d36b9ce57
|
tests/float/cmath_fun.py
|
tests/float/cmath_fun.py
|
# test the functions imported from cmath
try:
from cmath import *
except ImportError:
print("SKIP")
import sys
sys.exit()
# make sure these constants exist in cmath
print("%.5g" % e)
print("%.5g" % pi)
test_values_non_zero = []
base_values = (0.0, 0.5, 1.23456, 10.)
for r in base_values:
for i in base_values:
if r != 0. or i != 0.:
test_values_non_zero.append(complex(r, i))
if r != 0.:
test_values_non_zero.append(complex(-r, i))
if i != 0.:
test_values_non_zero.append(complex(r, -i))
if r != 0. and i != 0.:
test_values_non_zero.append(complex(-r, -i))
test_values = [complex(0., 0.),] + test_values_non_zero
print(test_values)
functions = [
('phase', phase, test_values),
('polar', polar, test_values),
('rect', rect, ((0, 0), (0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, 1), (1, -1), (123., -456.))),
('exp', exp, test_values),
('log', log, test_values_non_zero),
('log10', log10, test_values_non_zero),
('sqrt', sqrt, test_values),
('cos', cos, test_values),
('sin', sin, test_values),
]
for f_name, f, test_vals in functions:
print(f_name)
for val in test_vals:
if type(val) == tuple:
ret = f(*val)
else:
ret = f(val)
if type(ret) == float:
print("%.5g" % ret)
elif type(ret) == tuple:
print("%.5g %.5g" % ret)
else:
            # some tests (eg cmath.sqrt(-0.5)) disagree with CPython by a tiny real part
            real = ret.real
            if abs(real) < 1e-15:
real = 0.
print("complex(%.5g, %.5g)" % (real, ret.imag))
|
Add test for cmath module.
|
tests: Add test for cmath module.
|
Python
|
mit
|
slzatz/micropython,hiway/micropython,kostyll/micropython,pozetroninc/micropython,jimkmc/micropython,dmazzella/micropython,tobbad/micropython,orionrobots/micropython,puuu/micropython,AriZuu/micropython,turbinenreiter/micropython,paul-xxx/micropython,warner83/micropython,matthewelse/micropython,puuu/micropython,trezor/micropython,xyb/micropython,pramasoul/micropython,xuxiaoxin/micropython,PappaPeppar/micropython,torwag/micropython,blmorris/micropython,MrSurly/micropython-esp32,pozetroninc/micropython,kostyll/micropython,dmazzella/micropython,deshipu/micropython,ahotam/micropython,tuc-osg/micropython,hosaka/micropython,mhoffma/micropython,SungEun-Steve-Kim/test-mp,swegener/micropython,pfalcon/micropython,mpalomer/micropython,mpalomer/micropython,tdautc19841202/micropython,Vogtinator/micropython,mianos/micropython,selste/micropython,adafruit/circuitpython,slzatz/micropython,puuu/micropython,alex-robbins/micropython,lowRISC/micropython,supergis/micropython,MrSurly/micropython,xhat/micropython,blazewicz/micropython,jimkmc/micropython,henriknelson/micropython,Peetz0r/micropython-esp32,pramasoul/micropython,alex-robbins/micropython,paul-xxx/micropython,xuxiaoxin/micropython,MrSurly/micropython-esp32,mgyenik/micropython,henriknelson/micropython,ChuckM/micropython,dxxb/micropython,blmorris/micropython,micropython/micropython-esp32,infinnovation/micropython,alex-march/micropython,dinau/micropython,PappaPeppar/micropython,galenhz/micropython,noahwilliamsson/micropython,paul-xxx/micropython,bvernoux/micropython,utopiaprince/micropython,aethaniel/micropython,mianos/micropython,noahchense/micropython,mgyenik/micropython,cnoviello/micropython,rubencabrera/micropython,MrSurly/micropython-esp32,drrk/micropython,heisewangluo/micropython,drrk/micropython,martinribelotta/micropython,infinnovation/micropython,firstval/micropython,ernesto-g/micropython,tdautc19841202/micropython,suda/micropython,mgyenik/micropython,pfalcon/micropython,TDAbboud/micropython,adafruit/circuitpython,AriZuu/micropython,xhat/micropython,skybird6672/micropython,adafruit/circuitpython,EcmaXp/micropython,trezor/micropython,SungEun-Steve-Kim/test-mp,tuc-osg/micropython,mhoffma/micropython,cloudformdesign/micropython,xuxiaoxin/micropython,danicampora/micropython,lbattraw/micropython,drrk/micropython,tobbad/micropython,alex-march/micropython,firstval/micropython,jimkmc/micropython,HenrikSolver/micropython,firstval/micropython,martinribelotta/micropython,galenhz/micropython,matthewelse/micropython,xhat/micropython,PappaPeppar/micropython,orionrobots/micropython,tobbad/micropython,praemdonck/micropython,suda/micropython,xyb/micropython,torwag/micropython,MrSurly/micropython,ruffy91/micropython,chrisdearman/micropython,oopy/micropython,dinau/micropython,turbinenreiter/micropython,jlillest/micropython,jmarcelino/pycom-micropython,emfcamp/micropython,utopiaprince/micropython,noahwilliamsson/micropython,EcmaXp/micropython,AriZuu/micropython,aethaniel/micropython,mhoffma/micropython,jmarcelino/pycom-micropython,hosaka/micropython,mpalomer/micropython,vriera/micropython,dhylands/micropython,tuc-osg/micropython,heisewangluo/micropython,redbear/micropython,vriera/micropython,pfalcon/micropython,Timmenem/micropython,ganshun666/micropython,xuxiaoxin/micropython,adafruit/micropython,bvernoux/micropython,emfcamp/micropython,utopiaprince/micropython,Vogtinator/micropython,noahwilliamsson/micropython,omtinez/micropython,aethaniel/micropython,mhoffma/micropython,paul-xxx/micropython,ganshun666/micropython,ernesto-g/micropython,ceramos/micropython,tobbad/micro
python,xyb/micropython,ruffy91/micropython,TDAbboud/micropython,adamkh/micropython,jimkmc/micropython,tdautc19841202/micropython,ceramos/micropython,SungEun-Steve-Kim/test-mp,oopy/micropython,mhoffma/micropython,lbattraw/micropython,cloudformdesign/micropython,praemdonck/micropython,praemdonck/micropython,Peetz0r/micropython-esp32,blazewicz/micropython,hiway/micropython,Peetz0r/micropython-esp32,swegener/micropython,jlillest/micropython,oopy/micropython,puuu/micropython,ceramos/micropython,torwag/micropython,rubencabrera/micropython,toolmacher/micropython,micropython/micropython-esp32,ChuckM/micropython,jlillest/micropython,ruffy91/micropython,alex-march/micropython,rubencabrera/micropython,rubencabrera/micropython,SHA2017-badge/micropython-esp32,selste/micropython,aethaniel/micropython,mianos/micropython,feilongfl/micropython,mgyenik/micropython,dxxb/micropython,MrSurly/micropython,adamkh/micropython,skybird6672/micropython,adamkh/micropython,kerneltask/micropython,toolmacher/micropython,ahotam/micropython,supergis/micropython,tralamazza/micropython,MrSurly/micropython-esp32,ruffy91/micropython,omtinez/micropython,cloudformdesign/micropython,mgyenik/micropython,ceramos/micropython,matthewelse/micropython,vitiral/micropython,skybird6672/micropython,firstval/micropython,swegener/micropython,kerneltask/micropython,pozetroninc/micropython,hiway/micropython,ryannathans/micropython,puuu/micropython,HenrikSolver/micropython,xyb/micropython,redbear/micropython,omtinez/micropython,neilh10/micropython,xhat/micropython,kostyll/micropython,tdautc19841202/micropython,infinnovation/micropython,bvernoux/micropython,ericsnowcurrently/micropython,xhat/micropython,skybird6672/micropython,KISSMonX/micropython,chrisdearman/micropython,redbear/micropython,lowRISC/micropython,martinribelotta/micropython,ryannathans/micropython,TDAbboud/micropython,dinau/micropython,vriera/micropython,feilongfl/micropython,tuc-osg/micropython,adafruit/micropython,ganshun666/micropython,rubencabrera/micropython,tdautc19841202/micropython,ganshun666/micropython,torwag/micropython,cloudformdesign/micropython,adafruit/circuitpython,noahchense/micropython,stonegithubs/micropython,ernesto-g/micropython,Timmenem/micropython,ceramos/micropython,xyb/micropython,jmarcelino/pycom-micropython,adafruit/micropython,dinau/micropython,micropython/micropython-esp32,ernesto-g/micropython,tobbad/micropython,feilongfl/micropython,omtinez/micropython,blazewicz/micropython,redbear/micropython,Vogtinator/micropython,vriera/micropython,Peetz0r/micropython-esp32,SHA2017-badge/micropython-esp32,vitiral/micropython,misterdanb/micropython,praemdonck/micropython,heisewangluo/micropython,orionrobots/micropython,bvernoux/micropython,EcmaXp/micropython,HenrikSolver/micropython,vitiral/micropython,cwyark/micropython,swegener/micropython,ahotam/micropython,neilh10/micropython,pfalcon/micropython,drrk/micropython,pozetroninc/micropython,hosaka/micropython,ericsnowcurrently/micropython,infinnovation/micropython,cwyark/micropython,stonegithubs/micropython,supergis/micropython,pfalcon/micropython,suda/micropython,deshipu/micropython,feilongfl/micropython,matthewelse/micropython,deshipu/micropython,noahwilliamsson/micropython,misterdanb/micropython,martinribelotta/micropython,HenrikSolver/micropython,firstval/micropython,AriZuu/micropython,emfcamp/micropython,kerneltask/micropython,noahwilliamsson/micropython,adafruit/circuitpython,cnoviello/micropython,blmorris/micropython,ericsnowcurrently/micropython,trezor/micropython,blmorris/micropython,deshipu/micropython,cnov
iello/micropython,SungEun-Steve-Kim/test-mp,henriknelson/micropython,danicampora/micropython,ryannathans/micropython,lowRISC/micropython,hiway/micropython,chrisdearman/micropython,alex-march/micropython,orionrobots/micropython,ernesto-g/micropython,dhylands/micropython,praemdonck/micropython,emfcamp/micropython,infinnovation/micropython,alex-robbins/micropython,slzatz/micropython,misterdanb/micropython,emfcamp/micropython,kostyll/micropython,AriZuu/micropython,lbattraw/micropython,henriknelson/micropython,EcmaXp/micropython,dmazzella/micropython,dxxb/micropython,jimkmc/micropython,ericsnowcurrently/micropython,pramasoul/micropython,stonegithubs/micropython,cnoviello/micropython,dhylands/micropython,pramasoul/micropython,mianos/micropython,kerneltask/micropython,SungEun-Steve-Kim/test-mp,Timmenem/micropython,KISSMonX/micropython,dhylands/micropython,misterdanb/micropython,danicampora/micropython,cwyark/micropython,SHA2017-badge/micropython-esp32,stonegithubs/micropython,chrisdearman/micropython,pozetroninc/micropython,feilongfl/micropython,adafruit/micropython,torwag/micropython,heisewangluo/micropython,heisewangluo/micropython,oopy/micropython,matthewelse/micropython,EcmaXp/micropython,KISSMonX/micropython,cwyark/micropython,neilh10/micropython,warner83/micropython,turbinenreiter/micropython,Vogtinator/micropython,danicampora/micropython,ChuckM/micropython,mianos/micropython,lbattraw/micropython,toolmacher/micropython,vitiral/micropython,swegener/micropython,Peetz0r/micropython-esp32,micropython/micropython-esp32,vitiral/micropython,galenhz/micropython,PappaPeppar/micropython,cwyark/micropython,henriknelson/micropython,toolmacher/micropython,omtinez/micropython,micropython/micropython-esp32,ruffy91/micropython,noahchense/micropython,chrisdearman/micropython,alex-robbins/micropython,galenhz/micropython,tralamazza/micropython,galenhz/micropython,stonegithubs/micropython,slzatz/micropython,tuc-osg/micropython,trezor/micropython,matthewelse/micropython,ahotam/micropython,suda/micropython,turbinenreiter/micropython,jmarcelino/pycom-micropython,cnoviello/micropython,blazewicz/micropython,warner83/micropython,lowRISC/micropython,misterdanb/micropython,Timmenem/micropython,selste/micropython,jlillest/micropython,warner83/micropython,blazewicz/micropython,drrk/micropython,suda/micropython,hosaka/micropython,redbear/micropython,lowRISC/micropython,alex-robbins/micropython,HenrikSolver/micropython,slzatz/micropython,cloudformdesign/micropython,selste/micropython,supergis/micropython,deshipu/micropython,dhylands/micropython,KISSMonX/micropython,kostyll/micropython,ganshun666/micropython,MrSurly/micropython-esp32,turbinenreiter/micropython,utopiaprince/micropython,tralamazza/micropython,toolmacher/micropython,hosaka/micropython,dxxb/micropython,jmarcelino/pycom-micropython,Vogtinator/micropython,ChuckM/micropython,mpalomer/micropython,pramasoul/micropython,oopy/micropython,adafruit/micropython,blmorris/micropython,xuxiaoxin/micropython,kerneltask/micropython,bvernoux/micropython,Timmenem/micropython,paul-xxx/micropython,KISSMonX/micropython,neilh10/micropython,adamkh/micropython,noahchense/micropython,alex-march/micropython,adafruit/circuitpython,ahotam/micropython,utopiaprince/micropython,hiway/micropython,tralamazza/micropython,dxxb/micropython,martinribelotta/micropython,supergis/micropython,vriera/micropython,orionrobots/micropython,neilh10/micropython,selste/micropython,dinau/micropython,ChuckM/micropython,mpalomer/micropython,danicampora/micropython,warner83/micropython,MrSurly/micropython,PappaPe
ppar/micropython,trezor/micropython,jlillest/micropython,TDAbboud/micropython,MrSurly/micropython,noahchense/micropython,ryannathans/micropython,lbattraw/micropython,ryannathans/micropython,ericsnowcurrently/micropython,dmazzella/micropython,adamkh/micropython,skybird6672/micropython,TDAbboud/micropython,SHA2017-badge/micropython-esp32,SHA2017-badge/micropython-esp32,aethaniel/micropython
|
tests: Add test for cmath module.
|
# test the functions imported from cmath
try:
from cmath import *
except ImportError:
print("SKIP")
import sys
sys.exit()
# make sure these constants exist in cmath
print("%.5g" % e)
print("%.5g" % pi)
test_values_non_zero = []
base_values = (0.0, 0.5, 1.23456, 10.)
for r in base_values:
for i in base_values:
if r != 0. or i != 0.:
test_values_non_zero.append(complex(r, i))
if r != 0.:
test_values_non_zero.append(complex(-r, i))
if i != 0.:
test_values_non_zero.append(complex(r, -i))
if r != 0. and i != 0.:
test_values_non_zero.append(complex(-r, -i))
test_values = [complex(0., 0.),] + test_values_non_zero
print(test_values)
functions = [
('phase', phase, test_values),
('polar', polar, test_values),
('rect', rect, ((0, 0), (0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, 1), (1, -1), (123., -456.))),
('exp', exp, test_values),
('log', log, test_values_non_zero),
('log10', log10, test_values_non_zero),
('sqrt', sqrt, test_values),
('cos', cos, test_values),
('sin', sin, test_values),
]
for f_name, f, test_vals in functions:
print(f_name)
for val in test_vals:
if type(val) == tuple:
ret = f(*val)
else:
ret = f(val)
if type(ret) == float:
print("%.5g" % ret)
elif type(ret) == tuple:
print("%.5g %.5g" % ret)
else:
            # some tests (eg cmath.sqrt(-0.5)) disagree with CPython by a tiny real part
            real = ret.real
            if abs(real) < 1e-15:
real = 0.
print("complex(%.5g, %.5g)" % (real, ret.imag))
|
<commit_before><commit_msg>tests: Add test for cmath module.<commit_after>
|
# test the functions imported from cmath
try:
from cmath import *
except ImportError:
print("SKIP")
import sys
sys.exit()
# make sure these constants exist in cmath
print("%.5g" % e)
print("%.5g" % pi)
test_values_non_zero = []
base_values = (0.0, 0.5, 1.23456, 10.)
for r in base_values:
for i in base_values:
if r != 0. or i != 0.:
test_values_non_zero.append(complex(r, i))
if r != 0.:
test_values_non_zero.append(complex(-r, i))
if i != 0.:
test_values_non_zero.append(complex(r, -i))
if r != 0. and i != 0.:
test_values_non_zero.append(complex(-r, -i))
test_values = [complex(0., 0.),] + test_values_non_zero
print(test_values)
functions = [
('phase', phase, test_values),
('polar', polar, test_values),
('rect', rect, ((0, 0), (0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, 1), (1, -1), (123., -456.))),
('exp', exp, test_values),
('log', log, test_values_non_zero),
('log10', log10, test_values_non_zero),
('sqrt', sqrt, test_values),
('cos', cos, test_values),
('sin', sin, test_values),
]
for f_name, f, test_vals in functions:
print(f_name)
for val in test_vals:
if type(val) == tuple:
ret = f(*val)
else:
ret = f(val)
if type(ret) == float:
print("%.5g" % ret)
elif type(ret) == tuple:
print("%.5g %.5g" % ret)
else:
            # some tests (eg cmath.sqrt(-0.5)) disagree with CPython by a tiny real part
            real = ret.real
            if abs(real) < 1e-15:
real = 0.
print("complex(%.5g, %.5g)" % (real, ret.imag))
|
tests: Add test for cmath module.# test the functions imported from cmath
try:
from cmath import *
except ImportError:
print("SKIP")
import sys
sys.exit()
# make sure these constants exist in cmath
print("%.5g" % e)
print("%.5g" % pi)
test_values_non_zero = []
base_values = (0.0, 0.5, 1.23456, 10.)
for r in base_values:
for i in base_values:
if r != 0. or i != 0.:
test_values_non_zero.append(complex(r, i))
if r != 0.:
test_values_non_zero.append(complex(-r, i))
if i != 0.:
test_values_non_zero.append(complex(r, -i))
if r != 0. and i != 0.:
test_values_non_zero.append(complex(-r, -i))
test_values = [complex(0., 0.),] + test_values_non_zero
print(test_values)
functions = [
('phase', phase, test_values),
('polar', polar, test_values),
('rect', rect, ((0, 0), (0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, 1), (1, -1), (123., -456.))),
('exp', exp, test_values),
('log', log, test_values_non_zero),
('log10', log10, test_values_non_zero),
('sqrt', sqrt, test_values),
('cos', cos, test_values),
('sin', sin, test_values),
]
for f_name, f, test_vals in functions:
print(f_name)
for val in test_vals:
if type(val) == tuple:
ret = f(*val)
else:
ret = f(val)
if type(ret) == float:
print("%.5g" % ret)
elif type(ret) == tuple:
print("%.5g %.5g" % ret)
else:
            # some tests (eg cmath.sqrt(-0.5)) disagree with CPython by a tiny real part
            real = ret.real
            if abs(real) < 1e-15:
real = 0.
print("complex(%.5g, %.5g)" % (real, ret.imag))
|
<commit_before><commit_msg>tests: Add test for cmath module.<commit_after># test the functions imported from cmath
try:
from cmath import *
except ImportError:
print("SKIP")
import sys
sys.exit()
# make sure these constants exist in cmath
print("%.5g" % e)
print("%.5g" % pi)
test_values_non_zero = []
base_values = (0.0, 0.5, 1.23456, 10.)
for r in base_values:
for i in base_values:
if r != 0. or i != 0.:
test_values_non_zero.append(complex(r, i))
if r != 0.:
test_values_non_zero.append(complex(-r, i))
if i != 0.:
test_values_non_zero.append(complex(r, -i))
if r != 0. and i != 0.:
test_values_non_zero.append(complex(-r, -i))
test_values = [complex(0., 0.),] + test_values_non_zero
print(test_values)
functions = [
('phase', phase, test_values),
('polar', polar, test_values),
('rect', rect, ((0, 0), (0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, 1), (1, -1), (123., -456.))),
('exp', exp, test_values),
('log', log, test_values_non_zero),
('log10', log10, test_values_non_zero),
('sqrt', sqrt, test_values),
('cos', cos, test_values),
('sin', sin, test_values),
]
for f_name, f, test_vals in functions:
print(f_name)
for val in test_vals:
if type(val) == tuple:
ret = f(*val)
else:
ret = f(val)
if type(ret) == float:
print("%.5g" % ret)
elif type(ret) == tuple:
print("%.5g %.5g" % ret)
else:
            # some tests (eg cmath.sqrt(-0.5)) disagree with CPython by a tiny real part
            real = ret.real
            if abs(real) < 1e-15:
real = 0.
print("complex(%.5g, %.5g)" % (real, ret.imag))
|
|
db4d3a226dbebcb0a2cd89d02362293e2f80082e
|
bin/reset_analysis_database.py
|
bin/reset_analysis_database.py
|
from pymongo import MongoClient
import json
import sys
import emission.core.get_database as edb
from emission.tests import common
def purgeAnalysisData():
edb.get_analysis_timeseries_db().remove()
edb.get_common_place_db().remove()
edb.get_common_trip_db().remove()
edb.get_pipeline_state_db().remove()
if __name__ == '__main__':
if len(sys.argv) == 0:
print "USAGE: %s [userName]" % sys.argv[0]
exit(1)
purgeAnalysisData()
|
Reset the emission state as well
|
Reset the emission state as well
Similar to 96eff63790e28741f45f0875fe888687b803eb6e but for emission users.
Ideally this should only include the analysis_db, but we need the pipeline
info, and Josh created two extra collections for the common trips and common
places.
|
Python
|
bsd-3-clause
|
sunil07t/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server
|
Reset the emission state as well
Similar to 96eff63790e28741f45f0875fe888687b803eb6e but for emission users.
Ideally this should only include the analysis_db, but we need the pipeline
info, and Josh created two extra collections for the common trips and common
places.
|
from pymongo import MongoClient
import json
import sys
import emission.core.get_database as edb
from emission.tests import common
def purgeAnalysisData():
edb.get_analysis_timeseries_db().remove()
edb.get_common_place_db().remove()
edb.get_common_trip_db().remove()
edb.get_pipeline_state_db().remove()
if __name__ == '__main__':
if len(sys.argv) == 0:
print "USAGE: %s [userName]" % sys.argv[0]
exit(1)
purgeAnalysisData()
|
<commit_before><commit_msg>Reset the emission state as well
Similar to 96eff63790e28741f45f0875fe888687b803eb6e but for emission users.
Ideally this should only include the analysis_db, but we need the pipeline
info, and Josh created two extra collections for the common trips and common
places.<commit_after>
|
from pymongo import MongoClient
import json
import sys
import emission.core.get_database as edb
from emission.tests import common
def purgeAnalysisData():
edb.get_analysis_timeseries_db().remove()
edb.get_common_place_db().remove()
edb.get_common_trip_db().remove()
edb.get_pipeline_state_db().remove()
if __name__ == '__main__':
if len(sys.argv) == 0:
print "USAGE: %s [userName]" % sys.argv[0]
exit(1)
purgeAnalysisData()
|
Reset the emission state as well
Similar to 96eff63790e28741f45f0875fe888687b803eb6e but for emission users.
Ideally this should only include the analysis_db, but we need the pipeline
info, and Josh created two extra collections for the common trips and common
places.from pymongo import MongoClient
import json
import sys
import emission.core.get_database as edb
from emission.tests import common
def purgeAnalysisData():
edb.get_analysis_timeseries_db().remove()
edb.get_common_place_db().remove()
edb.get_common_trip_db().remove()
edb.get_pipeline_state_db().remove()
if __name__ == '__main__':
if len(sys.argv) == 0:
print "USAGE: %s [userName]" % sys.argv[0]
exit(1)
purgeAnalysisData()
|
<commit_before><commit_msg>Reset the emission state as well
Similar to 96eff63790e28741f45f0875fe888687b803eb6e but for emission users.
Ideally this should only include the analysis_db, but we need the pipeline
info, and Josh created two extra collections for the common trips and common
places.<commit_after>from pymongo import MongoClient
import json
import sys
import emission.core.get_database as edb
from emission.tests import common
def purgeAnalysisData():
edb.get_analysis_timeseries_db().remove()
edb.get_common_place_db().remove()
edb.get_common_trip_db().remove()
edb.get_pipeline_state_db().remove()
if __name__ == '__main__':
if len(sys.argv) == 0:
print "USAGE: %s [userName]" % sys.argv[0]
exit(1)
purgeAnalysisData()
|
|
cc8fd9051e086fdf51d5ca5a20c85544a789fa22
|
test/unit/qiprofile/test_imaging.py
|
test/unit/qiprofile/test_imaging.py
|
from datetime import datetime
from mongoengine import connect
from nose.tools import assert_equal
from qiprofile_rest_client.model.subject import Subject
from qipipe.qiprofile import imaging
from ...helpers.logging import logger
from ... import PROJECT
COLLECTION = 'Breast'
"""The test collection."""
SUBJECT = 1
"""The test subject number."""
SESSION = 1
"""The test session number."""
class TestImaging(object):
"""
Imaging sync tests.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_sync(self):
# TODO
pass
|
Add qiprofile imaging sync test case.
|
Add qiprofile imaging sync test case.
|
Python
|
bsd-2-clause
|
ohsu-qin/qipipe
|
Add qiprofile imaging sync test case.
|
from datetime import datetime
from mongoengine import connect
from nose.tools import assert_equal
from qiprofile_rest_client.model.subject import Subject
from qipipe.qiprofile import imaging
from ...helpers.logging import logger
from ... import PROJECT
COLLECTION = 'Breast'
"""The test collection."""
SUBJECT = 1
"""The test subject number."""
SESSION = 1
"""The test session number."""
class TestImaging(object):
"""
Imaging sync tests.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_sync(self):
# TODO
pass
|
<commit_before><commit_msg>Add qiprofile imaging sync test case.<commit_after>
|
from datetime import datetime
from mongoengine import connect
from nose.tools import assert_equal
from qiprofile_rest_client.model.subject import Subject
from qipipe.qiprofile import imaging
from ...helpers.logging import logger
from ... import PROJECT
COLLECTION = 'Breast'
"""The test collection."""
SUBJECT = 1
"""The test subject number."""
SESSION = 1
"""The test session number."""
class TestImaging(object):
"""
Imaging sync tests.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_sync(self):
# TODO
pass
|
Add qiprofile imaging sync test case.from datetime import datetime
from mongoengine import connect
from nose.tools import assert_equal
from qiprofile_rest_client.model.subject import Subject
from qipipe.qiprofile import imaging
from ...helpers.logging import logger
from ... import PROJECT
COLLECTION = 'Breast'
"""The test collection."""
SUBJECT = 1
"""The test subject number."""
SESSION = 1
"""The test session number."""
class TestImaging(object):
"""
Imaging sync tests.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_sync(self):
# TODO
pass
|
<commit_before><commit_msg>Add qiprofile imaging sync test case.<commit_after>from datetime import datetime
from mongoengine import connect
from nose.tools import assert_equal
from qiprofile_rest_client.model.subject import Subject
from qipipe.qiprofile import imaging
from ...helpers.logging import logger
from ... import PROJECT
COLLECTION = 'Breast'
"""The test collection."""
SUBJECT = 1
"""The test subject number."""
SESSION = 1
"""The test session number."""
class TestImaging(object):
"""
Imaging sync tests.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_sync(self):
# TODO
pass
|
|
0c124f11e9c22cc739b82e8b089f0b85af90de4c
|
src/python/tests/generators/test_purchasing_profile_generator.py
|
src/python/tests/generators/test_purchasing_profile_generator.py
|
import unittest
from algorithms.markovmodel import MarkovModel
from algorithms.markovmodel import MarkovModelBuilder
from generators.purchasing_profile_generator import PurchasingProfileBuilder
from datamodels.simulation_models import PurchasingProfile
class TestPurchasingProfileBuilder(unittest.TestCase):
def test_add_profile(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
self.assertIsInstance(profile_builder.profiles, dict)
self.assertTrue(len(profile_builder.profiles) == 3)
self.assertIn("alpha", profile_builder.profiles)
self.assertIn("beta", profile_builder.profiles)
self.assertIn("gamma", profile_builder.profiles)
def test_build(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
profile = profile_builder.build()
self.assertIsInstance(profile, PurchasingProfile)
self.assertTrue(len(profile.get_product_categories()) == 3)
result = profile.get_profile("alpha")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("beta")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("gamma")
self.assertIsInstance(result, MarkovModel)
class TestProductCategoryMarkovModelGenerator(unittest.TestCase):
pass
class TestPurchasingProfileGenerator(unittest.TestCase):
pass
|
Add first tests for PurchasingProfileBuilder and stubs for PurchasingProfileGenerator and ProductCategoryMarkovModelGenerator
|
Add first tests for PurchasingProfileBuilder and stubs for PurchasingProfileGenerator and ProductCategoryMarkovModelGenerator
|
Python
|
apache-2.0
|
rnowling/bigpetstore-data-generator,rnowling/bigpetstore-data-generator,rnowling/bigpetstore-data-generator
|
Add first tests for PurchasingProfileBuilder and stubs for PurchasingProfileGenerator and ProductCategoryMarkovModelGenerator
|
import unittest
from algorithms.markovmodel import MarkovModel
from algorithms.markovmodel import MarkovModelBuilder
from generators.purchasing_profile_generator import PurchasingProfileBuilder
from datamodels.simulation_models import PurchasingProfile
class TestPurchasingProfileBuilder(unittest.TestCase):
def test_add_profile(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
self.assertIsInstance(profile_builder.profiles, dict)
self.assertTrue(len(profile_builder.profiles) == 3)
self.assertIn("alpha", profile_builder.profiles)
self.assertIn("beta", profile_builder.profiles)
self.assertIn("gamma", profile_builder.profiles)
def test_build(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
profile = profile_builder.build()
self.assertIsInstance(profile, PurchasingProfile)
self.assertTrue(len(profile.get_product_categories()) == 3)
result = profile.get_profile("alpha")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("beta")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("gamma")
self.assertIsInstance(result, MarkovModel)
class TestProductCategoryMarkovModelGenerator(unittest.TestCase):
pass
class TestPurchasingProfileGenerator(unittest.TestCase):
pass
|
<commit_before><commit_msg>Add first tests for PurchasingProfileBuilder and stubs for PurchasingProfileGenerator and ProductCategoryMarkovModelGenerator<commit_after>
|
import unittest
from algorithms.markovmodel import MarkovModel
from algorithms.markovmodel import MarkovModelBuilder
from generators.purchasing_profile_generator import PurchasingProfileBuilder
from datamodels.simulation_models import PurchasingProfile
class TestPurchasingProfileBuilder(unittest.TestCase):
def test_add_profile(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
self.assertIsInstance(profile_builder.profiles, dict)
self.assertTrue(len(profile_builder.profiles) == 3)
self.assertIn("alpha", profile_builder.profiles)
self.assertIn("beta", profile_builder.profiles)
self.assertIn("gamma", profile_builder.profiles)
def test_build(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
profile = profile_builder.build()
self.assertIsInstance(profile, PurchasingProfile)
self.assertTrue(len(profile.get_product_categories()) == 3)
result = profile.get_profile("alpha")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("beta")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("gamma")
self.assertIsInstance(result, MarkovModel)
class TestProductCategoryMarkovModelGenerator(unittest.TestCase):
pass
class TestPurchasingProfileGenerator(unittest.TestCase):
pass
|
Add first tests for PurchasingProfileBuilder and stubs for PurchasingProfileGenerator and ProductCategoryMarkovModelGeneratorimport unittest
from algorithms.markovmodel import MarkovModel
from algorithms.markovmodel import MarkovModelBuilder
from generators.purchasing_profile_generator import PurchasingProfileBuilder
from datamodels.simulation_models import PurchasingProfile
class TestPurchasingProfileBuilder(unittest.TestCase):
def test_add_profile(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
self.assertIsInstance(profile_builder.profiles, dict)
self.assertTrue(len(profile_builder.profiles) == 3)
self.assertIn("alpha", profile_builder.profiles)
self.assertIn("beta", profile_builder.profiles)
self.assertIn("gamma", profile_builder.profiles)
def test_build(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
profile = profile_builder.build()
self.assertIsInstance(profile, PurchasingProfile)
self.assertTrue(len(profile.get_product_categories()) == 3)
result = profile.get_profile("alpha")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("beta")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("gamma")
self.assertIsInstance(result, MarkovModel)
class TestProductCategoryMarkovModelGenerator(unittest.TestCase):
pass
class TestPurchasingProfileGenerator(unittest.TestCase):
pass
|
<commit_before><commit_msg>Add first tests for PurchasingProfileBuilder and stubs for PurchasingProfileGenerator and ProductCategoryMarkovModelGenerator<commit_after>import unittest
from algorithms.markovmodel import MarkovModel
from algorithms.markovmodel import MarkovModelBuilder
from generators.purchasing_profile_generator import PurchasingProfileBuilder
from datamodels.simulation_models import PurchasingProfile
class TestPurchasingProfileBuilder(unittest.TestCase):
def test_add_profile(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
self.assertIsInstance(profile_builder.profiles, dict)
self.assertTrue(len(profile_builder.profiles) == 3)
self.assertIn("alpha", profile_builder.profiles)
self.assertIn("beta", profile_builder.profiles)
self.assertIn("gamma", profile_builder.profiles)
def test_build(self):
profile_builder = PurchasingProfileBuilder()
for name in ["alpha", "beta", "gamma"]:
builder = MarkovModelBuilder()
builder.add_start_state("a", 1.0)
builder.add_edge_weight("a", "b", 0.5)
builder.add_edge_weight("a", "c", 0.5)
model = builder.build_msm()
profile_builder.add_profile(name, model)
profile = profile_builder.build()
self.assertIsInstance(profile, PurchasingProfile)
self.assertTrue(len(profile.get_product_categories()) == 3)
result = profile.get_profile("alpha")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("beta")
self.assertIsInstance(result, MarkovModel)
result = profile.get_profile("gamma")
self.assertIsInstance(result, MarkovModel)
class TestProductCategoryMarkovModelGenerator(unittest.TestCase):
pass
class TestPurchasingProfileGenerator(unittest.TestCase):
pass
|
|
f2b61a1ce6f73618e66709868fe41dc8f6e99bdb
|
calc_renpass_gis/scenario_reader/csv_parser.py
|
calc_renpass_gis/scenario_reader/csv_parser.py
|
from db import LinearTransformer, Source, Sink, Scenario, Storage, session
import pandas as pd
def asfloat(x):
try:
# do not convert boolean
return float(x) if not isinstance(x, bool) else x
except ValueError:
return x
# path to scenario
path = '~/oemof/examples/solph/nodes_from_csv/scenarios/'
file_nodes_flows = 'nep_2014.csv'
file_nodes_flows_sequences = 'nep_2014_seq.csv'
delimiter = ','
# read in csv files
nodes_flows = pd.read_csv(path + file_nodes_flows, sep=delimiter)
nodes_flows_seq = pd.read_csv(path + file_nodes_flows_sequences, sep=delimiter,
header=None)
nodes_flows_seq.dropna(axis=0, how='all', inplace=True)
nodes_flows_seq.drop(0, axis=1, inplace=True)
nodes_flows_seq = nodes_flows_seq.transpose()
nodes_flows_seq.set_index([0, 1, 2, 3, 4], inplace=True)
nodes_flows_seq.columns = range(0, len(nodes_flows_seq.columns))
nodes_flows_seq = nodes_flows_seq.astype(float)
# create new scenario
scenario_name = 'nep_2014_test'
sc = Scenario(name=scenario_name)
session.add(sc)
session.commit()
# dictionary with all mapped tables
dc = {'Source': Source, 'LinearTransformer': LinearTransformer,
'Sink': Sink, 'Storage': Storage}
empty = []
for idx, series in nodes_flows.iterrows():
# filter for empty rows
series = series.dropna()
try:
obj = dc[series['class']]()
except KeyError:
empty.append(idx)
continue
# map table fields to keys of row series
for col in obj.__table__.columns.keys():
to_ignore = ['id', 'flow_direction']
if col not in to_ignore and col in series:
if col == 'scenario_id':
setattr(obj, col, sc.id)
continue
prop = asfloat(series[col])
if prop == 'seq':
seq = nodes_flows_seq.loc[series['class'],
series['label'],
series['source'],
series['target'],
col]
setattr(obj, col, list(seq))
elif isinstance(prop, float):
setattr(obj, col, [prop])
else:
setattr(obj, col, prop)
session.add(obj)
print('Comment or empty rows at indices {li}.'.format(li=empty))
session.commit()
|
Add script for reading csv scenario files and writing to db
|
Add script for reading csv scenario files and writing to db
|
Python
|
agpl-3.0
|
openego/data_processing
|
Add script for reading csv scenario files and writing to db
|
from db import LinearTransformer, Source, Sink, Scenario, Storage, session
import pandas as pd
def asfloat(x):
try:
# do not convert boolean
return float(x) if not isinstance(x, bool) else x
except ValueError:
return x
# path to scenario
path = '~/oemof/examples/solph/nodes_from_csv/scenarios/'
file_nodes_flows = 'nep_2014.csv'
file_nodes_flows_sequences = 'nep_2014_seq.csv'
delimiter = ','
# read in csv files
nodes_flows = pd.read_csv(path + file_nodes_flows, sep=delimiter)
nodes_flows_seq = pd.read_csv(path + file_nodes_flows_sequences, sep=delimiter,
header=None)
nodes_flows_seq.dropna(axis=0, how='all', inplace=True)
nodes_flows_seq.drop(0, axis=1, inplace=True)
nodes_flows_seq = nodes_flows_seq.transpose()
nodes_flows_seq.set_index([0, 1, 2, 3, 4], inplace=True)
nodes_flows_seq.columns = range(0, len(nodes_flows_seq.columns))
nodes_flows_seq = nodes_flows_seq.astype(float)
# create new scenario
scenario_name = 'nep_2014_test'
sc = Scenario(name=scenario_name)
session.add(sc)
session.commit()
# dictionary with all mapped tables
dc = {'Source': Source, 'LinearTransformer': LinearTransformer,
'Sink': Sink, 'Storage': Storage}
empty = []
for idx, series in nodes_flows.iterrows():
# filter for empty rows
series = series.dropna()
try:
obj = dc[series['class']]()
except KeyError:
empty.append(idx)
continue
# map table fields to keys of row series
for col in obj.__table__.columns.keys():
to_ignore = ['id', 'flow_direction']
if col not in to_ignore and col in series:
if col == 'scenario_id':
setattr(obj, col, sc.id)
continue
prop = asfloat(series[col])
if prop == 'seq':
seq = nodes_flows_seq.loc[series['class'],
series['label'],
series['source'],
series['target'],
col]
setattr(obj, col, list(seq))
elif isinstance(prop, float):
setattr(obj, col, [prop])
else:
setattr(obj, col, prop)
session.add(obj)
print('Comment or empty rows at indices {li}.'.format(li=empty))
session.commit()
|
<commit_before><commit_msg>Add script for reading csv scenario files and writing to db<commit_after>
|
from db import LinearTransformer, Source, Sink, Scenario, Storage, session
import pandas as pd
def asfloat(x):
try:
# do not convert boolean
return float(x) if not isinstance(x, bool) else x
except ValueError:
return x
# path to scenario
path = '~/oemof/examples/solph/nodes_from_csv/scenarios/'
file_nodes_flows = 'nep_2014.csv'
file_nodes_flows_sequences = 'nep_2014_seq.csv'
delimiter = ','
# read in csv files
nodes_flows = pd.read_csv(path + file_nodes_flows, sep=delimiter)
nodes_flows_seq = pd.read_csv(path + file_nodes_flows_sequences, sep=delimiter,
header=None)
nodes_flows_seq.dropna(axis=0, how='all', inplace=True)
nodes_flows_seq.drop(0, axis=1, inplace=True)
nodes_flows_seq = nodes_flows_seq.transpose()
nodes_flows_seq.set_index([0, 1, 2, 3, 4], inplace=True)
nodes_flows_seq.columns = range(0, len(nodes_flows_seq.columns))
nodes_flows_seq = nodes_flows_seq.astype(float)
# create new scenario
scenario_name = 'nep_2014_test'
sc = Scenario(name=scenario_name)
session.add(sc)
session.commit()
# dictionary with all mapped tables
dc = {'Source': Source, 'LinearTransformer': LinearTransformer,
'Sink': Sink, 'Storage': Storage}
empty = []
for idx, series in nodes_flows.iterrows():
# filter for empty rows
series = series.dropna()
try:
obj = dc[series['class']]()
except KeyError:
empty.append(idx)
continue
# map table fields to keys of row series
for col in obj.__table__.columns.keys():
to_ignore = ['id', 'flow_direction']
if col not in to_ignore and col in series:
if col == 'scenario_id':
setattr(obj, col, sc.id)
continue
prop = asfloat(series[col])
if prop == 'seq':
seq = nodes_flows_seq.loc[series['class'],
series['label'],
series['source'],
series['target'],
col]
setattr(obj, col, list(seq))
elif isinstance(prop, float):
setattr(obj, col, [prop])
else:
setattr(obj, col, prop)
session.add(obj)
print('Comment or empty rows at indices {li}.'.format(li=empty))
session.commit()
|
Add script for reading csv scenario files and writing to dbfrom db import LinearTransformer, Source, Sink, Scenario, Storage, session
import pandas as pd
def asfloat(x):
try:
# do not convert boolean
return float(x) if not isinstance(x, bool) else x
except ValueError:
return x
# path to scenario
path = '~/oemof/examples/solph/nodes_from_csv/scenarios/'
file_nodes_flows = 'nep_2014.csv'
file_nodes_flows_sequences = 'nep_2014_seq.csv'
delimiter = ','
# read in csv files
nodes_flows = pd.read_csv(path + file_nodes_flows, sep=delimiter)
nodes_flows_seq = pd.read_csv(path + file_nodes_flows_sequences, sep=delimiter,
header=None)
nodes_flows_seq.dropna(axis=0, how='all', inplace=True)
nodes_flows_seq.drop(0, axis=1, inplace=True)
nodes_flows_seq = nodes_flows_seq.transpose()
nodes_flows_seq.set_index([0, 1, 2, 3, 4], inplace=True)
nodes_flows_seq.columns = range(0, len(nodes_flows_seq.columns))
nodes_flows_seq = nodes_flows_seq.astype(float)
# create new scenario
scenario_name = 'nep_2014_test'
sc = Scenario(name=scenario_name)
session.add(sc)
session.commit()
# dictionary with all mapped tables
dc = {'Source': Source, 'LinearTransformer': LinearTransformer,
'Sink': Sink, 'Storage': Storage}
empty = []
for idx, series in nodes_flows.iterrows():
# filter for empty rows
series = series.dropna()
try:
obj = dc[series['class']]()
except KeyError:
empty.append(idx)
continue
# map table fields to keys of row series
for col in obj.__table__.columns.keys():
to_ignore = ['id', 'flow_direction']
if col not in to_ignore and col in series:
if col == 'scenario_id':
setattr(obj, col, sc.id)
continue
prop = asfloat(series[col])
if prop == 'seq':
seq = nodes_flows_seq.loc[series['class'],
series['label'],
series['source'],
series['target'],
col]
setattr(obj, col, list(seq))
elif isinstance(prop, float):
setattr(obj, col, [prop])
else:
setattr(obj, col, prop)
session.add(obj)
print('Comment or empty rows at indices {li}.'.format(li=empty))
session.commit()
|
<commit_before><commit_msg>Add script for reading csv scenario files and writing to db<commit_after>from db import LinearTransformer, Source, Sink, Scenario, Storage, session
import pandas as pd
def asfloat(x):
try:
# do not convert boolean
return float(x) if not isinstance(x, bool) else x
except ValueError:
return x
# path to scenario
path = '~/oemof/examples/solph/nodes_from_csv/scenarios/'
file_nodes_flows = 'nep_2014.csv'
file_nodes_flows_sequences = 'nep_2014_seq.csv'
delimiter = ','
# read in csv files
nodes_flows = pd.read_csv(path + file_nodes_flows, sep=delimiter)
nodes_flows_seq = pd.read_csv(path + file_nodes_flows_sequences, sep=delimiter,
header=None)
nodes_flows_seq.dropna(axis=0, how='all', inplace=True)
nodes_flows_seq.drop(0, axis=1, inplace=True)
nodes_flows_seq = nodes_flows_seq.transpose()
nodes_flows_seq.set_index([0, 1, 2, 3, 4], inplace=True)
nodes_flows_seq.columns = range(0, len(nodes_flows_seq.columns))
nodes_flows_seq = nodes_flows_seq.astype(float)
# create new scenario
scenario_name = 'nep_2014_test'
sc = Scenario(name=scenario_name)
session.add(sc)
session.commit()
# dictionary with all mapped tables
dc = {'Source': Source, 'LinearTransformer': LinearTransformer,
'Sink': Sink, 'Storage': Storage}
empty = []
for idx, series in nodes_flows.iterrows():
# filter for empty rows
series = series.dropna()
try:
obj = dc[series['class']]()
except KeyError:
empty.append(idx)
continue
# map table fields to keys of row series
for col in obj.__table__.columns.keys():
to_ignore = ['id', 'flow_direction']
if col not in to_ignore and col in series:
if col == 'scenario_id':
setattr(obj, col, sc.id)
continue
prop = asfloat(series[col])
if prop == 'seq':
seq = nodes_flows_seq.loc[series['class'],
series['label'],
series['source'],
series['target'],
col]
setattr(obj, col, list(seq))
elif isinstance(prop, float):
setattr(obj, col, [prop])
else:
setattr(obj, col, prop)
session.add(obj)
print('Comment or empty rows at indices {li}.'.format(li=empty))
session.commit()
|
|
ce131fc380d8717ade6058e5dd8e65b3ce4e0a7d
|
bin/gftools-drop-hints.py
|
bin/gftools-drop-hints.py
|
#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2021 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Drop hints from a font."""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
import argparse
import array
import sys
from fontTools.ttLib import TTFont
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input", help="font file to process")
parser.add_argument("output", help="file to save font", nargs="?")
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font["glyf"]
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array("B")
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main():
"""Drop the hints from the first file specified and save as second."""
args = parser.parse_args()
    font = TTFont(args.input)
drop_hints_from_glyphs(font)
drop_tables(font, ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"])
if not args.output:
args.output = args.input
font.save(args.output)
if __name__ == "__main__":
main()
|
Add drop hints script from Noto
|
Add drop hints script from Noto
|
Python
|
apache-2.0
|
googlefonts/gftools,googlefonts/gftools
|
Add drop hints script from Noto
|
#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2021 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Drop hints from a font."""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
import argparse
import array
import sys
from fontTools.ttLib import TTFont
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input", help="font file to process")
parser.add_argument("output", help="file to save font", nargs="?")
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font["glyf"]
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array("B")
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main():
"""Drop the hints from the first file specified and save as second."""
args = parser.parse_args()
    font = TTFont(args.input)
drop_hints_from_glyphs(font)
drop_tables(font, ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"])
if not args.output:
args.output = args.input
font.save(args.output)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add drop hints script from Noto<commit_after>
|
#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2021 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Drop hints from a font."""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
import argparse
import array
import sys
from fontTools.ttLib import TTFont
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input", help="font file to process")
parser.add_argument("output", help="file to save font", nargs="?")
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font["glyf"]
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array("B")
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main():
"""Drop the hints from the first file specified and save as second."""
args = parser.parse_args()
    font = TTFont(args.input)
drop_hints_from_glyphs(font)
drop_tables(font, ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"])
if not args.output:
args.output = args.input
font.save(args.output)
if __name__ == "__main__":
main()
|
Add drop hints script from Noto#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2021 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Drop hints from a font."""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
import argparse
import array
import sys
from fontTools.ttLib import TTFont
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input", help="font file to process")
parser.add_argument("output", help="file to save font", nargs="?")
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font["glyf"]
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array("B")
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main():
"""Drop the hints from the first file specified and save as second."""
args = parser.parse_args()
    font = TTFont(args.input)
drop_hints_from_glyphs(font)
drop_tables(font, ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"])
if not args.output:
args.output = args.input
font.save(args.output)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add drop hints script from Noto<commit_after>#!/usr/bin/env python3
#
# Copyright 2014 Google Inc. All rights reserved.
# Copyright 2021 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Drop hints from a font."""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
import argparse
import array
import sys
from fontTools.ttLib import TTFont
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input", help="font file to process")
parser.add_argument("output", help="file to save font", nargs="?")
def drop_hints_from_glyphs(font):
"""Drops the hints from a font's glyphs."""
glyf_table = font["glyf"]
for glyph_index in range(len(glyf_table.glyphOrder)):
glyph_name = glyf_table.glyphOrder[glyph_index]
glyph = glyf_table[glyph_name]
if glyph.numberOfContours > 0:
if glyph.program.bytecode:
glyph.program.bytecode = array.array("B")
def drop_tables(font, tables):
"""Drops the listed tables from a font."""
for table in tables:
if table in font:
del font[table]
def main():
"""Drop the hints from the first file specified and save as second."""
args = parser.parse_args()
    font = TTFont(args.input)
drop_hints_from_glyphs(font)
drop_tables(font, ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"])
if not args.output:
args.output = args.input
font.save(args.output)
if __name__ == "__main__":
main()
|
|
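To confirm the script above did its job, reopen the output with fontTools and check that both the hinting tables and the per-glyph bytecode are gone. A minimal sketch; the file name is a hypothetical output path, not one used above:

from fontTools.ttLib import TTFont

font = TTFont("NotoSans-unhinted.ttf")  # hypothetical output of the script above
for table in ["cvt ", "fpgm", "hdmx", "LTSH", "prep", "VDMX"]:
    assert table not in font  # the dropped tables should no longer be present
glyf = font["glyf"]
for name in glyf.glyphOrder:
    glyph = glyf[name]
    program = getattr(glyph, "program", None)  # composites may carry no program
    if glyph.numberOfContours > 0 and program is not None:
        assert not program.bytecode  # every simple glyph should be hint-free
print("no hints found")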
c6739998b3ffe9cc76eee1788fe1d5219daf2b5f
|
superdesk/macros/take_key_validator.py
|
superdesk/macros/take_key_validator.py
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'frontend'
action_type = 'direct'
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'backend'
action_type = 'direct'
|
Remove take key validator macro
|
[SD-4784] Remove take key validator macro
|
Python
|
agpl-3.0
|
nistormihai/superdesk-core,superdesk/superdesk-core,ancafarcas/superdesk-core,superdesk/superdesk-core,mugurrus/superdesk-core,hlmnrmr/superdesk-core,hlmnrmr/superdesk-core,mugurrus/superdesk-core,petrjasek/superdesk-core,petrjasek/superdesk-core,marwoodandrew/superdesk-core,nistormihai/superdesk-core,ancafarcas/superdesk-core,ioanpocol/superdesk-core,marwoodandrew/superdesk-core,mdhaman/superdesk-core,mugurrus/superdesk-core,petrjasek/superdesk-core,superdesk/superdesk-core,mdhaman/superdesk-core,ioanpocol/superdesk-core,mdhaman/superdesk-core,superdesk/superdesk-core,ioanpocol/superdesk-core,petrjasek/superdesk-core
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'frontend'
action_type = 'direct'
[SD-4784] Remove take key validator macro
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'backend'
action_type = 'direct'
|
<commit_before># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'frontend'
action_type = 'direct'
<commit_msg>[SD-4784] Remove take key validator macro<commit_after>
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'backend'
action_type = 'direct'
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'frontend'
action_type = 'direct'
[SD-4784] Remove take key validator macro# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'backend'
action_type = 'direct'
|
<commit_before># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'frontend'
action_type = 'direct'
<commit_msg>[SD-4784] Remove take key validator macro<commit_after># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
def validate(item, **kwargs):
"""Checks if item has take_key value"""
# validation
if not item.get('anpa_take_key', '').strip():
raise KeyError('Take key cannot be empty!')
return item
name = 'take_key_validator'
label = 'validate take key'
callback = validate
access_type = 'backend'
action_type = 'direct'
|
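Because the macro is nothing more than module-level metadata plus a plain callable, its behavior can be exercised without a running Superdesk instance. A minimal sketch, assuming the module above is importable from an installed Superdesk:

from superdesk.macros import take_key_validator  # assumes Superdesk is installed

try:
    take_key_validator.callback({"anpa_take_key": "   "})  # blank take key is rejected
except KeyError as exc:
    print(exc)  # 'Take key cannot be empty!'
item = take_key_validator.callback({"anpa_take_key": "UPDATE 2"})
print(item)  # a non-empty take key passes and the item is returned unchanged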
25692e7b89c6c3e79f608f5bfca98d6b58825874
|
TWLight/users/migrations/0062_delete_hanging_userless_bundle_auths.py
|
TWLight/users/migrations/0062_delete_hanging_userless_bundle_auths.py
|
from django.db import migrations
def delete_hanging_bundle_auths(apps, schema_editor):
Authorization = apps.get_model("users", "Authorization")
    hanging = Authorization.objects.filter(
        user=None,
        partners__authorization_method=3,  # using the actual number of Partner.BUNDLE
    ).distinct()  # distinct() required because partners__authorization_method is ManyToMany
    # Django refuses .delete() directly after .distinct(), so route through pk__in.
    Authorization.objects.filter(pk__in=hanging.values("pk")).delete()
class Migration(migrations.Migration):
dependencies = [("users", "0061_make_staff_superusers_wp_eligible")]
operations = [migrations.RunPython(delete_hanging_bundle_auths)]
|
Add migration to delete hanging bundle auths
|
Add migration to delete hanging bundle auths
|
Python
|
mit
|
WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight,WikipediaLibrary/TWLight
|
Add migration to delete hanging bundle auths
|
from django.db import migrations
def delete_hanging_bundle_auths(apps, schema_editor):
Authorization = apps.get_model("users", "Authorization")
    hanging = Authorization.objects.filter(
        user=None,
        partners__authorization_method=3,  # using the actual number of Partner.BUNDLE
    ).distinct()  # distinct() required because partners__authorization_method is ManyToMany
    # Django refuses .delete() directly after .distinct(), so route through pk__in.
    Authorization.objects.filter(pk__in=hanging.values("pk")).delete()
class Migration(migrations.Migration):
dependencies = [("users", "0061_make_staff_superusers_wp_eligible")]
operations = [migrations.RunPython(delete_hanging_bundle_auths)]
|
<commit_before><commit_msg>Add migration to delete hanging bundle auths<commit_after>
|
from django.db import migrations
def delete_hanging_bundle_auths(apps, schema_editor):
Authorization = apps.get_model("users", "Authorization")
    hanging = Authorization.objects.filter(
        user=None,
        partners__authorization_method=3,  # using the actual number of Partner.BUNDLE
    ).distinct()  # distinct() required because partners__authorization_method is ManyToMany
    # Django refuses .delete() directly after .distinct(), so route through pk__in.
    Authorization.objects.filter(pk__in=hanging.values("pk")).delete()
class Migration(migrations.Migration):
dependencies = [("users", "0061_make_staff_superusers_wp_eligible")]
operations = [migrations.RunPython(delete_hanging_bundle_auths)]
|
Add migration to delete hanging bundle authsfrom django.db import migrations
def delete_hanging_bundle_auths(apps, schema_editor):
Authorization = apps.get_model("users", "Authorization")
    hanging = Authorization.objects.filter(
        user=None,
        partners__authorization_method=3,  # using the actual number of Partner.BUNDLE
    ).distinct()  # distinct() required because partners__authorization_method is ManyToMany
    # Django refuses .delete() directly after .distinct(), so route through pk__in.
    Authorization.objects.filter(pk__in=hanging.values("pk")).delete()
class Migration(migrations.Migration):
dependencies = [("users", "0061_make_staff_superusers_wp_eligible")]
operations = [migrations.RunPython(delete_hanging_bundle_auths)]
|
<commit_before><commit_msg>Add migration to delete hanging bundle auths<commit_after>from django.db import migrations
def delete_hanging_bundle_auths(apps, schema_editor):
Authorization = apps.get_model("users", "Authorization")
    hanging = Authorization.objects.filter(
        user=None,
        partners__authorization_method=3,  # using the actual number of Partner.BUNDLE
    ).distinct()  # distinct() required because partners__authorization_method is ManyToMany
    # Django refuses .delete() directly after .distinct(), so route through pk__in.
    Authorization.objects.filter(pk__in=hanging.values("pk")).delete()
class Migration(migrations.Migration):
dependencies = [("users", "0061_make_staff_superusers_wp_eligible")]
operations = [migrations.RunPython(delete_hanging_bundle_auths)]
|
|
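Since RunPython data migrations are awkward to rehearse, it can help to dry-run the query in a Django shell first. A minimal sketch; the import path is a guess from the repository layout, and the pk__in hop mirrors the migration because Django refuses .delete() straight after .distinct():

from TWLight.users.models import Authorization  # import path is an assumption

hanging = Authorization.objects.filter(
    user=None,
    partners__authorization_method=3,
).distinct()
print(hanging.count())  # how many userless Bundle authorizations would be removed
Authorization.objects.filter(pk__in=hanging.values("pk")).delete()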
cb5ec479fbe4bc70b2501d1ab885c9d0a7d26567
|
betty/cropper/management/commands/make_disk_storage_paths_absolute.py
|
betty/cropper/management/commands/make_disk_storage_paths_absolute.py
|
from optparse import make_option
import os.path
from betty.conf.app import settings
from django.core.management.base import BaseCommand
from betty.cropper.models import Image
class Command(BaseCommand):
help = 'Convert disk storage relative paths to absolute'
# This needs to run on Django 1.7
option_list = BaseCommand.option_list + (
make_option('--check',
action='store_true',
dest='check',
default=False,
help='Dry-run (read-only) check mode'),
make_option('--limit',
type=int,
dest='limit',
help='Maximum number of images to process'),
)
def handle(self, *args, **options):
for idx, image in enumerate(Image.objects.iterator()):
if options['limit'] and idx > options['limit']:
                self.stdout.write('Early exit (limit {} reached)'.format(options['limit']))
                break
for field in [image.source,
image.optimized]:
if field.name:
if not field.name.startswith(settings.MEDIA_ROOT):
path = os.path.join(settings.MEDIA_ROOT, field.name)
self.stdout.write(u'{}{} --> {}'.format(
'[CHECK] ' if options['check'] else '',
field.name,
path))
# Sanity checks
assert os.path.exists(path)
assert path.startswith(settings.MEDIA_ROOT)
assert '//' not in path, "Guard against weird path joins"
if not options['check']:
field.name = path
image.save()
else:
                    self.stdout.write('SKIP: {}'.format(field.name))
|
Add script to migrate relative disk paths to absolute
|
Add script to migrate relative disk paths to absolute
Previously disk paths were relative to BETTY_IMAGE_ROOT/MEDIA_ROOT, now they are absolute.
|
Python
|
mit
|
theonion/betty-cropper,theonion/betty-cropper,theonion/betty-cropper,theonion/betty-cropper
|
Add script to migrate relative disk paths to absolute
Previously disk paths were relative to BETTY_IMAGE_ROOT/MEDIA_ROOT, now they are absolute.
|
from optparse import make_option
import os.path
from betty.conf.app import settings
from django.core.management.base import BaseCommand
from betty.cropper.models import Image
class Command(BaseCommand):
help = 'Convert disk storage relative paths to absolute'
# This needs to run on Django 1.7
option_list = BaseCommand.option_list + (
make_option('--check',
action='store_true',
dest='check',
default=False,
help='Dry-run (read-only) check mode'),
make_option('--limit',
type=int,
dest='limit',
help='Maximum number of images to process'),
)
def handle(self, *args, **options):
for idx, image in enumerate(Image.objects.iterator()):
if options['limit'] and idx > options['limit']:
                self.stdout.write('Early exit (limit {} reached)'.format(options['limit']))
                break
for field in [image.source,
image.optimized]:
if field.name:
if not field.name.startswith(settings.MEDIA_ROOT):
path = os.path.join(settings.MEDIA_ROOT, field.name)
self.stdout.write(u'{}{} --> {}'.format(
'[CHECK] ' if options['check'] else '',
field.name,
path))
# Sanity checks
assert os.path.exists(path)
assert path.startswith(settings.MEDIA_ROOT)
assert '//' not in path, "Guard against weird path joins"
if not options['check']:
field.name = path
image.save()
else:
                    self.stdout.write('SKIP: {}'.format(field.name))
|
<commit_before><commit_msg>Add script to migrate relative disk paths to absolute
Previously disk paths were relative to BETTY_IMAGE_ROOT/MEDIA_ROOT, now they are absolute.<commit_after>
|
from optparse import make_option
import os.path
from betty.conf.app import settings
from django.core.management.base import BaseCommand
from betty.cropper.models import Image
class Command(BaseCommand):
help = 'Convert disk storage relative paths to absolute'
# This needs to run on Django 1.7
option_list = BaseCommand.option_list + (
make_option('--check',
action='store_true',
dest='check',
default=False,
help='Dry-run (read-only) check mode'),
make_option('--limit',
type=int,
dest='limit',
help='Maximum number of images to process'),
)
def handle(self, *args, **options):
for idx, image in enumerate(Image.objects.iterator()):
if options['limit'] and idx > options['limit']:
                self.stdout.write('Early exit (limit {} reached)'.format(options['limit']))
                break
for field in [image.source,
image.optimized]:
if field.name:
if not field.name.startswith(settings.MEDIA_ROOT):
path = os.path.join(settings.MEDIA_ROOT, field.name)
self.stdout.write(u'{}{} --> {}'.format(
'[CHECK] ' if options['check'] else '',
field.name,
path))
# Sanity checks
assert os.path.exists(path)
assert path.startswith(settings.MEDIA_ROOT)
assert '//' not in path, "Guard against weird path joins"
if not options['check']:
field.name = path
image.save()
else:
                    self.stdout.write('SKIP: {}'.format(field.name))
|
Add script to migrate relative disk paths to absolute
Previously disk paths were relative to BETTY_IMAGE_ROOT/MEDIA_ROOT, now they are absolute.from optparse import make_option
import os.path
from betty.conf.app import settings
from django.core.management.base import BaseCommand
from betty.cropper.models import Image
class Command(BaseCommand):
help = 'Convert disk storage relative paths to absolute'
# This needs to run on Django 1.7
option_list = BaseCommand.option_list + (
make_option('--check',
action='store_true',
dest='check',
default=False,
help='Dry-run (read-only) check mode'),
make_option('--limit',
type=int,
dest='limit',
help='Maximum number of images to process'),
)
def handle(self, *args, **options):
for idx, image in enumerate(Image.objects.iterator()):
if options['limit'] and idx > options['limit']:
                self.stdout.write('Early exit (limit {} reached)'.format(options['limit']))
                break
for field in [image.source,
image.optimized]:
if field.name:
if not field.name.startswith(settings.MEDIA_ROOT):
path = os.path.join(settings.MEDIA_ROOT, field.name)
self.stdout.write(u'{}{} --> {}'.format(
'[CHECK] ' if options['check'] else '',
field.name,
path))
# Sanity checks
assert os.path.exists(path)
assert path.startswith(settings.MEDIA_ROOT)
assert '//' not in path, "Guard against weird path joins"
if not options['check']:
field.name = path
image.save()
else:
                    self.stdout.write('SKIP: {}'.format(field.name))
|
<commit_before><commit_msg>Add script to migrate relative disk paths to absolute
Previously disk paths were relative to BETTY_IMAGE_ROOT/MEDIA_ROOT, now they are absolute.<commit_after>from optparse import make_option
import os.path
from betty.conf.app import settings
from django.core.management.base import BaseCommand
from betty.cropper.models import Image
class Command(BaseCommand):
help = 'Convert disk storage relative paths to absolute'
# This needs to run on Django 1.7
option_list = BaseCommand.option_list + (
make_option('--check',
action='store_true',
dest='check',
default=False,
help='Dry-run (read-only) check mode'),
make_option('--limit',
type=int,
dest='limit',
help='Maximum number of images to process'),
)
def handle(self, *args, **options):
for idx, image in enumerate(Image.objects.iterator()):
if options['limit'] and idx > options['limit']:
                self.stdout.write('Early exit (limit {} reached)'.format(options['limit']))
                break
for field in [image.source,
image.optimized]:
if field.name:
if not field.name.startswith(settings.MEDIA_ROOT):
path = os.path.join(settings.MEDIA_ROOT, field.name)
self.stdout.write(u'{}{} --> {}'.format(
'[CHECK] ' if options['check'] else '',
field.name,
path))
# Sanity checks
assert os.path.exists(path)
assert path.startswith(settings.MEDIA_ROOT)
assert '//' not in path, "Guard against weird path joins"
if not options['check']:
field.name = path
image.save()
else:
                    self.stdout.write('SKIP: {}'.format(field.name))
|
|
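One detail worth pausing on from the fix above: str.format only substitutes {} fields, so a '%s' placeholder passes straight through .format untouched, which is why the original early-exit message would have printed its placeholder literally. A tiny self-contained illustration:

limit = 100
print('Early exit (limit %s reached)'.format(limit))  # Early exit (limit %s reached)
print('Early exit (limit {} reached)'.format(limit))  # Early exit (limit 100 reached)
print('Early exit (limit %s reached)' % limit)        # Early exit (limit 100 reached)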
0eb0fbb222d4e03257ffeab7c24a5fe9fcee14f1
|
week4/tweepy_rest_api_models.py
|
week4/tweepy_rest_api_models.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Robert Dempsey on 02/02/2016.
Example script showing how to use the Tweepy Python library to interact
with the Twitter REST API, using available models
Tweepy docs: http://tweepy.readthedocs.org/en/v3.5.0/
Twitter API docs: https://dev.twitter.com/rest/public
"""
import configparser
import tweepy
def main():
"""Look up a user and get the top 20 friends and their statuses."""
# Read the config file and get the goodies
config = configparser.ConfigParser()
config.read('../config/config.ini')
# Set all of the variables we need for Twitter
consumer_key = config['Twitter']['consumer_key']
consumer_secret = config['Twitter']['consumer_secret']
access_token = config['Twitter']['access_token']
access_token_secret = config['Twitter']['access_token_secret']
# Authenticate with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Create an API object to use
api = tweepy.API(auth)
    # Look up the user
user = api.get_user('DistrictDataLab')
# Print out various information about the user
print("Screen name: {}".format(user.screen_name))
print("Follower count: {}\n".format(user.followers_count))
print("User Friend List (20)\n")
for friend in user.friends():
print("Screen name: {}".format(friend.screen_name))
print("Follower count: {}".format(friend.followers_count))
print("Latest status: {}\n".format(friend.status.text))
if __name__ == '__main__':
main()
|
Add an example script for using the REST API to get 20 of a user's friends and their latest status.
|
Add an example script for using the REST API to get 20 of a user's friends and their latest status.
Signed-off-by: Robert Dempsey <715b5a941e732be1613fdd9d94dfd8e50c02b187@gmail.com>
|
Python
|
mit
|
rdempsey/web-scraping-data-mining-course,rdempsey/web-scraping-data-mining-course,rdempsey/web-scraping-data-mining-course
|
Add an example script for using the REST API to get 20 of a user's friends and their latest status.
Signed-off-by: Robert Dempsey <715b5a941e732be1613fdd9d94dfd8e50c02b187@gmail.com>
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Robert Dempsey on 02/02/2016.
Example script showing how to use the Tweepy Python library to interact
with the Twitter REST API, using available models
Tweepy docs: http://tweepy.readthedocs.org/en/v3.5.0/
Twitter API docs: https://dev.twitter.com/rest/public
"""
import configparser
import tweepy
def main():
"""Look up a user and get the top 20 friends and their statuses."""
# Read the config file and get the goodies
config = configparser.ConfigParser()
config.read('../config/config.ini')
# Set all of the variables we need for Twitter
consumer_key = config['Twitter']['consumer_key']
consumer_secret = config['Twitter']['consumer_secret']
access_token = config['Twitter']['access_token']
access_token_secret = config['Twitter']['access_token_secret']
# Authenticate with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Create an API object to use
api = tweepy.API(auth)
    # Look up the user
user = api.get_user('DistrictDataLab')
# Print out various information about the user
print("Screen name: {}".format(user.screen_name))
print("Follower count: {}\n".format(user.followers_count))
print("User Friend List (20)\n")
for friend in user.friends():
print("Screen name: {}".format(friend.screen_name))
print("Follower count: {}".format(friend.followers_count))
print("Latest status: {}\n".format(friend.status.text))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add an example script for using the REST API to get 20 of a user's friends and their latest status.
Signed-off-by: Robert Dempsey <715b5a941e732be1613fdd9d94dfd8e50c02b187@gmail.com><commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Robert Dempsey on 02/02/2016.
Example script showing how to use the Tweepy Python library to interact
with the Twitter REST API, using available models
Tweepy docs: http://tweepy.readthedocs.org/en/v3.5.0/
Twitter API docs: https://dev.twitter.com/rest/public
"""
import configparser
import tweepy
def main():
"""Look up a user and get the top 20 friends and their statuses."""
# Read the config file and get the goodies
config = configparser.ConfigParser()
config.read('../config/config.ini')
# Set all of the variables we need for Twitter
consumer_key = config['Twitter']['consumer_key']
consumer_secret = config['Twitter']['consumer_secret']
access_token = config['Twitter']['access_token']
access_token_secret = config['Twitter']['access_token_secret']
# Authenticate with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Create an API object to use
api = tweepy.API(auth)
    # Look up the user
user = api.get_user('DistrictDataLab')
# Print out various information about the user
print("Screen name: {}".format(user.screen_name))
print("Follower count: {}\n".format(user.followers_count))
print("User Friend List (20)\n")
for friend in user.friends():
print("Screen name: {}".format(friend.screen_name))
print("Follower count: {}".format(friend.followers_count))
print("Latest status: {}\n".format(friend.status.text))
if __name__ == '__main__':
main()
|
Add an example script for using the REST API to get 20 of a user's friends and their latest status.
Signed-off-by: Robert Dempsey <715b5a941e732be1613fdd9d94dfd8e50c02b187@gmail.com>#!/usr/bin/env python
# encoding: utf-8
"""
Created by Robert Dempsey on 02/02/2016.
Example script showing how to use the Tweepy Python library to interact
with the Twitter REST API, using available models
Tweepy docs: http://tweepy.readthedocs.org/en/v3.5.0/
Twitter API docs: https://dev.twitter.com/rest/public
"""
import configparser
import tweepy
def main():
"""Look up a user and get the top 20 friends and their statuses."""
# Read the config file and get the goodies
config = configparser.ConfigParser()
config.read('../config/config.ini')
# Set all of the variables we need for Twitter
consumer_key = config['Twitter']['consumer_key']
consumer_secret = config['Twitter']['consumer_secret']
access_token = config['Twitter']['access_token']
access_token_secret = config['Twitter']['access_token_secret']
# Authenticate with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Create an API object to use
api = tweepy.API(auth)
    # Look up the user
user = api.get_user('DistrictDataLab')
# Print out various information about the user
print("Screen name: {}".format(user.screen_name))
print("Follower count: {}\n".format(user.followers_count))
print("User Friend List (20)\n")
for friend in user.friends():
print("Screen name: {}".format(friend.screen_name))
print("Follower count: {}".format(friend.followers_count))
print("Latest status: {}\n".format(friend.status.text))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add an example script for using the REST API to get 20 of a user's friends and their latest status.
Signed-off-by: Robert Dempsey <715b5a941e732be1613fdd9d94dfd8e50c02b187@gmail.com><commit_after>#!/usr/bin/env python
# encoding: utf-8
"""
Created by Robert Dempsey on 02/02/2016.
Example script showing how to use the Tweepy Python library to interact
with the Twitter REST API, using available models
Tweepy docs: http://tweepy.readthedocs.org/en/v3.5.0/
Twitter API docs: https://dev.twitter.com/rest/public
"""
import configparser
import tweepy
def main():
"""Look up a user and get the top 20 friends and their statuses."""
# Read the config file and get the goodies
config = configparser.ConfigParser()
config.read('../config/config.ini')
# Set all of the variables we need for Twitter
consumer_key = config['Twitter']['consumer_key']
consumer_secret = config['Twitter']['consumer_secret']
access_token = config['Twitter']['access_token']
access_token_secret = config['Twitter']['access_token_secret']
# Authenticate with Twitter
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Create an API object to use
api = tweepy.API(auth)
    # Look up the user
user = api.get_user('DistrictDataLab')
# Print out various information about the user
print("Screen name: {}".format(user.screen_name))
print("Follower count: {}\n".format(user.followers_count))
print("User Friend List (20)\n")
for friend in user.friends():
print("Screen name: {}".format(friend.screen_name))
print("Follower count: {}".format(friend.followers_count))
print("Latest status: {}\n".format(friend.status.text))
if __name__ == '__main__':
main()
|
|
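The script expects ../config/config.ini to already contain a [Twitter] section with four credentials. A minimal sketch for generating that file; the values are placeholders, not real keys:

import configparser

config = configparser.ConfigParser()
config["Twitter"] = {
    "consumer_key": "YOUR_CONSUMER_KEY",          # placeholder values only
    "consumer_secret": "YOUR_CONSUMER_SECRET",
    "access_token": "YOUR_ACCESS_TOKEN",
    "access_token_secret": "YOUR_ACCESS_TOKEN_SECRET",
}
with open("../config/config.ini", "w") as fp:
    config.write(fp)  # writes the [Twitter] section the script reads back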
84e646f9bf94e50e0fcb344fa18beaaaafe4c1a2
|
scripts/de-lit.py
|
scripts/de-lit.py
|
#!/usr/bin/env python
import itertools, re
def print_docs(docs_, outfp):
'''print out docs, skipping the hashes for leading/trailing blanks'''
blank = re.compile('^\s*$')
def blanks(lst):
return itertools.takewhile(lambda x: blank.match(x), lst)
def drop_blanks(lst):
return itertools.dropwhile(lambda x: blank.match(x), lst)
def revl(lst):
return reversed(list(lst))
docs = list(docs_)
leading = sum(1 for _ in blanks(docs))
trailing = sum(1 for _ in blanks(reversed(docs)))
remaining = revl(drop_blanks(revl(drop_blanks(docs))))
for x in range(leading): outfp.write('\n')
for x in remaining: outfp.write('# %s' % x)
for x in range(trailing): outfp.write('\n')
def convert_litcoffee_to_coffee(infp, outfp):
docs, in_doc = [], True
coffee = re.compile('^\s{4}(.+)$')
for line in infp:
m = coffee.match(line)
if m:
if in_doc:
print_docs(docs, outfp)
docs = []
outfp.write('%s\n' % m.group(1))
in_doc = False
else:
docs.append(line)
in_doc = True
print_docs(docs, outfp)
if '__main__' == __name__:
import sys
convert_litcoffee_to_coffee(sys.stdin, sys.stdout)
|
Convert literate coffeescript into regular coffeescript
|
Convert literate coffeescript into regular coffeescript
|
Python
|
unlicense
|
cporter/Random
|
Convert literate coffeescript into regular coffeescript
|
#!/usr/bin/env python
import itertools, re
def print_docs(docs_, outfp):
'''print out docs, skipping the hashes for leading/trailing blanks'''
blank = re.compile('^\s*$')
def blanks(lst):
return itertools.takewhile(lambda x: blank.match(x), lst)
def drop_blanks(lst):
return itertools.dropwhile(lambda x: blank.match(x), lst)
def revl(lst):
return reversed(list(lst))
docs = list(docs_)
leading = sum(1 for _ in blanks(docs))
trailing = sum(1 for _ in blanks(reversed(docs)))
remaining = revl(drop_blanks(revl(drop_blanks(docs))))
for x in range(leading): outfp.write('\n')
for x in remaining: outfp.write('# %s' % x)
for x in range(trailing): outfp.write('\n')
def convert_litcoffee_to_coffee(infp, outfp):
docs, in_doc = [], True
coffee = re.compile('^\s{4}(.+)$')
for line in infp:
m = coffee.match(line)
if m:
if in_doc:
print_docs(docs, outfp)
docs = []
outfp.write('%s\n' % m.group(1))
in_doc = False
else:
docs.append(line)
in_doc = True
print_docs(docs, outfp)
if '__main__' == __name__:
import sys
convert_litcoffee_to_coffee(sys.stdin, sys.stdout)
|
<commit_before><commit_msg>Convert literate coffeescript into regular coffeescript<commit_after>
|
#!/usr/bin/env python
import itertools, re
def print_docs(docs_, outfp):
'''print out docs, skipping the hashes for leading/trailing blanks'''
blank = re.compile('^\s*$')
def blanks(lst):
return itertools.takewhile(lambda x: blank.match(x), lst)
def drop_blanks(lst):
return itertools.dropwhile(lambda x: blank.match(x), lst)
def revl(lst):
return reversed(list(lst))
docs = list(docs_)
leading = sum(1 for _ in blanks(docs))
trailing = sum(1 for _ in blanks(reversed(docs)))
remaining = revl(drop_blanks(revl(drop_blanks(docs))))
for x in range(leading): outfp.write('\n')
for x in remaining: outfp.write('# %s' % x)
for x in range(trailing): outfp.write('\n')
def convert_litcoffee_to_coffee(infp, outfp):
docs, in_doc = [], True
coffee = re.compile('^\s{4}(.+)$')
for line in infp:
m = coffee.match(line)
if m:
if in_doc:
print_docs(docs, outfp)
docs = []
outfp.write('%s\n' % m.group(1))
in_doc = False
else:
docs.append(line)
in_doc = True
print_docs(docs, outfp)
if '__main__' == __name__:
import sys
convert_litcoffee_to_coffee(sys.stdin, sys.stdout)
|
Convert literate coffeescript into regular coffeescript#!/usr/bin/env python
import itertools, re
def print_docs(docs_, outfp):
'''print out docs, skipping the hashes for leading/trailing blanks'''
blank = re.compile('^\s*$')
def blanks(lst):
return itertools.takewhile(lambda x: blank.match(x), lst)
def drop_blanks(lst):
return itertools.dropwhile(lambda x: blank.match(x), lst)
def revl(lst):
return reversed(list(lst))
docs = list(docs_)
leading = sum(1 for _ in blanks(docs))
trailing = sum(1 for _ in blanks(reversed(docs)))
remaining = revl(drop_blanks(revl(drop_blanks(docs))))
for x in range(leading): outfp.write('\n')
for x in remaining: outfp.write('# %s' % x)
for x in range(trailing): outfp.write('\n')
def convert_litcoffee_to_coffee(infp, outfp):
docs, in_doc = [], True
coffee = re.compile('^\s{4}(.+)$')
for line in infp:
m = coffee.match(line)
if m:
if in_doc:
print_docs(docs, outfp)
docs = []
outfp.write('%s\n' % m.group(1))
in_doc = False
else:
docs.append(line)
in_doc = True
print_docs(docs, outfp)
if '__main__' == __name__:
import sys
convert_litcoffee_to_coffee(sys.stdin, sys.stdout)
|
<commit_before><commit_msg>Convert literate coffeescript into regular coffeescript<commit_after>#!/usr/bin/env python
import itertools, re
def print_docs(docs_, outfp):
'''print out docs, skipping the hashes for leading/trailing blanks'''
blank = re.compile('^\s*$')
def blanks(lst):
return itertools.takewhile(lambda x: blank.match(x), lst)
def drop_blanks(lst):
return itertools.dropwhile(lambda x: blank.match(x), lst)
def revl(lst):
return reversed(list(lst))
docs = list(docs_)
leading = sum(1 for _ in blanks(docs))
trailing = sum(1 for _ in blanks(reversed(docs)))
remaining = revl(drop_blanks(revl(drop_blanks(docs))))
for x in range(leading): outfp.write('\n')
for x in remaining: outfp.write('# %s' % x)
for x in range(trailing): outfp.write('\n')
def convert_litcoffee_to_coffee(infp, outfp):
docs, in_doc = [], True
coffee = re.compile('^\s{4}(.+)$')
for line in infp:
m = coffee.match(line)
if m:
if in_doc:
print_docs(docs, outfp)
docs = []
outfp.write('%s\n' % m.group(1))
in_doc = False
else:
docs.append(line)
in_doc = True
print_docs(docs, outfp)
if '__main__' == __name__:
import sys
convert_litcoffee_to_coffee(sys.stdin, sys.stdout)
|
|
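A quick way to watch the converter work without shell redirection is to feed it in-memory streams. A minimal sketch, assuming print_docs and convert_litcoffee_to_coffee from the script above are in scope (the hyphenated file name rules out a normal import):

import io

sample = (
    "Adds two numbers.\n"
    "\n"
    "    add = (a, b) -> a + b\n"
    "\n"
    "That is the whole module.\n"
)
out = io.StringIO()
convert_litcoffee_to_coffee(io.StringIO(sample), out)
print(out.getvalue())
# expected:
# # Adds two numbers.
#
# add = (a, b) -> a + b
#
# # That is the whole module.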
7b08257332d227f039849553402483a7b0c97a41
|
test/packetdrop.py
|
test/packetdrop.py
|
import mpipe
import time
def echo(value):
time.sleep(0.125)
print(value)
return True
pipe2 = mpipe.Pipeline(mpipe.OrderedStage(echo))
class Filter(mpipe.OrderedWorker):
def __init__(self):
self.count = 0
def doTask(self, value):
if self.count == 0:
pipe2.put(value)
self.count += 1
elif self.count == 1:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe2.put(value)
self.count += 1
elif self.count == 2:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
pipe2.put(value)
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe1 = mpipe.Pipeline(mpipe.Stage(Filter))
for number in range(10000):
time.sleep(0.001)
pipe1.put(number)
pipe1.put(None)
pipe2.put(None)
for result in pipe1.results():
pass
|
Add timeout test (packet drop scenario).
|
Add timeout test (packet drop scenario).
|
Python
|
mit
|
vmlaker/mpipe
|
Add timeout test (packet drop scenario).
|
import mpipe
import time
def echo(value):
time.sleep(0.125)
print(value)
return True
pipe2 = mpipe.Pipeline(mpipe.OrderedStage(echo))
class Filter(mpipe.OrderedWorker):
def __init__(self):
self.count = 0
def doTask(self, value):
if self.count == 0:
pipe2.put(value)
self.count += 1
elif self.count == 1:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe2.put(value)
self.count += 1
elif self.count == 2:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
pipe2.put(value)
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe1 = mpipe.Pipeline(mpipe.Stage(Filter))
for number in range(10000):
time.sleep(0.001)
pipe1.put(number)
pipe1.put(None)
pipe2.put(None)
for result in pipe1.results():
pass
|
<commit_before><commit_msg>Add timeout test (packet drop scenario).<commit_after>
|
import mpipe
import time
def echo(value):
time.sleep(0.125)
print(value)
return True
pipe2 = mpipe.Pipeline(mpipe.OrderedStage(echo))
class Filter(mpipe.OrderedWorker):
def __init__(self):
self.count = 0
def doTask(self, value):
if self.count == 0:
pipe2.put(value)
self.count += 1
elif self.count == 1:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe2.put(value)
self.count += 1
elif self.count == 2:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
pipe2.put(value)
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe1 = mpipe.Pipeline(mpipe.Stage(Filter))
for number in range(10000):
time.sleep(0.001)
pipe1.put(number)
pipe1.put(None)
pipe2.put(None)
for result in pipe1.results():
pass
|
Add timeout test (packet drop scenario).import mpipe
import time
def echo(value):
time.sleep(0.125)
print(value)
return True
pipe2 = mpipe.Pipeline(mpipe.OrderedStage(echo))
class Filter(mpipe.OrderedWorker):
def __init__(self):
self.count = 0
def doTask(self, value):
if self.count == 0:
pipe2.put(value)
self.count += 1
elif self.count == 1:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe2.put(value)
self.count += 1
elif self.count == 2:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
pipe2.put(value)
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe1 = mpipe.Pipeline(mpipe.Stage(Filter))
for number in range(10000):
time.sleep(0.001)
pipe1.put(number)
pipe1.put(None)
pipe2.put(None)
for result in pipe1.results():
pass
|
<commit_before><commit_msg>Add timeout test (packet drop scenario).<commit_after>import mpipe
import time
def echo(value):
time.sleep(0.125)
print(value)
return True
pipe2 = mpipe.Pipeline(mpipe.OrderedStage(echo))
class Filter(mpipe.OrderedWorker):
def __init__(self):
self.count = 0
def doTask(self, value):
if self.count == 0:
pipe2.put(value)
self.count += 1
elif self.count == 1:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe2.put(value)
self.count += 1
elif self.count == 2:
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
pipe2.put(value)
valid, result = pipe2.get(0.00001)
if valid:
self.putResult(result)
self.count -= 1
pipe1 = mpipe.Pipeline(mpipe.Stage(Filter))
for number in range(10000):
time.sleep(0.001)
pipe1.put(number)
pipe1.put(None)
pipe2.put(None)
for result in pipe1.results():
pass
|
|
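For readers new to mpipe, the moving parts are easier to see in a stripped-down pipeline before the drop logic is layered on. A minimal sketch that sticks to the calls the test above already uses (Pipeline, OrderedStage, put, and results):

import mpipe

def double(value):
    return 2 * value

pipe = mpipe.Pipeline(mpipe.OrderedStage(double))
for number in range(5):
    pipe.put(number)
pipe.put(None)  # None signals end-of-stream, as in the test above
for result in pipe.results():
    print(result)  # 0, 2, 4, 6, 8 on separate lines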
2163fd238f01681dc101b5f3e06556ff21473597
|
spacy/tests/gold/test_biluo.py
|
spacy/tests/gold/test_biluo.py
|
from __future__ import unicode_literals
from ...gold import biluo_tags_from_offsets
from ...vocab import Vocab
from ...tokens.doc import Doc
import pytest
@pytest.fixture
def vocab():
return Vocab()
def test_U(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('London', False),
('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to London"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'U-LOC', 'O']
def test_BL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'L-LOC', 'O']
def test_BIL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'I-LOC', 'L-LOC', 'O']
def test_misalign(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley.', False)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', '', '', '']
|
Add tests for entity->biluo transformation
|
Add tests for entity->biluo transformation
|
Python
|
mit
|
recognai/spaCy,spacy-io/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,raphael0202/spaCy,banglakit/spaCy,aikramer2/spaCy,banglakit/spaCy,aikramer2/spaCy,honnibal/spaCy,oroszgy/spaCy.hu,banglakit/spaCy,explosion/spaCy,recognai/spaCy,Gregory-Howard/spaCy,banglakit/spaCy,explosion/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,explosion/spaCy,honnibal/spaCy,oroszgy/spaCy.hu,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,recognai/spaCy,aikramer2/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,banglakit/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,spacy-io/spaCy,Gregory-Howard/spaCy,explosion/spaCy,aikramer2/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,spacy-io/spaCy,honnibal/spaCy,banglakit/spaCy,recognai/spaCy
|
Add tests for entity->biluo transformation
|
from __future__ import unicode_literals
from ...gold import biluo_tags_from_offsets
from ...vocab import Vocab
from ...tokens.doc import Doc
import pytest
@pytest.fixture
def vocab():
return Vocab()
def test_U(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('London', False),
('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to London"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'U-LOC', 'O']
def test_BL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'L-LOC', 'O']
def test_BIL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'I-LOC', 'L-LOC', 'O']
def test_misalign(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley.', False)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', '', '', '']
|
<commit_before><commit_msg>Add tests for entity->biluo transformation<commit_after>
|
from __future__ import unicode_literals
from ...gold import biluo_tags_from_offsets
from ...vocab import Vocab
from ...tokens.doc import Doc
import pytest
@pytest.fixture
def vocab():
return Vocab()
def test_U(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('London', False),
('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to London"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'U-LOC', 'O']
def test_BL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'L-LOC', 'O']
def test_BIL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'I-LOC', 'L-LOC', 'O']
def test_misalign(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley.', False)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', '', '', '']
|
Add tests for entity->biluo transformationfrom __future__ import unicode_literals
from ...gold import biluo_tags_from_offsets
from ...vocab import Vocab
from ...tokens.doc import Doc
import pytest
@pytest.fixture
def vocab():
return Vocab()
def test_U(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('London', False),
('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to London"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'U-LOC', 'O']
def test_BL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'L-LOC', 'O']
def test_BIL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'I-LOC', 'L-LOC', 'O']
def test_misalign(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley.', False)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', '', '', '']
|
<commit_before><commit_msg>Add tests for entity->biluo transformation<commit_after>from __future__ import unicode_literals
from ...gold import biluo_tags_from_offsets
from ...vocab import Vocab
from ...tokens.doc import Doc
import pytest
@pytest.fixture
def vocab():
return Vocab()
def test_U(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('London', False),
('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to London"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'U-LOC', 'O']
def test_BL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'L-LOC', 'O']
def test_BIL(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley', False), ('.', True)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', 'B-LOC', 'I-LOC', 'L-LOC', 'O']
def test_misalign(vocab):
orths_and_spaces = [('I', True), ('flew', True), ('to', True), ('San', True),
('Francisco', True), ('Valley.', False)]
doc = Doc(vocab, orths_and_spaces=orths_and_spaces)
entities = [(len("I flew to "), len("I flew to San Francisco Valley"), 'LOC')]
tags = biluo_tags_from_offsets(doc, entities)
assert tags == ['O', 'O', 'O', '', '', '']
|
|
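Outside the test suite the conversion runs the same way; a minimal sketch mirroring the first fixture above (the import paths and Doc constructor match the spaCy version these tests target, not current releases):

from spacy.gold import biluo_tags_from_offsets
from spacy.vocab import Vocab
from spacy.tokens.doc import Doc

words = [('I', True), ('flew', True), ('to', True), ('London', False), ('.', True)]
doc = Doc(Vocab(), orths_and_spaces=words)
entities = [(len("I flew to "), len("I flew to London"), 'LOC')]
print(biluo_tags_from_offsets(doc, entities))  # ['O', 'O', 'O', 'U-LOC', 'O']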
a292fff44aff6eb4f971aff3ccf3d3871ec68836
|
DataWrangling/CaseStudy/audit.py
|
DataWrangling/CaseStudy/audit.py
|
"""
Your task in this exercise has two steps:
- audit the OSMFILE and change the variable 'mapping' to reflect the changes needed to fix
the unexpected street types to the appropriate ones in the expected list.
You have to add mappings only for the actual problems you find in this OSMFILE,
not a generalized solution, since that may and will depend on the particular area you are auditing.
- write the update_name function, to actually fix the street name.
The function takes a string with street name as an argument and should return the fixed name
We have provided a simple test so that you see what exactly is expected
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
import os
OSMFILE = "example3.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square", "Lane", "Road",
"Trail", "Parkway", "Commons"]
# UPDATE THIS VARIABLE
mapping = { "St": "Street",
"St.": "Street",
"Rd.": "Road",
"Ave": "Avenue"
}
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
def audit(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
return street_types
def update_name(name, mapping):
# YOUR CODE HERE
    for key, value in sorted(mapping.items(), key=lambda kv: -len(kv[0])):  # longest keys first so 'St.' beats 'St'
if name.find(key) != -1:
name = name.replace(key, value)
break
return name
def test():
os.chdir('./data')
st_types = audit(OSMFILE)
print (st_types)
assert len(st_types) == 3
pprint.pprint(dict(st_types))
    for st_type, ways in st_types.items():
for name in ways:
better_name = update_name(name, mapping)
print (name, "=>", better_name)
if name == "West Lexington St.":
assert better_name == "West Lexington Street"
if name == "Baldwin Rd.":
assert better_name == "Baldwin Road"
if __name__ == '__main__':
test()
|
Add a script with a function that takes a street name as a string and returns the fixed name
|
feat: Add a script with a function that takes a street name as a string and returns the fixed name
|
Python
|
mit
|
aguijarro/DataSciencePython
|
feat: Add a script with a function that takes a street name as a string and returns the fixed name
|
"""
Your task in this exercise has two steps:
- audit the OSMFILE and change the variable 'mapping' to reflect the changes needed to fix
the unexpected street types to the appropriate ones in the expected list.
You have to add mappings only for the actual problems you find in this OSMFILE,
not a generalized solution, since that may and will depend on the particular area you are auditing.
- write the update_name function, to actually fix the street name.
The function takes a string with street name as an argument and should return the fixed name
We have provided a simple test so that you see what exactly is expected
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
import os
OSMFILE = "example3.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square", "Lane", "Road",
"Trail", "Parkway", "Commons"]
# UPDATE THIS VARIABLE
mapping = { "St": "Street",
"St.": "Street",
"Rd.": "Road",
"Ave": "Avenue"
}
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
def audit(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
return street_types
def update_name(name, mapping):
# YOUR CODE HERE
    for key, value in sorted(mapping.items(), key=lambda kv: -len(kv[0])):  # longest keys first so 'St.' beats 'St'
if name.find(key) != -1:
name = name.replace(key, value)
break
return name
def test():
os.chdir('./data')
st_types = audit(OSMFILE)
print (st_types)
assert len(st_types) == 3
pprint.pprint(dict(st_types))
    for st_type, ways in st_types.items():
for name in ways:
better_name = update_name(name, mapping)
print (name, "=>", better_name)
if name == "West Lexington St.":
assert better_name == "West Lexington Street"
if name == "Baldwin Rd.":
assert better_name == "Baldwin Road"
if __name__ == '__main__':
test()
|
<commit_before><commit_msg>feat: Add a script with a function that takes a street name as a string and returns the fixed name<commit_after>
|
"""
Your task in this exercise has two steps:
- audit the OSMFILE and change the variable 'mapping' to reflect the changes needed to fix
the unexpected street types to the appropriate ones in the expected list.
You have to add mappings only for the actual problems you find in this OSMFILE,
not a generalized solution, since that may and will depend on the particular area you are auditing.
- write the update_name function, to actually fix the street name.
The function takes a string with street name as an argument and should return the fixed name
We have provided a simple test so that you see what exactly is expected
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
import os
OSMFILE = "example3.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square", "Lane", "Road",
"Trail", "Parkway", "Commons"]
# UPDATE THIS VARIABLE
mapping = { "St": "Street",
"St.": "Street",
"Rd.": "Road",
"Ave": "Avenue"
}
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
def audit(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
return street_types
def update_name(name, mapping):
# YOUR CODE HERE
    for key, value in sorted(mapping.items(), key=lambda kv: -len(kv[0])):  # longest keys first so 'St.' beats 'St'
if name.find(key) != -1:
name = name.replace(key, value)
break
return name
def test():
os.chdir('./data')
st_types = audit(OSMFILE)
print (st_types)
assert len(st_types) == 3
pprint.pprint(dict(st_types))
    for st_type, ways in st_types.items():
for name in ways:
better_name = update_name(name, mapping)
print (name, "=>", better_name)
if name == "West Lexington St.":
assert better_name == "West Lexington Street"
if name == "Baldwin Rd.":
assert better_name == "Baldwin Road"
if __name__ == '__main__':
test()
|
feat: Add a script with a function that takes a street name as a string and returns the fixed name"""
Your task in this exercise has two steps:
- audit the OSMFILE and change the variable 'mapping' to reflect the changes needed to fix
the unexpected street types to the appropriate ones in the expected list.
You have to add mappings only for the actual problems you find in this OSMFILE,
not a generalized solution, since that may and will depend on the particular area you are auditing.
- write the update_name function, to actually fix the street name.
The function takes a string with street name as an argument and should return the fixed name
We have provided a simple test so that you see what exactly is expected
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
import os
OSMFILE = "example3.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square", "Lane", "Road",
"Trail", "Parkway", "Commons"]
# UPDATE THIS VARIABLE
mapping = { "St": "Street",
"St.": "Street",
"Rd.": "Road",
"Ave": "Avenue"
}
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
def audit(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
return street_types
def update_name(name, mapping):
# YOUR CODE HERE
    for key, value in sorted(mapping.items(), key=lambda kv: -len(kv[0])):  # longest keys first so 'St.' beats 'St'
if name.find(key) != -1:
name = name.replace(key, value)
break
return name
def test():
os.chdir('./data')
st_types = audit(OSMFILE)
print (st_types)
assert len(st_types) == 3
pprint.pprint(dict(st_types))
for st_type, ways in st_types.iteritems():
for name in ways:
better_name = update_name(name, mapping)
print (name, "=>", better_name)
if name == "West Lexington St.":
assert better_name == "West Lexington Street"
if name == "Baldwin Rd.":
assert better_name == "Baldwin Road"
if __name__ == '__main__':
test()
|
<commit_before><commit_msg>feat: Add a script which contains a function that takes a string with street name as an argument and should return the fixed name<commit_after>"""
Your task in this exercise has two steps:
- audit the OSMFILE and change the variable 'mapping' to reflect the changes needed to fix
the unexpected street types to the appropriate ones in the expected list.
You have to add mappings only for the actual problems you find in this OSMFILE,
not a generalized solution, since that may and will depend on the particular area you are auditing.
- write the update_name function, to actually fix the street name.
The function takes a string with street name as an argument and should return the fixed name
We have provided a simple test so that you see what exactly is expected
"""
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
import os
OSMFILE = "example3.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square", "Lane", "Road",
"Trail", "Parkway", "Commons"]
# UPDATE THIS VARIABLE
mapping = { "St": "Street",
"St.": "Street",
"Rd.": "Road",
"Ave": "Avenue"
}
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
def audit(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
return street_types
def update_name(name, mapping):
# YOUR CODE HERE
for key, value in mapping.iteritems():
if name.find(key) != -1:
name = name.replace(key, value)
break
return name
def test():
os.chdir('./data')
st_types = audit(OSMFILE)
print (st_types)
assert len(st_types) == 3
pprint.pprint(dict(st_types))
for st_type, ways in st_types.iteritems():
for name in ways:
better_name = update_name(name, mapping)
print (name, "=>", better_name)
if name == "West Lexington St.":
assert better_name == "West Lexington Street"
if name == "Baldwin Rd.":
assert better_name == "Baldwin Road"
if __name__ == '__main__':
test()
|
|
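A note on the update_name implementation above: it relies on dict.iteritems (Python 2 only), and its substring search means the bare "St" key can also match names that merely contain "St" (e.g. "Stanton"). A minimal Python 3 sketch that avoids both issues by swapping only the trailing street type (the function body here is an assumption, not the repo's code):

# Hypothetical Python 3 rework of update_name; replaces only the final
# token of the name instead of doing a substring replace anywhere.
mapping = {"St": "Street", "St.": "Street", "Rd.": "Road", "Ave": "Avenue"}

def update_name(name, mapping):
    parts = name.split()
    if parts and parts[-1] in mapping:
        parts[-1] = mapping[parts[-1]]  # swap only the trailing street type
    return " ".join(parts)

assert update_name("West Lexington St.", mapping) == "West Lexington Street"
assert update_name("Baldwin Rd.", mapping) == "Baldwin Road"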
f28e4ac920454b34d6290959eba650702d08fd85
|
examples/one_to_ninety_nine.py
|
examples/one_to_ninety_nine.py
|
#
# one_to_ninety_nine.py
#
# Copyright 2021, Paul McGuire
#
# Parser/evaluator for expressions of numbers as written out in words:
# - one
# - seven
# - twelve
# - twenty six
# - forty-two
#
# BNF:
# units ::= one | two | three | ... | nine
# teens ::= ten | eleven | twelve | ... | nineteen
# tens ::= twenty | thirty | ... | ninety
# one_to_99 ::= units | teens | (tens [["-"] units])
#
import pyparsing as pp
def define_numeric_word_range(
names: str,
from_: int,
to_: int,
step: int = 1) -> pp.MatchFirst:
"""
Compose a MatchFirst of CaselessKeywords, given their names and values,
which when parsed, are converted to their value
"""
def define_numeric_word(nm: str, val: int):
return pp.CaselessKeyword(nm).add_parse_action(lambda: val)
names = names.split()
values = range(from_, to_ + 1, step)
return pp.MatchFirst(
define_numeric_word(name, value) for name, value in zip(names, values)
)
units = define_numeric_word_range(
"one two three four five six seven eight nine", 1, 9
).set_name("units")
teens = define_numeric_word_range(
"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen", 10, 19
).set_name("teens")
tens = define_numeric_word_range(
"twenty thirty forty fifty sixty seventy eighty ninety", 20, 90, step=10
).set_name("tens")
opt_dash = pp.Opt(pp.Suppress("-"))
twenty_to_99 = tens + pp.Opt(opt_dash + units)
one_to_99 = (units | teens | twenty_to_99).set_name("1-99")
one_to_99.add_parse_action(sum)
numeric_expression = one_to_99
if __name__ == "__main__":
numeric_expression.run_tests(
"""
one
seven
twelve
twenty six
forty-two
"""
)
# create railroad diagram
numeric_expression.create_diagram("one_to_99_diagram.html", vertical=5)
|
Add simplified 1-99 example, extracted from number_words.py
|
Add simplified 1-99 example, extracted from number_words.py
|
Python
|
mit
|
pyparsing/pyparsing,pyparsing/pyparsing
|
Add simplified 1-99 example, extracted from number_words.py
|
#
# one_to_ninety_nine.py
#
# Copyright 2021, Paul McGuire
#
# Parser/evaluator for expressions of numbers as written out in words:
# - one
# - seven
# - twelve
# - twenty six
# - forty-two
#
# BNF:
# units ::= one | two | three | ... | nine
# teens ::= ten | eleven | twelve | ... | nineteen
# tens ::= twenty | thirty | ... | ninety
# one_to_99 ::= units | teens | (tens [["-"] units])
#
import pyparsing as pp
def define_numeric_word_range(
names: str,
from_: int,
to_: int,
step: int = 1) -> pp.MatchFirst:
"""
Compose a MatchFirst of CaselessKeywords, given their names and values,
which when parsed, are converted to their value
"""
def define_numeric_word(nm: str, val: int):
return pp.CaselessKeyword(nm).add_parse_action(lambda: val)
names = names.split()
values = range(from_, to_ + 1, step)
return pp.MatchFirst(
define_numeric_word(name, value) for name, value in zip(names, values)
)
units = define_numeric_word_range(
"one two three four five six seven eight nine", 1, 9
).set_name("units")
teens = define_numeric_word_range(
"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen", 10, 19
).set_name("teens")
tens = define_numeric_word_range(
"twenty thirty forty fifty sixty seventy eighty ninety", 20, 90, step=10
).set_name("tens")
opt_dash = pp.Opt(pp.Suppress("-"))
twenty_to_99 = tens + pp.Opt(opt_dash + units)
one_to_99 = (units | teens | twenty_to_99).set_name("1-99")
one_to_99.add_parse_action(sum)
numeric_expression = one_to_99
if __name__ == "__main__":
numeric_expression.run_tests(
"""
one
seven
twelve
twenty six
forty-two
"""
)
# create railroad diagram
numeric_expression.create_diagram("one_to_99_diagram.html", vertical=5)
|
<commit_before><commit_msg>Add simplified 1-99 example, extracted from number_words.py<commit_after>
|
#
# one_to_ninety_nine.py
#
# Copyright 2021, Paul McGuire
#
# Parser/evaluator for expressions of numbers as written out in words:
# - one
# - seven
# - twelve
# - twenty six
# - forty-two
#
# BNF:
# units ::= one | two | three | ... | nine
# teens ::= ten | eleven | twelve | ... | nineteen
# tens ::= twenty | thirty | ... | ninety
# one_to_99 ::= units | teens | (tens [["-"] units])
#
import pyparsing as pp
def define_numeric_word_range(
names: str,
from_: int,
to_: int,
step: int = 1) -> pp.MatchFirst:
"""
Compose a MatchFirst of CaselessKeywords, given their names and values,
which when parsed, are converted to their value
"""
def define_numeric_word(nm: str, val: int):
return pp.CaselessKeyword(nm).add_parse_action(lambda: val)
names = names.split()
values = range(from_, to_ + 1, step)
return pp.MatchFirst(
define_numeric_word(name, value) for name, value in zip(names, values)
)
units = define_numeric_word_range(
"one two three four five six seven eight nine", 1, 9
).set_name("units")
teens = define_numeric_word_range(
"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen", 10, 19
).set_name("teens")
tens = define_numeric_word_range(
"twenty thirty forty fifty sixty seventy eighty ninety", 20, 90, step=10
).set_name("tens")
opt_dash = pp.Opt(pp.Suppress("-"))
twenty_to_99 = tens + pp.Opt(opt_dash + units)
one_to_99 = (units | teens | twenty_to_99).set_name("1-99")
one_to_99.add_parse_action(sum)
numeric_expression = one_to_99
if __name__ == "__main__":
numeric_expression.run_tests(
"""
one
seven
twelve
twenty six
forty-two
"""
)
# create railroad diagram
numeric_expression.create_diagram("one_to_99_diagram.html", vertical=5)
|
Add simplified 1-99 example, extracted from number_words.py#
# one_to_ninety_nine.py
#
# Copyright 2021, Paul McGuire
#
# Parser/evaluator for expressions of numbers as written out in words:
# - one
# - seven
# - twelve
# - twenty six
# - forty-two
#
# BNF:
# units ::= one | two | three | ... | nine
# teens ::= ten | eleven | twelve | ... | nineteen
# tens ::= twenty | thirty | ... | ninety
# one_to_99 ::= units | teens | (tens [["-"] units])
#
import pyparsing as pp
def define_numeric_word_range(
names: str,
from_: int,
to_: int,
step: int = 1) -> pp.MatchFirst:
"""
Compose a MatchFirst of CaselessKeywords, given their names and values,
which when parsed, are converted to their value
"""
def define_numeric_word(nm: str, val: int):
return pp.CaselessKeyword(nm).add_parse_action(lambda: val)
names = names.split()
values = range(from_, to_ + 1, step)
return pp.MatchFirst(
define_numeric_word(name, value) for name, value in zip(names, values)
)
units = define_numeric_word_range(
"one two three four five six seven eight nine", 1, 9
).set_name("units")
teens = define_numeric_word_range(
"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen", 10, 19
).set_name("teens")
tens = define_numeric_word_range(
"twenty thirty forty fifty sixty seventy eighty ninety", 20, 90, step=10
).set_name("tens")
opt_dash = pp.Opt(pp.Suppress("-"))
twenty_to_99 = tens + pp.Opt(opt_dash + units)
one_to_99 = (units | teens | twenty_to_99).set_name("1-99")
one_to_99.add_parse_action(sum)
numeric_expression = one_to_99
if __name__ == "__main__":
numeric_expression.run_tests(
"""
one
seven
twelve
twenty six
forty-two
"""
)
# create railroad diagram
numeric_expression.create_diagram("one_to_99_diagram.html", vertical=5)
|
<commit_before><commit_msg>Add simplified 1-99 example, extracted from number_words.py<commit_after>#
# one_to_ninety_nine.py
#
# Copyright 2021, Paul McGuire
#
# Parser/evaluator for expressions of numbers as written out in words:
# - one
# - seven
# - twelve
# - twenty six
# - forty-two
#
# BNF:
# units ::= one | two | three | ... | nine
# teens ::= ten | eleven | twelve | ... | nineteen
# tens ::= twenty | thirty | ... | ninety
# one_to_99 ::= units | teens | (tens [["-"] units])
#
import pyparsing as pp
def define_numeric_word_range(
names: str,
from_: int,
to_: int,
step: int = 1) -> pp.MatchFirst:
"""
Compose a MatchFirst of CaselessKeywords, given their names and values,
which when parsed, are converted to their value
"""
def define_numeric_word(nm: str, val: int):
return pp.CaselessKeyword(nm).add_parse_action(lambda: val)
names = names.split()
values = range(from_, to_ + 1, step)
return pp.MatchFirst(
define_numeric_word(name, value) for name, value in zip(names, values)
)
units = define_numeric_word_range(
"one two three four five six seven eight nine", 1, 9
).set_name("units")
teens = define_numeric_word_range(
"ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen", 10, 19
).set_name("teens")
tens = define_numeric_word_range(
"twenty thirty forty fifty sixty seventy eighty ninety", 20, 90, step=10
).set_name("tens")
opt_dash = pp.Opt(pp.Suppress("-"))
twenty_to_99 = tens + pp.Opt(opt_dash + units)
one_to_99 = (units | teens | twenty_to_99).set_name("1-99")
one_to_99.add_parse_action(sum)
numeric_expression = one_to_99
if __name__ == "__main__":
numeric_expression.run_tests(
"""
one
seven
twelve
twenty six
forty-two
"""
)
# create railroad diagram
numeric_expression.create_diagram("one_to_99_diagram.html", vertical=5)
|
|
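A quick check of the grammar above, assuming pyparsing 3.x and that the module is saved as one_to_ninety_nine.py (both assumptions; the example file itself only runs run_tests):

# Hypothetical usage of the parser defined above.
from one_to_ninety_nine import numeric_expression

print(numeric_expression.parse_string("seven")[0])       # 7
print(numeric_expression.parse_string("twenty six")[0])  # 26
print(numeric_expression.parse_string("forty-two")[0])   # 42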
45bdbad2aac5aaf78ef12cb0ea87d7e5212939d3
|
lab/disruptors/n9k_ip_block.py
|
lab/disruptors/n9k_ip_block.py
|
def start(context, log, args):
import time
duration = args['duration']
period = 20
n9k1_ip, n9k2_ip, _, _ = context.n9k_creds()
log.info('Blocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
start_time = time.time()
for controller in context.controllers():
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
while start_time + duration > time.time():
log.info('N9K IPs are blocked.')
time.sleep(period)
log.info('Unblocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
for controller in context.controllers():
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
|
Add n9k IP iptables block
|
Add n9k IP iptables block
Change-Id: Ife7168aebd5de6d017c48d3d7b6b1a0227c25f7c
|
Python
|
apache-2.0
|
CiscoSystems/os-sqe,CiscoSystems/os-sqe,CiscoSystems/os-sqe
|
Add n9k IP iptables block
Change-Id: Ife7168aebd5de6d017c48d3d7b6b1a0227c25f7c
|
def start(context, log, args):
import time
duration = args['duration']
period = 20
n9k1_ip, n9k2_ip, _, _ = context.n9k_creds()
log.info('Blocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
start_time = time.time()
for controller in context.controllers():
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
while start_time + duration > time.time():
log.info('N9K IPs are blocked.')
time.sleep(period)
log.info('Unblocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
for controller in context.controllers():
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
|
<commit_before><commit_msg>Add n9k IP iptables block
Change-Id: Ife7168aebd5de6d017c48d3d7b6b1a0227c25f7c<commit_after>
|
def start(context, log, args):
import time
duration = args['duration']
period = 20
n9k1_ip, n9k2_ip, _, _ = context.n9k_creds()
log.info('Blocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
start_time = time.time()
for controller in context.controllers():
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
while start_time + duration > time.time():
log.info('N9K IPs are blocked.')
time.sleep(period)
log.info('Unblocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
for controller in context.controllers():
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
|
Add n9k IP iptables block
Change-Id: Ife7168aebd5de6d017c48d3d7b6b1a0227c25f7cdef start(context, log, args):
import time
duration = args['duration']
period = 20
n9k1_ip, n9k2_ip, _, _ = context.n9k_creds()
log.info('Blocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
start_time = time.time()
for controller in context.controllers():
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
while start_time + duration > time.time():
log.info('N9K IPs are blocked.')
time.sleep(period)
log.info('Unblocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
for controller in context.controllers():
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
|
<commit_before><commit_msg>Add n9k IP iptables block
Change-Id: Ife7168aebd5de6d017c48d3d7b6b1a0227c25f7c<commit_after>def start(context, log, args):
import time
duration = args['duration']
period = 20
n9k1_ip, n9k2_ip, _, _ = context.n9k_creds()
log.info('Blocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
start_time = time.time()
for controller in context.controllers():
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -A OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
while start_time + duration > time.time():
log.info('N9K IPs are blocked.')
time.sleep(period)
log.info('Unblocking N9K IPs ({0},{1}) on controllers ...'.format(n9k1_ip, n9k2_ip))
for controller in context.controllers():
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k1_ip))
controller.run(command='iptables -D OUTPUT -d {0}/32 -j DROP'.format(n9k2_ip))
|
|
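The disruptor is easy to exercise without real hardware; a sketch with stub objects (FakeController and FakeContext are made up for illustration, start is the function above, and duration 0 makes the wait loop exit immediately):

import logging

class FakeController(object):
    def __init__(self):
        self.commands = []

    def run(self, command):
        self.commands.append(command)  # record instead of executing

class FakeContext(object):
    def __init__(self, ctrls):
        self._ctrls = ctrls

    def n9k_creds(self):
        return '10.0.0.1', '10.0.0.2', 'admin', 'secret'  # fake credentials

    def controllers(self):
        return self._ctrls

ctrl = FakeController()
start(FakeContext([ctrl]), logging.getLogger(__name__), {'duration': 0})
assert ctrl.commands[:2] == [
    'iptables -A OUTPUT -d 10.0.0.1/32 -j DROP',
    'iptables -A OUTPUT -d 10.0.0.2/32 -j DROP',
]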
cc558a1dc03bb8dec058c80e74e5cbdca7641011
|
features/tests/test_api.py
|
features/tests/test_api.py
|
from rest_framework.test import APITestCase
def has(d, key):
if d.get(key) is not None:
return True
else:
return False
class TestApi(APITestCase):
def test_root(self):
"""Test api root URL is accessible"""
request = self.client.get('/api/', format='json')
self.assertEqual(request.status_code, 200)
data = request.data
# Should have a url for features
self.assertTrue(has(data, 'features'))
# And productarea
self.assertTrue(has(data, 'productarea'))
# And client
self.assertTrue(has(data, 'client'))
|
Add tests for rest api
|
Add tests for rest api
|
Python
|
mit
|
wkevina/feature-requests-app,wkevina/feature-requests-app,wkevina/feature-requests-app
|
Add tests for rest api
|
from rest_framework.test import APITestCase
def has(d, key):
if d.get(key) is not None:
return True
else:
return False
class TestApi(APITestCase):
def test_root(self):
"""Test api root URL is accessible"""
request = self.client.get('/api/', format='json')
self.assertEqual(request.status_code, 200)
data = request.data
# Should have a url for features
self.assertTrue(has(data, 'features'))
# And productarea
self.assertTrue(has(data, 'productarea'))
# And client
self.assertTrue(has(data, 'client'))
|
<commit_before><commit_msg>Add tests for rest api<commit_after>
|
from rest_framework.test import APITestCase
def has(d, key):
if d.get(key) is not None:
return True
else:
return False
class TestApi(APITestCase):
def test_root(self):
"""Test api root URL is accessible"""
request = self.client.get('/api/', format='json')
self.assertEqual(request.status_code, 200)
data = request.data
# Should have a url for features
self.assertTrue(has(data, 'features'))
# And productarea
self.assertTrue(has(data, 'productarea'))
# And client
self.assertTrue(has(data, 'client'))
|
Add tests for rest apifrom rest_framework.test import APITestCase
def has(d, key):
if d.get(key) is not None:
return True
else:
return False
class TestApi(APITestCase):
def test_root(self):
"""Test api root URL is accessible"""
request = self.client.get('/api/', format='json')
self.assertEqual(request.status_code, 200)
data = request.data
# Should have a url for features
self.assertTrue(has(data, 'features'))
# And productarea
self.assertTrue(has(data, 'productarea'))
# And client
self.assertTrue(has(data, 'client'))
|
<commit_before><commit_msg>Add tests for rest api<commit_after>from rest_framework.test import APITestCase
def has(d, key):
if d.get(key) is not None:
return True
else:
return False
class TestApi(APITestCase):
def test_root(self):
"""Test api root URL is accessible"""
request = self.client.get('/api/', format='json')
self.assertEqual(request.status_code, 200)
data = request.data
# Should have a url for features
self.assertTrue(has(data, 'features'))
# And productarea
self.assertTrue(has(data, 'productarea'))
# And client
self.assertTrue(has(data, 'client'))
|
|
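A side note on the has helper above: it reduces to a single expression, e.g.

def has(d, key):
    """True when d maps key to a non-None value."""
    return d.get(key) is not None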
1c91f8112988b81af7440fc6e15e03dafc9ff680
|
share/management/commands/build_views.py
|
share/management/commands/build_views.py
|
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.migrations import Migration
from django.db.migrations import operations
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from share.models.base import ShareAbstract
class Command(BaseCommand):
MATERIALIZED_VIEW = '''
CREATE MATERIALIZED VIEW {concrete} AS (
SELECT version.*
FROM {current} AS current
LEFT JOIN {version} AS version
ON current.version = version.id
) WITH DATA;
'''
MATERIALIZED_VIEW_REVERSE = '''
DROP MATERIALIZED VIEW {concrete};
'''
PROCEDURE = '''
CREATE OR REPLACE FUNCTION after_{version}_insert() RETURNS trigger AS $$
BEGIN
INSERT INTO {current}(id, version)
VALUES(NEW.p_id, NEW.id)
ON CONFLICT (id) DO UPDATE
SET version=NEW.id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
'''
PROCEDURE_REVERSE = '''
DROP FUNCTION after_{version}_insert();
'''
TRIGGER = '''
CREATE TRIGGER {version}_insert
AFTER INSERT ON {version}
FOR EACH ROW
EXECUTE PROCEDURE after_{version}_insert();
'''
TRIGGER_REVERSE = '''
DROP TRIGGER {version}_insert
'''
can_import_settings = True
def handle(self, *args, **options):
ops = []
for model in apps.get_models(include_auto_created=True):
if not issubclass(model, ShareAbstract):
continue
names = {
'concrete': model._meta.db_table,
'version': model.Version._meta.db_table,
'current': model.Current._meta.db_table,
}
ops.extend([
operations.RunSQL(self.MATERIALIZED_VIEW.format(**names).strip(), reverse_sql=self.MATERIALIZED_VIEW_REVERSE.format(**names).strip()),
operations.RunSQL(self.PROCEDURE.format(**names).strip(), reverse_sql=self.PROCEDURE_REVERSE.format(**names).strip()),
operations.RunSQL(self.TRIGGER.format(**names).strip(), reverse_sql=self.TRIGGER_REVERSE.format(**names).strip()),
])
m = Migration('create_triggers_views', 'share')
m.operations = ops
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [m]}, graph=loader.graph,)
for migration in changes['share']:
writer = MigrationWriter(migration)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
Add management command to generate SQL constraints for SHARE versioning
|
Add management command to generate SQL constraints for SHARE versioning
|
Python
|
apache-2.0
|
laurenbarker/SHARE,zamattiac/SHARE,laurenbarker/SHARE,zamattiac/SHARE,aaxelb/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE,laurenbarker/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE,CenterForOpenScience/SHARE,zamattiac/SHARE
|
Add management command to generate SQL constraints for SHARE versioning
|
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.migrations import Migration
from django.db.migrations import operations
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from share.models.base import ShareAbstract
class Command(BaseCommand):
MATERIALIZED_VIEW = '''
CREATE MATERIALIZED VIEW {concrete} AS (
SELECT version.*
FROM {current} AS current
LEFT JOIN {version} AS version
ON current.version = version.id
) WITH DATA;
'''
MATERIALIZED_VIEW_REVERSE = '''
DROP MATERIALIZED VIEW {concrete};
'''
PROCEDURE = '''
CREATE OR REPLACE FUNCTION after_{version}_insert() RETURNS trigger AS $$
BEGIN
INSERT INTO {current}(id, version)
VALUES(NEW.p_id, NEW.id)
ON CONFLICT (id) DO UPDATE
SET version=NEW.id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
'''
PROCEDURE_REVERSE = '''
DROP FUNCTION after_{version}_insert();
'''
TRIGGER = '''
CREATE TRIGGER {version}_insert
AFTER INSERT ON {version}
FOR EACH ROW
EXECUTE PROCEDURE after_{version}_insert();
'''
TRIGGER_REVERSE = '''
DROP TRIGGER {version}_insert
'''
can_import_settings = True
def handle(self, *args, **options):
ops = []
for model in apps.get_models(include_auto_created=True):
if not issubclass(model, ShareAbstract):
continue
names = {
'concrete': model._meta.db_table,
'version': model.Version._meta.db_table,
'current': model.Current._meta.db_table,
}
ops.extend([
operations.RunSQL(self.MATERIALIZED_VIEW.format(**names).strip(), reverse_sql=self.MATERIALIZED_VIEW_REVERSE.format(**names).strip()),
operations.RunSQL(self.PROCEDURE.format(**names).strip(), reverse_sql=self.PROCEDURE_REVERSE.format(**names).strip()),
operations.RunSQL(self.TRIGGER.format(**names).strip(), reverse_sql=self.TRIGGER_REVERSE.format(**names).strip()),
])
m = Migration('create_triggers_views', 'share')
m.operations = ops
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [m]}, graph=loader.graph,)
for migration in changes['share']:
writer = MigrationWriter(migration)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
<commit_before><commit_msg>Add management command to generate SQL constraints for SHARE versioning<commit_after>
|
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.migrations import Migration
from django.db.migrations import operations
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from share.models.base import ShareAbstract
class Command(BaseCommand):
MATERIALIZED_VIEW = '''
CREATE MATERIALIZED VIEW {concrete} AS (
SELECT version.*
FROM {current} AS current
LEFT JOIN {version} AS version
ON current.version = version.id
) WITH DATA;
'''
MATERIALIZED_VIEW_REVERSE = '''
DROP MATERIALIZED VIEW {concrete};
'''
PROCEDURE = '''
CREATE OR REPLACE FUNCTION after_{version}_insert() RETURNS trigger AS $$
BEGIN
INSERT INTO {current}(id, version)
VALUES(NEW.p_id, NEW.id)
ON CONFLICT (id) DO UPDATE
SET version=NEW.id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
'''
PROCEDURE_REVERSE = '''
DROP FUNCTION after_{version}_insert();
'''
TRIGGER = '''
CREATE TRIGGER {version}_insert
AFTER INSERT ON {version}
FOR EACH ROW
EXECUTE PROCEDURE after_{version}_insert();
'''
TRIGGER_REVERSE = '''
DROP TRIGGER {version}_insert
'''
can_import_settings = True
def handle(self, *args, **options):
ops = []
for model in apps.get_models(include_auto_created=True):
if not issubclass(model, ShareAbstract):
continue
names = {
'concrete': model._meta.db_table,
'version': model.Version._meta.db_table,
'current': model.Current._meta.db_table,
}
ops.extend([
operations.RunSQL(self.MATERIALIZED_VIEW.format(**names).strip(), reverse_sql=self.MATERIALIZED_VIEW_REVERSE.format(**names).strip()),
operations.RunSQL(self.PROCEDURE.format(**names).strip(), reverse_sql=self.PROCEDURE_REVERSE.format(**names).strip()),
operations.RunSQL(self.TRIGGER.format(**names).strip(), reverse_sql=self.TRIGGER_REVERSE.format(**names).strip()),
])
m = Migration('create_triggers_views', 'share')
m.operations = ops
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [m]}, graph=loader.graph,)
for migration in changes['share']:
writer = MigrationWriter(migration)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
Add management command to generate SQL constraints for SHARE versioningfrom django.apps import apps
from django.core.management.base import BaseCommand
from django.db.migrations import Migration
from django.db.migrations import operations
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from share.models.base import ShareAbstract
class Command(BaseCommand):
MATERIALIZED_VIEW = '''
CREATE MATERIALIZED VIEW {concrete} AS (
SELECT version.*
FROM {current} AS current
LEFT JOIN {version} AS version
ON current.version = version.id
) WITH DATA;
'''
MATERIALIZED_VIEW_REVERSE = '''
DROP MATERIALIZED VIEW {concrete};
'''
PROCEDURE = '''
CREATE OR REPLACE FUNCTION after_{version}_insert() RETURNS trigger AS $$
BEGIN
INSERT INTO {current}(id, version)
VALUES(NEW.p_id, NEW.id)
ON CONFLICT (id) DO UPDATE
SET version=NEW.id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
'''
PROCEDURE_REVERSE = '''
DROP FUNCTION after_{version}_insert();
'''
TRIGGER = '''
CREATE TRIGGER {version}_insert
AFTER INSERT ON {version}
FOR EACH ROW
EXECUTE PROCEDURE after_{version}_insert();
'''
TRIGGER_REVERSE = '''
DROP TRIGGER {version}_insert
'''
can_import_settings = True
def handle(self, *args, **options):
ops = []
for model in apps.get_models(include_auto_created=True):
if not issubclass(model, ShareAbstract):
continue
names = {
'concrete': model._meta.db_table,
'version': model.Version._meta.db_table,
'current': model.Current._meta.db_table,
}
ops.extend([
operations.RunSQL(self.MATERIALIZED_VIEW.format(**names).strip(), reverse_sql=self.MATERIALIZED_VIEW_REVERSE.format(**names).strip()),
operations.RunSQL(self.PROCEDURE.format(**names).strip(), reverse_sql=self.PROCEDURE_REVERSE.format(**names).strip()),
operations.RunSQL(self.TRIGGER.format(**names).strip(), reverse_sql=self.TRIGGER_REVERSE.format(**names).strip()),
])
m = Migration('create_triggers_views', 'share')
m.operations = ops
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [m]}, graph=loader.graph,)
for migration in changes['share']:
writer = MigrationWriter(migration)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
<commit_before><commit_msg>Add management command to generate SQL constraints for SHARE versioning<commit_after>from django.apps import apps
from django.core.management.base import BaseCommand
from django.db.migrations import Migration
from django.db.migrations import operations
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from share.models.base import ShareAbstract
class Command(BaseCommand):
MATERIALIZED_VIEW = '''
CREATE MATERIALIZED VIEW {concrete} AS (
SELECT version.*
FROM {current} AS current
LEFT JOIN {version} AS version
ON current.version = version.id
) WITH DATA;
'''
MATERIALIZED_VIEW_REVERSE = '''
DROP MATERIALIZED VIEW {concrete};
'''
PROCEDURE = '''
CREATE OR REPLACE FUNCTION after_{version}_insert() RETURNS trigger AS $$
BEGIN
INSERT INTO {current}(id, version)
VALUES(NEW.p_id, NEW.id)
ON CONFLICT (id) DO UPDATE
SET version=NEW.id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
'''
PROCEDURE_REVERSE = '''
DROP FUNCTION after_{version}_insert();
'''
TRIGGER = '''
CREATE TRIGGER {version}_insert
AFTER INSERT ON {version}
FOR EACH ROW
EXECUTE PROCEDURE after_{version}_insert();
'''
TRIGGER_REVERSE = '''
DROP TRIGGER {version}_insert
'''
can_import_settings = True
def handle(self, *args, **options):
ops = []
for model in apps.get_models(include_auto_created=True):
if not issubclass(model, ShareAbstract):
continue
names = {
'concrete': model._meta.db_table,
'version': model.Version._meta.db_table,
'current': model.Current._meta.db_table,
}
ops.extend([
operations.RunSQL(self.MATERIALIZED_VIEW.format(**names).strip(), reverse_sql=self.MATERIALIZED_VIEW_REVERSE.format(**names).strip()),
operations.RunSQL(self.PROCEDURE.format(**names).strip(), reverse_sql=self.PROCEDURE_REVERSE.format(**names).strip()),
operations.RunSQL(self.TRIGGER.format(**names).strip(), reverse_sql=self.TRIGGER_REVERSE.format(**names).strip()),
])
m = Migration('create_triggers_views', 'share')
m.operations = ops
loader = MigrationLoader(None, ignore_no_migrations=True)
autodetector = MigrationAutodetector(loader.project_state(), ProjectState.from_apps(apps),)
changes = autodetector.arrange_for_graph(changes={'share': [m]}, graph=loader.graph,)
for migration in changes['share']:
writer = MigrationWriter(migration)
with open(writer.path, 'wb') as fp:
fp.write(writer.as_string())
|
|
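Because the SQL templates above are plain format strings, the generated statements are easy to preview; a sketch using made-up table names (share_person and friends are illustrative, not tables from the repo; inner indentation is elided in the expected output):

# Hypothetical preview of the trigger SQL emitted for one model.
names = {
    'concrete': 'share_person',
    'version': 'share_personversion',
    'current': 'share_personcurrent',
}
print(Command.TRIGGER.format(**names).strip())
# CREATE TRIGGER share_personversion_insert
# AFTER INSERT ON share_personversion
# FOR EACH ROW
# EXECUTE PROCEDURE after_share_personversion_insert();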
15e3c99f478596b93e022d8f917c8bc1774ddba1
|
k8s/models/third_party_resource.py
|
k8s/models/third_party_resource.py
|
#!/usr/bin/env python
# -*- coding: utf-8
from __future__ import absolute_import
import six
from .common import ObjectMeta
from ..base import Model
from ..fields import Field, ListField
class Version(Model):
name = Field(six.text_type)
value = Field(six.text_type)
class ThirdPartyResource(Model):
class Meta:
url_template = "/apis/extensions/v1beta1/thirdpartyresources/{name}"
metadata = Field(ObjectMeta)
description = Field(six.text_type)
versions = ListField(Version)
|
Create TPR when watching it and not present in cluster yet
|
Create TPR when watching it and not present in cluster yet
|
Python
|
apache-2.0
|
fiaas/k8s
|
Create TPR when watching it and not present in cluster yet
|
#!/usr/bin/env python
# -*- coding: utf-8
from __future__ import absolute_import
import six
from .common import ObjectMeta
from ..base import Model
from ..fields import Field, ListField
class Version(Model):
name = Field(six.text_type)
value = Field(six.text_type)
class ThirdPartyResource(Model):
class Meta:
url_template = "/apis/extensions/v1beta1/thirdpartyresources/{name}"
metadata = Field(ObjectMeta)
description = Field(six.text_type)
versions = ListField(Version)
|
<commit_before><commit_msg>Create TPR when watching it and not present in cluster yet<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8
from __future__ import absolute_import
import six
from .common import ObjectMeta
from ..base import Model
from ..fields import Field, ListField
class Version(Model):
name = Field(six.text_type)
value = Field(six.text_type)
class ThirdPartyResource(Model):
class Meta:
url_template = "/apis/extensions/v1beta1/thirdpartyresources/{name}"
metadata = Field(ObjectMeta)
description = Field(six.text_type)
versions = ListField(Version)
|
Create TPR when watching it and not present in cluster yet#!/usr/bin/env python
# -*- coding: utf-8
from __future__ import absolute_import
import six
from .common import ObjectMeta
from ..base import Model
from ..fields import Field, ListField
class Version(Model):
name = Field(six.text_type)
value = Field(six.text_type)
class ThirdPartyResource(Model):
class Meta:
url_template = "/apis/extensions/v1beta1/thirdpartyresources/{name}"
metadata = Field(ObjectMeta)
description = Field(six.text_type)
versions = ListField(Version)
|
<commit_before><commit_msg>Create TPR when watching it and not present in cluster yet<commit_after>#!/usr/bin/env python
# -*- coding: utf-8
from __future__ import absolute_import
import six
from .common import ObjectMeta
from ..base import Model
from ..fields import Field, ListField
class Version(Model):
name = Field(six.text_type)
value = Field(six.text_type)
class ThirdPartyResource(Model):
class Meta:
url_template = "/apis/extensions/v1beta1/thirdpartyresources/{name}"
metadata = Field(ObjectMeta)
description = Field(six.text_type)
versions = ListField(Version)
|
|
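A sketch of constructing the model (this client's Model base generally accepts field values as keyword arguments, which is an assumption here; the resource name and version string below are made up):

# Hypothetical construction of a ThirdPartyResource object.
tpr = ThirdPartyResource(
    metadata=ObjectMeta(name='paasbeta-application.schibsted.io'),
    description='A PaaS application definition',
    versions=[Version(name='v1beta1')],
)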
26bcb00d983a08e1dbc38d78934d7b363d394544
|
src/viewer/youtubedl_util.py
|
src/viewer/youtubedl_util.py
|
# vim: fileencoding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
import sys
import re
import subprocess as sp
from collections import namedtuple
import logging
logging.basicConfig(level=logging.DEBUG)
FORMAT_LINE_REGEX = re.compile(
r'(?P<id>\d+)' # unique id (itag)
r'\s+:\s+'
r'(?P<extension>\w+)' # video extension
r'\s+\[(?P<quality>\w+)\]' # quality, e.g 360x640 or 720p
r'(?!.*audio)', # not an audio stream
re.IGNORECASE
)
PROGRESS_LINE_REGEX = re.compile(
r'\[download\]\s+'
r'(?P<percent>\d+\.\d+)%\s+'
r'of (?P<size>\d+(?:\.\d+)?\w+)',
re.IGNORECASE
)
VideoFormat = namedtuple('VideoFormat', ['id', 'extension', 'quality'])
def check_available():
try:
return sp.check_call(['youtube-dl', '-h']) == 0
except (OSError, sp.CalledProcessError) as e:
logging.debug(e.args)
return False
def video_formats(url):
p = sp.Popen(['youtube-dl', '-F', url], stdout=sp.PIPE)
formats = []
for line in map(str.strip, p.stdout):
m = FORMAT_LINE_REGEX.match(line)
if m:
formats.append(VideoFormat(**m.groupdict()))
return formats
def download(url, fmt=None, progress=False):
def gen_progress(p):
while True:
# can't use 'for line in p.stdout', because
            # it returns all lines at once and waits for the
# process to terminate first
line = p.stdout.readline().strip()
logging.debug(line)
if not line:
break
m = PROGRESS_LINE_REGEX.match(line)
if m:
yield (float(m.group('percent')), m.group('size'))
args = ['youtube-dl', url]
if fmt is not None:
if isinstance(fmt, VideoFormat):
args.extend(['-f', fmt.id])
else:
args.extend(['-f', str(fmt)])
if progress is True:
return gen_progress(sp.Popen(args + ['--newline'], stdout=sp.PIPE))
else:
return sp.call(args, stdout=sys.stdout)
if __name__ == '__main__':
import sys
logging.getLogger().setLevel(logging.WARN)
# print('Available: ', check_available())
# pprint(video_formats(sys.argv[1]))
for p, size in download(sys.argv[1], 5, progress=True):
n = int(80 * (p / 100))
print('\r{}{} {}'.format('#' * n, ' ' * (80 - n), size), end='')
|
Add wrapper functions for youtube-dl
|
Add wrapper functions for youtube-dl
|
Python
|
mit
|
east825/youtube-dl-gui
|
Add wrapper functions for youtube-dl
|
# vim: fileencoding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
import sys
import re
import subprocess as sp
from collections import namedtuple
import logging
logging.basicConfig(level=logging.DEBUG)
FORMAT_LINE_REGEX = re.compile(
r'(?P<id>\d+)' # unique id (itag)
r'\s+:\s+'
r'(?P<extension>\w+)' # video extension
r'\s+\[(?P<quality>\w+)\]' # quality, e.g 360x640 or 720p
r'(?!.*audio)', # not an audio stream
re.IGNORECASE
)
PROGRESS_LINE_REGEX = re.compile(
r'\[download\]\s+'
r'(?P<percent>\d+\.\d+)%\s+'
r'of (?P<size>\d+(?:\.\d+)?\w+)',
re.IGNORECASE
)
VideoFormat = namedtuple('VideoFormat', ['id', 'extension', 'quality'])
def check_available():
try:
return sp.check_call(['youtube-dl', '-h']) == 0
except (OSError, sp.CalledProcessError) as e:
logging.debug(e.args)
return False
def video_formats(url):
p = sp.Popen(['youtube-dl', '-F', url], stdout=sp.PIPE)
formats = []
for line in map(str.strip, p.stdout):
m = FORMAT_LINE_REGEX.match(line)
if m:
formats.append(VideoFormat(**m.groupdict()))
return formats
def download(url, fmt=None, progress=False):
def gen_progress(p):
while True:
# can't use 'for line in p.stdout', because
            # it returns all lines at once and waits for the
# process to terminate first
line = p.stdout.readline().strip()
logging.debug(line)
if not line:
break
m = PROGRESS_LINE_REGEX.match(line)
if m:
yield (float(m.group('percent')), m.group('size'))
args = ['youtube-dl', url]
if fmt is not None:
if isinstance(fmt, VideoFormat):
args.extend(['-f', fmt.id])
else:
args.extend(['-f', str(fmt)])
if progress is True:
return gen_progress(sp.Popen(args + ['--newline'], stdout=sp.PIPE))
else:
return sp.call(args, stdout=sys.stdout)
if __name__ == '__main__':
import sys
logging.getLogger().setLevel(logging.WARN)
# print('Available: ', check_available())
# pprint(video_formats(sys.argv[1]))
for p, size in download(sys.argv[1], 5, progress=True):
n = int(80 * (p / 100))
print('\r{}{} {}'.format('#' * n, ' ' * (80 - n), size), end='')
|
<commit_before><commit_msg>Add wrapper functions for youtube-dl<commit_after>
|
# vim: fileencoding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
import sys
import re
import subprocess as sp
from collections import namedtuple
import logging
logging.basicConfig(level=logging.DEBUG)
FORMAT_LINE_REGEX = re.compile(
r'(?P<id>\d+)' # unique id (itag)
r'\s+:\s+'
r'(?P<extension>\w+)' # video extension
r'\s+\[(?P<quality>\w+)\]' # quality, e.g 360x640 or 720p
r'(?!.*audio)', # not an audio stream
re.IGNORECASE
)
PROGRESS_LINE_REGEX = re.compile(
r'\[download\]\s+'
r'(?P<percent>\d+\.\d+)%\s+'
r'of (?P<size>\d+(?:\.\d+)?\w+)',
re.IGNORECASE
)
VideoFormat = namedtuple('VideoFormat', ['id', 'extension', 'quality'])
def check_available():
try:
return sp.check_call(['youtube-dl', '-h']) == 0
except (OSError, sp.CalledProcessError) as e:
logging.debug(e.args)
return False
def video_formats(url):
p = sp.Popen(['youtube-dl', '-F', url], stdout=sp.PIPE)
formats = []
for line in map(str.strip, p.stdout):
m = FORMAT_LINE_REGEX.match(line)
if m:
formats.append(VideoFormat(**m.groupdict()))
return formats
def download(url, fmt=None, progress=False):
def gen_progress(p):
while True:
# can't use 'for line in p.stdout', because
            # it returns all lines at once and waits for the
# process to terminate first
line = p.stdout.readline().strip()
logging.debug(line)
if not line:
break
m = PROGRESS_LINE_REGEX.match(line)
if m:
yield (float(m.group('percent')), m.group('size'))
args = ['youtube-dl', url]
if fmt is not None:
if isinstance(fmt, VideoFormat):
args.extend(['-f', fmt.id])
else:
args.extend(['-f', str(fmt)])
if progress is True:
return gen_progress(sp.Popen(args + ['--newline'], stdout=sp.PIPE))
else:
return sp.call(args, stdout=sys.stdout)
if __name__ == '__main__':
import sys
logging.getLogger().setLevel(logging.WARN)
# print('Available: ', check_available())
# pprint(video_formats(sys.argv[1]))
for p, size in download(sys.argv[1], 5, progress=True):
n = int(80 * (p / 100))
print('\r{}{} {}'.format('#' * n, ' ' * (80 - n), size), end='')
|
Add wrapper functions for youtube-dl# vim: fileencoding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
import sys
import re
import subprocess as sp
from collections import namedtuple
import logging
logging.basicConfig(level=logging.DEBUG)
FORMAT_LINE_REGEX = re.compile(
r'(?P<id>\d+)' # unique id (itag)
r'\s+:\s+'
r'(?P<extension>\w+)' # video extension
r'\s+\[(?P<quality>\w+)\]' # quality, e.g 360x640 or 720p
r'(?!.*audio)', # not an audio stream
re.IGNORECASE
)
PROGRESS_LINE_REGEX = re.compile(
r'\[download\]\s+'
r'(?P<percent>\d+\.\d+)%\s+'
r'of (?P<size>\d+(?:\.\d+)?\w+)',
re.IGNORECASE
)
VideoFormat = namedtuple('VideoFormat', ['id', 'extension', 'quality'])
def check_available():
try:
return sp.check_call(['youtube-dl', '-h']) == 0
except (OSError, sp.CalledProcessError) as e:
logging.debug(e.args)
return False
def video_formats(url):
p = sp.Popen(['youtube-dl', '-F', url], stdout=sp.PIPE)
formats = []
for line in map(str.strip, p.stdout):
m = FORMAT_LINE_REGEX.match(line)
if m:
formats.append(VideoFormat(**m.groupdict()))
return formats
def download(url, fmt=None, progress=False):
def gen_progress(p):
while True:
# can't use 'for line in p.stdout', because
            # it returns all lines at once and waits for the
# process to terminate first
line = p.stdout.readline().strip()
logging.debug(line)
if not line:
break
m = PROGRESS_LINE_REGEX.match(line)
if m:
yield (float(m.group('percent')), m.group('size'))
args = ['youtube-dl', url]
if fmt is not None:
if isinstance(fmt, VideoFormat):
args.extend(['-f', fmt.id])
else:
args.extend(['-f', str(fmt)])
if progress is True:
return gen_progress(sp.Popen(args + ['--newline'], stdout=sp.PIPE))
else:
return sp.call(args, stdout=sys.stdout)
if __name__ == '__main__':
import sys
logging.getLogger().setLevel(logging.WARN)
# print('Available: ', check_available())
# pprint(video_formats(sys.argv[1]))
for p, size in download(sys.argv[1], 5, progress=True):
n = int(80 * (p / 100))
print('\r{}{} {}'.format('#' * n, ' ' * (80 - n), size), end='')
|
<commit_before><commit_msg>Add wrapper functions for youtube-dl<commit_after># vim: fileencoding=utf-8
from __future__ import print_function
from __future__ import unicode_literals
import sys
import re
import subprocess as sp
from collections import namedtuple
import logging
logging.basicConfig(level=logging.DEBUG)
FORMAT_LINE_REGEX = re.compile(
r'(?P<id>\d+)' # unique id (itag)
r'\s+:\s+'
r'(?P<extension>\w+)' # video extension
r'\s+\[(?P<quality>\w+)\]' # quality, e.g 360x640 or 720p
r'(?!.*audio)', # not an audio stream
re.IGNORECASE
)
PROGRESS_LINE_REGEX = re.compile(
r'\[download\]\s+'
r'(?P<percent>\d+\.\d+)%\s+'
r'of (?P<size>\d+(?:\.\d+)?\w+)',
re.IGNORECASE
)
VideoFormat = namedtuple('VideoFormat', ['id', 'extension', 'quality'])
def check_available():
try:
return sp.check_call(['youtube-dl', '-h']) == 0
except (OSError, sp.CalledProcessError) as e:
logging.debug(e.args)
return False
def video_formats(url):
p = sp.Popen(['youtube-dl', '-F', url], stdout=sp.PIPE)
formats = []
for line in map(str.strip, p.stdout):
m = FORMAT_LINE_REGEX.match(line)
if m:
formats.append(VideoFormat(**m.groupdict()))
return formats
def download(url, fmt=None, progress=False):
def gen_progress(p):
while True:
# can't use 'for line in p.stdout', because
            # it returns all lines at once and waits for the
# process to terminate first
line = p.stdout.readline().strip()
logging.debug(line)
if not line:
break
m = PROGRESS_LINE_REGEX.match(line)
if m:
yield (float(m.group('percent')), m.group('size'))
args = ['youtube-dl', url]
if fmt is not None:
if isinstance(fmt, VideoFormat):
args.extend(['-f', fmt.id])
else:
args.extend(['-f', str(fmt)])
if progress is True:
return gen_progress(sp.Popen(args + ['--newline'], stdout=sp.PIPE))
else:
return sp.call(args, stdout=sys.stdout)
if __name__ == '__main__':
import sys
logging.getLogger().setLevel(logging.WARN)
# print('Available: ', check_available())
# pprint(video_formats(sys.argv[1]))
for p, size in download(sys.argv[1], 5, progress=True):
n = int(80 * (p / 100))
print('\r{}{} {}'.format('#' * n, ' ' * (80 - n), size), end='')
|
|
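Putting the wrappers together (the URL is a placeholder and itag 18 is just a commonly available mp4 format; both are assumptions):

# Hypothetical end-to-end usage of the wrappers above.
url = 'https://www.youtube.com/watch?v=EXAMPLE_ID'
if check_available():
    for fmt in video_formats(url):
        print(fmt.id, fmt.extension, fmt.quality)
    for percent, size in download(url, fmt=18, progress=True):
        print('{:.1f}% of {}'.format(percent, size))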
a03581a1f64596717a8e4656e285f543cf6b3b77
|
scripts/test-param-activated.py
|
scripts/test-param-activated.py
|
#!/usr/bin/python3
from pathlib import Path
import re
import shlex
import json
tests_root_path = Path("./tests/regression")
# copied from options
activated_default = set([
"expRelation", "base", "threadid", "threadflag", "threadreturn",
"escape", "mutexEvents", "mutex", "access", "mallocWrapper", "mhp",
"assert"
])
for test_path in tests_root_path.glob("*/*.c"):
# print(test_path)
with test_path.open() as test_file:
line = test_file.readline().strip()
# print(line)
m = re.match(r"^//.*PARAM.*:\s*(.*)$", line)
if m is not None:
param = m.group(1)
params = shlex.split(param)
if "ana.activated" in params:
activated_i = params.index("ana.activated")
activated_str = params[activated_i + 1]
activated_str = activated_str.replace("'","\"") # silly Goblint JSON
# print(activated)
activated = set(json.loads(activated_str))
added = activated - activated_default
removed = activated_default - activated
# print(added, removed)
if added or removed:
print(test_path)
if added:
# print(f" added: {added}")
args_str = ""
for analysis in added:
args_str += f" --set ana.activated[+] {analysis}"
print(f" added:{args_str}")
if removed:
# print(f" removed: {removed}")
args_str = ""
for analysis in removed:
args_str += f" --set ana.activated[-] {analysis}"
print(f" removed:{args_str}")
|
Add script for finding tests with explicit ana.activated
|
Add script for finding tests with explicit ana.activated
|
Python
|
mit
|
goblint/analyzer,goblint/analyzer,goblint/analyzer,goblint/analyzer,goblint/analyzer
|
Add script for finding tests with explicit ana.activated
|
#!/usr/bin/python3
from pathlib import Path
import re
import shlex
import json
tests_root_path = Path("./tests/regression")
# copied from options
activated_default = set([
"expRelation", "base", "threadid", "threadflag", "threadreturn",
"escape", "mutexEvents", "mutex", "access", "mallocWrapper", "mhp",
"assert"
])
for test_path in tests_root_path.glob("*/*.c"):
# print(test_path)
with test_path.open() as test_file:
line = test_file.readline().strip()
# print(line)
m = re.match(r"^//.*PARAM.*:\s*(.*)$", line)
if m is not None:
param = m.group(1)
params = shlex.split(param)
if "ana.activated" in params:
activated_i = params.index("ana.activated")
activated_str = params[activated_i + 1]
activated_str = activated_str.replace("'","\"") # silly Goblint JSON
# print(activated)
activated = set(json.loads(activated_str))
added = activated - activated_default
removed = activated_default - activated
# print(added, removed)
if added or removed:
print(test_path)
if added:
# print(f" added: {added}")
args_str = ""
for analysis in added:
args_str += f" --set ana.activated[+] {analysis}"
print(f" added:{args_str}")
if removed:
# print(f" removed: {removed}")
args_str = ""
for analysis in removed:
args_str += f" --set ana.activated[-] {analysis}"
print(f" removed:{args_str}")
|
<commit_before><commit_msg>Add script for finding tests with explicit ana.activated<commit_after>
|
#!/usr/bin/python3
from pathlib import Path
import re
import shlex
import json
tests_root_path = Path("./tests/regression")
# copied from options
activated_default = set([
"expRelation", "base", "threadid", "threadflag", "threadreturn",
"escape", "mutexEvents", "mutex", "access", "mallocWrapper", "mhp",
"assert"
])
for test_path in tests_root_path.glob("*/*.c"):
# print(test_path)
with test_path.open() as test_file:
line = test_file.readline().strip()
# print(line)
m = re.match(r"^//.*PARAM.*:\s*(.*)$", line)
if m is not None:
param = m.group(1)
params = shlex.split(param)
if "ana.activated" in params:
activated_i = params.index("ana.activated")
activated_str = params[activated_i + 1]
activated_str = activated_str.replace("'","\"") # silly Goblint JSON
# print(activated)
activated = set(json.loads(activated_str))
added = activated - activated_default
removed = activated_default - activated
# print(added, removed)
if added or removed:
print(test_path)
if added:
# print(f" added: {added}")
args_str = ""
for analysis in added:
args_str += f" --set ana.activated[+] {analysis}"
print(f" added:{args_str}")
if removed:
# print(f" removed: {removed}")
args_str = ""
for analysis in removed:
args_str += f" --set ana.activated[-] {analysis}"
print(f" removed:{args_str}")
|
Add script for finding tests with explicit ana.activated#!/usr/bin/python3
from pathlib import Path
import re
import shlex
import json
tests_root_path = Path("./tests/regression")
# copied from options
activated_default = set([
"expRelation", "base", "threadid", "threadflag", "threadreturn",
"escape", "mutexEvents", "mutex", "access", "mallocWrapper", "mhp",
"assert"
])
for test_path in tests_root_path.glob("*/*.c"):
# print(test_path)
with test_path.open() as test_file:
line = test_file.readline().strip()
# print(line)
m = re.match(r"^//.*PARAM.*:\s*(.*)$", line)
if m is not None:
param = m.group(1)
params = shlex.split(param)
if "ana.activated" in params:
activated_i = params.index("ana.activated")
activated_str = params[activated_i + 1]
activated_str = activated_str.replace("'","\"") # silly Goblint JSON
# print(activated)
activated = set(json.loads(activated_str))
added = activated - activated_default
removed = activated_default - activated
# print(added, removed)
if added or removed:
print(test_path)
if added:
# print(f" added: {added}")
args_str = ""
for analysis in added:
args_str += f" --set ana.activated[+] {analysis}"
print(f" added:{args_str}")
if removed:
# print(f" removed: {removed}")
args_str = ""
for analysis in removed:
args_str += f" --set ana.activated[-] {analysis}"
print(f" removed:{args_str}")
|
<commit_before><commit_msg>Add script for finding tests with explicit ana.activated<commit_after>#!/usr/bin/python3
from pathlib import Path
import re
import shlex
import json
tests_root_path = Path("./tests/regression")
# copied from options
activated_default = set([
"expRelation", "base", "threadid", "threadflag", "threadreturn",
"escape", "mutexEvents", "mutex", "access", "mallocWrapper", "mhp",
"assert"
])
for test_path in tests_root_path.glob("*/*.c"):
# print(test_path)
with test_path.open() as test_file:
line = test_file.readline().strip()
# print(line)
m = re.match(r"^//.*PARAM.*:\s*(.*)$", line)
if m is not None:
param = m.group(1)
params = shlex.split(param)
if "ana.activated" in params:
activated_i = params.index("ana.activated")
activated_str = params[activated_i + 1]
activated_str = activated_str.replace("'","\"") # silly Goblint JSON
# print(activated)
activated = set(json.loads(activated_str))
added = activated - activated_default
removed = activated_default - activated
# print(added, removed)
if added or removed:
print(test_path)
if added:
# print(f" added: {added}")
args_str = ""
for analysis in added:
args_str += f" --set ana.activated[+] {analysis}"
print(f" added:{args_str}")
if removed:
# print(f" removed: {removed}")
args_str = ""
for analysis in removed:
args_str += f" --set ana.activated[-] {analysis}"
print(f" removed:{args_str}")
|
|
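The PARAM-line handling is the subtle part of the script above; a self-contained demonstration on a representative first line (the line itself is made up):

import json
import re
import shlex

line = '// PARAM: --set ana.activated "[\'base\',\'threadid\',\'assert\']"'
m = re.match(r"^//.*PARAM.*:\s*(.*)$", line)
params = shlex.split(m.group(1))
i = params.index("ana.activated")
activated = set(json.loads(params[i + 1].replace("'", '"')))
print(sorted(activated))  # ['assert', 'base', 'threadid']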
dc1e018a19c4bd55b10a65ab79ac4e2e1fdc95dc
|
makedist.py
|
makedist.py
|
"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
Add script to assist in packaging.
|
Add script to assist in packaging.
|
Python
|
mit
|
davidalber/Geneagrapher,davidalber/Geneagrapher
|
Add script to assist in packaging.
|
"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
<commit_before><commit_msg>Add script to assist in packaging.<commit_after>
|
"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
Add script to assist in packaging."""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
<commit_before><commit_msg>Add script to assist in packaging.<commit_after>"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
|
8582fd56109440e372215ed06617ae8e7e89f780
|
flicks/base/middleware.py
|
flicks/base/middleware.py
|
class ExceptionLoggingMiddleware(object):
"""
Small middleware that logs exceptions to the console. Useful in local
development.
"""
def process_exception(self, request, exception):
import traceback
print traceback.format_exc()
|
Add ExceptionLoggingMiddleware: Useful for debugging.
|
Add ExceptionLoggingMiddleware: Useful for debugging.
|
Python
|
bsd-3-clause
|
mozilla/firefox-flicks,mozilla/firefox-flicks,mozilla/firefox-flicks,mozilla/firefox-flicks
|
Add ExceptionLoggingMiddleware: Useful for debugging.
|
class ExceptionLoggingMiddleware(object):
"""
Small middleware that logs exceptions to the console. Useful in local
development.
"""
def process_exception(self, request, exception):
import traceback
print traceback.format_exc()
|
<commit_before><commit_msg>Add ExceptionLoggingMiddleware: Useful for debugging.<commit_after>
|
class ExceptionLoggingMiddleware(object):
"""
Small middleware that logs exceptions to the console. Useful in local
development.
"""
def process_exception(self, request, exception):
import traceback
print traceback.format_exc()
|
Add ExceptionLoggingMiddleware: Useful for debugging.class ExceptionLoggingMiddleware(object):
"""
Small middleware that logs exceptions to the console. Useful in local
development.
"""
def process_exception(self, request, exception):
import traceback
print traceback.format_exc()
|
<commit_before><commit_msg>Add ExceptionLoggingMiddleware: Useful for debugging.<commit_after>class ExceptionLoggingMiddleware(object):
"""
Small middleware that logs exceptions to the console. Useful in local
development.
"""
def process_exception(self, request, exception):
import traceback
print traceback.format_exc()
|
|
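A note on wiring the middleware above into a project: it uses the old-style process_exception hook, so it is enabled through the middleware list in settings. A minimal sketch, assuming a pre-Django-1.10 settings module where MIDDLEWARE_CLASSES is still in use and the module path matches the file above; the settings file name is illustrative, and this belongs in local development only:
# settings_local.py (hypothetical development settings)
MIDDLEWARE_CLASSES = (
    # ... the project's other middleware ...
    'flicks.base.middleware.ExceptionLoggingMiddleware',  # prints tracebacks to the console
)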
26e39ca1cdf894bfcecff16344987b6ae7cf638a
|
scripts/call_pods.py
|
scripts/call_pods.py
|
#!/bin/env python
## Utils script to call a list of pods
import subprocess
pods = open('/tmp/pods.txt', 'r')
for pod in pods:
print pod.strip('\n\r')
subprocess.call('wget http://pods.jasonrobinson.me/register/'+pod.strip('\n\r')+' -O /dev/null', shell=True)
|
Add a little script to register a list of pods
|
Add a little script to register a list of pods
|
Python
|
agpl-3.0
|
jaywink/the-federation.info,jaywink/the-federation.info,Flaburgan/diaspora-hub,jaywink/diaspora-hub,jaywink/the-federation.info,jaywink/diaspora-hub,Flaburgan/the-federation.info,Flaburgan/diaspora-hub,jaywink/diaspora-hub,Flaburgan/the-federation.info,Flaburgan/the-federation.info,Flaburgan/diaspora-hub,Flaburgan/the-federation.info
|
Add a little script to register a list of pods
|
#!/bin/env python
## Utils script to call a list of pods
import subprocess
pods = open('/tmp/pods.txt', 'r')
for pod in pods:
print pod.strip('\n\r')
subprocess.call('wget http://pods.jasonrobinson.me/register/'+pod.strip('\n\r')+' -O /dev/null', shell=True)
|
<commit_before><commit_msg>Add a little script to register a list of pods<commit_after>
|
#!/bin/env python
## Utils script to call a list of pods
import subprocess
pods = open('/tmp/pods.txt', 'r')
for pod in pods:
print pod.strip('\n\r')
subprocess.call('wget http://pods.jasonrobinson.me/register/'+pod.strip('\n\r')+' -O /dev/null', shell=True)
|
Add a little script to register a list of pods#!/bin/env python
## Utils script to call a list of pods
import subprocess
pods = open('/tmp/pods.txt', 'r')
for pod in pods:
print pod.strip('\n\r')
subprocess.call('wget http://pods.jasonrobinson.me/register/'+pod.strip('\n\r')+' -O /dev/null', shell=True)
|
<commit_before><commit_msg>Add a little script to register a list of pods<commit_after>#!/bin/env python
## Utils script to call a list of pods
import subprocess
pods = open('/tmp/pods.txt', 'r')
for pod in pods:
print pod.strip('\n\r')
subprocess.call('wget http://pods.jasonrobinson.me/register/'+pod.strip('\n\r')+' -O /dev/null', shell=True)
|
|
dd07e43e9bf326ec12b34796d6b1c2b2c802f53b
|
stores/fields.py
|
stores/fields.py
|
class Point(object):
def __init__(self, x, y=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, long, float)) and isinstance(y, (int, long, float)):
            # Here X and Y were passed in individually, as parameters.
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
if ndim != 2:
raise TypeError('Invalid point dimension: %s' % str(ndim))
self.x = coords[0]
self.y = coords[1]
self.ndim = ndim
def __iter__(self):
"""
Allows iteration over coordinates of this Point.
"""
for coord in self.coords:
yield coord
def __len__(self):
"""
Returns the number of dimensions for this Point.
"""
return self.ndim
def __repr__(self):
return unicode(self)
def __unicode__(self):
return "Point(%s, %s)" % self.coords
@property
def coords(self):
"""
Returns a tuple of the point.
"""
return (self.x, self.y)
@coords.setter
def coords(self, value):
"""
Sets the coordinates of the point with the given tuple.
"""
self.x, self.y = value
|
Add Point class based on GEOS package
|
Add Point class based on GEOS package
|
Python
|
bsd-3-clause
|
django-oscar/django-oscar-stores,django-oscar/django-oscar-stores,django-oscar/django-oscar-stores
|
Add Point class based on GEOS package
|
class Point(object):
def __init__(self, x, y=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, long, float)) and isinstance(y, (int, long, float)):
            # Here X and Y were passed in individually, as parameters.
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
if ndim != 2:
raise TypeError('Invalid point dimension: %s' % str(ndim))
self.x = coords[0]
self.y = coords[1]
self.ndim = ndim
def __iter__(self):
"""
Allows iteration over coordinates of this Point.
"""
for coord in self.coords:
yield coord
def __len__(self):
"""
Returns the number of dimensions for this Point.
"""
return self.ndim
def __repr__(self):
return unicode(self)
def __unicode__(self):
return "Point(%s, %s)" % self.coords
@property
def coords(self):
"""
Returns a tuple of the point.
"""
return (self.x, self.y)
@coords.setter
def coords(self, value):
"""
Sets the coordinates of the point with the given tuple.
"""
self.x, self.y = value
|
<commit_before><commit_msg>Add Point class based on GEOS package<commit_after>
|
class Point(object):
def __init__(self, x, y=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, long, float)) and isinstance(y, (int, long, float)):
            # Here X and Y were passed in individually, as parameters.
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
if ndim != 2:
raise TypeError('Invalid point dimension: %s' % str(ndim))
self.x = coords[0]
self.y = coords[1]
self.ndim = ndim
def __iter__(self):
"""
Allows iteration over coordinates of this Point.
"""
for coord in self.coords:
yield coord
def __len__(self):
"""
Returns the number of dimensions for this Point.
"""
return self.ndim
def __repr__(self):
return unicode(self)
def __unicode__(self):
return "Point(%s, %s)" % self.coords
@property
def coords(self):
"""
Returns a tuple of the point.
"""
return (self.x, self.y)
@coords.setter
def coords(self, value):
"""
Sets the coordinates of the point with the given tuple.
"""
self.x, self.y = value
|
Add Point class based on GEOS packageclass Point(object):
def __init__(self, x, y=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, long, float)) and isinstance(y, (int, long, float)):
            # Here X and Y were passed in individually, as parameters.
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
if ndim != 2:
raise TypeError('Invalid point dimension: %s' % str(ndim))
self.x = coords[0]
self.y = coords[1]
self.ndim = ndim
def __iter__(self):
"""
Allows iteration over coordinates of this Point.
"""
for coord in self.coords:
yield coord
def __len__(self):
"""
Returns the number of dimensions for this Point.
"""
return self.ndim
def __repr__(self):
return unicode(self)
def __unicode__(self):
return "Point(%s, %s)" % self.coords
@property
def coords(self):
"""
Returns a tuple of the point.
"""
return (self.x, self.y)
@coords.setter
def coords(self, value):
"""
Sets the coordinates of the point with the given tuple.
"""
self.x, self.y = value
|
<commit_before><commit_msg>Add Point class based on GEOS package<commit_after>class Point(object):
def __init__(self, x, y=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, long, float)) and isinstance(y, (int, long, float)):
            # Here X and Y were passed in individually, as parameters.
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
if ndim != 2:
raise TypeError('Invalid point dimension: %s' % str(ndim))
self.x = coords[0]
self.y = coords[1]
self.ndim = ndim
def __iter__(self):
"""
Allows iteration over coordinates of this Point.
"""
for coord in self.coords:
yield coord
def __len__(self):
"""
Returns the number of dimensions for this Point.
"""
return self.ndim
def __repr__(self):
return unicode(self)
def __unicode__(self):
return "Point(%s, %s)" % self.coords
@property
def coords(self):
"""
Returns a tuple of the point.
"""
return (self.x, self.y)
@coords.setter
def coords(self, value):
"""
Sets the coordinates of the point with the given tuple.
"""
self.x, self.y = value
|
|
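A short usage sketch of the Point class above, using only names the class itself defines:
p = Point(5, 23)             # individual x, y parameters
q = Point((1.5, -2.0))       # tuple form
assert p.coords == (5, 23)   # coords is an (x, y) tuple property
assert len(p) == 2           # __len__ reports the dimension
xs = list(q)                 # __iter__ yields x then y
q.coords = (0, 0)            # the setter reassigns both coordinates at once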
cbe1889e2c575e91488d29e757eb8c2cad155ce4
|
testGPIO.py
|
testGPIO.py
|
import RPi.GPIO as GPIO
class Test:
# Mapping
PINS = {2,3,4,14,15,17,18,27,22,23,24,10,9,25,11,8,7,5,6,12,13,19,16,26,20,21}
def __init__(self):
## Attach a callback to each INPUT pin
        for pin in self.PINS:
print("Attaching pin GPIO%s ..." % pin )
GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
print(" Attaching event ...")
GPIO.add_event_detect(pin, GPIO.RISING, callback=self.gpio_callback, bouncetime=300)
print(" ... Done.")
def gpio_callback(self,channel):
print("Pin %s activated" % channel)
test = Test();
while (True):
pass
|
Add a test script for GPIOs
|
Add a test script for GPIOs
|
Python
|
mit
|
tchapi/pianette,tchapi/pianette,tchapi/pianette,tchapi/pianette
|
Add a test script for GPIOs
|
import RPi.GPIO as GPIO
class Test:
# Mapping
PINS = {2,3,4,14,15,17,18,27,22,23,24,10,9,25,11,8,7,5,6,12,13,19,16,26,20,21}
def __init__(self):
## Attach a callback to each INPUT pin
        for pin in self.PINS:
print("Attaching pin GPIO%s ..." % pin )
GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
print(" Attaching event ...")
GPIO.add_event_detect(pin, GPIO.RISING, callback=self.gpio_callback, bouncetime=300)
print(" ... Done.")
def gpio_callback(self,channel):
print("Pin %s activated" % channel)
test = Test();
while (True):
pass
|
<commit_before><commit_msg>Add a test script for GPIOs<commit_after>
|
import RPi.GPIO as GPIO
class Test:
# Mapping
PINS = {2,3,4,14,15,17,18,27,22,23,24,10,9,25,11,8,7,5,6,12,13,19,16,26,20,21}
def __init__(self):
## Attach a callback to each INPUT pin
        for pin in self.PINS:
print("Attaching pin GPIO%s ..." % pin )
GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
print(" Attaching event ...")
GPIO.add_event_detect(pin, GPIO.RISING, callback=self.gpio_callback, bouncetime=300)
print(" ... Done.")
def gpio_callback(self,channel):
print("Pin %s activated" % channel)
test = Test();
while (True):
pass
|
Add a test script for GPIOsimport RPi.GPIO as GPIO
class Test:
# Mapping
PINS = {2,3,4,14,15,17,18,27,22,23,24,10,9,25,11,8,7,5,6,12,13,19,16,26,20,21}
def __init__(self):
## Attach a callback to each INPUT pin
        for pin in self.PINS:
print("Attaching pin GPIO%s ..." % pin )
GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
print(" Attaching event ...")
GPIO.add_event_detect(pin, GPIO.RISING, callback=self.gpio_callback, bouncetime=300)
print(" ... Done.")
def gpio_callback(self,channel):
print("Pin %s activated" % channel)
test = Test();
while (True):
pass
|
<commit_before><commit_msg>Add a test script for GPIOs<commit_after>import RPi.GPIO as GPIO
class Test:
# Mapping
PINS = {2,3,4,14,15,17,18,27,22,23,24,10,9,25,11,8,7,5,6,12,13,19,16,26,20,21}
def __init__(self):
## Attach a callback to each INPUT pin
        for pin in self.PINS:
print("Attaching pin GPIO%s ..." % pin )
GPIO.setup(pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
print(" Attaching event ...")
GPIO.add_event_detect(pin, GPIO.RISING, callback=self.gpio_callback, bouncetime=300)
print(" ... Done.")
def gpio_callback(self,channel):
print("Pin %s activated" % channel)
test = Test();
while (True):
pass
|
|
9fc0733d5a38b7e72855784e9b6819a692dfee2a
|
run2015.py
|
run2015.py
|
#!/usr/bin/python
from __future__ import print_function, unicode_literals
from os import chdir, getcwd, getenv
from subprocess import call
PYTHON = getenv('PYTHON', 'python')
FC = getenv('FC', 'gfortran')
DIR = getenv('DIR', './advent2015')
puzzles = ['01', '02']
print('---------------------------------------------------')
print('Advent of Code')
print()
print('Each puzzle contains two output values, the first')
print('from the Python script, the second from the Fortran')
print('program.')
print('---------------------------------------------------\n')
for puzzle in puzzles:
print('Puzzle {}'.format(puzzle))
py_src = './puzzle{}.py'.format(puzzle)
f95_src = './puzzle{}.f95'.format(puzzle)
py_cmd = [PYTHON, py_src]
f95_cmd_1 = [FC, f95_src]
f95_cmd_2 = ['./a.out']
chdir(DIR)
call(py_cmd)
chdir('..')
chdir('{}.f'.format(DIR))
call(f95_cmd_1)
call(f95_cmd_2)
chdir('..')
|
Add a script to run all of the programs
|
Add a script to run all of the programs
|
Python
|
mit
|
rnelson/adventofcode,rnelson/adventofcode,rnelson/adventofcode,rnelson/adventofcode
|
Add a script to run all of the programs
|
#!/usr/bin/python
from __future__ import print_function, unicode_literals
from os import chdir, getcwd, getenv
from subprocess import call
PYTHON = getenv('PYTHON', 'python')
FC = getenv('FC', 'gfortran')
DIR = getenv('DIR', './advent2015')
puzzles = ['01', '02']
print('---------------------------------------------------')
print('Advent of Code')
print()
print('Each puzzle contains two output values, the first')
print('from the Python script, the second from the Fortran')
print('program.')
print('---------------------------------------------------\n')
for puzzle in puzzles:
print('Puzzle {}'.format(puzzle))
py_src = './puzzle{}.py'.format(puzzle)
f95_src = './puzzle{}.f95'.format(puzzle)
py_cmd = [PYTHON, py_src]
f95_cmd_1 = [FC, f95_src]
f95_cmd_2 = ['./a.out']
chdir(DIR)
call(py_cmd)
chdir('..')
chdir('{}.f'.format(DIR))
call(f95_cmd_1)
call(f95_cmd_2)
chdir('..')
|
<commit_before><commit_msg>Add a script to run all of the programs<commit_after>
|
#!/usr/bin/python
from __future__ import print_function, unicode_literals
from os import chdir, getcwd, getenv
from subprocess import call
PYTHON = getenv('PYTHON', 'python')
FC = getenv('FC', 'gfortran')
DIR = getenv('DIR', './advent2015')
puzzles = ['01', '02']
print('---------------------------------------------------')
print('Advent of Code')
print()
print('Each puzzle contains two output values, the first')
print('from the Python script, the second from the Fortran')
print('program.')
print('---------------------------------------------------\n')
for puzzle in puzzles:
print('Puzzle {}'.format(puzzle))
py_src = './puzzle{}.py'.format(puzzle)
f95_src = './puzzle{}.f95'.format(puzzle)
py_cmd = [PYTHON, py_src]
f95_cmd_1 = [FC, f95_src]
f95_cmd_2 = ['./a.out']
chdir(DIR)
call(py_cmd)
chdir('..')
chdir('{}.f'.format(DIR))
call(f95_cmd_1)
call(f95_cmd_2)
chdir('..')
|
Add a script to run all of the programs#!/usr/bin/python
from __future__ import print_function, unicode_literals
from os import chdir, getcwd, getenv
from subprocess import call
PYTHON = getenv('PYTHON', 'python')
FC = getenv('FC', 'gfortran')
DIR = getenv('DIR', './advent2015')
puzzles = ['01', '02']
print('---------------------------------------------------')
print('Advent of Code')
print()
print('Each puzzle contains two output values, the first')
print('from the Python script, the second from the Fortran')
print('program.')
print('---------------------------------------------------\n')
for puzzle in puzzles:
print('Puzzle {}'.format(puzzle))
py_src = './puzzle{}.py'.format(puzzle)
f95_src = './puzzle{}.f95'.format(puzzle)
py_cmd = [PYTHON, py_src]
f95_cmd_1 = [FC, f95_src]
f95_cmd_2 = ['./a.out']
chdir(DIR)
call(py_cmd)
chdir('..')
chdir('{}.f'.format(DIR))
call(f95_cmd_1)
call(f95_cmd_2)
chdir('..')
|
<commit_before><commit_msg>Add a script to run all of the programs<commit_after>#!/usr/bin/python
from __future__ import print_function, unicode_literals
from os import chdir, getcwd, getenv
from subprocess import call
PYTHON = getenv('PYTHON', 'python')
FC = getenv('FC', 'gfortran')
DIR = getenv('DIR', './advent2015')
puzzles = ['01', '02']
print('---------------------------------------------------')
print('Advent of Code')
print()
print('Each puzzle contains two output values, the first')
print('from the Python script, the second from the Fortran')
print('program.')
print('---------------------------------------------------\n')
for puzzle in puzzles:
print('Puzzle {}'.format(puzzle))
py_src = './puzzle{}.py'.format(puzzle)
f95_src = './puzzle{}.f95'.format(puzzle)
py_cmd = [PYTHON, py_src]
f95_cmd_1 = [FC, f95_src]
f95_cmd_2 = ['./a.out']
chdir(DIR)
call(py_cmd)
chdir('..')
chdir('{}.f'.format(DIR))
call(f95_cmd_1)
call(f95_cmd_2)
chdir('..')
|
|
854cebe95c74afaba88ce32026e33fef8072f73f
|
cuse-maru/mix/gui/cuse-mixgui.py
|
cuse-maru/mix/gui/cuse-mixgui.py
|
#!/usr/bin/env python
from gi.repository import Gtk, GObject
import socket, os, sys
class Connection:
def __init__(self, sock):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(sock)
def set_volume(self, stream, vol):
command = "SETPLAYVOL {} {}".format(stream, vol)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
def get_volume(self, stream):
command = "GETPLAYVOL {}".format(stream)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
vol = int(reply.decode().split(" ")[-1])
return vol
class Control(Gtk.VBox):
def __init__(self, conn, i):
Gtk.Box.__init__(self)
self.pack_start(Gtk.Label("Stream #{}".format(i)), False, True, 10)
self.scale = Gtk.VScale()
self.scale.set_range(0, 100)
self.scale.set_value(0)
self.scale.set_size_request(-1, 300)
self.scale.set_property("inverted", True)
self.scale.set_sensitive(False)
self.pack_start(self.scale, True, True, 10)
self.i = i
self.conn = conn
self.scale.connect("value-changed", self.vol_change)
GObject.timeout_add_seconds(1, self.update_timer)
def vol_change(self, widget):
self.conn.set_volume(self.i, int(self.scale.get_value()))
def update_timer(self):
try:
self.scale.set_value(self.conn.get_volume(self.i))
self.scale.set_sensitive(True)
except:
self.scale.set_sensitive(False)
self.scale.set_value(0)
GObject.timeout_add_seconds(1, self.update_timer)
class Window(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title = "MARU Volume Control")
self.conn = Connection("/tmp/marumix")
self.set_border_width(5)
box = Gtk.HBox()
for i in range(8):
box.pack_start(Control(self.conn, i), True, True, 10)
self.add(box)
if __name__ == '__main__':
win = Window()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
Add simple Python GUI to control volume.
|
Add simple Python GUI to control volume.
|
Python
|
lgpl-2.1
|
Themaister/libmaru,Themaister/libmaru
|
Add simple Python GUI to control volume.
|
#!/usr/bin/env python
from gi.repository import Gtk, GObject
import socket, os, sys
class Connection:
def __init__(self, sock):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(sock)
def set_volume(self, stream, vol):
command = "SETPLAYVOL {} {}".format(stream, vol)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
def get_volume(self, stream):
command = "GETPLAYVOL {}".format(stream)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
vol = int(reply.decode().split(" ")[-1])
return vol
class Control(Gtk.VBox):
def __init__(self, conn, i):
Gtk.Box.__init__(self)
self.pack_start(Gtk.Label("Stream #{}".format(i)), False, True, 10)
self.scale = Gtk.VScale()
self.scale.set_range(0, 100)
self.scale.set_value(0)
self.scale.set_size_request(-1, 300)
self.scale.set_property("inverted", True)
self.scale.set_sensitive(False)
self.pack_start(self.scale, True, True, 10)
self.i = i
self.conn = conn
self.scale.connect("value-changed", self.vol_change)
GObject.timeout_add_seconds(1, self.update_timer)
def vol_change(self, widget):
self.conn.set_volume(self.i, int(self.scale.get_value()))
def update_timer(self):
try:
self.scale.set_value(self.conn.get_volume(self.i))
self.scale.set_sensitive(True)
except:
self.scale.set_sensitive(False)
self.scale.set_value(0)
GObject.timeout_add_seconds(1, self.update_timer)
class Window(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title = "MARU Volume Control")
self.conn = Connection("/tmp/marumix")
self.set_border_width(5)
box = Gtk.HBox()
for i in range(8):
box.pack_start(Control(self.conn, i), True, True, 10)
self.add(box)
if __name__ == '__main__':
win = Window()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
<commit_before><commit_msg>Add simple Python GUI to control volume.<commit_after>
|
#!/usr/bin/env python
from gi.repository import Gtk, GObject
import socket, os, sys
class Connection:
def __init__(self, sock):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(sock)
def set_volume(self, stream, vol):
command = "SETPLAYVOL {} {}".format(stream, vol)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
def get_volume(self, stream):
command = "GETPLAYVOL {}".format(stream)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
vol = int(reply.decode().split(" ")[-1])
return vol
class Control(Gtk.VBox):
def __init__(self, conn, i):
Gtk.Box.__init__(self)
self.pack_start(Gtk.Label("Stream #{}".format(i)), False, True, 10)
self.scale = Gtk.VScale()
self.scale.set_range(0, 100)
self.scale.set_value(0)
self.scale.set_size_request(-1, 300)
self.scale.set_property("inverted", True)
self.scale.set_sensitive(False)
self.pack_start(self.scale, True, True, 10)
self.i = i
self.conn = conn
self.scale.connect("value-changed", self.vol_change)
GObject.timeout_add_seconds(1, self.update_timer)
def vol_change(self, widget):
self.conn.set_volume(self.i, int(self.scale.get_value()))
def update_timer(self):
try:
self.scale.set_value(self.conn.get_volume(self.i))
self.scale.set_sensitive(True)
except:
self.scale.set_sensitive(False)
self.scale.set_value(0)
GObject.timeout_add_seconds(1, self.update_timer)
class Window(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title = "MARU Volume Control")
self.conn = Connection("/tmp/marumix")
self.set_border_width(5)
box = Gtk.HBox()
for i in range(8):
box.pack_start(Control(self.conn, i), True, True, 10)
self.add(box)
if __name__ == '__main__':
win = Window()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
Add simple Python GUI to control volume.#!/usr/bin/env python
from gi.repository import Gtk, GObject
import socket, os, sys
class Connection:
def __init__(self, sock):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(sock)
def set_volume(self, stream, vol):
command = "SETPLAYVOL {} {}".format(stream, vol)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
def get_volume(self, stream):
command = "GETPLAYVOL {}".format(stream)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
vol = int(reply.decode().split(" ")[-1])
return vol
class Control(Gtk.VBox):
def __init__(self, conn, i):
Gtk.Box.__init__(self)
self.pack_start(Gtk.Label("Stream #{}".format(i)), False, True, 10)
self.scale = Gtk.VScale()
self.scale.set_range(0, 100)
self.scale.set_value(0)
self.scale.set_size_request(-1, 300)
self.scale.set_property("inverted", True)
self.scale.set_sensitive(False)
self.pack_start(self.scale, True, True, 10)
self.i = i
self.conn = conn
self.scale.connect("value-changed", self.vol_change)
GObject.timeout_add_seconds(1, self.update_timer)
def vol_change(self, widget):
self.conn.set_volume(self.i, int(self.scale.get_value()))
def update_timer(self):
try:
self.scale.set_value(self.conn.get_volume(self.i))
self.scale.set_sensitive(True)
except:
self.scale.set_sensitive(False)
self.scale.set_value(0)
GObject.timeout_add_seconds(1, self.update_timer)
class Window(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title = "MARU Volume Control")
self.conn = Connection("/tmp/marumix")
self.set_border_width(5)
box = Gtk.HBox()
for i in range(8):
box.pack_start(Control(self.conn, i), True, True, 10)
self.add(box)
if __name__ == '__main__':
win = Window()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
<commit_before><commit_msg>Add simple Python GUI to control volume.<commit_after>#!/usr/bin/env python
from gi.repository import Gtk, GObject
import socket, os, sys
class Connection:
def __init__(self, sock):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(sock)
def set_volume(self, stream, vol):
command = "SETPLAYVOL {} {}".format(stream, vol)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
def get_volume(self, stream):
command = "GETPLAYVOL {}".format(stream)
message = "MARU{:4} {}".format(len(command) + 1, command)
self.sock.send(message.encode())
reply = self.sock.recv(8)
length = int(reply.decode().split(" ")[-1])
reply = self.sock.recv(length)
vol = int(reply.decode().split(" ")[-1])
return vol
class Control(Gtk.VBox):
def __init__(self, conn, i):
Gtk.Box.__init__(self)
self.pack_start(Gtk.Label("Stream #{}".format(i)), False, True, 10)
self.scale = Gtk.VScale()
self.scale.set_range(0, 100)
self.scale.set_value(0)
self.scale.set_size_request(-1, 300)
self.scale.set_property("inverted", True)
self.scale.set_sensitive(False)
self.pack_start(self.scale, True, True, 10)
self.i = i
self.conn = conn
self.scale.connect("value-changed", self.vol_change)
GObject.timeout_add_seconds(1, self.update_timer)
def vol_change(self, widget):
self.conn.set_volume(self.i, int(self.scale.get_value()))
def update_timer(self):
try:
self.scale.set_value(self.conn.get_volume(self.i))
self.scale.set_sensitive(True)
except:
self.scale.set_sensitive(False)
self.scale.set_value(0)
GObject.timeout_add_seconds(1, self.update_timer)
class Window(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title = "MARU Volume Control")
self.conn = Connection("/tmp/marumix")
self.set_border_width(5)
box = Gtk.HBox()
for i in range(8):
box.pack_start(Control(self.conn, i), True, True, 10)
self.add(box)
if __name__ == '__main__':
win = Window()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
|
|
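For reference, the request framing used by Connection above is a small length-prefixed text protocol: a fixed 8-byte header (b'MARU' plus the payload length, right-aligned in a 4-character field) followed by a space and the command. A sketch of the framing round-trip, written against only what the script itself shows; the server's reply grammar beyond "a trailing integer value" is an assumption:
def frame(command):
    # 8-byte header + b' ' + command; the length field counts the space too
    return "MARU{:4} {}".format(len(command) + 1, command).encode()

def read_reply(sock):
    header = sock.recv(8)                         # e.g. b'MARU  12'
    length = int(header.decode().split(" ")[-1])  # same parse the GUI uses
    return sock.recv(length).decode().strip()     # the space-prefixed payload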
95b71b9b832b3ccf9315f430f753cf133d6621a2
|
tests/conftest.py
|
tests/conftest.py
|
"""Configuration module for pytests"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import yield_fixture
from requests_mock import Mocker
from gobble.configuration import config
@yield_fixture(scope='session')
def mock_requests():
if config.MOCK_REQUESTS:
with Mocker() as mock:
yield mock
else:
yield None
|
Add a fixture that yields a requests Mocker() object.
|
Add a fixture that yields a requests Mocker() object.
|
Python
|
mit
|
openspending/gobble
|
Add a fixture that yields a requests Mocker() object.
|
"""Configuration module for pytests"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import yield_fixture
from requests_mock import Mocker
from gobble.configuration import config
@yield_fixture(scope='session')
def mock_requests():
if config.MOCK_REQUESTS:
with Mocker() as mock:
yield mock
else:
yield None
|
<commit_before><commit_msg>Add a fixture that yields a requests Mocker() object.<commit_after>
|
"""Configuration module for pytests"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import yield_fixture
from requests_mock import Mocker
from gobble.configuration import config
@yield_fixture(scope='session')
def mock_requests():
if config.MOCK_REQUESTS:
with Mocker() as mock:
yield mock
else:
yield None
|
Add a fixture that yields a requests Mocker() object."""Configuration module for pytests"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import yield_fixture
from requests_mock import Mocker
from gobble.configuration import config
@yield_fixture(scope='session')
def mock_requests():
if config.MOCK_REQUESTS:
with Mocker() as mock:
yield mock
else:
yield None
|
<commit_before><commit_msg>Add a fixture that yields a requests Mocker() object.<commit_after>"""Configuration module for pytests"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import yield_fixture
from requests_mock import Mocker
from gobble.configuration import config
@yield_fixture(scope='session')
def mock_requests():
if config.MOCK_REQUESTS:
with Mocker() as mock:
yield mock
else:
yield None
|
|
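A sketch of a test consuming the mock_requests fixture above; the URL is illustrative, and the only API assumed beyond the fixture's own imports is requests_mock's Mocker.get() registration. When config.MOCK_REQUESTS is off, the fixture yields None and the call goes out over the network, mirroring the fixture's intent:
import requests

def test_fetch_datasets(mock_requests):
    if mock_requests is not None:              # config.MOCK_REQUESTS was enabled
        mock_requests.get('http://example.org/datasets', json={'ok': True})
    response = requests.get('http://example.org/datasets')
    assert response.json() == {'ok': True}     # answered by the Mocker, not the network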
071157e5a752252a7864d5b0ad85fe4c364c1aec
|
solutions/uri/1022/1022.py
|
solutions/uri/1022/1022.py
|
import sys
n = int(sys.stdin.readline())
for _ in range(n):
a, _, b, o, c, _, d = sys.stdin.readline().split()
a, b, c, d = map(int, [a, b, c, d])
if o == "+":
num = (b * d) * a // b + (b * d) * c // d
den = b * d
elif o == "-":
num = (b * d) * a // b - (b * d) * c // d
den = b * d
elif o == "*":
num = a * c
den = b * d
else:
num = a * d
den = b * c
r = abs(den)
if abs(num) < abs(den):
r = abs(num)
num_r = num
den_r = den
j = 2
    while j <= r:
if num_r % j == 0 and den_r % j == 0:
num_r = num_r // j
den_r = den_r // j
else:
j += 1
print(f"{num}/{den} = {num_r}/{den_r}")
|
Solve TDA Rational in python
|
Solve TDA Rational in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve TDA Rational in python
|
import sys
n = int(sys.stdin.readline())
for _ in range(n):
a, _, b, o, c, _, d = sys.stdin.readline().split()
a, b, c, d = map(int, [a, b, c, d])
if o == "+":
num = (b * d) * a // b + (b * d) * c // d
den = b * d
elif o == "-":
num = (b * d) * a // b - (b * d) * c // d
den = b * d
elif o == "*":
num = a * c
den = b * d
else:
num = a * d
den = b * c
r = abs(den)
if abs(num) < abs(den):
r = abs(num)
num_r = num
den_r = den
j = 2
    while j <= r:
if num_r % j == 0 and den_r % j == 0:
num_r = num_r // j
den_r = den_r // j
else:
j += 1
print(f"{num}/{den} = {num_r}/{den_r}")
|
<commit_before><commit_msg>Solve TDA Rational in python<commit_after>
|
import sys
n = int(sys.stdin.readline())
for _ in range(n):
a, _, b, o, c, _, d = sys.stdin.readline().split()
a, b, c, d = map(int, [a, b, c, d])
if o == "+":
num = (b * d) * a // b + (b * d) * c // d
den = b * d
elif o == "-":
num = (b * d) * a // b - (b * d) * c // d
den = b * d
elif o == "*":
num = a * c
den = b * d
else:
num = a * d
den = b * c
r = abs(den)
if abs(num) < abs(den):
r = abs(num)
num_r = num
den_r = den
j = 2
    while j <= r:
if num_r % j == 0 and den_r % j == 0:
num_r = num_r // j
den_r = den_r // j
else:
j += 1
print(f"{num}/{den} = {num_r}/{den_r}")
|
Solve TDA Rational in pythonimport sys
n = int(sys.stdin.readline())
for _ in range(n):
a, _, b, o, c, _, d = sys.stdin.readline().split()
a, b, c, d = map(int, [a, b, c, d])
if o == "+":
num = (b * d) * a // b + (b * d) * c // d
den = b * d
elif o == "-":
num = (b * d) * a // b - (b * d) * c // d
den = b * d
elif o == "*":
num = a * c
den = b * d
else:
num = a * d
den = b * c
r = abs(den)
if abs(num) < abs(den):
r = abs(num)
num_r = num
den_r = den
j = 2
    while j <= r:
if num_r % j == 0 and den_r % j == 0:
num_r = num_r // j
den_r = den_r // j
else:
j += 1
print(f"{num}/{den} = {num_r}/{den_r}")
|
<commit_before><commit_msg>Solve TDA Rational in python<commit_after>import sys
n = int(sys.stdin.readline())
for _ in range(n):
a, _, b, o, c, _, d = sys.stdin.readline().split()
a, b, c, d = map(int, [a, b, c, d])
if o == "+":
num = (b * d) * a // b + (b * d) * c // d
den = b * d
elif o == "-":
num = (b * d) * a // b - (b * d) * c // d
den = b * d
elif o == "*":
num = a * c
den = b * d
else:
num = a * d
den = b * c
r = abs(den)
if abs(num) < abs(den):
r = abs(num)
num_r = num
den_r = den
j = 2
    while j <= r:
if num_r % j == 0 and den_r % j == 0:
num_r = num_r // j
den_r = den_r // j
else:
j += 1
print(f"{num}/{den} = {num_r}/{den_r}")
|
|
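The reduction step in the solution above is trial division over candidate divisors; Euclid's algorithm reaches the same reduced fraction in a couple of lines and avoids the scan. A sketch of the equivalent step (not part of the submitted solution; the num == 0 convention may differ from the loop's output):
def gcd(a, b):
    # Euclid's algorithm; gcd(0, b) == b
    while b:
        a, b = b, a % b
    return a

g = gcd(abs(num), abs(den)) or 1
num_r, den_r = num // g, den // g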
4938e7e66d187a375db24832c0bfdba79687756e
|
accelerator/migrations/0003_auto_20180410_1618.py
|
accelerator/migrations/0003_auto_20180410_1618.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-10 20:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0002_add_mc_permissions'),
]
operations = [
migrations.AlterField(
model_name='mentorprogramofficehour',
name='location',
field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50),
),
]
|
Add Migration For Office Hours Locations
|
Add Migration For Office Hours Locations
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
Add Migration For Office Hours Locations
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-10 20:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0002_add_mc_permissions'),
]
operations = [
migrations.AlterField(
model_name='mentorprogramofficehour',
name='location',
field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50),
),
]
|
<commit_before><commit_msg>Add Migration For Office Hours Locations<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-10 20:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0002_add_mc_permissions'),
]
operations = [
migrations.AlterField(
model_name='mentorprogramofficehour',
name='location',
field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50),
),
]
|
Add Migration For Office Hours Locations# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-10 20:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0002_add_mc_permissions'),
]
operations = [
migrations.AlterField(
model_name='mentorprogramofficehour',
name='location',
field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50),
),
]
|
<commit_before><commit_msg>Add Migration For Office Hours Locations<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-10 20:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0002_add_mc_permissions'),
]
operations = [
migrations.AlterField(
model_name='mentorprogramofficehour',
name='location',
field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50),
),
]
|
|
5c65c8f371b253df3e861e1d998d0a174eed3f97
|
tests/test_tag.py
|
tests/test_tag.py
|
from io import BytesIO
import pytest
from nbtlib.tag import *
tag_parsing_inputs = [
# Byte tag
(b'\x00', Byte(0)),
(b'\xFF', Byte(-1)),
(b'\x7F', Byte(127)),
(b'\x80', Byte(-128)),
# Short tag
(b'\x00\x00', Short(0)),
(b'\xFF\xFF', Short(-1)),
(b'\x7F\xFF', Short(32767)),
(b'\x80\x00', Short(-32768)),
# Int tag
(b'\x00\x00\x00\x00', Int(0)),
(b'\xFF\xFF\xFF\xFF', Int(-1)),
(b'\x7F\xFF\xFF\xFF', Int(2147483647)),
(b'\x80\x00\x00\x00', Int(-2147483648)),
# Long tag
(b'\x00\x00\x00\x00\x00\x00\x00\x00', Long(0)),
(b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(-1)),
(b'\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(9223372036854775807)),
(b'\x80\x00\x00\x00\x00\x00\x00\x00', Long(-9223372036854775808)),
]
@pytest.mark.parametrize('bytes_input, expected_tag', tag_parsing_inputs)
def test_tag_parsing(bytes_input, expected_tag):
tag_type = type(expected_tag)
parsed_tag = tag_type.parse(BytesIO(bytes_input))
assert parsed_tag == expected_tag
|
Create tests for tag parsing
|
Create tests for tag parsing
|
Python
|
mit
|
vberlier/nbtlib
|
Create tests for tag parsing
|
from io import BytesIO
import pytest
from nbtlib.tag import *
tag_parsing_inputs = [
# Byte tag
(b'\x00', Byte(0)),
(b'\xFF', Byte(-1)),
(b'\x7F', Byte(127)),
(b'\x80', Byte(-128)),
# Short tag
(b'\x00\x00', Short(0)),
(b'\xFF\xFF', Short(-1)),
(b'\x7F\xFF', Short(32767)),
(b'\x80\x00', Short(-32768)),
# Int tag
(b'\x00\x00\x00\x00', Int(0)),
(b'\xFF\xFF\xFF\xFF', Int(-1)),
(b'\x7F\xFF\xFF\xFF', Int(2147483647)),
(b'\x80\x00\x00\x00', Int(-2147483648)),
# Long tag
(b'\x00\x00\x00\x00\x00\x00\x00\x00', Long(0)),
(b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(-1)),
(b'\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(9223372036854775807)),
(b'\x80\x00\x00\x00\x00\x00\x00\x00', Long(-9223372036854775808)),
]
@pytest.mark.parametrize('bytes_input, expected_tag', tag_parsing_inputs)
def test_tag_parsing(bytes_input, expected_tag):
tag_type = type(expected_tag)
parsed_tag = tag_type.parse(BytesIO(bytes_input))
assert parsed_tag == expected_tag
|
<commit_before><commit_msg>Create tests for tag parsing<commit_after>
|
from io import BytesIO
import pytest
from nbtlib.tag import *
tag_parsing_inputs = [
# Byte tag
(b'\x00', Byte(0)),
(b'\xFF', Byte(-1)),
(b'\x7F', Byte(127)),
(b'\x80', Byte(-128)),
# Short tag
(b'\x00\x00', Short(0)),
(b'\xFF\xFF', Short(-1)),
(b'\x7F\xFF', Short(32767)),
(b'\x80\x00', Short(-32768)),
# Int tag
(b'\x00\x00\x00\x00', Int(0)),
(b'\xFF\xFF\xFF\xFF', Int(-1)),
(b'\x7F\xFF\xFF\xFF', Int(2147483647)),
(b'\x80\x00\x00\x00', Int(-2147483648)),
# Long tag
(b'\x00\x00\x00\x00\x00\x00\x00\x00', Long(0)),
(b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(-1)),
(b'\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(9223372036854775807)),
(b'\x80\x00\x00\x00\x00\x00\x00\x00', Long(-9223372036854775808)),
]
@pytest.mark.parametrize('bytes_input, expected_tag', tag_parsing_inputs)
def test_tag_parsing(bytes_input, expected_tag):
tag_type = type(expected_tag)
parsed_tag = tag_type.parse(BytesIO(bytes_input))
assert parsed_tag == expected_tag
|
Create tests for tag parsing
from io import BytesIO
import pytest
from nbtlib.tag import *
tag_parsing_inputs = [
# Byte tag
(b'\x00', Byte(0)),
(b'\xFF', Byte(-1)),
(b'\x7F', Byte(127)),
(b'\x80', Byte(-128)),
# Short tag
(b'\x00\x00', Short(0)),
(b'\xFF\xFF', Short(-1)),
(b'\x7F\xFF', Short(32767)),
(b'\x80\x00', Short(-32768)),
# Int tag
(b'\x00\x00\x00\x00', Int(0)),
(b'\xFF\xFF\xFF\xFF', Int(-1)),
(b'\x7F\xFF\xFF\xFF', Int(2147483647)),
(b'\x80\x00\x00\x00', Int(-2147483648)),
# Long tag
(b'\x00\x00\x00\x00\x00\x00\x00\x00', Long(0)),
(b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(-1)),
(b'\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(9223372036854775807)),
(b'\x80\x00\x00\x00\x00\x00\x00\x00', Long(-9223372036854775808)),
]
@pytest.mark.parametrize('bytes_input, expected_tag', tag_parsing_inputs)
def test_tag_parsing(bytes_input, expected_tag):
tag_type = type(expected_tag)
parsed_tag = tag_type.parse(BytesIO(bytes_input))
assert parsed_tag == expected_tag
|
<commit_before><commit_msg>Create tests for tag parsing<commit_after>
from io import BytesIO
import pytest
from nbtlib.tag import *
tag_parsing_inputs = [
# Byte tag
(b'\x00', Byte(0)),
(b'\xFF', Byte(-1)),
(b'\x7F', Byte(127)),
(b'\x80', Byte(-128)),
# Short tag
(b'\x00\x00', Short(0)),
(b'\xFF\xFF', Short(-1)),
(b'\x7F\xFF', Short(32767)),
(b'\x80\x00', Short(-32768)),
# Int tag
(b'\x00\x00\x00\x00', Int(0)),
(b'\xFF\xFF\xFF\xFF', Int(-1)),
(b'\x7F\xFF\xFF\xFF', Int(2147483647)),
(b'\x80\x00\x00\x00', Int(-2147483648)),
# Long tag
(b'\x00\x00\x00\x00\x00\x00\x00\x00', Long(0)),
(b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(-1)),
(b'\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF', Long(9223372036854775807)),
(b'\x80\x00\x00\x00\x00\x00\x00\x00', Long(-9223372036854775808)),
]
@pytest.mark.parametrize('bytes_input, expected_tag', tag_parsing_inputs)
def test_tag_parsing(bytes_input, expected_tag):
tag_type = type(expected_tag)
parsed_tag = tag_type.parse(BytesIO(bytes_input))
assert parsed_tag == expected_tag
|
|
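The byte strings in the parametrized cases above are plain big-endian two's-complement encodings, so the expected values can be cross-checked against the standard library. A quick sketch with struct, assuming only that Byte/Short/Int/Long parse big-endian signed integers, as the cases imply:
import struct

assert struct.unpack('>b', b'\x80')[0] == -128                                # Byte
assert struct.unpack('>h', b'\x7F\xFF')[0] == 32767                           # Short
assert struct.unpack('>i', b'\x80\x00\x00\x00')[0] == -2147483648             # Int
assert struct.unpack('>q', b'\x7F' + b'\xFF' * 7)[0] == 9223372036854775807   # Long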
52ab8b2f492b3c7d36b240f577f51e3120fd1302
|
src/ec2.py
|
src/ec2.py
|
import sys
import json
from subprocess import check_output
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, tostring
from xml.dom import minidom
def create_item(attrs, contents):
item = Element('item')
for k in attrs: item.set(k, attrs[k])
for k in contents:
content = Element(k)
content.text = contents[k]
item.append(content)
return(item)
def search_ec2_instances(query):
output = check_output(['aws', 'ec2', 'describe-instances', '--instance-ids=%s' % query])
response = json.loads(output.decode('utf-8'))
return([i for r in response['Reservations'] for i in r['Instances']])
query = sys.argv[1].strip()
items = Element('items')
for i in search_ec2_instances(query):
for k in ['PublicDnsName', 'PublicIpAddress', 'PrivateIpAddress', 'InstanceId', 'PrivateDnsName']:
if k not in i: continue
v = i[k]
item = create_item(
{ 'uid': v, 'valid': 'YES', 'type': 'default' },
{ 'title': v, 'subtitle': k, 'arg': v }
)
items.append(item)
doc = minidom.parseString(ElementTree.tostring(items, 'utf-8'))
print(doc.toprettyxml())
|
Add a script to search EC2 instances by instance ID
|
Add a script to search EC2 instances by instance ID
|
Python
|
mit
|
saidie/alfred-aws
|
Add a script to search EC2 instances by instance ID
|
import sys
import json
from subprocess import check_output
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, tostring
from xml.dom import minidom
def create_item(attrs, contents):
item = Element('item')
for k in attrs: item.set(k, attrs[k])
for k in contents:
content = Element(k)
content.text = contents[k]
item.append(content)
return(item)
def search_ec2_instances(query):
output = check_output(['aws', 'ec2', 'describe-instances', '--instance-ids=%s' % query])
response = json.loads(output.decode('utf-8'))
return([i for r in response['Reservations'] for i in r['Instances']])
query = sys.argv[1].strip()
items = Element('items')
for i in search_ec2_instances(query):
for k in ['PublicDnsName', 'PublicIpAddress', 'PrivateIpAddress', 'InstanceId', 'PrivateDnsName']:
if k not in i: continue
v = i[k]
item = create_item(
{ 'uid': v, 'valid': 'YES', 'type': 'default' },
{ 'title': v, 'subtitle': k, 'arg': v }
)
items.append(item)
doc = minidom.parseString(ElementTree.tostring(items, 'utf-8'))
print(doc.toprettyxml())
|
<commit_before><commit_msg>Add a script to search EC2 instances by instance ID<commit_after>
|
import sys
import json
from subprocess import check_output
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, tostring
from xml.dom import minidom
def create_item(attrs, contents):
item = Element('item')
for k in attrs: item.set(k, attrs[k])
for k in contents:
content = Element(k)
content.text = contents[k]
item.append(content)
return(item)
def search_ec2_instances(query):
output = check_output(['aws', 'ec2', 'describe-instances', '--instance-ids=%s' % query])
response = json.loads(output.decode('utf-8'))
return([i for r in response['Reservations'] for i in r['Instances']])
query = sys.argv[1].strip()
items = Element('items')
for i in search_ec2_instances(query):
for k in ['PublicDnsName', 'PublicIpAddress', 'PrivateIpAddress', 'InstanceId', 'PrivateDnsName']:
if k not in i: continue
v = i[k]
item = create_item(
{ 'uid': v, 'valid': 'YES', 'type': 'default' },
{ 'title': v, 'subtitle': k, 'arg': v }
)
items.append(item)
doc = minidom.parseString(ElementTree.tostring(items, 'utf-8'))
print(doc.toprettyxml())
|
Add a script to search EC2 instances by instance IDimport sys
import json
from subprocess import check_output
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, tostring
from xml.dom import minidom
def create_item(attrs, contents):
item = Element('item')
for k in attrs: item.set(k, attrs[k])
for k in contents:
content = Element(k)
content.text = contents[k]
item.append(content)
return(item)
def search_ec2_instances(query):
output = check_output(['aws', 'ec2', 'describe-instances', '--instance-ids=%s' % query])
response = json.loads(output.decode('utf-8'))
return([i for r in response['Reservations'] for i in r['Instances']])
query = sys.argv[1].strip()
items = Element('items')
for i in search_ec2_instances(query):
for k in ['PublicDnsName', 'PublicIpAddress', 'PrivateIpAddress', 'InstanceId', 'PrivateDnsName']:
if k not in i: continue
v = i[k]
item = create_item(
{ 'uid': v, 'valid': 'YES', 'type': 'default' },
{ 'title': v, 'subtitle': k, 'arg': v }
)
items.append(item)
doc = minidom.parseString(ElementTree.tostring(items, 'utf-8'))
print(doc.toprettyxml())
|
<commit_before><commit_msg>Add a script to search EC2 instances by instance ID<commit_after>import sys
import json
from subprocess import check_output
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, tostring
from xml.dom import minidom
def create_item(attrs, contents):
item = Element('item')
for k in attrs: item.set(k, attrs[k])
for k in contents:
content = Element(k)
content.text = contents[k]
item.append(content)
return(item)
def search_ec2_instances(query):
output = check_output(['aws', 'ec2', 'describe-instances', '--instance-ids=%s' % query])
response = json.loads(output.decode('utf-8'))
return([i for r in response['Reservations'] for i in r['Instances']])
query = sys.argv[1].strip()
items = Element('items')
for i in search_ec2_instances(query):
for k in ['PublicDnsName', 'PublicIpAddress', 'PrivateIpAddress', 'InstanceId', 'PrivateDnsName']:
if k not in i: continue
v = i[k]
item = create_item(
{ 'uid': v, 'valid': 'YES', 'type': 'default' },
{ 'title': v, 'subtitle': k, 'arg': v }
)
items.append(item)
doc = minidom.parseString(ElementTree.tostring(items, 'utf-8'))
print(doc.toprettyxml())
|
|
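For comparison, the same instance lookup can be done in-process with boto3 instead of shelling out to the AWS CLI; a sketch, assuming boto3 is installed and finds credentials the same way the CLI does:
import boto3

def search_ec2_instances_boto3(instance_id):
    ec2 = boto3.client('ec2')
    response = ec2.describe_instances(InstanceIds=[instance_id])
    return [i for r in response['Reservations'] for i in r['Instances']]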
086fb81110aeaf2cf16e32e8c9468e7ec06042c4
|
future/builtins/backports/newopen.py
|
future/builtins/backports/newopen.py
|
orig_open = open  # keep a reference to the builtin open() before the class shadows it
class open(object):
"""Wrapper providing key part of Python 3 open() interface.
From IPython's py3compat.py module. License: BSD.
"""
def __init__(self, fname, mode="r", encoding="utf-8"):
self.f = orig_open(fname, mode)
self.enc = encoding
def write(self, s):
return self.f.write(s.encode(self.enc))
def read(self, size=-1):
return self.f.read(size).decode(self.enc)
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
self.f.close()
|
Add partial backport of Python 3's open() interface
|
Add partial backport of Python 3's open() interface
|
Python
|
mit
|
krischer/python-future,QuLogic/python-future,krischer/python-future,PythonCharmers/python-future,QuLogic/python-future,PythonCharmers/python-future,michaelpacer/python-future,michaelpacer/python-future
|
Add partial backport of Python 3's open() interface
|
orig_open = open  # keep a reference to the builtin open() before the class shadows it
class open(object):
"""Wrapper providing key part of Python 3 open() interface.
From IPython's py3compat.py module. License: BSD.
"""
def __init__(self, fname, mode="r", encoding="utf-8"):
self.f = orig_open(fname, mode)
self.enc = encoding
def write(self, s):
return self.f.write(s.encode(self.enc))
def read(self, size=-1):
return self.f.read(size).decode(self.enc)
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
self.f.close()
|
<commit_before><commit_msg>Add partial backport of Python 3's open() interface<commit_after>
|
orig_open = open  # keep a reference to the builtin open() before the class shadows it
class open(object):
"""Wrapper providing key part of Python 3 open() interface.
From IPython's py3compat.py module. License: BSD.
"""
def __init__(self, fname, mode="r", encoding="utf-8"):
self.f = orig_open(fname, mode)
self.enc = encoding
def write(self, s):
return self.f.write(s.encode(self.enc))
def read(self, size=-1):
return self.f.read(size).decode(self.enc)
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
self.f.close()
|
Add partial backport of Python 3's open() interfaceorig_open = open  # keep a reference to the builtin before shadowing it below
class open(object):
"""Wrapper providing key part of Python 3 open() interface.
From IPython's py3compat.py module. License: BSD.
"""
def __init__(self, fname, mode="r", encoding="utf-8"):
self.f = orig_open(fname, mode)
self.enc = encoding
def write(self, s):
return self.f.write(s.encode(self.enc))
def read(self, size=-1):
return self.f.read(size).decode(self.enc)
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
self.f.close()
|
<commit_before><commit_msg>Add partial backport of Python 3's open() interface<commit_after>orig_open = open  # keep a reference to the builtin before shadowing it below
class open(object):
"""Wrapper providing key part of Python 3 open() interface.
From IPython's py3compat.py module. License: BSD.
"""
def __init__(self, fname, mode="r", encoding="utf-8"):
self.f = orig_open(fname, mode)
self.enc = encoding
def write(self, s):
return self.f.write(s.encode(self.enc))
def read(self, size=-1):
return self.f.read(size).decode(self.enc)
def close(self):
return self.f.close()
def __enter__(self):
return self
def __exit__(self, etype, value, traceback):
self.f.close()
|
|
f419dce0164d00a56a90fb34f19e2a8adecda584
|
examples/raw_parameter_script.py
|
examples/raw_parameter_script.py
|
""" The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't work, it will autodetect
how this file was run. With pure Python, it will need to initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The same thing is required for the setUp()
and tearDown() methods, which are now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
b = MyTestClass("test_basic")
b.browser = "chrome"
b.headless = False
b.servername = "localhost"
b.port = 4444
b.data = None
b.environment = "test"
b.database_env = "test"
b.log_path = "latest_logs/"
b.timeout_multiplier = None
b.with_db_reporting = False
b.with_s3_logging = False
b.js_checking_on = False
b.is_pytest = False
b.demo_mode = False
b.demo_sleep = 1
b.message_duration = 2
b.proxy_string = None
b.ad_block_on = False
b.highlights = None
b.check_js = False
b.cap_file = None
b.setUp()
try:
b.test_basic()
finally:
b.tearDown()
del b
|
Add an example script to demonstrate a run with pure Python
|
Add an example script to demonstrate a run with pure Python
|
Python
|
mit
|
mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/seleniumspot,mdmintz/seleniumspot,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add an example script to demonstrate a run with pure Python
|
""" The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't work, it will autodetect
how this file was run. With pure Python, it will need to initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The same thing is required for the setUp()
and tearDown() methods, which are now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
b = MyTestClass("test_basic")
b.browser = "chrome"
b.headless = False
b.servername = "localhost"
b.port = 4444
b.data = None
b.environment = "test"
b.database_env = "test"
b.log_path = "latest_logs/"
b.timeout_multiplier = None
b.with_db_reporting = False
b.with_s3_logging = False
b.js_checking_on = False
b.is_pytest = False
b.demo_mode = False
b.demo_sleep = 1
b.message_duration = 2
b.proxy_string = None
b.ad_block_on = False
b.highlights = None
b.check_js = False
b.cap_file = None
b.setUp()
try:
b.test_basic()
finally:
b.tearDown()
del b
|
<commit_before><commit_msg>Add an example script to demonstrate a run with pure Python<commit_after>
|
""" The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't work, it will autodetect
how this file was run. With pure Python, it will need to initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The same thing is required for the setUp()
and tearDown() methods, which are now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
b = MyTestClass("test_basic")
b.browser = "chrome"
b.headless = False
b.servername = "localhost"
b.port = 4444
b.data = None
b.environment = "test"
b.database_env = "test"
b.log_path = "latest_logs/"
b.timeout_multiplier = None
b.with_db_reporting = False
b.with_s3_logging = False
b.js_checking_on = False
b.is_pytest = False
b.demo_mode = False
b.demo_sleep = 1
b.message_duration = 2
b.proxy_string = None
b.ad_block_on = False
b.highlights = None
b.check_js = False
b.cap_file = None
b.setUp()
try:
b.test_basic()
finally:
b.tearDown()
del b
|
Add an example script to demonstrate a run with pure Python""" The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't work, it will autodetect
how this file was run. With pure Python, it will need to initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The same thing is required for the setUp()
and tearDown() methods, which are now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
b = MyTestClass("test_basic")
b.browser = "chrome"
b.headless = False
b.servername = "localhost"
b.port = 4444
b.data = None
b.environment = "test"
b.database_env = "test"
b.log_path = "latest_logs/"
b.timeout_multiplier = None
b.with_db_reporting = False
b.with_s3_logging = False
b.js_checking_on = False
b.is_pytest = False
b.demo_mode = False
b.demo_sleep = 1
b.message_duration = 2
b.proxy_string = None
b.ad_block_on = False
b.highlights = None
b.check_js = False
b.cap_file = None
b.setUp()
try:
b.test_basic()
finally:
b.tearDown()
del b
|
<commit_before><commit_msg>Add an example script to demonstrate a run with pure Python<commit_after>""" The main purpose of this file is to demonstrate running SeleniumBase
scripts without the use of Pytest by calling the script directly
with Python or from a Python interactive interpreter. Based on
whether relative imports work or don't work, it will autodetect
how this file was run. With pure Python, it will need to initialize
all the variables that would've been automatically initialized
by the Pytest plugin. The same thing is required for the setUp()
and tearDown() methods, which are now called from the script itself.
One big advantage to running tests with Pytest is that most of this
is done for you automatically, with the option to update any of the
parameters through command line parsing. Pytest also provides you
with other plugins, such as ones for generating test reports,
handling multithreading, and parametrized tests. Depending on your
specific needs, you may need to call SeleniumBase commands without
using Pytest, and this example shows you how. """
try:
# Running with Pytest / (Finds test methods to run using autodiscovery)
# Example run command: "pytest raw_parameter_script.py"
from .my_first_test import MyTestClass # (relative imports work: ".~")
except (ImportError, ValueError):
# Running with pure Python OR from a Python interactive interpreter
# Example run command: "python raw_parameter_script.py"
from my_first_test import MyTestClass # (relative imports DON'T work)
b = MyTestClass("test_basic")
b.browser = "chrome"
b.headless = False
b.servername = "localhost"
b.port = 4444
b.data = None
b.environment = "test"
b.database_env = "test"
b.log_path = "latest_logs/"
b.timeout_multiplier = None
b.with_db_reporting = False
b.with_s3_logging = False
b.js_checking_on = False
b.is_pytest = False
b.demo_mode = False
b.demo_sleep = 1
b.message_duration = 2
b.proxy_string = None
b.ad_block_on = False
b.highlights = None
b.check_js = False
b.cap_file = None
b.setUp()
try:
b.test_basic()
finally:
b.tearDown()
del b
|
|
c39d60870e89e66df46f147a4917f00218b4ad01
|
src/shared/game_state_change.py
|
src/shared/game_state_change.py
|
from abc import ABCMeta, abstractmethod
class GameStateChange(object):
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, gameState):
pass
class ResourceChange(GameStateChange):
def __init__(self, playerId, delta):
super(ResourceChange, self).__init__()
self.playerId = playerId
self.delta = delta
def apply(self, gameState):
gameState.resources[self.playerId] -= self.delta
assert gameState.resources[self.playerId] >= 0
|
Add a class for representing changes to GameState.
|
Add a class for representing changes to GameState.
|
Python
|
mit
|
CheeseLord/warts,CheeseLord/warts
|
Add a class for representing changes to GameState.
|
from abc import ABCMeta, abstractmethod
class GameStateChange(object):
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, gameState):
pass
class ResourceChange(GameStateChange):
def __init__(self, playerId, delta):
super(ResourceChange, self).__init__()
self.playerId = playerId
self.delta = delta
def apply(self, gameState):
gameState.resources[self.playerId] -= self.delta
assert gameState.resources[self.playerId] >= 0
|
<commit_before><commit_msg>Add a class for representing changes to GameState.<commit_after>
|
from abc import ABCMeta, abstractmethod
class GameStateChange(object):
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, gameState):
pass
class ResourceChange(GameStateChange):
def __init__(self, playerId, delta):
super(ResourceChange, self).__init__()
self.playerId = playerId
self.delta = delta
def apply(self, gameState):
gameState.resources[self.playerId] -= self.delta
assert gameState.resources[self.playerId] >= 0
|
Add a class for representing changes to GameState.from abc import ABCMeta, abstractmethod
class GameStateChange(object):
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, gameState):
pass
class ResourceChange(GameStateChange):
def __init__(self, playerId, delta):
super(ResourceChange, self).__init__()
self.playerId = playerId
self.delta = delta
def apply(self, gameState):
gameState.resources[self.playerId] -= self.delta
assert gameState.resources[self.playerId] >= 0
|
<commit_before><commit_msg>Add a class for representing changes to GameState.<commit_after>from abc import ABCMeta, abstractmethod
class GameStateChange(object):
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, gameState):
pass
class ResourceChange(GameStateChange):
def __init__(self, playerId, delta):
super(ResourceChange, self).__init__()
self.playerId = playerId
self.delta = delta
def apply(self, gameState):
gameState.resources[self.playerId] -= self.delta
assert gameState.resources[self.playerId] >= 0
|
|
1260f2dfd29b007a3fd1954b869a19e631c3c66c
|
senlin/tests/tempest/api/profiles/test_profile_validate_negative.py
|
senlin/tests/tempest/api/profiles/test_profile_validate_negative.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('d128781c-808f-4dee-b8b6-abe4def40eb1')
def test_profile_validate_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('7c66eaa1-a78c-4b60-9b0f-c6fa91f28778')
def test_profile_validate_no_spec(self):
params = {
'profile': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('d661c452-3752-4196-9649-4b44ac9c55a6')
def test_profile_validate_profile_type_incorrect(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['type'] = 'senlin.profile.bogus'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('c0fe55cf-608c-4e89-bf85-4561805fc867')
def test_profile_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['properties']['bogus'] = 'foo'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
|
Add negative API tests for profile validation
|
Add negative API tests for profile validation
Add negative API tests for profile validation
Change-Id: I3ad3c9d891c857d9c6cfc08dcee2e3762d566115
|
Python
|
apache-2.0
|
stackforge/senlin,openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin
|
Add negative API tests for profile validation
Add negative API tests for profile validation
Change-Id: I3ad3c9d891c857d9c6cfc08dcee2e3762d566115
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('d128781c-808f-4dee-b8b6-abe4def40eb1')
def test_profile_validate_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('7c66eaa1-a78c-4b60-9b0f-c6fa91f28778')
def test_profile_validate_no_spec(self):
params = {
'profile': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('d661c452-3752-4196-9649-4b44ac9c55a6')
def test_profile_validate_profile_type_incorrect(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['type'] = 'senlin.profile.bogus'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('c0fe55cf-608c-4e89-bf85-4561805fc867')
def test_profile_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['properties']['bogus'] = 'foo'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
|
<commit_before><commit_msg>Add negative API tests for profile validation
Add negative API tests for profile validation
Change-Id: I3ad3c9d891c857d9c6cfc08dcee2e3762d566115<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('d128781c-808f-4dee-b8b6-abe4def40eb1')
def test_profile_validate_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('7c66eaa1-a78c-4b60-9b0f-c6fa91f28778')
def test_profile_validate_no_spec(self):
params = {
'profile': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('d661c452-3752-4196-9649-4b44ac9c55a6')
def test_profile_validate_profile_type_incorrect(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['type'] = 'senlin.profile.bogus'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('c0fe55cf-608c-4e89-bf85-4561805fc867')
def test_profile_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['properties']['bogus'] = 'foo'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
|
Add negative API tests for profile validation
Add negative API tests for profile validation
Change-Id: I3ad3c9d891c857d9c6cfc08dcee2e3762d566115# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('d128781c-808f-4dee-b8b6-abe4def40eb1')
def test_profile_validate_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('7c66eaa1-a78c-4b60-9b0f-c6fa91f28778')
def test_profile_validate_no_spec(self):
params = {
'profile': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('d661c452-3752-4196-9649-4b44ac9c55a6')
def test_profile_validate_profile_type_incorrect(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['type'] = 'senlin.profile.bogus'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('c0fe55cf-608c-4e89-bf85-4561805fc867')
def test_profile_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['properties']['bogus'] = 'foo'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
|
<commit_before><commit_msg>Add negative API tests for profile validation
Add negative API tests for profile validation
Change-Id: I3ad3c9d891c857d9c6cfc08dcee2e3762d566115<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('d128781c-808f-4dee-b8b6-abe4def40eb1')
def test_profile_validate_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('7c66eaa1-a78c-4b60-9b0f-c6fa91f28778')
def test_profile_validate_no_spec(self):
params = {
'profile': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('d661c452-3752-4196-9649-4b44ac9c55a6')
def test_profile_validate_profile_type_incorrect(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['type'] = 'senlin.profile.bogus'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('c0fe55cf-608c-4e89-bf85-4561805fc867')
def test_profile_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_nova_server)
spec['properties']['bogus'] = 'foo'
params = {
'profile': {
'name': 'test-profile',
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'profiles', params)
|
|
bc00b0732ef1e8e89f37ff7a9a9d089eff5b85b7
|
problem2.py
|
problem2.py
|
"""
An RNA string is a string formed from the alphabet containing 'A', 'C', 'G', and 'U'.
Given a DNA string t corresponding to a coding strand, its transcribed RNA string u is formed by replacing
all occurrences of 'T' in t with 'U' in u.
Given: A DNA string t having length at most 1000 nt.
Return: The transcribed RNA string of t.
"""
if __name__ == '__main__':
with open('data/rosalind_rna.txt', 'r') as f:
sequence = f.read()
print sequence.replace('T', 'U')
|
Add solution to transcribing DNA into RNA
|
Add solution to transcribing DNA into RNA
|
Python
|
mit
|
MichaelAquilina/rosalind-solutions
|
Add solution to transcribing DNA into RNA
|
"""
An RNA string is a string formed from the alphabet containing 'A', 'C', 'G', and 'U'.
Given a DNA string t corresponding to a coding strand, its transcribed RNA string u is formed by replacing
all occurrences of 'T' in t with 'U' in u.
Given: A DNA string t having length at most 1000 nt.
Return: The transcribed RNA string of t.
"""
if __name__ == '__main__':
with open('data/rosalind_rna.txt', 'r') as f:
sequence = f.read()
print sequence.replace('T', 'U')
|
<commit_before><commit_msg>Add solution to transcribing DNA into RNA<commit_after>
|
"""
An RNA string is a string formed from the alphabet containing 'A', 'C', 'G', and 'U'.
Given a DNA string t corresponding to a coding strand, its transcribed RNA string u is formed by replacing
all occurrences of 'T' in t with 'U' in u.
Given: A DNA string t having length at most 1000 nt.
Return: The transcribed RNA string of t.
"""
if __name__ == '__main__':
with open('data/rosalind_rna.txt', 'r') as f:
sequence = f.read()
print sequence.replace('T', 'U')
|
Add solution to transcribing DNA into RNA"""
An RNA string is a string formed from the alphabet containing 'A', 'C', 'G', and 'U'.
Given a DNA string t corresponding to a coding strand, its transcribed RNA string u is formed by replacing
all occurrences of 'T' in t with 'U' in u.
Given: A DNA string t having length at most 1000 nt.
Return: The transcribed RNA string of t.
"""
if __name__ == '__main__':
with open('data/rosalind_rna.txt', 'r') as f:
sequence = f.read()
print sequence.replace('T', 'U')
|
<commit_before><commit_msg>Add solution to transcribing DNA into RNA<commit_after>"""
An RNA string is a string formed from the alphabet containing 'A', 'C', 'G', and 'U'.
Given a DNA string t corresponding to a coding strand, its transcribed RNA string u is formed by replacing
all occurrences of 'T' in t with 'U' in u.
Given: A DNA string t having length at most 1000 nt.
Return: The transcribed RNA string of t.
"""
if __name__ == '__main__':
with open('data/rosalind_rna.txt', 'r') as f:
sequence = f.read()
print sequence.replace('T', 'U')
|
|
d7bc8c5f7d04c72c49f88d9c6c0a962ba5bc539f
|
py/1st_k.py
|
py/1st_k.py
|
"""
== Given a sorted list, find the first occurrence of k ==
"""
def find_first(l, k):
""" Assumes that 'l' is a list of integars and is sorted """
# Input checks
if len(l) is 0:
return -1
# Initialize binary search params
result = -1
upper = len(l) - 1
lower = 0
# Search loop
while lower <= upper:
# Calculate middle index
mid = lower + ((upper - lower)/2)
# Check if possible solution
if l[mid] == k:
result = mid
# Binary search
if l[mid] >= k:
upper = mid - 1
else:
lower = mid + 1
return result
def test_find_first():
# Test empty list
assert(find_first([], 0) == -1)
# Test k is not in list
assert(find_first(range(100), -1) == -1)
assert(find_first(range(-100, 100), 100) == -1)
# Test single k at boundaries
assert(find_first(range(1000), 999) == 999)
assert(find_first(range(10, 9001), 10) == 0)
# Test large number of repeats at various positions
test_list = range(100) + ([100] * 1000) + range(101, 100)
assert(find_first(test_list, 100) == 100)
test_list = range(-1000, 0) + ([0] * 997) + range(101, 100)
assert(find_first(test_list, 0) == 1000)
if __name__ == "__main__":
test_find_first()
|
Add python program for find first occurrence of k
|
Add python program for find first occurrence of k
|
Python
|
mit
|
tdeh/quickies,tdeh/quickies
|
Add python program for find first occurrence of k
|
"""
== Given a sorted list, find the first occurrence of k ==
"""
def find_first(l, k):
""" Assumes that 'l' is a list of integars and is sorted """
# Input checks
if len(l) is 0:
return -1
# Initialize binary search params
result = -1
upper = len(l) - 1
lower = 0
# Search loop
while lower <= upper:
# Calculate middle index
mid = lower + ((upper - lower)/2)
# Check if possible solution
if l[mid] == k:
result = mid
# Binary search
if l[mid] >= k:
upper = mid - 1
else:
lower = mid + 1
return result
def test_find_first():
# Test empty list
assert(find_first([], 0) == -1)
# Test k is not in list
assert(find_first(range(100), -1) == -1)
assert(find_first(range(-100, 100), 100) == -1)
# Test single k at boundaries
assert(find_first(range(1000), 999) == 999)
assert(find_first(range(10, 9001), 10) == 0)
# Test large number of repeats at various positions
test_list = range(100) + ([100] * 1000) + range(101, 100)
assert(find_first(test_list, 100) == 100)
test_list = range(-1000, 0) + ([0] * 997) + range(101, 100)
assert(find_first(test_list, 0) == 1000)
if __name__ == "__main__":
test_find_first()
|
<commit_before><commit_msg>Add python program for find first occurrence of k<commit_after>
|
"""
== Given a sorted list, find the first occurrence of k ==
"""
def find_first(l, k):
""" Assumes that 'l' is a list of integars and is sorted """
# Input checks
if len(l) is 0:
return -1
# Initialize binary search params
result = -1
upper = len(l) - 1
lower = 0
# Search loop
while lower <= upper:
# Calculate middle index
mid = lower + ((upper - lower)/2)
# Check if possible solution
if l[mid] == k:
result = mid
# Binary search
if l[mid] >= k:
upper = mid - 1
else:
lower = mid + 1
return result
def test_find_first():
# Test empty list
assert(find_first([], 0) == -1)
# Test k is not in list
assert(find_first(range(100), -1) == -1)
assert(find_first(range(-100, 100), 100) == -1)
# Test single k at boundaries
assert(find_first(range(1000), 999) == 999)
assert(find_first(range(10, 9001), 10) == 0)
# Test large number of repeats at various positions
test_list = range(100) + ([100] * 1000) + range(101, 100)
assert(find_first(test_list, 100) == 100)
test_list = range(-1000, 0) + ([0] * 997) + range(101, 100)
assert(find_first(test_list, 0) == 1000)
if __name__ == "__main__":
test_find_first()
|
Add python program for find first occurrence of k"""
== Given a sorted list, find the first occurrence of k ==
"""
def find_first(l, k):
""" Assumes that 'l' is a list of integars and is sorted """
# Input checks
if len(l) is 0:
return -1
# Initialize binary search params
result = -1
upper = len(l) - 1
lower = 0
# Search loop
while lower <= upper:
# Calculate middle index
mid = lower + ((upper - lower)/2)
# Check if possible solution
if l[mid] == k:
result = mid
# Binary search
if l[mid] >= k:
upper = mid - 1
else:
lower = mid + 1
return result
def test_find_first():
# Test empty list
assert(find_first([], 0) == -1)
# Test k is not in list
assert(find_first(range(100), -1) == -1)
assert(find_first(range(-100, 100), 100) == -1)
# Test single k at boundaries
assert(find_first(range(1000), 999) == 999)
assert(find_first(range(10, 9001), 10) == 0)
# Test large number of repeats at various positions
test_list = range(100) + ([100] * 1000) + range(101, 100)
assert(find_first(test_list, 100) == 100)
test_list = range(-1000, 0) + ([0] * 997) + range(101, 100)
assert(find_first(test_list, 0) == 1000)
if __name__ == "__main__":
test_find_first()
|
<commit_before><commit_msg>Add python program for find first occurrence of k<commit_after>"""
== Given a sorted list, find the first occurrence of k ==
"""
def find_first(l, k):
""" Assumes that 'l' is a list of integars and is sorted """
# Input checks
if len(l) is 0:
return -1
# Initialize binary search params
result = -1
upper = len(l) - 1
lower = 0
# Search loop
while lower <= upper:
# Calculate middle index
mid = lower + ((upper - lower)/2)
# Check if possible solution
if l[mid] == k:
result = mid
# Binary search
if l[mid] >= k:
upper = mid - 1
else:
lower = mid + 1
return result
def test_find_first():
# Test empty list
assert(find_first([], 0) == -1)
# Test k is not in list
assert(find_first(range(100), -1) == -1)
assert(find_first(range(-100, 100), 100) == -1)
# Test single k at boundaries
assert(find_first(range(1000), 999) == 999)
assert(find_first(range(10, 9001), 10) == 0)
# Test large number of repeats at various positions
test_list = range(100) + ([100] * 1000) + range(101, 100)
assert(find_first(test_list, 100) == 100)
test_list = range(-1000, 0) + ([0] * 997) + range(101, 100)
assert(find_first(test_list, 0) == 1000)
if __name__ == "__main__":
test_find_first()
|
|
04d2405b560e508a43fcd7b9be8036ea6d9335be
|
museum_site/migrations/0026_auto_20190307_0205.py
|
museum_site/migrations/0026_auto_20190307_0205.py
|
# Generated by Django 2.1.7 on 2019-03-07 02:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0025_auto_20181209_0608'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='page',
),
migrations.RemoveField(
model_name='article',
name='parent',
),
migrations.AlterField(
model_name='file',
name='articles',
field=models.ManyToManyField(blank=True, default=None, limit_choices_to={'page': 1}, to='museum_site.Article'),
),
migrations.AlterField(
model_name='file',
name='superceded',
field=models.ForeignKey(blank=True, db_column='superceded_id', default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
migrations.AlterField(
model_name='review',
name='file',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
]
|
Remove unused page/parent fields from Articles
|
Remove unused page/parent fields from Articles
|
Python
|
mit
|
DrDos0016/z2,DrDos0016/z2,DrDos0016/z2
|
Remove unused page/parent fields from Articles
|
# Generated by Django 2.1.7 on 2019-03-07 02:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0025_auto_20181209_0608'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='page',
),
migrations.RemoveField(
model_name='article',
name='parent',
),
migrations.AlterField(
model_name='file',
name='articles',
field=models.ManyToManyField(blank=True, default=None, limit_choices_to={'page': 1}, to='museum_site.Article'),
),
migrations.AlterField(
model_name='file',
name='superceded',
field=models.ForeignKey(blank=True, db_column='superceded_id', default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
migrations.AlterField(
model_name='review',
name='file',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
]
|
<commit_before><commit_msg>Remove unused page/parent fields from Articles<commit_after>
|
# Generated by Django 2.1.7 on 2019-03-07 02:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0025_auto_20181209_0608'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='page',
),
migrations.RemoveField(
model_name='article',
name='parent',
),
migrations.AlterField(
model_name='file',
name='articles',
field=models.ManyToManyField(blank=True, default=None, limit_choices_to={'page': 1}, to='museum_site.Article'),
),
migrations.AlterField(
model_name='file',
name='superceded',
field=models.ForeignKey(blank=True, db_column='superceded_id', default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
migrations.AlterField(
model_name='review',
name='file',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
]
|
Remove unused page/parent fields from Articles# Generated by Django 2.1.7 on 2019-03-07 02:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0025_auto_20181209_0608'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='page',
),
migrations.RemoveField(
model_name='article',
name='parent',
),
migrations.AlterField(
model_name='file',
name='articles',
field=models.ManyToManyField(blank=True, default=None, limit_choices_to={'page': 1}, to='museum_site.Article'),
),
migrations.AlterField(
model_name='file',
name='superceded',
field=models.ForeignKey(blank=True, db_column='superceded_id', default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
migrations.AlterField(
model_name='review',
name='file',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
]
|
<commit_before><commit_msg>Remove unused page/parent fields from Articles<commit_after># Generated by Django 2.1.7 on 2019-03-07 02:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0025_auto_20181209_0608'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='page',
),
migrations.RemoveField(
model_name='article',
name='parent',
),
migrations.AlterField(
model_name='file',
name='articles',
field=models.ManyToManyField(blank=True, default=None, limit_choices_to={'page': 1}, to='museum_site.Article'),
),
migrations.AlterField(
model_name='file',
name='superceded',
field=models.ForeignKey(blank=True, db_column='superceded_id', default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
migrations.AlterField(
model_name='review',
name='file',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='museum_site.File'),
),
]
|
|
63fdc6417888b672bed8d50d8cad9e2506d6b0f2
|
libpebble2/protocol/appglance.py
|
libpebble2/protocol/appglance.py
|
from __future__ import absolute_import
__author__ = 'katharine'
"""
This file is special in that it actually contains definitions of
blobdb blob formats rather than pebble protocol messages.
"""
from copy import deepcopy
from enum import IntEnum
from .base import PebblePacket
from .base.types import *
from .timeline import TimelineAttribute
import struct
__all__ = ["AppGlanceSliceIconAndSubtitle", "AppGlance"]
class AppGlanceSliceType(IntEnum):
IconAndSubtitle = 0
class AppGlanceSlice(PebblePacket):
def __init__(self, expiration_time, slice_type, extra_attributes=None):
attributes = deepcopy(extra_attributes)
attributes.append(TimelineAttribute(attribute_id=37, content=struct.pack('<I', expiration_time)))
# Add 4 bytes to account for total_size (2), type (1), and attribute_count (1)
total_size = 4 + sum([len(attribute.serialise()) for attribute in attributes])
super(AppGlanceSlice, self).__init__(total_size=total_size, type=slice_type, attribute_count=len(attributes),
attributes=attributes)
class Meta:
endianness = '<'
total_size = Uint16()
type = Uint8(enum=AppGlanceSliceType)
attribute_count = Uint8()
attributes = FixedList(TimelineAttribute, count=attribute_count)
class AppGlanceSliceIconAndSubtitle(AppGlanceSlice):
def __init__(self, expiration_time, icon_resource_id=None, subtitle=None):
attributes = []
if icon_resource_id:
attributes.append(TimelineAttribute(attribute_id=4, content=struct.pack('<I', icon_resource_id)))
if subtitle:
attributes.append(TimelineAttribute(attribute_id=47, content=subtitle.encode('utf-8')))
super(AppGlanceSliceIconAndSubtitle, self).__init__(expiration_time, AppGlanceSliceType.IconAndSubtitle,
extra_attributes=attributes)
class AppGlance(PebblePacket):
class Meta:
endianness = '<'
version = Uint8()
creation_time = Uint32()
slices = FixedList(AppGlanceSlice)
|
Add BlobDB format definitions for AppGlanceSlice and AppGlance
|
Add BlobDB format definitions for AppGlanceSlice and AppGlance
|
Python
|
mit
|
pebble/libpebble2
|
Add BlobDB format definitions for AppGlanceSlice and AppGlance
|
from __future__ import absolute_import
__author__ = 'katharine'
"""
This file is special in that it actually contains definitions of
blobdb blob formats rather than pebble protocol messages.
"""
from copy import deepcopy
from enum import IntEnum
from .base import PebblePacket
from .base.types import *
from .timeline import TimelineAttribute
import struct
__all__ = ["AppGlanceSliceIconAndSubtitle", "AppGlance"]
class AppGlanceSliceType(IntEnum):
IconAndSubtitle = 0
class AppGlanceSlice(PebblePacket):
def __init__(self, expiration_time, slice_type, extra_attributes=None):
attributes = deepcopy(extra_attributes)
attributes.append(TimelineAttribute(attribute_id=37, content=struct.pack('<I', expiration_time)))
# Add 4 bytes to account for total_size (2), type (1), and attribute_count (1)
total_size = 4 + sum([len(attribute.serialise()) for attribute in attributes])
super(AppGlanceSlice, self).__init__(total_size=total_size, type=slice_type, attribute_count=len(attributes),
attributes=attributes)
class Meta:
endianness = '<'
total_size = Uint16()
type = Uint8(enum=AppGlanceSliceType)
attribute_count = Uint8()
attributes = FixedList(TimelineAttribute, count=attribute_count)
class AppGlanceSliceIconAndSubtitle(AppGlanceSlice):
def __init__(self, expiration_time, icon_resource_id=None, subtitle=None):
attributes = []
if icon_resource_id:
attributes.append(TimelineAttribute(attribute_id=4, content=struct.pack('<I', icon_resource_id)))
if subtitle:
attributes.append(TimelineAttribute(attribute_id=47, content=subtitle.encode('utf-8')))
super(AppGlanceSliceIconAndSubtitle, self).__init__(expiration_time, AppGlanceSliceType.IconAndSubtitle,
extra_attributes=attributes)
class AppGlance(PebblePacket):
class Meta:
endianness = '<'
version = Uint8()
creation_time = Uint32()
slices = FixedList(AppGlanceSlice)
|
<commit_before><commit_msg>Add BlobDB format definitions for AppGlanceSlice and AppGlance<commit_after>
|
from __future__ import absolute_import
__author__ = 'katharine'
"""
This file is special in that it actually contains definitions of
blobdb blob formats rather than pebble protocol messages.
"""
from copy import deepcopy
from enum import IntEnum
from .base import PebblePacket
from .base.types import *
from .timeline import TimelineAttribute
import struct
__all__ = ["AppGlanceSliceIconAndSubtitle", "AppGlance"]
class AppGlanceSliceType(IntEnum):
IconAndSubtitle = 0
class AppGlanceSlice(PebblePacket):
def __init__(self, expiration_time, slice_type, extra_attributes=None):
attributes = deepcopy(extra_attributes)
attributes.append(TimelineAttribute(attribute_id=37, content=struct.pack('<I', expiration_time)))
# Add 4 bytes to account for total_size (2), type (1), and attribute_count (1)
total_size = 4 + sum([len(attribute.serialise()) for attribute in attributes])
super(AppGlanceSlice, self).__init__(total_size=total_size, type=slice_type, attribute_count=len(attributes),
attributes=attributes)
class Meta:
endianness = '<'
total_size = Uint16()
type = Uint8(enum=AppGlanceSliceType)
attribute_count = Uint8()
attributes = FixedList(TimelineAttribute, count=attribute_count)
class AppGlanceSliceIconAndSubtitle(AppGlanceSlice):
def __init__(self, expiration_time, icon_resource_id=None, subtitle=None):
attributes = []
if icon_resource_id:
attributes.append(TimelineAttribute(attribute_id=4, content=struct.pack('<I', icon_resource_id)))
if subtitle:
attributes.append(TimelineAttribute(attribute_id=47, content=subtitle.encode('utf-8')))
super(AppGlanceSliceIconAndSubtitle, self).__init__(expiration_time, AppGlanceSliceType.IconAndSubtitle,
extra_attributes=attributes)
class AppGlance(PebblePacket):
class Meta:
endianness = '<'
version = Uint8()
creation_time = Uint32()
slices = FixedList(AppGlanceSlice)
|
Add BlobDB format definitions for AppGlanceSlice and AppGlancefrom __future__ import absolute_import
__author__ = 'katharine'
"""
This file is special in that it actually contains definitions of
blobdb blob formats rather than pebble protocol messages.
"""
from copy import deepcopy
from enum import IntEnum
from .base import PebblePacket
from .base.types import *
from .timeline import TimelineAttribute
import struct
__all__ = ["AppGlanceSliceIconAndSubtitle", "AppGlance"]
class AppGlanceSliceType(IntEnum):
IconAndSubtitle = 0
class AppGlanceSlice(PebblePacket):
def __init__(self, expiration_time, slice_type, extra_attributes=None):
attributes = deepcopy(extra_attributes)
attributes.append(TimelineAttribute(attribute_id=37, content=struct.pack('<I', expiration_time)))
# Add 4 bytes to account for total_size (2), type (1), and attribute_count (1)
total_size = 4 + sum([len(attribute.serialise()) for attribute in attributes])
super(AppGlanceSlice, self).__init__(total_size=total_size, type=slice_type, attribute_count=len(attributes),
attributes=attributes)
class Meta:
endianness = '<'
total_size = Uint16()
type = Uint8(enum=AppGlanceSliceType)
attribute_count = Uint8()
attributes = FixedList(TimelineAttribute, count=attribute_count)
class AppGlanceSliceIconAndSubtitle(AppGlanceSlice):
def __init__(self, expiration_time, icon_resource_id=None, subtitle=None):
attributes = []
if icon_resource_id:
attributes.append(TimelineAttribute(attribute_id=4, content=struct.pack('<I', icon_resource_id)))
if subtitle:
attributes.append(TimelineAttribute(attribute_id=47, content=subtitle.encode('utf-8')))
super(AppGlanceSliceIconAndSubtitle, self).__init__(expiration_time, AppGlanceSliceType.IconAndSubtitle,
extra_attributes=attributes)
class AppGlance(PebblePacket):
class Meta:
endianness = '<'
version = Uint8()
creation_time = Uint32()
slices = FixedList(AppGlanceSlice)
|
<commit_before><commit_msg>Add BlobDB format definitions for AppGlanceSlice and AppGlance<commit_after>from __future__ import absolute_import
__author__ = 'katharine'
"""
This file is special in that it actually contains definitions of
blobdb blob formats rather than pebble protocol messages.
"""
from copy import deepcopy
from enum import IntEnum
from .base import PebblePacket
from .base.types import *
from .timeline import TimelineAttribute
import struct
__all__ = ["AppGlanceSliceIconAndSubtitle", "AppGlance"]
class AppGlanceSliceType(IntEnum):
IconAndSubtitle = 0
class AppGlanceSlice(PebblePacket):
def __init__(self, expiration_time, slice_type, extra_attributes=None):
attributes = deepcopy(extra_attributes)
attributes.append(TimelineAttribute(attribute_id=37, content=struct.pack('<I', expiration_time)))
# Add 4 bytes to account for total_size (2), type (1), and attribute_count (1)
total_size = 4 + sum([len(attribute.serialise()) for attribute in attributes])
super(AppGlanceSlice, self).__init__(total_size=total_size, type=slice_type, attribute_count=len(attributes),
attributes=attributes)
class Meta:
endianness = '<'
total_size = Uint16()
type = Uint8(enum=AppGlanceSliceType)
attribute_count = Uint8()
attributes = FixedList(TimelineAttribute, count=attribute_count)
class AppGlanceSliceIconAndSubtitle(AppGlanceSlice):
def __init__(self, expiration_time, icon_resource_id=None, subtitle=None):
attributes = []
if icon_resource_id:
attributes.append(TimelineAttribute(attribute_id=4, content=struct.pack('<I', icon_resource_id)))
if subtitle:
attributes.append(TimelineAttribute(attribute_id=47, content=subtitle.encode('utf-8')))
super(AppGlanceSliceIconAndSubtitle, self).__init__(expiration_time, AppGlanceSliceType.IconAndSubtitle,
extra_attributes=attributes)
class AppGlance(PebblePacket):
class Meta:
endianness = '<'
version = Uint8()
creation_time = Uint32()
slices = FixedList(AppGlanceSlice)
|
|
0902e23ba850b4d3a8ba22a93bc90005e37b13b8
|
maxwellbloch/tests/test_field.py
|
maxwellbloch/tests/test_field.py
|
"""
Unit tests for the field module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
from maxwellbloch import field
class TestInit(unittest.TestCase):
def test_init_default(self):
""" Test Default Initialise """
field_00 = field.Field()
self.assertEqual(field_00.label, '')
self.assertEqual(field_00.coupled_levels, [])
self.assertEqual(field_00.detuning, 0.0)
self.assertEqual(field_00.detuning_positive, True)
self.assertEqual(field_00.rabi_freq, 0.0)
self.assertEqual(field_00.rabi_freq_t_func, None)
self.assertEqual(field_00.rabi_freq_t_args, {})
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
Add Field default init test
|
Add Field default init test
|
Python
|
mit
|
tommyogden/maxwellbloch,tommyogden/maxwellbloch
|
Add Field default init test
|
"""
Unit tests for the field module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
from maxwellbloch import field
class TestInit(unittest.TestCase):
def test_init_default(self):
""" Test Default Initialise """
field_00 = field.Field()
self.assertEqual(field_00.label, '')
self.assertEqual(field_00.coupled_levels, [])
self.assertEqual(field_00.detuning, 0.0)
self.assertEqual(field_00.detuning_positive, True)
self.assertEqual(field_00.rabi_freq, 0.0)
self.assertEqual(field_00.rabi_freq_t_func, None)
self.assertEqual(field_00.rabi_freq_t_args, {})
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add Field default init test<commit_after>
|
"""
Unit tests for the field module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
from maxwellbloch import field
class TestInit(unittest.TestCase):
def test_init_default(self):
""" Test Default Initialise """
field_00 = field.Field()
self.assertEqual(field_00.label, '')
self.assertEqual(field_00.coupled_levels, [])
self.assertEqual(field_00.detuning, 0.0)
self.assertEqual(field_00.detuning_positive, True)
self.assertEqual(field_00.rabi_freq, 0.0)
self.assertEqual(field_00.rabi_freq_t_func, None)
self.assertEqual(field_00.rabi_freq_t_args, {})
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
Add Field default init test"""
Unit tests for the field module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
from maxwellbloch import field
class TestInit(unittest.TestCase):
def test_init_default(self):
""" Test Default Initialise """
field_00 = field.Field()
self.assertEqual(field_00.label, '')
self.assertEqual(field_00.coupled_levels, [])
self.assertEqual(field_00.detuning, 0.0)
self.assertEqual(field_00.detuning_positive, True)
self.assertEqual(field_00.rabi_freq, 0.0)
self.assertEqual(field_00.rabi_freq_t_func, None)
self.assertEqual(field_00.rabi_freq_t_args, {})
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add Field default init test<commit_after>"""
Unit tests for the field module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
from maxwellbloch import field
class TestInit(unittest.TestCase):
def test_init_default(self):
""" Test Default Initialise """
field_00 = field.Field()
self.assertEqual(field_00.label, '')
self.assertEqual(field_00.coupled_levels, [])
self.assertEqual(field_00.detuning, 0.0)
self.assertEqual(field_00.detuning_positive, True)
self.assertEqual(field_00.rabi_freq, 0.0)
self.assertEqual(field_00.rabi_freq_t_func, None)
self.assertEqual(field_00.rabi_freq_t_args, {})
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
|
cd57ebb8471094982c4f489b12b397ad3cde0091
|
osf/migrations/0105_merge_20180525_1529.py
|
osf/migrations/0105_merge_20180525_1529.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 20:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0104_merge_20180523_1240'),
('osf', '0104_merge_20180524_1621'),
]
operations = [
]
|
Add merge migration for GDPR
|
Add merge migration for GDPR
|
Python
|
apache-2.0
|
brianjgeiger/osf.io,cslzchen/osf.io,felliott/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,mfraezz/osf.io,aaxelb/osf.io,sloria/osf.io,felliott/osf.io,mfraezz/osf.io,pattisdr/osf.io,baylee-d/osf.io,icereval/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,saradbowman/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,pattisdr/osf.io,mattclark/osf.io,mfraezz/osf.io,baylee-d/osf.io,aaxelb/osf.io,baylee-d/osf.io,icereval/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,brianjgeiger/osf.io,sloria/osf.io,felliott/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,icereval/osf.io,mattclark/osf.io,pattisdr/osf.io,adlius/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,adlius/osf.io,sloria/osf.io,HalcyonChimera/osf.io,felliott/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,erinspace/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,adlius/osf.io
|
Add merge migration for GDPR
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 20:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0104_merge_20180523_1240'),
('osf', '0104_merge_20180524_1621'),
]
operations = [
]
|
<commit_before><commit_msg>Add merge migration for GDPR<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 20:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0104_merge_20180523_1240'),
('osf', '0104_merge_20180524_1621'),
]
operations = [
]
|
Add merge migration for GDPR# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 20:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0104_merge_20180523_1240'),
('osf', '0104_merge_20180524_1621'),
]
operations = [
]
|
<commit_before><commit_msg>Add merge migration for GDPR<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 20:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0104_merge_20180523_1240'),
('osf', '0104_merge_20180524_1621'),
]
operations = [
]
|
|
91e4c491dadc4806fa1ca113d8681ee199856243
|
misc/convertAll.py
|
misc/convertAll.py
|
#!/usr/bin/env python3
import os
from subprocess import call
for d in os.listdir('relannis/'):
print("Checking " + d)
if(os.path.isdir('relannis/' + d)):
print("Converting " + d)
call(["build/annis_runner", "import", 'relannis/' + d, 'data/' + d])
|
Add helper script to convert all corpora from the relannis/ folder to the data/ folder
|
Add helper script to convert all corpora from the relannis/ folder to the data/ folder
|
Python
|
apache-2.0
|
thomaskrause/graphANNIS,thomaskrause/graphANNIS,thomaskrause/graphANNIS,thomaskrause/graphANNIS,thomaskrause/graphANNIS,thomaskrause/graphANNIS,thomaskrause/graphANNIS
|
Add helper script to convert all corpora from the relannis/ folder to the data/ folder
|
#!/usr/bin/env python3
import os
from subprocess import call
for d in os.listdir('relannis/'):
print("Checking " + d)
if(os.path.isdir('relannis/' + d)):
print("Converting " + d)
call(["build/annis_runner", "import", 'relannis/' + d, 'data/' + d])
|
<commit_before><commit_msg>Add helper script to convert all corpora from the relannis/ folder to the data/ folder<commit_after>
|
#!/usr/bin/env python3
import os
from subprocess import call
for d in os.listdir('relannis/'):
print("Checking " + d)
if(os.path.isdir('relannis/' + d)):
print("Converting " + d)
call(["build/annis_runner", "import", 'relannis/' + d, 'data/' + d])
|
Add helper script to convert all corpora from the relannis/ folder to the data/ folder#!/usr/bin/env python3
import os
from subprocess import call
for d in os.listdir('relannis/'):
print("Checking " + d)
if(os.path.isdir('relannis/' + d)):
print("Converting " + d)
call(["build/annis_runner", "import", 'relannis/' + d, 'data/' + d])
|
<commit_before><commit_msg>Add helper script to convert all corpora from the relannis/ folder to the data/ folder<commit_after>#!/usr/bin/env python3
import os
from subprocess import call
for d in os.listdir('relannis/'):
print("Checking " + d)
if(os.path.isdir('relannis/' + d)):
print("Converting " + d)
call(["build/annis_runner", "import", 'relannis/' + d, 'data/' + d])
|