| commit | old_file | new_file | old_contents | new_contents | subject | message | lang | license | repos |
|---|---|---|---|---|---|---|---|---|---|
9623a504b1856116c096fbdeedd8555dc6423549
|
candidates/tests/test_validators.py
|
candidates/tests/test_validators.py
|
from django.test import TestCase

from ..forms import BasePersonForm


class TestValidators(TestCase):

    def test_twitter_bad_url(self):
        form = BasePersonForm({
            'name': 'John Doe',
            'twitter_username': 'http://example.org/blah',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                'twitter_username':
                [u'The Twitter username must only consist of alphanumeric characters or underscore']
            }
        )

    def test_twitter_fine(self):
        form = BasePersonForm({
            'name': 'John Doe',
            'twitter_username': 'madeuptwitteraccount',
        })
        self.assertTrue(form.is_valid())
        self.assertEqual(form.errors, {})
        self.assertEqual(
            form.cleaned_data['twitter_username'],
            'madeuptwitteraccount'
        )

    def test_twitter_full_url(self):
        form = BasePersonForm({
            'name': 'John Doe',
            'twitter_username': 'https://twitter.com/madeuptwitteraccount',
        })
        self.assertTrue(form.is_valid())
        self.assertEqual(form.errors, {})
        self.assertEqual(
            form.cleaned_data['twitter_username'],
            'madeuptwitteraccount'
        )
|
Add tests for the twitter_username validator
|
Add tests for the twitter_username validator
|
Python
|
agpl-3.0
|
neavouli/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mhl/yournextmp-popit,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,openstate/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit,mhl/yournextmp-popit,mysociety/yournextmp-popit,openstate/yournextrepresentative,mysociety/yournextrepresentative,mhl/yournextmp-popit,datamade/yournextmp-popit,openstate/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit
|
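The validator these tests exercise is not included in this row. A minimal sketch consistent with the three cases above, with the function name and regular expressions as assumptions rather than the repository's actual code, might look like:

```python
# Hypothetical sketch of the clean method the tests imply; the real
# BasePersonForm.clean_twitter_username may differ.
import re

from django import forms


def clean_twitter_username(username):
    username = username.strip()
    # Reduce a full profile URL to the bare username.
    match = re.match(r'^https?://(?:www\.)?twitter\.com/(\w+)/?$', username)
    if match:
        username = match.group(1)
    if not re.match(r'^\w+$', username):
        raise forms.ValidationError(
            u'The Twitter username must only consist of alphanumeric '
            u'characters or underscore')
    return username
```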
ccd19cb7676d073c771fedbf644b794ac02dd70c
|
pgallery/migrations/0005_auto_20200412_1111.py
|
pgallery/migrations/0005_auto_20200412_1111.py
|
# Generated by Django 2.2.12 on 2020-04-12 11:11
import django.contrib.postgres.fields.hstore
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('pgallery', '0004_auto_20160416_2351'),
    ]

    operations = [
        migrations.AlterField(
            model_name='photo',
            name='exif',
            field=django.contrib.postgres.fields.hstore.HStoreField(db_index=True, default=dict, editable=False),
        ),
    ]
|
Add missing migration file for exif change.
|
Add missing migration file for exif change.
|
Python
|
mit
|
zsiciarz/django-pgallery,zsiciarz/django-pgallery
|
93dcc1d93c8ecf17b9c07fc88b670ca1bd7e115b
|
01_challenge/test_solution.py
|
01_challenge/test_solution.py
|
import unittest

from solution import body_mass_index, shape_of


class TestBodyMassIndex(unittest.TestCase):

    def test_body_mass_index(self):
        self.assertEqual(body_mass_index(90, 2), 22.5)
        self.assertEqual(body_mass_index(90, 1.88), 25.5)


class TestShapeOf(unittest.TestCase):

    def test_shape_of_severe_malnutrition(self):
        self.assertEqual(shape_of(13, 1), 'тежко недохранване')
        self.assertEqual(shape_of(17, 1.2), 'тежко недохранване')

    def test_shape_of_average_malnutrition(self):
        self.assertEqual(shape_of(15.1, 1), 'средно недохранване')
        self.assertEqual(shape_of(19, 1.1), 'средно недохранване')

    def test_shape_of_mild_malnutrition(self):
        self.assertEqual(shape_of(16.1, 1), 'леко недохранване')
        self.assertEqual(shape_of(18.5, 1), 'леко недохранване')

    def test_shape_of_normal_weight(self):
        self.assertEqual(shape_of(22.9, 1.1), 'нормално тегло')
        self.assertEqual(shape_of(60, 1.77), 'нормално тегло')

    def test_shape_of_overweight(self):
        self.assertEqual(shape_of(25.1, 1), 'наднормено тегло')
        self.assertEqual(shape_of(30, 1), 'наднормено тегло')

    def test_shape_of_first_obesity(self):
        self.assertEqual(shape_of(32, 1), 'затлъстяване I степен')
        self.assertEqual(shape_of(35, 1), 'затлъстяване I степен')

    def test_shape_of_second_obesity(self):
        self.assertEqual(shape_of(35.1, 1), 'затлъстяване II степен')
        self.assertEqual(shape_of(40, 1), 'затлъстяване II степен')

    def test_shape_of_third_obesity(self):
        self.assertEqual(shape_of(55, 1), 'затлъстяване III степен')
        self.assertEqual(shape_of(999, 2), 'затлъстяване III степен')


if __name__ == '__main__':
    unittest.main()
|
Add tests for 01 challenge.
|
Add tests for 01 challenge.
|
Python
|
mit
|
pepincho/Python-Course-FMI
|
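The solution module itself is not part of this row. A sketch that satisfies the assertions, with the category thresholds inferred from the test cases (the exact boundary between obesity II and III, somewhere between BMI 40 and 55, is an assumption), could be:

```python
# Hypothetical solution.py; thresholds reverse-engineered from the tests.
def body_mass_index(weight, height):
    # BMI = weight / height^2, rounded to one decimal place.
    return round(weight / float(height ** 2), 1)


def shape_of(weight, height):
    bmi = body_mass_index(weight, height)
    if bmi <= 15:
        return 'тежко недохранване'
    elif bmi <= 16:
        return 'средно недохранване'
    elif bmi <= 18.5:
        return 'леко недохранване'
    elif bmi <= 25:
        return 'нормално тегло'
    elif bmi <= 30:
        return 'наднормено тегло'
    elif bmi <= 35:
        return 'затлъстяване I степен'
    elif bmi <= 40:
        return 'затлъстяване II степен'
    else:
        return 'затлъстяване III степен'
```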
777ac84471f3baf02dbe49ca93dc2f45433a3967
|
CodeFights/removeArrayPart.py
|
CodeFights/removeArrayPart.py
|
#!/usr/local/bin/python
# Code Fights Remove Array Part Problem


def removeArrayPart(inputArray, l, r):
    # Drop the inclusive range l..r by concatenating the pieces around it.
    return inputArray[:l] + inputArray[r + 1:]


def main():
    tests = [
        [[2, 3, 2, 3, 4, 5], 2, 4, [2, 3, 5]],
        [[2, 4, 10, 1], 0, 2, [1]],
        [[5, 3, 2, 3, 4], 1, 1, [5, 2, 3, 4]]
    ]
    for t in tests:
        res = removeArrayPart(t[0], t[1], t[2])
        ans = t[3]
        if ans == res:
            print("PASSED: removeArrayPart({}, {}, {}) returned {}"
                  .format(t[0], t[1], t[2], res))
        else:
            print(("FAILED: removeArrayPart({}, {}, {}) returned {}, "
                   "answer: {}").format(t[0], t[1], t[2], res, ans))


if __name__ == '__main__':
    main()
|
Solve Code Fights remove array part problem
|
Solve Code Fights remove array part problem
|
Python
|
mit
|
HKuz/Test_Code
|
845700256b6e33d35d49fce1fca57c0fd95a9947
|
examples/slow_task.py
|
examples/slow_task.py
|
from unreal_engine import FSlowTask
import time

# Create an FSlowTask object, defining the amount of work that
# will be done, and the initial message.
t = FSlowTask(10, "Doing Something")
t.initialize()

# Make the dialog, and include a Cancel button (default is not to
# allow a cancel button).
t.make_dialog(True)

time.sleep(1)

for i in range(10):
    # Update the progress bar. Note that the first argument is the
    # amount of work to be done this frame, not the overall work
    # done so far.
    t.enter_progress_frame(1, "Progress Position : {}".format(i))
    time.sleep(0.2)

    # If there was a cancel button included, we can check if it was
    # pressed.
    if t.received_user_cancel():
        print("Cancelled")
        break

t.destroy()
|
Add usage example for FSlowTask
|
Add usage example for FSlowTask
|
Python
|
mit
|
20tab/UnrealEnginePython,getnamo/UnrealEnginePython,kitelightning/UnrealEnginePython,20tab/UnrealEnginePython,20tab/UnrealEnginePython,getnamo/UnrealEnginePython,kitelightning/UnrealEnginePython,kitelightning/UnrealEnginePython,20tab/UnrealEnginePython,getnamo/UnrealEnginePython,getnamo/UnrealEnginePython,kitelightning/UnrealEnginePython,20tab/UnrealEnginePython,getnamo/UnrealEnginePython,kitelightning/UnrealEnginePython
|
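Because enter_progress_frame reports the work completed in the current frame rather than a running total, frames need not be uniform. A variant sketch under the same API (it runs only inside the Unreal Editor's Python environment, and the chunk sizes are made up):

```python
from unreal_engine import FSlowTask

# 100 units of total work, reported in three uneven chunks.
t = FSlowTask(100, "Copying Assets")
t.initialize()
t.make_dialog(True)
for chunk in (10, 30, 60):
    t.enter_progress_frame(chunk, "Copied {} more units".format(chunk))
t.destroy()
```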
10118cdd99a0e7b11c266ded5491099b82f5634c
|
src/cloudant/design_document.py
|
src/cloudant/design_document.py
|
#!/usr/bin/env python
"""
_design_document_

Class representing a Cloudant design document

"""
from .document import Document
from .views import View


class DesignDocument(Document):
    """
    _DesignDocument_

    Specialisation of a document to be a design doc containing
    the various views, shows, lists etc.

    """
    def __init__(self, cloudant_database, document_id=None):
        super(DesignDocument, self).__init__(cloudant_database, document_id)

    @property
    def views(self):
        """accessor property for views dictionary"""
        return self['views']

    def add_view(self, view_name, map_func, reduce_func=None):
        """
        _add_view_

        Add a new view to this design document, given a map function
        and optional reduce function.

        :param view_name: Name of the view
        :param map_func: str or Code object containing js map function
        :param reduce_func: str or Code object containing js reduce function

        """
        v = View(self, view_name, map_func, reduce_func)
        self.views[view_name] = v
        self.save()

    def fetch(self):
        """
        _fetch_

        Grab the remote document and build the View structure

        """
        super(DesignDocument, self).fetch()
        for view_name, view_def in self.get('views', {}).iteritems():
            self['views'][view_name] = View(
                self,
                view_name,
                view_def.get('map'),
                view_def.get('reduce')
            )

    def iterviews(self):
        """
        _iterviews_

        Iterate over the (view name, view instance) pairs

        """
        for view_name, view in self.views.iteritems():
            yield view_name, view

    def list_views(self):
        """
        _list_views_

        Return a list of available views on this design doc

        """
        return self.views.keys()

    def get_view(self, view_name):
        """
        _get_view_

        Get a specific view by name.

        """
        return self.views.get(view_name)

    def info(self):
        """
        Retrieve the view info data, returns dictionary

        GET databasename/_design/test/_info
        """
        raise NotImplementedError("info not yet implemented")

    def cleanup(self):
        """
        POST /some_database/_view_cleanup
        """
        raise NotImplementedError("cleanup not yet implemented")

    def compact(self):
        """
        POST /some_database/_compact/designname
        """
        raise NotImplementedError("compact not yet implemented")
|
Copy DesignDocument class to its own module
|
Copy DesignDocument class to its own module
|
Python
|
apache-2.0
|
cloudant/python-cloudant
|
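As a usage illustration only; the database handle, document id, and view definition below are placeholders rather than code from the module:

```python
# Assumes `db` is a database object accepted by Document's constructor.
ddoc = DesignDocument(db, '_design/example')
ddoc.fetch()  # load the existing views from the server
ddoc.add_view(
    'by_type',
    'function(doc) { emit(doc.type, null); }'  # JS map function as a string
)
print(ddoc.list_views())
```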
08b90a093d900576f1f6f11d288335e37cecda3d
|
umibukela/migrations/0012_surveykoboproject.py
|
umibukela/migrations/0012_surveykoboproject.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('umibukela', '0011_cycleresultset_published'),
    ]

    operations = [
        migrations.CreateModel(
            name='SurveyKoboProject',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.TextField(unique=True)),
                ('survey', models.ForeignKey(to='umibukela.Survey')),
            ],
        ),
    ]
|
Add SurveyKoboProject which optionally indicates a form/submission origin
|
Add SurveyKoboProject which optionally indicates a form/submission origin
|
Python
|
mit
|
Code4SA/umibukela,Code4SA/umibukela,Code4SA/umibukela,Code4SA/umibukela
|
ebee9620201ad9609e8b6d6a5322672d4bd52479
|
tests/help_test.py
|
tests/help_test.py
|
import os
import subprocess
import sys
import unittest

import mopidy


class HelpTest(unittest.TestCase):

    def test_help_has_mopidy_options(self):
        mopidy_dir = os.path.dirname(mopidy.__file__)
        args = [sys.executable, mopidy_dir, '--help']
        process = subprocess.Popen(args, stdout=subprocess.PIPE)
        output = process.communicate()[0]
        self.assert_('--version' in output)
        self.assert_('--help' in output)
        self.assert_('--quiet' in output)
        self.assert_('--verbose' in output)
        self.assert_('--save-debug-log' in output)
        self.assert_('--list-settings' in output)
|
Test that --help returns the options we expect it to
|
Test that --help returns the options we expect it to
|
Python
|
apache-2.0
|
hkariti/mopidy,glogiotatidis/mopidy,tkem/mopidy,tkem/mopidy,ZenithDK/mopidy,mokieyue/mopidy,vrs01/mopidy,tkem/mopidy,jmarsik/mopidy,pacificIT/mopidy,jodal/mopidy,vrs01/mopidy,swak/mopidy,rawdlite/mopidy,ali/mopidy,quartz55/mopidy,bacontext/mopidy,bacontext/mopidy,glogiotatidis/mopidy,priestd09/mopidy,kingosticks/mopidy,ali/mopidy,swak/mopidy,diandiankan/mopidy,jodal/mopidy,pacificIT/mopidy,adamcik/mopidy,liamw9534/mopidy,rawdlite/mopidy,hkariti/mopidy,dbrgn/mopidy,kingosticks/mopidy,mokieyue/mopidy,quartz55/mopidy,jcass77/mopidy,jcass77/mopidy,bencevans/mopidy,vrs01/mopidy,SuperStarPL/mopidy,mopidy/mopidy,bacontext/mopidy,dbrgn/mopidy,liamw9534/mopidy,jmarsik/mopidy,bencevans/mopidy,SuperStarPL/mopidy,vrs01/mopidy,pacificIT/mopidy,diandiankan/mopidy,pacificIT/mopidy,hkariti/mopidy,dbrgn/mopidy,ali/mopidy,abarisain/mopidy,woutervanwijk/mopidy,SuperStarPL/mopidy,woutervanwijk/mopidy,priestd09/mopidy,abarisain/mopidy,bacontext/mopidy,rawdlite/mopidy,jcass77/mopidy,diandiankan/mopidy,swak/mopidy,mokieyue/mopidy,ZenithDK/mopidy,quartz55/mopidy,ZenithDK/mopidy,adamcik/mopidy,mokieyue/mopidy,dbrgn/mopidy,jmarsik/mopidy,glogiotatidis/mopidy,mopidy/mopidy,diandiankan/mopidy,SuperStarPL/mopidy,priestd09/mopidy,tkem/mopidy,ZenithDK/mopidy,hkariti/mopidy,ali/mopidy,jodal/mopidy,jmarsik/mopidy,bencevans/mopidy,quartz55/mopidy,kingosticks/mopidy,mopidy/mopidy,swak/mopidy,bencevans/mopidy,adamcik/mopidy,glogiotatidis/mopidy,rawdlite/mopidy
|
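One portability caveat: without text mode, communicate() returns bytes on Python 3, so the substring checks above would fail there. A version-agnostic variant of the same probe (a sketch, not from the repository):

```python
import os
import subprocess
import sys

import mopidy

mopidy_dir = os.path.dirname(mopidy.__file__)
# universal_newlines=True decodes stdout to str on both Python 2 and 3.
process = subprocess.Popen([sys.executable, mopidy_dir, '--help'],
                           stdout=subprocess.PIPE,
                           universal_newlines=True)
output = process.communicate()[0]
assert '--version' in output and '--help' in output
```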
d5f34e856d492fbc670eb3020bf2dcaef298cfdb
|
get_percentage_of_unmergeable_PRs.py
|
get_percentage_of_unmergeable_PRs.py
|
#!/usr/bin/env python3
"""Print the percentage of unmergeable PRs and some associated stats."""
import github_tools

Repo = github_tools.get_repo()
merge_true = 0
merge_false = 0
nr_prs = 0
pulls = Repo.get_pulls('open')

for p in pulls:
    nr_prs += 1
    print("nr " + str(p.number) + ", mergeable:" + str(p.mergeable))
    if p.mergeable:
        merge_true += 1
    else:
        merge_false += 1

# sanity check
assert nr_prs == (merge_true + merge_false)

print('Open PRs: ' + str(nr_prs) +
      '\nMergeable: ' + str(merge_true) +
      '\nUnmergeable: ' + str(merge_false) +
      '\nPercentage unmergeable: ' +
      '%.2f' % (100.0 * merge_false / nr_prs))
|
Add first version of percent unmergeable PRs script.
|
Add first version of percent unmergeable PRs script.
|
Python
|
mit
|
bilderbuchi/OF_repo_utilities
|
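GitHub computes mergeability asynchronously, so a pull request's mergeable attribute can be None on first read, and the loop above then counts it as unmergeable. Assuming github_tools wraps PyGithub, a stricter classifier could re-fetch before deciding (a sketch, not part of the script):

```python
def is_mergeable(pull):
    """Classify a PR, re-fetching once if GitHub has not yet
    computed mergeability (pull.mergeable is None)."""
    if pull.mergeable is None:
        pull.update()  # refresh the object from the API
    return bool(pull.mergeable)
```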
3b7dd989f8b2366a47810e0c1cb6daf76a3c806c
|
opps/core/tests/test_cache.py
|
opps/core/tests/test_cache.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from unittest import TestCase

from django.http import HttpRequest

from opps.core.cache import cache_page


class DecoratorsTest(TestCase):

    def test_cache_page_new_style(self):
        """
        Test that we can call cache_page the new way
        """
        def my_view(request):
            return "response"

        my_view_cached = cache_page(123)(my_view)
        self.assertEqual(my_view_cached(HttpRequest()), "response")

        my_view_cached2 = cache_page(123, key_prefix="test")(my_view)
        self.assertEqual(my_view_cached2(HttpRequest()), "response")
|
Create test cache page new style
|
Create test cache page new style
|
Python
|
mit
|
opps/opps,williamroot/opps,opps/opps,jeanmask/opps,opps/opps,YACOWS/opps,jeanmask/opps,jeanmask/opps,williamroot/opps,YACOWS/opps,YACOWS/opps,williamroot/opps,jeanmask/opps,williamroot/opps,YACOWS/opps,opps/opps
|
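The test invokes the decorator factory directly; in view code it would normally be applied with decorator syntax, mirroring Django's stock cache_page (the timeout and prefix below are arbitrary):

```python
from opps.core.cache import cache_page


@cache_page(60 * 15, key_prefix="home")
def home(request):
    return "response"
```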
85359a24d844c19db9d07c268185c9a0310a181d
|
nova/db/sqlalchemy/migrate_repo/versions/178_add_index_to_compute_node_stats.py
|
nova/db/sqlalchemy/migrate_repo/versions/178_add_index_to_compute_node_stats.py
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table

TABLE_NAME = 'compute_node_stats'
IDX_NAME = 'compute_node_stats_node_id_and_deleted_idx'


def upgrade(migrate_engine):
    """Add an index to make the scheduler lookups of compute_nodes and joined
    compute_node_stats more efficient.
    """
    meta = MetaData(bind=migrate_engine)
    cn_stats = Table(TABLE_NAME, meta, autoload=True)
    idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
    idx.create(migrate_engine)


def downgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    cn_stats = Table(TABLE_NAME, meta, autoload=True)
    idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
    idx.drop(migrate_engine)
|
Add an index to compute_node_stats
|
Add an index to compute_node_stats
This will improve the performance of scheduler lookups of compute nodes
and their associated stats.
bug 1177487
Change-Id: I0e04849543916e874ea0ddfc76c3d70ff71c09d0
|
Python
|
apache-2.0
|
ntt-sic/nova,tianweizhang/nova,raildo/nova,mmnelemane/nova,cyx1231st/nova,affo/nova,double12gzh/nova,alvarolopez/nova,rrader/nova-docker-plugin,CEG-FYP-OpenStack/scheduler,mikalstill/nova,DirectXMan12/nova-hacking,shahar-stratoscale/nova,akash1808/nova_test_latest,OpenAcademy-OpenStack/nova-scheduler,Juniper/nova,TwinkleChawla/nova,tudorvio/nova,sridevikoushik31/nova,bigswitch/nova,JioCloud/nova,vladikr/nova_drafts,barnsnake351/nova,Yusuke1987/openstack_template,eharney/nova,felixma/nova,mahak/nova,devendermishrajio/nova_test_latest,dawnpower/nova,whitepages/nova,qwefi/nova,fnordahl/nova,rahulunair/nova,devoid/nova,zhimin711/nova,varunarya10/nova_test_latest,Juniper/nova,bclau/nova,bigswitch/nova,sebrandon1/nova,ruslanloman/nova,CCI-MOC/nova,edulramirez/nova,phenoxim/nova,tealover/nova,takeshineshiro/nova,jianghuaw/nova,yosshy/nova,tangfeixiong/nova,cloudbau/nova,hanlind/nova,jmarsik/nova,pacificIT/nova,jodal/mopidy,projectcalico/calico-nova,Metaswitch/calico-nova,raildo/nova,vmturbo/nova,mmnelemane/nova,klmitch/nova,alexandrucoman/vbox-nova-driver,kimjaejoong/nova,cyx1231st/nova,mikalstill/nova,CloudServer/nova,rickerc/nova_audit,luogangyi/bcec-nova,gooddata/openstack-nova,zaina/nova,joker946/nova,Stavitsky/nova,plumgrid/plumgrid-nova,MountainWei/nova,TieWei/nova,eonpatapon/nova,Yusuke1987/openstack_template,isyippee/nova,Francis-Liu/animated-broccoli,devendermishrajio/nova_test_latest,viggates/nova,CEG-FYP-OpenStack/scheduler,petrutlucian94/nova_dev,affo/nova,tudorvio/nova,Juniper/nova,sacharya/nova,qwefi/nova,ntt-sic/nova,DirectXMan12/nova-hacking,tanglei528/nova,rahulunair/nova,adelina-t/nova,silenceli/nova,virtualopensystems/nova,shootstar/novatest,klmitch/nova,scripnichenko/nova,leilihh/novaha,rajalokan/nova,angdraug/nova,gooddata/openstack-nova,isyippee/nova,mikalstill/nova,ewindisch/nova,watonyweng/nova,eonpatapon/nova,blueboxgroup/nova,silenceli/nova,badock/nova,thomasem/nova,shail2810/nova,citrix-openstack-build/nova,apporc/nova,shahar-stratoscale/nova,cernops/nova,rajalokan/nova,eharney/nova,scripnichenko/nova,jeffrey4l/nova,zhimin711/nova,CloudServer/nova,leilihh/novaha,akash1808/nova,belmiromoreira/nova,rajalokan/nova,MountainWei/nova,TieWei/nova,eonpatapon/nova,Yusuke1987/openstack_template,isyippee/nova,Francis-Liu/animated-broccoli,devendermishrajio/nova_test_latest,viggates/nova,CEG-FYP-OpenStack/scheduler,petrutlucian94/nova_dev,affo/nova,tudorvio/nova,Juniper/nova,sacharya/nova,qwefi/nova,ntt-sic/nova,DirectXMan12/nova-hacking,tanglei528/nova,rahulunair/nova,adelina-t/nova,silenceli/nova,virtualopensystems/nova,shootstar/novatest,projectcalico/calico-nova,varunarya10/nova_test_latest,dawnpower/nova,zaina/nova,BeyondTheClouds/nova,rajalokan/nova,berrange/nova,eayunstack/nova,alaski/nova,mgagne/nova,cloudbase/nova,virtualopensystems/nova,saleemjaveds/https-github.com-openstack-nova,vladikr/nova_drafts,maelnor/nova,LoHChina/nova,cloudbase/nova-virtualbox,NeCTAR-RC/nova,TwinkleChawla/nova,orbitfp7/nova,cloudbau/nova,JianyuWang/nova,yatinkumbhare/openstack-nova,imsplitbit/nova
|
Add an index to compute_node_stats
This will improve the performance of scheduler lookups of compute nodes
and their associated stats.
bug 1177487
Change-Id: I0e04849543916e874ea0ddfc76c3d70ff71c09d0
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
TABLE_NAME = 'compute_node_stats'
IDX_NAME = 'compute_node_stats_node_id_and_deleted_idx'
def upgrade(migrate_engine):
"""Add an index to make the scheduler lookups of compute_nodes and joined
compute_node_stats more efficient.
"""
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.drop(migrate_engine)
|
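Illustrative aside, not part of the record above: a minimal sketch of the lookup shape the new (compute_node_id, deleted) index is meant to serve, in the same SQLAlchemy reflection style as the migration. The function name and the deleted == 0 convention are assumptions for illustration.
# Hypothetical query sketch; table and column names are taken from the migration above.
from sqlalchemy import MetaData, Table, select
def fetch_live_stats(engine, node_id):
    meta = MetaData(bind=engine)
    stats = Table('compute_node_stats', meta, autoload=True)
    # Both filter columns are covered by the composite index added above.
    query = select([stats]).where(
        (stats.c.compute_node_id == node_id) & (stats.c.deleted == 0)
    )
    return engine.execute(query).fetchall()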
<commit_before><commit_msg>Add an index to compute_node_stats
This will improve the performance of scheduler lookups of compute nodes
and their associated stats.
bug 1177487
Change-Id: I0e04849543916e874ea0ddfc76c3d70ff71c09d0<commit_after>
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
TABLE_NAME = 'compute_node_stats'
IDX_NAME = 'compute_node_stats_node_id_and_deleted_idx'
def upgrade(migrate_engine):
"""Add an index to make the scheduler lookups of compute_nodes and joined
compute_node_stats more efficient.
"""
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.drop(migrate_engine)
|
Add an index to compute_node_stats
This will improve the performance of scheduler lookups of compute nodes
and their associated stats.
bug 1177487
Change-Id: I0e04849543916e874ea0ddfc76c3d70ff71c09d0# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
TABLE_NAME = 'compute_node_stats'
IDX_NAME = 'compute_node_stats_node_id_and_deleted_idx'
def upgrade(migrate_engine):
"""Add an index to make the scheduler lookups of compute_nodes and joined
compute_node_stats more efficient.
"""
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.drop(migrate_engine)
|
<commit_before><commit_msg>Add an index to compute_node_stats
This will improve the performance of scheduler lookups of compute nodes
and their associated stats.
bug 1177487
Change-Id: I0e04849543916e874ea0ddfc76c3d70ff71c09d0<commit_after># Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
TABLE_NAME = 'compute_node_stats'
IDX_NAME = 'compute_node_stats_node_id_and_deleted_idx'
def upgrade(migrate_engine):
"""Add an index to make the scheduler lookups of compute_nodes and joined
compute_node_stats more efficient.
"""
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
cn_stats = Table(TABLE_NAME, meta, autoload=True)
idx = Index(IDX_NAME, cn_stats.c.compute_node_id, cn_stats.c.deleted)
idx.drop(migrate_engine)
|
|
d23e5e4cde838c1aa46b0e085955cdb959e6755a
|
tools/win32build/doall.py
|
tools/win32build/doall.py
|
import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
Add top script to generate binaries from scratch.
|
Add top script to generate binaries from scratch.
|
Python
|
bsd-3-clause
|
dato-code/numpy,pbrod/numpy,madphysicist/numpy,Srisai85/numpy,MaPePeR/numpy,MaPePeR/numpy,dimasad/numpy,rhythmsosad/numpy,astrofrog/numpy,andsor/numpy,seberg/numpy,Anwesh43/numpy,mathdd/numpy,behzadnouri/numpy,matthew-brett/numpy,BMJHayward/numpy,Eric89GXL/numpy,jorisvandenbossche/numpy,ekalosak/numpy,naritta/numpy,ESSS/numpy,ViralLeadership/numpy,immerrr/numpy,WillieMaddox/numpy,musically-ut/numpy,jschueller/numpy,Linkid/numpy,dimasad/numpy,Eric89GXL/numpy,jankoslavic/numpy,stuarteberg/numpy,ogrisel/numpy,ewmoore/numpy,endolith/numpy,MichaelAquilina/numpy,stefanv/numpy,numpy/numpy,Anwesh43/numpy,kiwifb/numpy,shoyer/numpy,dwf/numpy,numpy/numpy,madphysicist/numpy,WarrenWeckesser/numpy,bringingheavendown/numpy,pelson/numpy,WarrenWeckesser/numpy,felipebetancur/numpy,NextThought/pypy-numpy,Dapid/numpy,andsor/numpy,embray/numpy,mingwpy/numpy,kiwifb/numpy,immerrr/numpy,ChanderG/numpy,mindw/numpy,githubmlai/numpy,skwbc/numpy,ChristopherHogan/numpy,ogrisel/numpy,bringingheavendown/numpy,tdsmith/numpy,hainm/numpy,groutr/numpy,moreati/numpy,ContinuumIO/numpy,pyparallel/numpy,BabeNovelty/numpy,kirillzhuravlev/numpy,ahaldane/numpy,gmcastil/numpy,mindw/numpy,joferkington/numpy,empeeu/numpy,b-carter/numpy,pdebuyl/numpy,Srisai85/numpy,cjermain/numpy,ddasilva/numpy,leifdenby/numpy,dato-code/numpy,larsmans/numpy,Yusa95/numpy,brandon-rhodes/numpy,mhvk/numpy,hainm/numpy,CMartelLML/numpy,Srisai85/numpy,brandon-rhodes/numpy,jakirkham/numpy,ViralLeadership/numpy,dwf/numpy,immerrr/numpy,embray/numpy,seberg/numpy,ChanderG/numpy,endolith/numpy,dwillmer/numpy,AustereCuriosity/numpy,madphysicist/numpy,BMJHayward/numpy,jschueller/numpy,ahaldane/numpy,ddasilva/numpy,astrofrog/numpy,WillieMaddox/numpy,nguyentu1602/numpy,kirillzhuravlev/numpy,dwf/numpy,felipebetancur/numpy,rherault-insa/numpy,andsor/numpy,kiwifb/numpy,pizzathief/numpy,charris/numpy,njase/numpy,tynn/numpy,njase/numpy,rajathkumarmp/numpy,sinhrks/numpy,pizzathief/numpy,numpy/numpy-refactor,shoyer/numpy,sinhrks/numpy,sinhrks/numpy,ChanderG/numpy,astrofrog/numpy,rgommers/numpy,numpy/numpy,rudimeier/numpy,astrofrog/numpy,CMartelLML/numpy,MichaelAquilina/numpy,Anwesh43/numpy,charris/numpy,rmcgibbo/numpy,GrimDerp/numpy,pdebuyl/numpy,sigma-random/numpy,skymanaditya1/numpy,ddasilva/numpy,shoyer/numpy,empeeu/numpy,SiccarPoint/numpy,mathdd/numpy,sonnyhu/numpy,dato-code/numpy,rajathkumarmp/numpy,yiakwy/numpy,matthew-brett/numpy,skymanaditya1/numpy,bringingheavendown/numpy,rherault-insa/numpy,stuarteberg/numpy,SunghanKim/numpy,sonnyhu/numpy,tacaswell/numpy,sigma-random/numpy,SunghanKim/numpy,nguyentu1602/numpy,simongibbons/numpy,rhythmsosad/numpy,Yusa95/numpy,KaelChen/numpy,AustereCuriosity/numpy,mathdd/numpy,skwbc/numpy,ChristopherHogan/numpy,pdebuyl/numpy,pelson/numpy,ajdawson/numpy,trankmichael/numpy,pizzathief/numpy,grlee77/numpy,jschueller/numpy,brandon-rhodes/numpy,utke1/numpy,ahaldane/numpy,gmcastil/numpy,ContinuumIO/numpy,dch312/numpy,SunghanKim/numpy,bmorris3/numpy,BabeNovelty/numpy,anntzer/numpy,ssanderson/numpy,joferkington/numpy,pdebuyl/numpy,drasmuss/numpy,madphysicist/numpy,bertrand-l/numpy,bertrand-l/numpy,NextThought/pypy-numpy,yiakwy/numpy,kirillzhuravlev/numpy,GaZ3ll3/numpy,MSeifert04/numpy,anntzer/numpy,yiakwy/numpy,mathdd/numpy,numpy/numpy-refactor,dch312/numpy,Dapid/numpy,NextThought/pypy-numpy,jorisvandenbossche/numpy,nbeaver/numpy,mhvk/numpy,GrimDerp/numpy,ajdawson/numpy,rmcgibbo/numpy,numpy/numpy-refactor,utke1/numpy,cowlicks/numpy,maniteja123/numpy,embray/numpy,rherault-insa/numpy,solarjoe/numpy,immerrr/numpy,pelson/numpy,gfyoung/numpy,tacaswell/numpy,WarrenWeckesser/numpy,jakirkham/numpy,simongibbons/numpy,cowlicks/numpy,trankmichael/numpy,nguyentu1602/numpy,stefanv/numpy,ogrisel/numpy,leifdenby/numpy,GrimDerp/numpy,stefanv/numpy,Eric89GXL/numpy,ogrisel/numpy,MaPePeR/numpy,ewmoore/numpy,mortada/numpy,dch312/numpy,mingwpy/numpy,SiccarPoint/numpy,dwillmer/numpy,dimasad/numpy,abalkin/numpy,hainm/numpy,mortada/numpy,nbeaver/numpy,SunghanKim/numpy,moreati/numpy,MaPePeR/numpy,GrimDerp/numpy,rmcgibbo/numpy,matthew-brett/numpy,rgommers/numpy,GaZ3ll3/numpy,numpy/numpy,cowlicks/numpy,MSeifert04/numpy,mingwpy/numpy,jankoslavic/numpy,trankmichael/numpy,Yusa95/numpy,chatcannon/numpy,pbrod/numpy,dimasad/numpy,grlee77/numpy,rgommers/numpy,nguyentu1602/numpy,Linkid/numpy,Linkid/numpy,chiffa/numpy,drasmuss/numpy,jonathanunderwood/numpy,astrofrog/numpy,MichaelAquilina/numpy,grlee77/numpy,jakirkham/numpy,ekalosak/numpy,sigma-random/numpy,cjermain/numpy,endolith/numpy,tacaswell/numpy,MSeifert04/numpy,skymanaditya1/numpy,endolith/numpy,CMartelLML/numpy,MSeifert04/numpy,githubmlai/numpy,ajdawson/numpy,ajdawson/numpy,has2k1/numpy,groutr/numpy,mortada/numpy,bmorris3/numpy,BabeNovelty/numpy,pizzathief/numpy,CMartelLML/numpy,empeeu/numpy,skymanaditya1/numpy,Linkid/numpy,seberg/numpy,WarrenWeckesser/numpy,tdsmith/numpy,has2k1/numpy,matthew-brett/numpy,dwf/numpy,gmcastil/numpy,MichaelAquilina/numpy,ContinuumIO/numpy,rhythmsosad/numpy,seberg/numpy,embray/numpy,ESSS/numpy,cjermain/numpy,larsmans/numpy,stuarteberg/numpy,bertrand-l/numpy,chiffa/numpy,NextThought/pypy-numpy,BMJHayward/numpy,trankmichael/numpy,GaZ3ll3/numpy,githubmlai/numpy,mhvk/numpy,mindw/numpy,felipebetancur/numpy,argriffing/numpy,musically-ut/numpy,pyparallel/numpy,ogrisel/numpy,SiccarPoint/numpy,ewmoore/numpy,jorisvandenbossche/numpy,jonathanunderwood/numpy,sonnyhu/numpy,solarjoe/numpy,argriffing/numpy,naritta/numpy,mhvk/numpy,numpy/numpy-refactor,ekalosak/numpy,BMJHayward/numpy,KaelChen/numpy,pbrod/numpy,Dapid/numpy,chatcannon/numpy,musically-ut/numpy,madphysicist/numpy,abalkin/numpy,chatcannon/numpy,githubmlai/numpy,mhvk/numpy,bmorris3/numpy,Anwesh43/numpy,ahaldane/numpy,dwf/numpy,matthew-brett/numpy,has2k1/numpy,mingwpy/numpy,sigma-random/numpy,maniteja123/numpy,joferkington/numpy,mwiebe/numpy,rudimeier/numpy,gfyoung/numpy,jorisvandenbossche/numpy,stefanv/numpy,dato-code/numpy,rudimeier/numpy,njase/numpy,joferkington/numpy,tynn/numpy,Srisai85/numpy,musically-ut/numpy,stefanv/numpy,charris/numpy,hainm/numpy,pizzathief/numpy,tynn/numpy,stuarteberg/numpy,has2k1/numpy,pbrod/numpy,ESSS/numpy,mattip/numpy,dwillmer/numpy,shoyer/numpy,ViralLeadership/numpy,yiakwy/numpy,jakirkham/numpy,Yusa95/numpy,jakirkham/numpy,grlee77/numpy,chiffa/numpy,grlee77/numpy,rhythmsosad/numpy,ewmoore/numpy,dch312/numpy,naritta/numpy,pyparallel/numpy,anntzer/numpy,maniteja123/numpy,mwiebe/numpy,GaZ3ll3/numpy,mindw/numpy,jankoslavic/numpy,leifdenby/numpy,charris/numpy,ChristopherHogan/numpy,tdsmith/numpy,ChristopherHogan/numpy,anntzer/numpy,rmcgibbo/numpy,gfyoung/numpy,andsor/numpy,abalkin/numpy,naritta/numpy,KaelChen/numpy,pelson/numpy,MSeifert04/numpy,ewmoore/numpy,ssanderson/numpy,ChanderG/numpy,mattip/numpy,b-carter/numpy,brandon-rhodes/numpy,WarrenWeckesser/numpy,ssanderson/numpy,dwillmer/numpy,embray/numpy,b-carter/numpy,empeeu/numpy,behzadnouri/numpy,BabeNovelty/numpy,cowlicks/numpy,drasmuss/numpy,WillieMaddox/numpy,jankoslavic/numpy,tdsmith/numpy,mattip/numpy,mortada/numpy,simongibbons/numpy,mattip/numpy,cjermain/numpy,numpy/numpy-refactor,mwiebe/numpy,nbeaver/numpy,shoyer/numpy,pelson/numpy,kirillzhuravlev/numpy,SiccarPoint/numpy,simongibbons/numpy,rajathkumarmp/numpy,skwbc/numpy,jonathanunderwood/numpy,solarjoe/numpy,sinhrks/numpy,groutr/numpy,rudimeier/numpy,behzadnouri/numpy,jorisvandenbossche/numpy,moreati/numpy,utke1/numpy,jschueller/numpy,ahaldane/numpy,larsmans/numpy,larsmans/numpy,rgommers/numpy,Eric89GXL/numpy,felipebetancur/numpy,pbrod/numpy,KaelChen/numpy,simongibbons/numpy,ekalosak/numpy,rajathkumarmp/numpy
|
Add top script to generate binaries from scratch.
|
import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
<commit_before><commit_msg>Add top script to generate binaries from scratch.<commit_after>
|
import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
Add top script to generate binaries from scratch.import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
<commit_before><commit_msg>Add top script to generate binaries from scratch.<commit_after>import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
|
58c64960b1ea646138b0a423cce14bbd86c18391
|
choose_rand_packages.py
|
choose_rand_packages.py
|
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages you want to " +
"choose from")
parser.add_argument("output_file")
parser.add_argument("-n", type=int)
args = parser.parse_args()
packages = [package.strip() for package in
open(args.package_list, 'r').readlines()]
if args.n > len(packages):
args.n = len(packages)
output_file = open(args.output_file, 'w')
rand_packages = np.random.choice(packages, args.n, replace=False)
output_file.write('\n'.join(rand_packages))
output_file.close()
|
Add script to randomly choose n packages
|
Add script to randomly choose n packages
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
|
Python
|
bsd-3-clause
|
ContinuumIO/pypi-conda-builds
|
Add script to randomly choose n packages
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
|
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages you want to " +
"choose from")
parser.add_argument("output_file")
parser.add_argument("-n", type=int)
args = parser.parse_args()
packages = [package.strip() for package in
open(args.package_list, 'r').readlines()]
if args.n > len(packages):
args.n = len(packages)
output_file = open(args.output_file, 'w')
rand_packages = np.random.choice(packages, args.n, replace=False)
output_file.write('\n'.join(rand_packages))
output_file.close()
|
<commit_before><commit_msg>Add script to randomly choose n packages
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com><commit_after>
|
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages you want to " +
"choose from")
parser.add_argument("output_file")
parser.add_argument("-n", type=int)
args = parser.parse_args()
packages = [package.strip() for package in
open(args.package_list, 'r').readlines()]
if args.n > len(packages):
args.n = len(packages)
output_file = open(args.output_file, 'w')
rand_packages = np.random.choice(packages, args.n, replace=False)
output_file.write('\n'.join(rand_packages))
output_file.close()
|
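Illustrative aside, not part of the record above: a hypothetical stdlib-only variant of the same sampling step, using context managers so the file handles are closed deterministically. The function and parameter names are invented for the sketch.
import random
def choose_random_packages(package_list_path, output_path, n):
    with open(package_list_path) as f:
        packages = [line.strip() for line in f]
    n = min(n, len(packages))            # same clamp as the args.n check above
    chosen = random.sample(packages, n)  # sampling without replacement
    with open(output_path, 'w') as out:
        out.write('\n'.join(chosen))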
Add script to randomly choose n packages
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages you want to " +
"choose from")
parser.add_argument("output_file")
parser.add_argument("-n", type=int)
args = parser.parse_args()
packages = [package.strip() for package in
open(args.package_list, 'r').readlines()]
if args.n > len(packages):
args.n = len(packages)
output_file = open(args.output_file, 'w')
rand_packages = np.random.choice(packages, args.n, replace=False)
output_file.write('\n'.join(rand_packages))
output_file.close()
|
<commit_before><commit_msg>Add script to randomly choose n packages
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com><commit_after>import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages you want to " +
"choose from")
parser.add_argument("output_file")
parser.add_argument("-n", type=int)
args = parser.parse_args()
packages = [package.strip() for package in
open(args.package_list, 'r').readlines()]
if args.n > len(packages):
args.n = len(packages)
output_file = open(args.output_file, 'w')
rand_packages = np.random.choice(packages, args.n, replace=False)
output_file.write('\n'.join(rand_packages))
output_file.close()
|
|
7c3cf6ca93e4d234b9766a5c1eafbf0fe4d1104c
|
liblinesdk/api/permissions.py
|
liblinesdk/api/permissions.py
|
# coding: utf-8
import requests
def get(access_token):
h={'Authorization': 'Bearer ' + access_token}
r=requests.get('https://api.line.me/v1/permissions', headers=h)
print 'status code: ', r.status_code
print 'headers: ', r.headers
print 'content: ', r.content
|
Add permission list checking feature
|
feat: Add permission list checking feature
|
Python
|
mit
|
mrexmelle/liblinesdk-py
|
feat: Add permission list checking feature
|
# coding: utf-8
import requests
def get(access_token):
h={'Authorization': 'Bearer ' + access_token}
r=requests.get('https://api.line.me/v1/permissions', headers=h)
print 'status code: ', r.status_code
print 'headers: ', r.headers
print 'content: ', r.content
|
<commit_before><commit_msg>feat: Add permission list checking feature<commit_after>
|
# coding: utf-8
import requests
def get(access_token):
h={'Authorization': 'Bearer ' + access_token}
r=requests.get('https://api.line.me/v1/permissions', headers=h)
print 'status code: ', r.status_code
print 'headers: ', r.headers
print 'content: ', r.content
|
feat: Add permission list checking feature# coding: utf-8
import requests
def get(access_token):
h={'Authorization': 'Bearer ' + access_token}
r=requests.get('https://api.line.me/v1/permissions', headers=h)
print 'status code: ', r.status_code
print 'headers: ', r.headers
print 'content: ', r.content
|
<commit_before><commit_msg>feat: Add permission list checking feature<commit_after># coding: utf-8
import requests
def get(access_token):
h={'Authorization': 'Bearer ' + access_token}
r=requests.get('https://api.line.me/v1/permissions', headers=h)
print 'status code: ', r.status_code
print 'headers: ', r.headers
print 'content: ', r.content
|
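Illustrative aside, not part of the record above: the module is written for Python 2 (bare print statements); a hypothetical Python 3 rendering of the same request would differ only in the print calls.
import requests
def get(access_token):
    headers = {'Authorization': 'Bearer ' + access_token}
    r = requests.get('https://api.line.me/v1/permissions', headers=headers)
    print('status code:', r.status_code)
    print('headers:', r.headers)
    print('content:', r.content)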
|
532b4f9ff9b32ba0b427f015b9a18ebce0b1e91b
|
cms/plugins/snippet/models.py
|
cms/plugins/snippet/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=256, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=255, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
|
Use max_length=255 instead of 256 to be nice to MySQL indexing
|
Use max_length=255 instead of 256 to be nice to MySQL indexing
Signed-off-by: Patrick Lauber <13f2d5de65599746f4871ec052d451e6ee4f4f27@divio.ch>
|
Python
|
bsd-3-clause
|
vad/django-cms,SachaMPS/django-cms,chkir/django-cms,philippze/django-cms,vxsx/django-cms,intip/django-cms,donce/django-cms,benzkji/django-cms,AlexProfi/django-cms,rsalmaso/django-cms,foobacca/django-cms,memnonila/django-cms,robmagee/django-cms,evildmp/django-cms,ScholzVolkmer/django-cms,donce/django-cms,saintbird/django-cms,VillageAlliance/django-cms,pbs/django-cms,divio/django-cms,jrief/django-cms,qnub/django-cms,astagi/django-cms,czpython/django-cms,vad/django-cms,philippze/django-cms,stefanfoulis/django-cms,kk9599/django-cms,SachaMPS/django-cms,webu/django-cms,wuzhihui1123/django-cms,sephii/django-cms,webu/django-cms,wyg3958/django-cms,MagicSolutions/django-cms,memnonila/django-cms,rsalmaso/django-cms,petecummings/django-cms,Jaccorot/django-cms,stefanfoulis/django-cms,isotoma/django-cms,stefanw/django-cms,rryan/django-cms,isotoma/django-cms,SmithsonianEnterprises/django-cms,jrief/django-cms,stefanw/django-cms,rscnt/django-cms,VillageAlliance/django-cms,adaptivelogic/django-cms,jproffitt/django-cms,mkoistinen/django-cms,ScholzVolkmer/django-cms,evildmp/django-cms,datakortet/django-cms,SinnerSchraderMobileMirrors/django-cms,stefanw/django-cms,donce/django-cms,chmberl/django-cms,Jaccorot/django-cms,benzkji/django-cms,nimbis/django-cms,nimbis/django-cms,wyg3958/django-cms,yakky/django-cms,jalaziz/django-cms-grappelli-old,SachaMPS/django-cms,Vegasvikk/django-cms,Livefyre/django-cms,pixbuffer/django-cms,isotoma/django-cms,mkoistinen/django-cms,czpython/django-cms,yakky/django-cms,divio/django-cms,qnub/django-cms,driesdesmet/django-cms,adaptivelogic/django-cms,foobacca/django-cms,kk9599/django-cms,evildmp/django-cms,iddqd1/django-cms,iddqd1/django-cms,leture/django-cms,chmberl/django-cms,intgr/django-cms,irudayarajisawa/django-cms,VillageAlliance/django-cms,timgraham/django-cms,owers19856/django-cms,benzkji/django-cms,ojii/django-cms,Livefyre/django-cms,webu/django-cms,andyzsf/django-cms,datakortet/django-cms,DylannCordel/django-cms,chrisglass/django-cms,intip/django-cms,mkoistinen/django-cms,pancentric/django-cms,intgr/django-cms,SofiaReis/django-cms,benzkji/django-cms,petecummings/django-cms,selecsosi/django-cms,bittner/django-cms,leture/django-cms,frnhr/django-cms,frnhr/django-cms,SofiaReis/django-cms,stefanw/django-cms,iddqd1/django-cms,owers19856/django-cms,pancentric/django-cms,foobacca/django-cms,MagicSolutions/django-cms,vxsx/django-cms,leture/django-cms,foobacca/django-cms,netzkolchose/django-cms,yakky/django-cms,rscnt/django-cms,FinalAngel/django-cms,jproffitt/django-cms,sephii/django-cms,pbs/django-cms,kk9599/django-cms,360youlun/django-cms,emiquelito/django-cms-2.0,nostalgiaz/django-cms,jrief/django-cms,wuzhihui1123/django-cms,dhorelik/django-cms,cyberintruder/django-cms,frnhr/django-cms,chkir/django-cms,sephii/django-cms,vstoykov/django-cms,intgr/django-cms,pixbuffer/django-cms,philippze/django-cms,andyzsf/django-cms,andyzsf/django-cms,vstoykov/django-cms,rscnt/django-cms,FinalAngel/django-cms,intip/django-cms,nimbis/django-cms,jalaziz/django-cms-grappelli-old,takeshineshiro/django-cms,takeshineshiro/django-cms,wuzhihui1123/django-cms,AlexProfi/django-cms,selecsosi/django-cms,andyzsf/django-cms,rryan/django-cms,farhaadila/django-cms,360youlun/django-cms,AlexProfi/django-cms,liuyisiyisi/django-cms,rsalmaso/django-cms,robmagee/django-cms,dhorelik/django-cms,Livefyre/django-cms,jeffreylu9/django-cms,intip/django-cms,sephii/django-cms,Vegasvikk/django-cms,pbs/django-cms,chkir/django-cms,farhaadila/django-cms,keimlink/django-cms,stefanfoulis/django-cms,bittner/django-cms,jeffreylu9/django-cms,datakortet/django-cms,emiquelito/django-cms-2.0,Livefyre/django-cms,irudayarajisawa/django-cms,vxsx/django-cms,jrief/django-cms,saintbird/django-cms,Vegasvikk/django-cms,yakky/django-cms,sznekol/django-cms,cyberintruder/django-cms,czpython/django-cms,cyberintruder/django-cms,nimbis/django-cms,adaptivelogic/django-cms,SinnerSchraderMobileMirrors/django-cms,11craft/django-cms,jeffreylu9/django-cms,sznekol/django-cms,11craft/django-cms,takeshineshiro/django-cms,robmagee/django-cms,MagicSolutions/django-cms,jrclaramunt/django-cms,netzkolchose/django-cms,youprofit/django-cms,wyg3958/django-cms,jrclaramunt/django-cms,chrisglass/django-cms,11craft/django-cms,josjevv/django-cms,Jaccorot/django-cms,isotoma/django-cms,11craft/django-cms,vxsx/django-cms,datakortet/django-cms,divio/django-cms,bittner/django-cms,sznekol/django-cms,DylannCordel/django-cms,frnhr/django-cms,keimlink/django-cms,josjevv/django-cms,timgraham/django-cms,nostalgiaz/django-cms,petecummings/django-cms,rryan/django-cms,rryan/django-cms,czpython/django-cms,keimlink/django-cms,driesdesmet/django-cms,jrclaramunt/django-cms,wuzhihui1123/django-cms,pancentric/django-cms,divio/django-cms,DylannCordel/django-cms,jsma/django-cms,mkoistinen/django-cms,SmithsonianEnterprises/django-cms,selecsosi/django-cms,netzkolchose/django-cms,ojii/django-cms,farhaadila/django-cms,vad/django-cms,jproffitt/django-cms,qnub/django-cms,timgraham/django-cms,nostalgiaz/django-cms,driesdesmet/django-cms,SinnerSchraderMobileMirrors/django-cms,pbs/django-cms,liuyisiyisi/django-cms,irudayarajisawa/django-cms,vstoykov/django-cms,astagi/django-cms,bittner/django-cms,jalaziz/django-cms-grappelli-old,netzkolchose/django-cms,vad/django-cms,nostalgiaz/django-cms,pixbuffer/django-cms,FinalAngel/django-cms,SmithsonianEnterprises/django-cms,chmberl/django-cms
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=256, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
Use max_length=255 instead of 256 to be nice to MySQL indexing
Signed-off-by: Patrick Lauber <13f2d5de65599746f4871ec052d451e6ee4f4f27@divio.ch>
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=255, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
|
<commit_before>from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=256, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
<commit_msg>Use max_length=255 instead of 256 to be nice to MySQL indexing
Signed-off-by: Patrick Lauber <13f2d5de65599746f4871ec052d451e6ee4f4f27@divio.ch><commit_after>
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=255, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
# We don't both with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=256, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
Use max_length=255 instead of 256 to be nice to MySQL indexing
Signed-off-by: Patrick Lauber <13f2d5de65599746f4871ec052d451e6ee4f4f27@divio.ch>from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=255, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
|
<commit_before>from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=256, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
<commit_msg>Use max_length=255 instead of 256 to be nice to MySQL indexing
Signed-off-by: Patrick Lauber <13f2d5de65599746f4871ec052d451e6ee4f4f27@divio.ch><commit_after>from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
import reversion
# Stores the actual data
class Snippet(models.Model):
"""
A snippet of HTML or a Django template
"""
name = models.CharField(_("name"), max_length=255, unique=True)
html = models.TextField(_("HTML"), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
# Plugin model - just a pointer to Snippet
class SnippetPtr(CMSPlugin):
snippet = models.ForeignKey(Snippet)
class Meta:
verbose_name = _("Snippet")
if 'reversion' in settings.INSTALLED_APPS:
    # We don't bother with SnippetPtr, since all the data is actually in Snippet
reversion.register(Snippet)
|
31292e0648cd8c22011de5051030927579b794c8
|
qtpy/tests/test_sip.py
|
qtpy/tests/test_sip.py
|
import pytest
def test_sip():
"""Test the qtpy.sip namespace"""
sip = pytest.importorskip("qtpy.sip")
assert sip.assign is not None
assert sip.cast is not None
assert sip.delete is not None
assert sip.dump is not None
assert sip.enableautoconversion is not None
assert sip.isdeleted is not None
assert sip.ispycreated is not None
assert sip.ispyowned is not None
assert sip.setdeleted is not None
assert sip.settracemask is not None
assert sip.simplewrapper is not None
assert sip.transferback is not None
assert sip.transferto is not None
assert sip.unwrapinstance is not None
assert sip.voidptr is not None
assert sip.wrapinstance is not None
assert sip.wrapper is not None
assert sip.wrappertype is not None
|
Add tests for sip module
|
Add tests for sip module
|
Python
|
mit
|
spyder-ide/qtpy
|
Add tests for sip module
|
import pytest
def test_sip():
"""Test the qtpy.sip namespace"""
sip = pytest.importorskip("qtpy.sip")
assert sip.assign is not None
assert sip.cast is not None
assert sip.delete is not None
assert sip.dump is not None
assert sip.enableautoconversion is not None
assert sip.isdeleted is not None
assert sip.ispycreated is not None
assert sip.ispyowned is not None
assert sip.setdeleted is not None
assert sip.settracemask is not None
assert sip.simplewrapper is not None
assert sip.transferback is not None
assert sip.transferto is not None
assert sip.unwrapinstance is not None
assert sip.voidptr is not None
assert sip.wrapinstance is not None
assert sip.wrapper is not None
assert sip.wrappertype is not None
|
<commit_before><commit_msg>Add tests for sip module<commit_after>
|
import pytest
def test_sip():
"""Test the qtpy.sip namespace"""
sip = pytest.importorskip("qtpy.sip")
assert sip.assign is not None
assert sip.cast is not None
assert sip.delete is not None
assert sip.dump is not None
assert sip.enableautoconversion is not None
assert sip.isdeleted is not None
assert sip.ispycreated is not None
assert sip.ispyowned is not None
assert sip.setdeleted is not None
assert sip.settracemask is not None
assert sip.simplewrapper is not None
assert sip.transferback is not None
assert sip.transferto is not None
assert sip.unwrapinstance is not None
assert sip.voidptr is not None
assert sip.wrapinstance is not None
assert sip.wrapper is not None
assert sip.wrappertype is not None
|
Add tests for sip moduleimport pytest
def test_sip():
"""Test the qtpy.sip namespace"""
sip = pytest.importorskip("qtpy.sip")
assert sip.assign is not None
assert sip.cast is not None
assert sip.delete is not None
assert sip.dump is not None
assert sip.enableautoconversion is not None
assert sip.isdeleted is not None
assert sip.ispycreated is not None
assert sip.ispyowned is not None
assert sip.setdeleted is not None
assert sip.settracemask is not None
assert sip.simplewrapper is not None
assert sip.transferback is not None
assert sip.transferto is not None
assert sip.unwrapinstance is not None
assert sip.voidptr is not None
assert sip.wrapinstance is not None
assert sip.wrapper is not None
assert sip.wrappertype is not None
|
<commit_before><commit_msg>Add tests for sip module<commit_after>import pytest
def test_sip():
"""Test the qtpy.sip namespace"""
sip = pytest.importorskip("qtpy.sip")
assert sip.assign is not None
assert sip.cast is not None
assert sip.delete is not None
assert sip.dump is not None
assert sip.enableautoconversion is not None
assert sip.isdeleted is not None
assert sip.ispycreated is not None
assert sip.ispyowned is not None
assert sip.setdeleted is not None
assert sip.settracemask is not None
assert sip.simplewrapper is not None
assert sip.transferback is not None
assert sip.transferto is not None
assert sip.unwrapinstance is not None
assert sip.voidptr is not None
assert sip.wrapinstance is not None
assert sip.wrapper is not None
assert sip.wrappertype is not None
|
|
d5392679e7ee3aa22b44a5553cca69b632bf5991
|
server/tests/forms/test_RegistrationForm.py
|
server/tests/forms/test_RegistrationForm.py
|
import wtforms_json
import pytest
from forms.RegistrationForm import RegistrationForm
wtforms_json.init()
class TestRegistrationForm:
def test_valid(self):
json = {
'username': 'someusername',
'password': 'password',
'confirm': 'confirm',
'email': 'someemail@email.com'
}
form = RegistrationForm.from_json(json)
assert (form.validate())
|
Add test for valid registration form
|
Add test for valid registration form
|
Python
|
mit
|
ganemone/ontheside,ganemone/ontheside,ganemone/ontheside
|
Add test for valid registration form
|
import wtforms_json
import pytest
from forms.RegistrationForm import RegistrationForm
wtforms_json.init()
class TestRegistrationForm:
def test_valid(self):
json = {
'username': 'someusername',
'password': 'password',
'confirm': 'confirm',
'email': 'someemail@email.com'
}
form = RegistrationForm.from_json(json)
assert (form.validate())
|
<commit_before><commit_msg>Add test for valid registration form<commit_after>
|
import wtforms_json
import pytest
from forms.RegistrationForm import RegistrationForm
wtforms_json.init()
class TestRegistrationForm:
def test_valid(self):
json = {
'username': 'someusername',
'password': 'password',
'confirm': 'confirm',
'email': 'someemail@email.com'
}
form = RegistrationForm.from_json(json)
assert (form.validate())
|
Add test for valid registration formimport wtforms_json
import pytest
from forms.RegistrationForm import RegistrationForm
wtforms_json.init()
class TestRegistrationForm:
def test_valid(self):
json = {
'username': 'someusername',
'password': 'password',
'confirm': 'confirm',
'email': 'someemail@email.com'
}
form = RegistrationForm.from_json(json)
assert (form.validate())
|
<commit_before><commit_msg>Add test for valid registration form<commit_after>import wtforms_json
import pytest
from forms.RegistrationForm import RegistrationForm
wtforms_json.init()
class TestRegistrationForm:
def test_valid(self):
json = {
'username': 'someusername',
'password': 'password',
'confirm': 'confirm',
'email': 'someemail@email.com'
}
form = RegistrationForm.from_json(json)
assert (form.validate())
|
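Illustrative aside, not part of the record above: a hypothetical companion test for a failure path, assuming the imports from the test module above and assuming RegistrationForm validates the email field; all fixture values are invented, and the assertion would need revisiting if the form performs no email validation.
class TestRegistrationFormInvalid:
    def test_bad_email(self):
        json = {
            'username': 'someusername',
            'password': 'password',
            'confirm': 'confirm',
            'email': 'not-an-email'  # assumed to fail an email validator
        }
        form = RegistrationForm.from_json(json)
        assert not form.validate()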
|
d4d3f51482c8422f26317b99378aae90196b3fe5
|
plugins/camera/take_photo.py
|
plugins/camera/take_photo.py
|
outputs = []
files = []
def process_message(data):
if data['channel'].startswith("D"):
outputs.append([data['channel'], "I shall send you an image!" ])
files.append([data['channel'], "image.png" ])
|
Add start of photo plugin
|
Add start of photo plugin
|
Python
|
mit
|
martinpeck/peckbot
|
Add start of photo plugin
|
outputs = []
files = []
def process_message(data):
if data['channel'].startswith("D"):
outputs.append([data['channel'], "I shall send you an image!" ])
files.append([data['channel'], "image.png" ])
|
<commit_before><commit_msg>Add start of photo plugin<commit_after>
|
outputs = []
files = []
def process_message(data):
if data['channel'].startswith("D"):
outputs.append([data['channel'], "I shall send you an image!" ])
files.append([data['channel'], "image.png" ])
|
Add start of photo pluginoutputs = []
files = []
def process_message(data):
if data['channel'].startswith("D"):
outputs.append([data['channel'], "I shall send you an image!" ])
files.append([data['channel'], "image.png" ])
|
<commit_before><commit_msg>Add start of photo plugin<commit_after>outputs = []
files = []
def process_message(data):
if data['channel'].startswith("D"):
outputs.append([data['channel'], "I shall send you an image!" ])
files.append([data['channel'], "image.png" ])
|
|
b43b085b14c1d6f9fc07bf8c4ef6c3dca3c1041f
|
glaciercmd/command_delete_archive_from_vault.py
|
glaciercmd/command_delete_archive_from_vault.py
|
import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError
class CommandDeleteArchiveFromVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[4])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[4])
else:
try:
vault.delete_archive(args[2])
print "Archive deleted: '{}'".format(args[2])
except UnexpectedHTTPResponseError as error:
print "Archive can not be deleted:\n\t {}".format(error)
def accept(self, args):
return len(args) >= 4 and args[0] == 'delete' and args[1] == 'archive' and args[3] == 'from'
def command_init():
return CommandDeleteArchiveFromVault()
|
Add command to delete archives
|
Add command to delete archives
|
Python
|
mit
|
carsonmcdonald/glacier-cmd
|
Add command to delete archives
|
import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError
class CommandDeleteArchiveFromVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[4])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[4])
else:
try:
vault.delete_archive(args[2])
print "Archive deleted: '{}'".format(args[2])
except UnexpectedHTTPResponseError as error:
print "Archive can not be deleted:\n\t {}".format(error)
def accept(self, args):
return len(args) >= 4 and args[0] == 'delete' and args[1] == 'archive' and args[3] == 'from'
def command_init():
return CommandDeleteArchiveFromVault()
|
<commit_before><commit_msg>Add command to delete archives<commit_after>
|
import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError
class CommandDeleteArchiveFromVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[4])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[4])
else:
try:
vault.delete_archive(args[2])
print "Archive deleted: '{}'".format(args[2])
except UnexpectedHTTPResponseError as error:
print "Archive can not be deleted:\n\t {}".format(error)
def accept(self, args):
return len(args) >= 4 and args[0] == 'delete' and args[1] == 'archive' and args[3] == 'from'
def command_init():
return CommandDeleteArchiveFromVault()
|
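Illustrative aside, not part of the record above: from the accept()/execute() pair, the expected token shape appears to be ['delete', 'archive', <archive_id>, 'from', <vault_name>]; note that execute() reads args[4], so five tokens are required even though accept() only checks len(args) >= 4. A hypothetical wiring sketch, assuming command_init from the record above is in scope and that an assumed config file supplies the credentials:
from ConfigParser import ConfigParser  # Python 2, matching the record above
config = ConfigParser()
config.read('glacier.cfg')  # assumed file with a [configuration] section holding aws_key/aws_secret
cmd = command_init()
args = ['delete', 'archive', 'ARCHIVE_ID', 'from', 'my-vault']
if cmd.accept(args):
    cmd.execute(args, config)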
Add command to delete archivesimport boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError
class CommandDeleteArchiveFromVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[4])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[4])
else:
try:
vault.delete_archive(args[2])
print "Archive deleted: '{}'".format(args[2])
except UnexpectedHTTPResponseError as error:
print "Archive can not be deleted:\n\t {}".format(error)
def accept(self, args):
return len(args) >= 4 and args[0] == 'delete' and args[1] == 'archive' and args[3] == 'from'
def command_init():
return CommandDeleteArchiveFromVault()
|
<commit_before><commit_msg>Add command to delete archives<commit_after>import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError
class CommandDeleteArchiveFromVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[4])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[4])
else:
try:
vault.delete_archive(args[2])
print "Archive deleted: '{}'".format(args[2])
except UnexpectedHTTPResponseError as error:
print "Archive can not be deleted:\n\t {}".format(error)
def accept(self, args):
return len(args) >= 4 and args[0] == 'delete' and args[1] == 'archive' and args[3] == 'from'
def command_init():
return CommandDeleteArchiveFromVault()
|
|
1696a97a735b3eb26ddaf445c4258e0faac880a4
|
lintcode/Medium/098_Sort_List.py
|
lintcode/Medium/098_Sort_List.py
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of the linked list.
@return: You should return the head of the sorted linked list,
using constant space complexity.
"""
def sortList(self, head):
# write your code here
# Quick Sort
# if (head is None or head.next is None):
# return head
# if (head.next.next is None):
# dummy = ListNode(0)
# if (head.val > head.next.val):
# dummy.next = head.next
# dummy.next.next = head
# dummy.next.next.next = None
# else:
# dummy.next = head
# return dummy.next
# dummy = ListNode(0)
# dummy.next = head
# mid = head
# tmp = head.next
# left = ListNode(0)
# right = ListNode(0)
# tmpLeft = left
# tmpRight = right
# while (tmp):
# if (tmp.val < mid.val):
# tmpLeft.next = tmp
# tmpLeft = tmpLeft.next
# else:
# tmpRight.next = tmp
# tmpRight = tmpRight.next
# tmp = tmp.next
# tmpLeft.next = None
# tmpRight.next = None
# dummy.next = self.sortList(left.next)
# tmp = dummy
# while (tmp.next):
# tmp = tmp.next
# tmp.next = mid
# tmp.next.next = self.sortList(right.next)
# return dummy.next
# Merge Sort
if (head is None or head.next is None):
return head
slow = head
fast = head
while (fast and fast.next and fast.next.next):
slow = slow.next
fast = fast.next.next
right = slow.next
slow.next = None
left = head
left = self.sortList(left)
right = self.sortList(right)
dummy = ListNode(0)
tmp = dummy
while (left and right):
if (left.val < right.val):
tmp.next = left
left = left.next
else:
tmp.next = right
right = right.next
tmp = tmp.next
if (left is None):
tmp.next = right
else:
tmp.next = left
return dummy.next
|
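Illustrative aside, not part of the record above: a minimal smoke test for the merge-sort path, assuming the Solution class above is in scope. The ListNode definition is restated from the record's own docstring; the helper names are invented.
class ListNode(object):
    def __init__(self, val, next=None):
        self.val = val
        self.next = next
def build(values):
    head = None
    for v in reversed(values):  # prepend so the list preserves input order
        head = ListNode(v, head)
    return head
def to_list(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out
assert to_list(Solution().sortList(build([4, 1, 3, 2]))) == [1, 2, 3, 4]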
Add solution to lintcode question 98
|
Add solution to lintcode question 98
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 98
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of the linked list.
@return: You should return the head of the sorted linked list,
using constant space complexity.
"""
def sortList(self, head):
# write your code here
# Quick Sort
# if (head is None or head.next is None):
# return head
# if (head.next.next is None):
# dummy = ListNode(0)
# if (head.val > head.next.val):
# dummy.next = head.next
# dummy.next.next = head
# dummy.next.next.next = None
# else:
# dummy.next = head
# return dummy.next
# dummy = ListNode(0)
# dummy.next = head
# mid = head
# tmp = head.next
# left = ListNode(0)
# right = ListNode(0)
# tmpLeft = left
# tmpRight = right
# while (tmp):
# if (tmp.val < mid.val):
# tmpLeft.next = tmp
# tmpLeft = tmpLeft.next
# else:
# tmpRight.next = tmp
# tmpRight = tmpRight.next
# tmp = tmp.next
# tmpLeft.next = None
# tmpRight.next = None
# dummy.next = self.sortList(left.next)
# tmp = dummy
# while (tmp.next):
# tmp = tmp.next
# tmp.next = mid
# tmp.next.next = self.sortList(right.next)
# return dummy.next
# Merge Sort
if (head is None or head.next is None):
return head
slow = head
fast = head
while (fast and fast.next and fast.next.next):
slow = slow.next
fast = fast.next.next
right = slow.next
slow.next = None
left = head
left = self.sortList(left)
right = self.sortList(right)
dummy = ListNode(0)
tmp = dummy
while (left and right):
if (left.val < right.val):
tmp.next = left
left = left.next
else:
tmp.next = right
right = right.next
tmp = tmp.next
if (left is None):
tmp.next = right
else:
tmp.next = left
return dummy.next
|
<commit_before><commit_msg>Add solution to lintcode question 98<commit_after>
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of the linked list.
@return: You should return the head of the sorted linked list,
using constant space complexity.
"""
def sortList(self, head):
# write your code here
# Quick Sort
# if (head is None or head.next is None):
# return head
# if (head.next.next is None):
# dummy = ListNode(0)
# if (head.val > head.next.val):
# dummy.next = head.next
# dummy.next.next = head
# dummy.next.next.next = None
# else:
# dummy.next = head
# return dummy.next
# dummy = ListNode(0)
# dummy.next = head
# mid = head
# tmp = head.next
# left = ListNode(0)
# right = ListNode(0)
# tmpLeft = left
# tmpRight = right
# while (tmp):
# if (tmp.val < mid.val):
# tmpLeft.next = tmp
# tmpLeft = tmpLeft.next
# else:
# tmpRight.next = tmp
# tmpRight = tmpRight.next
# tmp = tmp.next
# tmpLeft.next = None
# tmpRight.next = None
# dummy.next = self.sortList(left.next)
# tmp = dummy
# while (tmp.next):
# tmp = tmp.next
# tmp.next = mid
# tmp.next.next = self.sortList(right.next)
# return dummy.next
# Merge Sort
if (head is None or head.next is None):
return head
slow = head
fast = head
while (fast and fast.next and fast.next.next):
slow = slow.next
fast = fast.next.next
right = slow.next
slow.next = None
left = head
left = self.sortList(left)
right = self.sortList(right)
dummy = ListNode(0)
tmp = dummy
while (left and right):
if (left.val < right.val):
tmp.next = left
left = left.next
else:
tmp.next = right
right = right.next
tmp = tmp.next
if (left is None):
tmp.next = right
else:
tmp.next = left
return dummy.next
|
Add solution to lintcode question 98"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of the linked list.
@return: You should return the head of the sorted linked list,
using constant space complexity.
"""
def sortList(self, head):
# write your code here
# Quick Sort
# if (head is None or head.next is None):
# return head
# if (head.next.next is None):
# dummy = ListNode(0)
# if (head.val > head.next.val):
# dummy.next = head.next
# dummy.next.next = head
# dummy.next.next.next = None
# else:
# dummy.next = head
# return dummy.next
# dummy = ListNode(0)
# dummy.next = head
# mid = head
# tmp = head.next
# left = ListNode(0)
# right = ListNode(0)
# tmpLeft = left
# tmpRight = right
# while (tmp):
# if (tmp.val < mid.val):
# tmpLeft.next = tmp
# tmpLeft = tmpLeft.next
# else:
# tmpRight.next = tmp
# tmpRight = tmpRight.next
# tmp = tmp.next
# tmpLeft.next = None
# tmpRight.next = None
# dummy.next = self.sortList(left.next)
# tmp = dummy
# while (tmp.next):
# tmp = tmp.next
# tmp.next = mid
# tmp.next.next = self.sortList(right.next)
# return dummy.next
# Merge Sort
if (head is None or head.next is None):
return head
slow = head
fast = head
while (fast and fast.next and fast.next.next):
slow = slow.next
fast = fast.next.next
right = slow.next
slow.next = None
left = head
left = self.sortList(left)
right = self.sortList(right)
dummy = ListNode(0)
tmp = dummy
while (left and right):
if (left.val < right.val):
tmp.next = left
left = left.next
else:
tmp.next = right
right = right.next
tmp = tmp.next
if (left is None):
tmp.next = right
else:
tmp.next = left
return dummy.next
|
<commit_before><commit_msg>Add solution to lintcode question 98<commit_after>"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: The first node of the linked list.
@return: You should return the head of the sorted linked list,
using constant space complexity.
"""
def sortList(self, head):
# write your code here
# Quick Sort
# if (head is None or head.next is None):
# return head
# if (head.next.next is None):
# dummy = ListNode(0)
# if (head.val > head.next.val):
# dummy.next = head.next
# dummy.next.next = head
# dummy.next.next.next = None
# else:
# dummy.next = head
# return dummy.next
# dummy = ListNode(0)
# dummy.next = head
# mid = head
# tmp = head.next
# left = ListNode(0)
# right = ListNode(0)
# tmpLeft = left
# tmpRight = right
# while (tmp):
# if (tmp.val < mid.val):
# tmpLeft.next = tmp
# tmpLeft = tmpLeft.next
# else:
# tmpRight.next = tmp
# tmpRight = tmpRight.next
# tmp = tmp.next
# tmpLeft.next = None
# tmpRight.next = None
# dummy.next = self.sortList(left.next)
# tmp = dummy
# while (tmp.next):
# tmp = tmp.next
# tmp.next = mid
# tmp.next.next = self.sortList(right.next)
# return dummy.next
# Merge Sort
if (head is None or head.next is None):
return head
slow = head
fast = head
while (fast and fast.next and fast.next.next):
slow = slow.next
fast = fast.next.next
right = slow.next
slow.next = None
left = head
left = self.sortList(left)
right = self.sortList(right)
dummy = ListNode(0)
tmp = dummy
while (left and right):
if (left.val < right.val):
tmp.next = left
left = left.next
else:
tmp.next = right
right = right.next
tmp = tmp.next
if (left is None):
tmp.next = right
else:
tmp.next = left
return dummy.next
|
|
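For quick verification, the merge-sort solution above can be exercised with a small driver along these lines (a sketch: ListNode and Solution are assumed to be defined as in the snippet; from_list and to_list are illustrative helpers, not part of the lintcode API):

# Minimal sanity check for Solution.sortList (sketch).
def from_list(values):
    # Build a singly linked list from a Python list.
    dummy = ListNode(0)
    tail = dummy
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

def to_list(head):
    # Flatten a linked list back into a Python list.
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

assert to_list(Solution().sortList(from_list([4, 2, 1, 3]))) == [1, 2, 3, 4]
assert to_list(Solution().sortList(from_list([]))) == []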
b2f27b6186c118887c42d4bf5834e869dacf6a66
|
galaxy/main/migrations/0058_stargazer_role_not_null.py
|
galaxy/main/migrations/0058_stargazer_role_not_null.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0057_stargazer_role_reference'),
]
operations = [
migrations.AlterField(
model_name='stargazer',
name='role',
field=models.ForeignKey(related_name='stars', to='main.Role'),
),
]
|
Make model field `Stargazer.role` not nullable
|
Make model field `Stargazer.role` not nullable
|
Python
|
apache-2.0
|
chouseknecht/galaxy,chouseknecht/galaxy,chouseknecht/galaxy,chouseknecht/galaxy
|
Make model field `Stargazer.role` not nullable
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0057_stargazer_role_reference'),
]
operations = [
migrations.AlterField(
model_name='stargazer',
name='role',
field=models.ForeignKey(related_name='stars', to='main.Role'),
),
]
|
<commit_before><commit_msg>Make model field `Stargazer.role` not nullable<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0057_stargazer_role_reference'),
]
operations = [
migrations.AlterField(
model_name='stargazer',
name='role',
field=models.ForeignKey(related_name='stars', to='main.Role'),
),
]
|
Make model field `Stargazer.role` not nullable# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0057_stargazer_role_reference'),
]
operations = [
migrations.AlterField(
model_name='stargazer',
name='role',
field=models.ForeignKey(related_name='stars', to='main.Role'),
),
]
|
<commit_before><commit_msg>Make model field `Stargazer.role` not nullable<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0057_stargazer_role_reference'),
]
operations = [
migrations.AlterField(
model_name='stargazer',
name='role',
field=models.ForeignKey(related_name='stars', to='main.Role'),
),
]
|
|
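The AlterField above makes the column NOT NULL without spelling out null=False, because Django foreign keys are non-nullable by default; a sketch of the implied model change (the earlier null=True definition is inferred from the migration's purpose, not taken from the repo):

from django.db import models

class Stargazer(models.Model):
    # Before (inferred): models.ForeignKey('main.Role', related_name='stars', null=True)
    # After, as in the migration -- NOT NULL by default:
    role = models.ForeignKey('main.Role', related_name='stars')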
a9d263f1d8e30f1bc93b51a29679039e76fede9d
|
common/djangoapps/student/management/commands/edraak_migrate_userprofile_name_en.py
|
common/djangoapps/student/management/commands/edraak_migrate_userprofile_name_en.py
|
from __future__ import print_function
from django.core.management.base import BaseCommand
from django.db import connection, migrations
from django.db.utils import OperationalError
from student.models import UserProfile
def check_name_en():
"""
Check whether (name_en) exists or not. This is helpful when migrating (devstack) data, and for data that is
already migrated
:return: (True) if the field exists, (False) otherwise
"""
result = True
    # To avoid writing a (DB-engine-dependent) query to check for the column, we will
# apply a simple SELECT statement that will fail when the column is not present
verified = UserProfile.objects.raw('SELECT id, name_en from auth_userprofile where id = 0;')
try:
for _ in verified:
pass
except OperationalError:
result = False
return result
class Command(BaseCommand):
help = 'Migrate database column (auth_userprofile.name_en) into (auth_userprofile.meta) and drop the column'
def handle(self, *args, **options):
if check_name_en():
print('Database column (name_en) found in (auth_userprofile) table. Applying migration...')
old_data = UserProfile.objects.raw(
"SELECT id, meta, name_en as db_name_en FROM auth_userprofile WHERE (name_en IS NOT NULL) AND (name_en != '');"
)
# Copying from (name_en) to (meta)
for user_profile in old_data:
user_profile.name_en = user_profile.db_name_en
user_profile.save()
# Dropping (name_en) from the table
print('Data copied to (meta) field. Dropping old column (name_en)...')
connection.cursor().execute("ALTER TABLE auth_userprofile DROP COLUMN name_en;")
print('All done!')
else:
            print('Database column (name_en) not found. Migration has already been applied!')
|
Add a Command to Migrate UserProfile.name_en
|
Add a Command to Migrate UserProfile.name_en
|
Python
|
agpl-3.0
|
Edraak/edraak-platform,Edraak/edraak-platform,Edraak/edraak-platform,Edraak/edraak-platform
|
Add a Command to Migrate UserProfile.name_en
|
from __future__ import print_function
from django.core.management.base import BaseCommand
from django.db import connection, migrations
from django.db.utils import OperationalError
from student.models import UserProfile
def check_name_en():
"""
Check whether (name_en) exists or not. This is helpful when migrating (devstack) data, and for data that is
already migrated
:return: (True) if the field exists, (False) otherwise
"""
result = True
    # To avoid writing a (DB-engine-dependent) query to check for the column, we will
# apply a simple SELECT statement that will fail when the column is not present
verified = UserProfile.objects.raw('SELECT id, name_en from auth_userprofile where id = 0;')
try:
for _ in verified:
pass
except OperationalError:
result = False
return result
class Command(BaseCommand):
help = 'Migrate database column (auth_userprofile.name_en) into (auth_userprofile.meta) and drop the column'
def handle(self, *args, **options):
if check_name_en():
print('Database column (name_en) found in (auth_userprofile) table. Applying migration...')
old_data = UserProfile.objects.raw(
"SELECT id, meta, name_en as db_name_en FROM auth_userprofile WHERE (name_en IS NOT NULL) AND (name_en != '');"
)
# Copying from (name_en) to (meta)
for user_profile in old_data:
user_profile.name_en = user_profile.db_name_en
user_profile.save()
# Dropping (name_en) from the table
print('Data copied to (meta) field. Dropping old column (name_en)...')
connection.cursor().execute("ALTER TABLE auth_userprofile DROP COLUMN name_en;")
print('All done!')
else:
            print('Database column (name_en) not found. Migration has already been applied!')
|
<commit_before><commit_msg>Add a Command to Migrate UserProfile.name_en<commit_after>
|
from __future__ import print_function
from django.core.management.base import BaseCommand
from django.db import connection, migrations
from django.db.utils import OperationalError
from student.models import UserProfile
def check_name_en():
"""
Check whether (name_en) exists or not. This is helpful when migrating (devstack) data, and for data that is
already migrated
:return: (True) if the field exists, (False) otherwise
"""
result = True
    # To avoid writing a (DB-engine-dependent) query to check for the column, we will
# apply a simple SELECT statement that will fail when the column is not present
verified = UserProfile.objects.raw('SELECT id, name_en from auth_userprofile where id = 0;')
try:
for _ in verified:
pass
except OperationalError:
result = False
return result
class Command(BaseCommand):
help = 'Migrate database column (auth_userprofile.name_en) into (auth_userprofile.meta) and drop the column'
def handle(self, *args, **options):
if check_name_en():
print('Database column (name_en) found in (auth_userprofile) table. Applying migration...')
old_data = UserProfile.objects.raw(
"SELECT id, meta, name_en as db_name_en FROM auth_userprofile WHERE (name_en IS NOT NULL) AND (name_en != '');"
)
# Copying from (name_en) to (meta)
for user_profile in old_data:
user_profile.name_en = user_profile.db_name_en
user_profile.save()
# Dropping (name_en) from the table
print('Data copied to (meta) field. Dropping old column (name_en)...')
connection.cursor().execute("ALTER TABLE auth_userprofile DROP COLUMN name_en;")
print('All done!')
else:
            print('Database column (name_en) not found. Migration has already been applied!')
|
Add a Command to Migrate UserProfile.name_enfrom __future__ import print_function
from django.core.management.base import BaseCommand
from django.db import connection, migrations
from django.db.utils import OperationalError
from student.models import UserProfile
def check_name_en():
"""
Check whether (name_en) exists or not. This is helpful when migrating (devstack) data, and for data that is
already migrated
:return: (True) if the field exists, (False) otherwise
"""
result = True
    # To avoid writing a (DB-engine-dependent) query to check for the column, we will
# apply a simple SELECT statement that will fail when the column is not present
verified = UserProfile.objects.raw('SELECT id, name_en from auth_userprofile where id = 0;')
try:
for _ in verified:
pass
except OperationalError:
result = False
return result
class Command(BaseCommand):
help = 'Migrate database column (auth_userprofile.name_en) into (auth_userprofile.meta) and drop the column'
def handle(self, *args, **options):
if check_name_en():
print('Database column (name_en) found in (auth_userprofile) table. Applying migration...')
old_data = UserProfile.objects.raw(
"SELECT id, meta, name_en as db_name_en FROM auth_userprofile WHERE (name_en IS NOT NULL) AND (name_en != '');"
)
# Copying from (name_en) to (meta)
for user_profile in old_data:
user_profile.name_en = user_profile.db_name_en
user_profile.save()
# Dropping (name_en) from the table
print('Data copied to (meta) field. Dropping old column (name_en)...')
connection.cursor().execute("ALTER TABLE auth_userprofile DROP COLUMN name_en;")
print('All done!')
else:
            print('Database column (name_en) not found. Migration has already been applied!')
|
<commit_before><commit_msg>Add a Command to Migrate UserProfile.name_en<commit_after>from __future__ import print_function
from django.core.management.base import BaseCommand
from django.db import connection, migrations
from django.db.utils import OperationalError
from student.models import UserProfile
def check_name_en():
"""
Check whether (name_en) exists or not. This is helpful when migrating (devstack) data, and for data that is
already migrated
:return: (True) if the field exists, (False) otherwise
"""
result = True
    # To avoid writing a (DB-engine-dependent) query to check for the column, we will
# apply a simple SELECT statement that will fail when the column is not present
verified = UserProfile.objects.raw('SELECT id, name_en from auth_userprofile where id = 0;')
try:
for _ in verified:
pass
except OperationalError:
result = False
return result
class Command(BaseCommand):
help = 'Migrate database column (auth_userprofile.name_en) into (auth_userprofile.meta) and drop the column'
def handle(self, *args, **options):
if check_name_en():
print('Database column (name_en) found in (auth_userprofile) table. Applying migration...')
old_data = UserProfile.objects.raw(
"SELECT id, meta, name_en as db_name_en FROM auth_userprofile WHERE (name_en IS NOT NULL) AND (name_en != '');"
)
# Copying from (name_en) to (meta)
for user_profile in old_data:
user_profile.name_en = user_profile.db_name_en
user_profile.save()
# Dropping (name_en) from the table
print('Data copied to (meta) field. Dropping old column (name_en)...')
connection.cursor().execute("ALTER TABLE auth_userprofile DROP COLUMN name_en;")
print('All done!')
else:
            print('Database column (name_en) not found. Migration has already been applied!')
|
|
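An alternative to the failing-SELECT probe in check_name_en() above is to ask the database catalog directly; a sketch using information_schema (assumes a backend such as MySQL or PostgreSQL that exposes it):

from django.db import connection

def column_exists(table, column):
    # Catalog-based check, instead of relying on an OperationalError.
    cursor = connection.cursor()
    cursor.execute(
        "SELECT COUNT(*) FROM information_schema.columns "
        "WHERE table_name = %s AND column_name = %s",
        [table, column],
    )
    return cursor.fetchone()[0] > 0

# column_exists('auth_userprofile', 'name_en')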
9a32463bd0ee5f90b004fac3cb53a0adfd9b4534
|
src/ggrc/migrations/versions/20160414223705_7a9b715ec504_add_slug_to_assessment_template.py
|
src/ggrc/migrations/versions/20160414223705_7a9b715ec504_add_slug_to_assessment_template.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add slug to assessment template
Create Date: 2016-04-14 22:37:05.135072
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import func
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = "7a9b715ec504"
down_revision = "4e9b71cece04"
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
"assessment_templates",
sa.Column("slug", sa.String(length=250), nullable=False)
)
assessment_templates_table = table(
"assessment_templates",
column('id', sa.Integer),
column('slug', sa.Integer)
)
op.execute(assessment_templates_table.update().values(
slug=func.concat(
op.inline_literal("TEMPLATE-"),
assessment_templates_table.c.id,
),
))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column("assessment_templates", "slug")
|
Add slug column to assessment template table
|
Add slug column to assessment template table
|
Python
|
apache-2.0
|
NejcZupec/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core
|
Add slug column to assessment template table
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add slug to assessment template
Create Date: 2016-04-14 22:37:05.135072
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import func
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = "7a9b715ec504"
down_revision = "4e9b71cece04"
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
"assessment_templates",
sa.Column("slug", sa.String(length=250), nullable=False)
)
assessment_templates_table = table(
"assessment_templates",
column('id', sa.Integer),
column('slug', sa.Integer)
)
op.execute(assessment_templates_table.update().values(
slug=func.concat(
op.inline_literal("TEMPLATE-"),
assessment_templates_table.c.id,
),
))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column("assessment_templates", "slug")
|
<commit_before><commit_msg>Add slug column to assessment template table<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add slug to assessment template
Create Date: 2016-04-14 22:37:05.135072
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import func
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = "7a9b715ec504"
down_revision = "4e9b71cece04"
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
"assessment_templates",
sa.Column("slug", sa.String(length=250), nullable=False)
)
assessment_templates_table = table(
"assessment_templates",
column('id', sa.Integer),
column('slug', sa.Integer)
)
op.execute(assessment_templates_table.update().values(
slug=func.concat(
op.inline_literal("TEMPLATE-"),
assessment_templates_table.c.id,
),
))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column("assessment_templates", "slug")
|
Add slug column to assessment template table# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add slug to assessment template
Create Date: 2016-04-14 22:37:05.135072
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import func
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = "7a9b715ec504"
down_revision = "4e9b71cece04"
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
"assessment_templates",
sa.Column("slug", sa.String(length=250), nullable=False)
)
assessment_templates_table = table(
"assessment_templates",
column('id', sa.Integer),
column('slug', sa.Integer)
)
op.execute(assessment_templates_table.update().values(
slug=func.concat(
op.inline_literal("TEMPLATE-"),
assessment_templates_table.c.id,
),
))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column("assessment_templates", "slug")
|
<commit_before><commit_msg>Add slug column to assessment template table<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""
Add slug to assessment template
Create Date: 2016-04-14 22:37:05.135072
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column
from sqlalchemy.sql import func
from sqlalchemy.sql import table
# revision identifiers, used by Alembic.
revision = "7a9b715ec504"
down_revision = "4e9b71cece04"
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
"assessment_templates",
sa.Column("slug", sa.String(length=250), nullable=False)
)
assessment_templates_table = table(
"assessment_templates",
column('id', sa.Integer),
column('slug', sa.Integer)
)
op.execute(assessment_templates_table.update().values(
slug=func.concat(
op.inline_literal("TEMPLATE-"),
assessment_templates_table.c.id,
),
))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column("assessment_templates", "slug")
|
|
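The bulk update in upgrade() compiles down to a single statement, so every pre-existing row receives a slug of the form TEMPLATE-<id>; roughly the following SQL (a sketch of what Alembic emits, not taken from the migration itself):

# Rough SQL equivalent of the update() expression above (sketch);
# every pre-existing row gets slug = 'TEMPLATE-<id>'.
backfill_sql = "UPDATE assessment_templates SET slug = CONCAT('TEMPLATE-', id)"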
22abc8c12bea0202fede404c81a6218100e6d0aa
|
src/ggrc/migrations/versions/20160417113424_4f0077b3393f_add_commentable_for_assessments.py
|
src/ggrc/migrations/versions/20160417113424_4f0077b3393f_add_commentable_for_assessments.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""Request comment notifications.
Create Date: 2016-03-21 11:07:07.327760
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '33459bd8b70d'
down_revision = '3914dbf78dc1'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
'assessments',
sa.Column('recipients', sa.String(length=250), nullable=True)
)
op.add_column(
'assessments',
sa.Column('send_by_default', sa.Boolean(), nullable=True)
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column('assessments', 'send_by_default')
op.drop_column('assessments', 'recipients')
|
Add comment columns to assessments table
|
Add comment columns to assessments table
|
Python
|
apache-2.0
|
j0gurt/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core
|
Add comment columns to assessments table
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""Request comment notifications.
Create Date: 2016-03-21 11:07:07.327760
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '33459bd8b70d'
down_revision = '3914dbf78dc1'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
'assessments',
sa.Column('recipients', sa.String(length=250), nullable=True)
)
op.add_column(
'assessments',
sa.Column('send_by_default', sa.Boolean(), nullable=True)
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column('assessments', 'send_by_default')
op.drop_column('assessments', 'recipients')
|
<commit_before><commit_msg>Add comment columns to assessments table<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""Request comment notifications.
Create Date: 2016-03-21 11:07:07.327760
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '33459bd8b70d'
down_revision = '3914dbf78dc1'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
'assessments',
sa.Column('recipients', sa.String(length=250), nullable=True)
)
op.add_column(
'assessments',
sa.Column('send_by_default', sa.Boolean(), nullable=True)
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column('assessments', 'send_by_default')
op.drop_column('assessments', 'recipients')
|
Add comment columns to assessments table# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""Request comment notifications.
Create Date: 2016-03-21 11:07:07.327760
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '33459bd8b70d'
down_revision = '3914dbf78dc1'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
'assessments',
sa.Column('recipients', sa.String(length=250), nullable=True)
)
op.add_column(
'assessments',
sa.Column('send_by_default', sa.Boolean(), nullable=True)
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column('assessments', 'send_by_default')
op.drop_column('assessments', 'recipients')
|
<commit_before><commit_msg>Add comment columns to assessments table<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""Request comment notifications.
Create Date: 2016-03-21 11:07:07.327760
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '33459bd8b70d'
down_revision = '3914dbf78dc1'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column(
'assessments',
sa.Column('recipients', sa.String(length=250), nullable=True)
)
op.add_column(
'assessments',
sa.Column('send_by_default', sa.Boolean(), nullable=True)
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.drop_column('assessments', 'send_by_default')
op.drop_column('assessments', 'recipients')
|
|
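On the model side, the two columns added above correspond to declarations along these lines (a sketch; the class and table layout are assumptions, only the column names and types come from the migration):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Assessment(Base):
    __tablename__ = 'assessments'
    id = sa.Column(sa.Integer, primary_key=True)
    # Columns added by the migration above:
    recipients = sa.Column(sa.String(250), nullable=True)
    send_by_default = sa.Column(sa.Boolean(), nullable=True)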
0c08e612be2516c3a9b53a641b1a982c609d3913
|
dipy/core/tests/test_qball.py
|
dipy/core/tests/test_qball.py
|
""" Testing qball
"""
import numpy as np
import dipy.core.qball as qball
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.testing import parametric
@parametric
def test_real_sph_harm():
real_sh = qball.real_sph_harm(0, 0, 0, 0)
yield assert_true(True)
yield assert_false(True)
|
TEST - skeleton of tests for qball
|
TEST - skeleton of tests for qball
|
Python
|
bsd-3-clause
|
sinkpoint/dipy,matthieudumont/dipy,rfdougherty/dipy,maurozucchelli/dipy,FrancoisRheaultUS/dipy,Messaoud-Boudjada/dipy,jyeatman/dipy,mdesco/dipy,maurozucchelli/dipy,maurozucchelli/dipy,samuelstjean/dipy,beni55/dipy,Messaoud-Boudjada/dipy,StongeEtienne/dipy,villalonreina/dipy,JohnGriffiths/dipy,jyeatman/dipy,beni55/dipy,sinkpoint/dipy,StongeEtienne/dipy,JohnGriffiths/dipy,samuelstjean/dipy,rfdougherty/dipy,nilgoyyou/dipy,FrancoisRheaultUS/dipy,villalonreina/dipy,matthieudumont/dipy,nilgoyyou/dipy,mdesco/dipy,demianw/dipy,oesteban/dipy,demianw/dipy,oesteban/dipy,samuelstjean/dipy
|
TEST - skeleton of tests for qball
|
""" Testing qball
"""
import numpy as np
import dipy.core.qball as qball
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.testing import parametric
@parametric
def test_real_sph_harm():
real_sh = qball.real_sph_harm(0, 0, 0, 0)
yield assert_true(True)
yield assert_false(True)
|
<commit_before><commit_msg>TEST - skeleton of tests for qball<commit_after>
|
""" Testing qball
"""
import numpy as np
import dipy.core.qball as qball
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.testing import parametric
@parametric
def test_real_sph_harm():
real_sh = qball.real_sph_harm(0, 0, 0, 0)
yield assert_true(True)
yield assert_false(True)
|
TEST - skeleton of tests for qball""" Testing qball
"""
import numpy as np
import dipy.core.qball as qball
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.testing import parametric
@parametric
def test_real_sph_harm():
real_sh = qball.real_sph_harm(0, 0, 0, 0)
yield assert_true(True)
yield assert_false(True)
|
<commit_before><commit_msg>TEST - skeleton of tests for qball<commit_after>""" Testing qball
"""
import numpy as np
import dipy.core.qball as qball
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.testing import parametric
@parametric
def test_real_sph_harm():
real_sh = qball.real_sph_harm(0, 0, 0, 0)
yield assert_true(True)
yield assert_false(True)
|
|
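The one call exercised by the skeleton has a known closed form: the real spherical harmonic of degree and order zero is the constant Y_0^0 = 1/(2*sqrt(pi)), roughly 0.2821, regardless of the angles, which makes it a natural first concrete assertion (a sketch; the exact argument order of real_sph_harm is an assumption):

import numpy as np

# Y_0^0 is constant, so the angular arguments (0, 0) do not matter here.
expected = 1.0 / (2.0 * np.sqrt(np.pi))  # ~0.28209479
# yield assert_array_almost_equal(qball.real_sph_harm(0, 0, 0, 0), expected)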
10c6add532215a8acb1a690010298c4c7c91517c
|
find_dups.py
|
find_dups.py
|
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("As a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
Add script to email users of name-conflicting packages.
|
Add script to email users of name-conflicting packages.
|
Python
|
bsd-3-clause
|
pydotorg/pypi,pydotorg/pypi,pydotorg/pypi,pydotorg/pypi
|
Add script to email users of name-conflicting packages.
|
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("As a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
<commit_before><commit_msg>Add script to email users of name-conflicting packages.<commit_after>
|
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("As a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
Add script to email users of name-conflicting packages.import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("As a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
<commit_before><commit_msg>Add script to email users of name-conflicting packages.<commit_after>import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
    f.write("You have currently registered the following two packages,\n")
    f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
    f.write("As a recent policy change, we are now rejecting this kind of\n")
    f.write("setup. Please remove one of the packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
|
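Piping to /usr/lib/sendmail ties the script to one MTA layout; a portable variant of mail_dup using only the standard library (a sketch: the localhost SMTP server is an assumption, the sender address is carried over from the original):

import smtplib
from email.mime.text import MIMEText

def mail_dup_smtp(email, package1, package2):
    # Same message as mail_dup above, delivered via SMTP instead of a
    # sendmail pipe.
    body = ("Dear Package Owner,\n\n"
            "You have currently registered the following two packages,\n"
            "which differ only in case:\n\n%s\n%s\n\n"
            "As a recent policy change, we are now rejecting this kind of\n"
            "setup. Please remove one of the packages.\n\n"
            "Kind regards,\nMartin v. Loewis\n" % (package1, package2))
    msg = MIMEText(body)
    msg['To'] = email
    msg['From'] = 'martin@v.loewis.de'
    msg['Subject'] = 'Please cleanup PyPI package names'
    server = smtplib.SMTP('localhost')
    server.sendmail(msg['From'], [email], msg.as_string())
    server.quit()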
ee8067047e86b8f6aa8581b8b8f18e45383ce03e
|
talkoohakemisto/migrations/versions/1ef4e5f61dac_add_finnish_collations_to_all_text_columns.py
|
talkoohakemisto/migrations/versions/1ef4e5f61dac_add_finnish_collations_to_all_text_columns.py
|
"""Add Finnish collations to all text columns
Revision ID: 1ef4e5f61dac
Revises: 485b2296735
Create Date: 2014-02-09 21:51:35.842781
"""
# revision identifiers, used by Alembic.
revision = '1ef4e5f61dac'
down_revision = '485b2296735'
from alembic import op
def upgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
def downgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
'''
)
|
Add Finnish collation to all text columns
|
Add Finnish collation to all text columns
|
Python
|
mit
|
talkoopaiva/talkoohakemisto-api
|
Add Finnish collation to all text columns
|
"""Add Finnish collations to all text columns
Revision ID: 1ef4e5f61dac
Revises: 485b2296735
Create Date: 2014-02-09 21:51:35.842781
"""
# revision identifiers, used by Alembic.
revision = '1ef4e5f61dac'
down_revision = '485b2296735'
from alembic import op
def upgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
def downgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
'''
)
|
<commit_before><commit_msg>Add Finnish collation to all text columns<commit_after>
|
"""Add Finnish collations to all text columns
Revision ID: 1ef4e5f61dac
Revises: 485b2296735
Create Date: 2014-02-09 21:51:35.842781
"""
# revision identifiers, used by Alembic.
revision = '1ef4e5f61dac'
down_revision = '485b2296735'
from alembic import op
def upgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
def downgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
'''
)
|
Add Finnish collation to all text columns"""Add Finnish collations to all text columns
Revision ID: 1ef4e5f61dac
Revises: 485b2296735
Create Date: 2014-02-09 21:51:35.842781
"""
# revision identifiers, used by Alembic.
revision = '1ef4e5f61dac'
down_revision = '485b2296735'
from alembic import op
def upgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
def downgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
'''
)
|
<commit_before><commit_msg>Add Finnish collation to all text columns<commit_after>"""Add Finnish collations to all text columns
Revision ID: 1ef4e5f61dac
Revises: 485b2296735
Create Date: 2014-02-09 21:51:35.842781
"""
# revision identifiers, used by Alembic.
revision = '1ef4e5f61dac'
down_revision = '485b2296735'
from alembic import op
def upgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
COLLATE "fi_FI.utf8"
'''
)
def downgrade():
op.execute(
'''
ALTER TABLE municipality
ALTER COLUMN name
TYPE varchar(20)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work_type
ALTER COLUMN name
TYPE varchar(50)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN name
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN organizer
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN description
TYPE text
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN street_address
TYPE varchar(100)
'''
)
op.execute(
'''
ALTER TABLE voluntary_work
            ALTER COLUMN contact_email
TYPE varchar(100)
'''
)
|
|
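After the upgrade runs, the per-column collations can be verified straight from the catalog (a sketch; PostgreSQL's information_schema exposes collation_name for each column):

# Sketch: confirm which columns picked up the fi_FI.utf8 collation.
check_sql = """
    SELECT table_name, column_name, collation_name
    FROM information_schema.columns
    WHERE table_name IN ('municipality', 'voluntary_work_type', 'voluntary_work')
      AND collation_name IS NOT NULL
"""
# e.g. run check_sql in psql, or via op.get_bind().execute(check_sql)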
0d979e1ea235014470f1357331130da46488eb66
|
test_against_full_data_set.py
|
test_against_full_data_set.py
|
from csvreader import read_patient_csv
from patient_solver import solve_for_patient
from patient_state import PatientState
def test_against_real_data():
    patients = read_patient_csv()
params = PatientState.schnider_params()
for patient in patients[6:7]:
results = solve_for_patient(patient, params)
for result in results["predicted_and_measured"]:
print "predicted: %f, measured: %f" % result
if __name__ == '__main__':
test_against_real_data()
|
Add a test which compares the schnider params against the full data set
|
Add a test which compares the schnider params against the full data set
|
Python
|
mit
|
JMathiszig-Lee/Propofol
|
Add a test which compares the schnider params against the full data set
|
from csvreader import read_patient_csv
from patient_solver import solve_for_patient
from patient_state import PatientState
def test_against_real_data():
    patients = read_patient_csv()
params = PatientState.schnider_params()
for patient in patients[6:7]:
results = solve_for_patient(patient, params)
for result in results["predicted_and_measured"]:
print "predicted: %f, measured: %f" % result
if __name__ == '__main__':
test_against_real_data()
|
<commit_before><commit_msg>Add a test which compares the schnider params against the full data set<commit_after>
|
from csvreader import read_patient_csv
from patient_solver import solve_for_patient
from patient_state import PatientState
def test_against_real_data():
    patients = read_patient_csv()
params = PatientState.schnider_params()
for patient in patients[6:7]:
results = solve_for_patient(patient, params)
for result in results["predicted_and_measured"]:
print "predicted: %f, measured: %f" % result
if __name__ == '__main__':
test_against_real_data()
|
Add a test which compares the schnider params against the full data setfrom csvreader import read_patient_csv
from patient_solver import solve_for_patient
from patient_state import PatientState
def test_against_real_data():
    patients = read_patient_csv()
params = PatientState.schnider_params()
for patient in patients[6:7]:
results = solve_for_patient(patient, params)
for result in results["predicted_and_measured"]:
print "predicted: %f, measured: %f" % result
if __name__ == '__main__':
test_against_real_data()
|
<commit_before><commit_msg>Add a test which compares the schnider params against the full data set<commit_after>from csvreader import read_patient_csv
from patient_solver import solve_for_patient
from patient_state import PatientState
def test_against_real_data():
    patients = read_patient_csv()
params = PatientState.schnider_params()
for patient in patients[6:7]:
results = solve_for_patient(patient, params)
for result in results["predicted_and_measured"]:
print "predicted: %f, measured: %f" % result
if __name__ == '__main__':
test_against_real_data()
|
|
9763156fbf2ccfbc5679c2264593917aa416bc24
|
tests/unit/test_soundcloud_track.py
|
tests/unit/test_soundcloud_track.py
|
from nose.tools import * # noqa
import datetime
from pmg.models.soundcloud_track import SoundcloudTrack
from pmg.models import db, File, Event, EventFile
from tests import PMGTestCase
class TestUser(PMGTestCase):
def test_get_unstarted_query(self):
event = Event(
date=datetime.datetime.today(),
title="Test event",
type="committee-meeting",
)
file = File(
title="Test Audio",
file_mime="audio/mp3",
file_path="tmp/file",
file_bytes="1",
origname="Test file",
description="Test file",
playtime="3min",
# event_files=db.relationship("EventFile", lazy=True),
)
event_file = EventFile(
event=event,
file=file
)
db.session.add(event)
db.session.add(file)
db.session.add(event_file)
db.session.commit()
query = SoundcloudTrack.get_unstarted_query()
        self.assertEqual(1, query.count())
self.assertIn(file, query.all())
|
Add unit test for Soundcloud get_unstarted_query function
|
Add unit test for Soundcloud get_unstarted_query function
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add unit test for Soundcloud get_unstarted_query function
|
from nose.tools import * # noqa
import datetime
from pmg.models.soundcloud_track import SoundcloudTrack
from pmg.models import db, File, Event, EventFile
from tests import PMGTestCase
class TestUser(PMGTestCase):
def test_get_unstarted_query(self):
event = Event(
date=datetime.datetime.today(),
title="Test event",
type="committee-meeting",
)
file = File(
title="Test Audio",
file_mime="audio/mp3",
file_path="tmp/file",
file_bytes="1",
origname="Test file",
description="Test file",
playtime="3min",
# event_files=db.relationship("EventFile", lazy=True),
)
event_file = EventFile(
event=event,
file=file
)
db.session.add(event)
db.session.add(file)
db.session.add(event_file)
db.session.commit()
query = SoundcloudTrack.get_unstarted_query()
        self.assertEqual(1, query.count())
self.assertIn(file, query.all())
|
<commit_before><commit_msg>Add unit test for Soundcloud get_unstarted_query function<commit_after>
|
from nose.tools import * # noqa
import datetime
from pmg.models.soundcloud_track import SoundcloudTrack
from pmg.models import db, File, Event, EventFile
from tests import PMGTestCase
class TestUser(PMGTestCase):
def test_get_unstarted_query(self):
event = Event(
date=datetime.datetime.today(),
title="Test event",
type="committee-meeting",
)
file = File(
title="Test Audio",
file_mime="audio/mp3",
file_path="tmp/file",
file_bytes="1",
origname="Test file",
description="Test file",
playtime="3min",
# event_files=db.relationship("EventFile", lazy=True),
)
event_file = EventFile(
event=event,
file=file
)
db.session.add(event)
db.session.add(file)
db.session.add(event_file)
db.session.commit()
query = SoundcloudTrack.get_unstarted_query()
        self.assertEqual(1, query.count())
self.assertIn(file, query.all())
|
Add unit test for Soundcloud get_unstarted_query functionfrom nose.tools import * # noqa
import datetime
from pmg.models.soundcloud_track import SoundcloudTrack
from pmg.models import db, File, Event, EventFile
from tests import PMGTestCase
class TestUser(PMGTestCase):
def test_get_unstarted_query(self):
event = Event(
date=datetime.datetime.today(),
title="Test event",
type="committee-meeting",
)
file = File(
title="Test Audio",
file_mime="audio/mp3",
file_path="tmp/file",
file_bytes="1",
origname="Test file",
description="Test file",
playtime="3min",
# event_files=db.relationship("EventFile", lazy=True),
)
event_file = EventFile(
event=event,
file=file
)
db.session.add(event)
db.session.add(file)
db.session.add(event_file)
db.session.commit()
query = SoundcloudTrack.get_unstarted_query()
        self.assertEqual(1, query.count())
self.assertIn(file, query.all())
|
<commit_before><commit_msg>Add unit test for Soundcloud get_unstarted_query function<commit_after>from nose.tools import * # noqa
import datetime
from pmg.models.soundcloud_track import SoundcloudTrack
from pmg.models import db, File, Event, EventFile
from tests import PMGTestCase
class TestUser(PMGTestCase):
def test_get_unstarted_query(self):
event = Event(
date=datetime.datetime.today(),
title="Test event",
type="committee-meeting",
)
file = File(
title="Test Audio",
file_mime="audio/mp3",
file_path="tmp/file",
file_bytes="1",
origname="Test file",
description="Test file",
playtime="3min",
# event_files=db.relationship("EventFile", lazy=True),
)
event_file = EventFile(
event=event,
file=file
)
db.session.add(event)
db.session.add(file)
db.session.add(event_file)
db.session.commit()
query = SoundcloudTrack.get_unstarted_query()
        self.assertEqual(1, query.count())
self.assertIn(file, query.all())
|
|
3c8e0de0c5e39ee773ff4860d5dff651741e31fa
|
plugin/tests/test_mockcontext.py
|
plugin/tests/test_mockcontext.py
|
from cloudify.mocks import (MockCloudifyContext,
MockNodeInstanceContext,
)
class MockNodeInstanceContextRelationships(MockNodeInstanceContext):
def __init__(self, id=None, runtime_properties=None, relationships=None):
super(MockNodeInstanceContextRelationships, self).__init__(
id,
runtime_properties)
self._relationships = []
if relationships:
for i, rel in enumerate(relationships):
self._relationships.append(MockRelationshipContext(
rel['node_id'],
rel['relationship_properties'],
rel['relationship_type'])
)
self._instance = MockNodeInstanceContext(id, runtime_properties)
@property
def relationships(self):
return self._relationships
@property
def instance(self):
return self._instance
class MockRelationshipContext(object):
def __init__(self, node_id=None, runtime_properties=None, type=None):
self._target = MockNodeInstanceContextRelationships(node_id, runtime_properties)
self._type = type
@property
def target(self):
return self._target
@property
def type(self):
return self._type
class MockCloudifyContextRelationships(MockCloudifyContext):
def __init__(self,
node_id=None,
node_name=None,
blueprint_id=None,
deployment_id=None,
execution_id=None,
properties=None,
runtime_properties=None,
capabilities=None,
related=None,
source=None,
target=None,
operation=None,
resources=None,
provider_context=None,
bootstrap_context=None,
relationships=None):
super(MockCloudifyContextRelationships, self).__init__(
node_id,
node_name,
blueprint_id,
deployment_id,
execution_id,
properties,
capabilities,
related,
source,
target,
operation,
resources,
provider_context,
bootstrap_context,
runtime_properties)
self._instance = MockNodeInstanceContextRelationships(
node_id,
runtime_properties,
relationships)
''' How to use the MockCloudifyContextRelationships :
New Property: relationships, Mandatory: No
Required inputs:
- 'node_id': the id of the node (str),
- 'relationship_type': the type of the relationship (str),
- 'relationship_properties': a dict of properties
ctx = MockCloudifyContextRelationships(
node_id='id_nod',
node_name='mock',
blueprint_id='id_blue',
properties={'prop_1': 'prop_1'},
runtime_properties={'run_prop_1': 'run_prop_1'},
relationships=[ {'node_id': 'id_nod',
'relationship_type': 'type',
'relationship_properties':
{'runtime_relation': 'runtime_relation'}
},
{'node_id': 'id_nod_2',
'relationship_type': 'type_2',
'relationship_properties':
{'runtime_relation': 'runtime_relation_2'}
}
]
)
'''
|
ADD (94) class MockCloudifyContextRelationships to mock relationships behaviour
|
ADD (94) class MockCloudifyContextRelationships to mock relationships behaviour
|
Python
|
apache-2.0
|
fastconnect/cloudify-azure-plugin
|
ADD (94) class MockCloudifyContextRelationships to mock relationships behaviour
|
from cloudify.mocks import (MockCloudifyContext,
MockNodeInstanceContext,
)
class MockNodeInstanceContextRelationships(MockNodeInstanceContext):
def __init__(self, id=None, runtime_properties=None, relationships=None):
super(MockNodeInstanceContextRelationships, self).__init__(
id,
runtime_properties)
self._relationships = []
if relationships:
for i, rel in enumerate(relationships):
self._relationships.append(MockRelationshipContext(
rel['node_id'],
rel['relationship_properties'],
rel['relationship_type'])
)
self._instance = MockNodeInstanceContext(id, runtime_properties)
@property
def relationships(self):
return self._relationships
@property
def instance(self):
return self._instance
class MockRelationshipContext(object):
def __init__(self, node_id=None, runtime_properties=None, type=None):
self._target = MockNodeInstanceContextRelationships(node_id, runtime_properties)
self._type = type
@property
def target(self):
return self._target
@property
def type(self):
return self._type
class MockCloudifyContextRelationships(MockCloudifyContext):
def __init__(self,
node_id=None,
node_name=None,
blueprint_id=None,
deployment_id=None,
execution_id=None,
properties=None,
runtime_properties=None,
capabilities=None,
related=None,
source=None,
target=None,
operation=None,
resources=None,
provider_context=None,
bootstrap_context=None,
relationships=None):
super(MockCloudifyContextRelationships, self).__init__(
node_id,
node_name,
blueprint_id,
deployment_id,
execution_id,
properties,
capabilities,
related,
source,
target,
operation,
resources,
provider_context,
bootstrap_context,
runtime_properties)
self._instance = MockNodeInstanceContextRelationships(
node_id,
runtime_properties,
relationships)
''' How to use MockCloudifyContextRelationships:
New property: relationships, Mandatory: No
Required keys for each relationship dict:
- 'node_id': the id of the node (str),
- 'relationship_type': the type of the relationship (str),
- 'relationship_properties': a dict of properties
ctx = MockCloudifyContextRelationships(
node_id='id_nod',
node_name='mock',
blueprint_id='id_blue',
properties={'prop_1': 'prop_1'},
runtime_properties={'run_prop_1': 'run_prop_1'},
relationships=[ {'node_id': 'id_nod',
'relationship_type': 'type',
'relationship_properties':
{'runtime_relation': 'runtime_relation'}
},
{'node_id': 'id_nod_2',
'relationship_type': 'type_2',
'relationship_properties':
{'runtime_relation': 'runtime_relation_2'}
}
]
)
'''
|
<commit_before><commit_msg>ADD (94) class MockCloudifyContextRelationships to mock relationships behaviour<commit_after>
|
from cloudify.mocks import (MockCloudifyContext,
MockNodeInstanceContext,
)
class MockNodeInstanceContextRelationships(MockNodeInstanceContext):
def __init__(self, id=None, runtime_properties=None, relationships=None):
super(MockNodeInstanceContextRelationships, self).__init__(
id,
runtime_properties)
self._relationships = []
if relationships:
for i, rel in enumerate(relationships):
self._relationships.append(MockRelationshipContext(
rel['node_id'],
rel['relationship_properties'],
rel['relationship_type'])
)
self._instance = MockNodeInstanceContext(id, runtime_properties)
@property
def relationships(self):
return self._relationships
@property
def instance(self):
return self._instance
class MockRelationshipContext(object):
def __init__(self, node_id=None, runtime_properties=None, type=None):
self._target = MockNodeInstanceContextRelationships(node_id, runtime_properties)
self._type = type
@property
def target(self):
return self._target
@property
def type(self):
return self._type
class MockCloudifyContextRelationships(MockCloudifyContext):
def __init__(self,
node_id=None,
node_name=None,
blueprint_id=None,
deployment_id=None,
execution_id=None,
properties=None,
runtime_properties=None,
capabilities=None,
related=None,
source=None,
target=None,
operation=None,
resources=None,
provider_context=None,
bootstrap_context=None,
relationships=None):
super(MockCloudifyContextRelationships, self).__init__(
node_id,
node_name,
blueprint_id,
deployment_id,
execution_id,
properties,
capabilities,
related,
source,
target,
operation,
resources,
provider_context,
bootstrap_context,
runtime_properties)
self._instance = MockNodeInstanceContextRelationships(
node_id,
runtime_properties,
relationships)
''' How to use MockCloudifyContextRelationships:
New property: relationships, Mandatory: No
Required keys for each relationship dict:
- 'node_id': the id of the node (str),
- 'relationship_type': the type of the relationship (str),
- 'relationship_properties': a dict of properties
ctx = MockCloudifyContextRelationships(
node_id='id_nod',
node_name='mock',
blueprint_id='id_blue',
properties={'prop_1': 'prop_1'},
runtime_properties={'run_prop_1': 'run_prop_1'},
relationships=[ {'node_id': 'id_nod',
'relationship_type': 'type',
'relationship_properties':
{'runtime_relation': 'runtime_relation'}
},
{'node_id': 'id_nod_2',
'relationship_type': 'type_2',
'relationship_properties':
{'runtime_relation': 'runtime_relation_2'}
}
]
)
'''
|
ADD (94) class MockCloudifyContextRelationships to mock relationships behaviourfrom cloudify.mocks import (MockCloudifyContext,
MockNodeInstanceContext,
)
class MockNodeInstanceContextRelationships(MockNodeInstanceContext):
def __init__(self, id=None, runtime_properties=None, relationships=None):
super(MockNodeInstanceContextRelationships, self).__init__(
id,
runtime_properties)
self._relationships = []
if relationships:
for i, rel in enumerate(relationships):
self._relationships.append(MockRelationshipContext(
rel['node_id'],
rel['relationship_properties'],
rel['relationship_type'])
)
self._instance = MockNodeInstanceContext(id, runtime_properties)
@property
def relationships(self):
return self._relationships
@property
def instance(self):
return self._instance
class MockRelationshipContext(object):
def __init__(self, node_id=None, runtime_properties=None, type=None):
self._target = MockNodeInstanceContextRelationships(node_id, runtime_properties)
self._type = type
@property
def target(self):
return self._target
@property
def type(self):
return self._type
class MockCloudifyContextRelationships(MockCloudifyContext):
def __init__(self,
node_id=None,
node_name=None,
blueprint_id=None,
deployment_id=None,
execution_id=None,
properties=None,
runtime_properties=None,
capabilities=None,
related=None,
source=None,
target=None,
operation=None,
resources=None,
provider_context=None,
bootstrap_context=None,
relationships=None):
super(MockCloudifyContextRelationships, self).__init__(
node_id,
node_name,
blueprint_id,
deployment_id,
execution_id,
properties,
capabilities,
related,
source,
target,
operation,
resources,
provider_context,
bootstrap_context,
runtime_properties)
self._instance = MockNodeInstanceContextRelationships(
node_id,
runtime_properties,
relationships)
''' How to use MockCloudifyContextRelationships:
New property: relationships, Mandatory: No
Required keys for each relationship dict:
- 'node_id': the id of the node (str),
- 'relationship_type': the type of the relationship (str),
- 'relationship_properties': a dict of properties
ctx = MockCloudifyContextRelationships(
node_id='id_nod',
node_name='mock',
blueprint_id='id_blue',
properties={'prop_1': 'prop_1'},
runtime_properties={'run_prop_1': 'run_prop_1'},
relationships=[ {'node_id': 'id_nod',
'relationship_type': 'type',
'relationship_properties':
{'runtime_relation': 'runtime_relation'}
},
{'node_id': 'id_nod_2',
'relationship_type': 'type_2',
'relationship_properties':
{'runtime_relation': 'runtime_relation_2'}
}
]
)
'''
|
<commit_before><commit_msg>ADD (94) class MockCloudifyContextRelationships to mock relationships behaviour<commit_after>from cloudify.mocks import (MockCloudifyContext,
MockNodeInstanceContext,
)
class MockNodeInstanceContextRelationships(MockNodeInstanceContext):
def __init__(self, id=None, runtime_properties=None, relationships=None):
super(MockNodeInstanceContextRelationships, self).__init__(
id,
runtime_properties)
self._relationships = []
if relationships:
for i, rel in enumerate(relationships):
self._relationships.append(MockRelationshipContext(
rel['node_id'],
rel['relationship_properties'],
rel['relationship_type'])
)
self._instance = MockNodeInstanceContext(id, runtime_properties)
@property
def relationships(self):
return self._relationships
@property
def instance(self):
return self._instance
class MockRelationshipContext(object):
def __init__(self, node_id=None, runtime_properties=None, type=None):
self._target = MockNodeInstanceContextRelationships(node_id, runtime_properties)
self._type = type
@property
def target(self):
return self._target
@property
def type(self):
return self._type
class MockCloudifyContextRelationships(MockCloudifyContext):
def __init__(self,
node_id=None,
node_name=None,
blueprint_id=None,
deployment_id=None,
execution_id=None,
properties=None,
runtime_properties=None,
capabilities=None,
related=None,
source=None,
target=None,
operation=None,
resources=None,
provider_context=None,
bootstrap_context=None,
relationships=None):
super(MockCloudifyContextRelationships, self).__init__(
node_id,
node_name,
blueprint_id,
deployment_id,
execution_id,
properties,
capabilities,
related,
source,
target,
operation,
resources,
provider_context,
bootstrap_context,
runtime_properties)
self._instance = MockNodeInstanceContextRelationships(
node_id,
runtime_properties,
relationships)
''' How to use MockCloudifyContextRelationships:
New property: relationships, Mandatory: No
Required keys for each relationship dict:
- 'node_id': the id of the node (str),
- 'relationship_type': the type of the relationship (str),
- 'relationship_properties': a dict of properties
ctx = MockCloudifyContextRelationships(
node_id='id_nod',
node_name='mock',
blueprint_id='id_blue',
properties={'prop_1': 'prop_1'},
runtime_properties={'run_prop_1': 'run_prop_1'},
relationships=[ {'node_id': 'id_nod',
'relationship_type': 'type',
'relationship_properties':
{'runtime_relation': 'runtime_relation'}
},
{'node_id': 'id_nod_2',
'relationship_type': 'type_2',
'relationship_properties':
{'runtime_relation': 'runtime_relation_2'}
}
]
)
'''
|
|
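A short usage sketch to complement the record above: it builds the mock with a single relationship and walks it the way a plugin test would. The relationship type string and the 'ip' runtime key are illustrative values, not taken from the commit.

ctx = MockCloudifyContextRelationships(
    node_id='server',
    relationships=[{'node_id': 'network',
                    'relationship_type': 'cloudify.relationships.connected_to',
                    'relationship_properties': {'ip': '10.0.0.1'}}])
for rel in ctx.instance.relationships:
    # each entry is a MockRelationshipContext wrapping the target node
    assert rel.type == 'cloudify.relationships.connected_to'
    assert rel.target.instance.runtime_properties['ip'] == '10.0.0.1'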
28cc02a3b2918cf7baca36e15749fce57a76ea66
|
website/management/commands/delete_documents.py
|
website/management/commands/delete_documents.py
|
from django.core.management.base import BaseCommand
from document.models import Document, Kamerstuk
import scraper.documents
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('dossier_id', nargs='+', type=int)
def handle(self, *args, **options):
# dossier_id = 33885
dossier_id = options['dossier_id'][0]
dossier_docs = Document.objects.filter(dossier_id=dossier_id)
for doc in dossier_docs:
doc.delete()
|
Create command to delete documents of a given dossier
|
Create command to delete documents of a given dossier
|
Python
|
mit
|
openkamer/openkamer,openkamer/openkamer,openkamer/openkamer,openkamer/openkamer
|
Create command to delete documents of a given dossier
|
from django.core.management.base import BaseCommand
from document.models import Document, Kamerstuk
import scraper.documents
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('dossier_id', nargs='+', type=int)
def handle(self, *args, **options):
# dossier_id = 33885
dossier_id = options['dossier_id'][0]
dossier_docs = Document.objects.filter(dossier_id=dossier_id)
for doc in dossier_docs:
doc.delete()
|
<commit_before><commit_msg>Create command to delete documents of a given dossier<commit_after>
|
from django.core.management.base import BaseCommand
from document.models import Document, Kamerstuk
import scraper.documents
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('dossier_id', nargs='+', type=int)
def handle(self, *args, **options):
# dossier_id = 33885
dossier_id = options['dossier_id'][0]
dossier_docs = Document.objects.filter(dossier_id=dossier_id)
for doc in dossier_docs:
doc.delete()
|
Create command to delete documents of a given dossierfrom django.core.management.base import BaseCommand
from document.models import Document, Kamerstuk
import scraper.documents
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('dossier_id', nargs='+', type=int)
def handle(self, *args, **options):
# dossier_id = 33885
dossier_id = options['dossier_id'][0]
dossier_docs = Document.objects.filter(dossier_id=dossier_id)
for doc in dossier_docs:
doc.delete()
|
<commit_before><commit_msg>Create command to delete documents of a given dossier<commit_after>from django.core.management.base import BaseCommand
from document.models import Document, Kamerstuk
import scraper.documents
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('dossier_id', nargs='+', type=int)
def handle(self, *args, **options):
# dossier_id = 33885
dossier_id = options['dossier_id'][0]
dossier_docs = Document.objects.filter(dossier_id=dossier_id)
for doc in dossier_docs:
doc.delete()
|
|
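For completeness, a hypothetical way to exercise the command above without a shell; 33885 is the dossier id already quoted in the code's comment, and the equivalent shell call would be "python manage.py delete_documents 33885".

from django.core.management import call_command
# deletes every Document whose dossier_id matches the positional argument
call_command('delete_documents', 33885)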
927396038d147b633bee31988cf1e016258c5320
|
scripts/diff_incar.py
|
scripts/diff_incar.py
|
#!/usr/bin/env python
'''
Created on Nov 12, 2011
'''
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"
import sys
import itertools
from pymatgen.io.vaspio import Incar
from pymatgen.util.string_utils import str_aligned
filepath1 = sys.argv[1]
filepath2 = sys.argv[2]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)
def format_lists(v):
if isinstance(v, (tuple, list)):
return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
return v
d = incar1.diff(incar2)
output = [['SAME PARAMS','', '']]
output.append(['---------------','', ''])
output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
output.append(['','', ''])
output.append(['DIFFERENT PARAM','', ''])
output.append(['---------------','', ''])
output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
|
Add a script for easy diffing of two Incars.
|
Add a script for easy diffing of two Incars.
|
Python
|
mit
|
rousseab/pymatgen,Bismarrck/pymatgen,migueldiascosta/pymatgen,ctoher/pymatgen,Dioptas/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,rousseab/pymatgen,rousseab/pymatgen,yanikou19/pymatgen,sonium0/pymatgen,yanikou19/pymatgen,migueldiascosta/pymatgen,Dioptas/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,sonium0/pymatgen,sonium0/pymatgen,ctoher/pymatgen,yanikou19/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen
|
Add a script for easy diffing of two Incars.
|
#!/usr/bin/env python
'''
Created on Nov 12, 2011
'''
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"
import sys
import itertools
from pymatgen.io.vaspio import Incar
from pymatgen.util.string_utils import str_aligned
filepath1 = sys.argv[1]
filepath2 = sys.argv[2]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)
def format_lists(v):
if isinstance(v, (tuple, list)):
return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
return v
d = incar1.diff(incar2)
output = [['SAME PARAMS','', '']]
output.append(['---------------','', ''])
output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
output.append(['','', ''])
output.append(['DIFFERENT PARAM','', ''])
output.append(['---------------','', ''])
output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
|
<commit_before><commit_msg>Add a script for easy diffing of two Incars.<commit_after>
|
#!/usr/bin/env python
'''
Created on Nov 12, 2011
'''
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"
import sys
import itertools
from pymatgen.io.vaspio import Incar
from pymatgen.util.string_utils import str_aligned
filepath1 = sys.argv[1]
filepath2 = sys.argv[2]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)
def format_lists(v):
if isinstance(v, (tuple, list)):
return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
return v
d = incar1.diff(incar2)
output = [['SAME PARAMS','', '']]
output.append(['---------------','', ''])
output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
output.append(['','', ''])
output.append(['DIFFERENT PARAM','', ''])
output.append(['---------------','', ''])
output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
|
Add a script for easy diffing of two Incars.#!/usr/bin/env python
'''
Created on Nov 12, 2011
'''
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"
import sys
import itertools
from pymatgen.io.vaspio import Incar
from pymatgen.util.string_utils import str_aligned
filepath1 = sys.argv[1]
filepath2 = sys.argv[2]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)
def format_lists(v):
if isinstance(v, (tuple, list)):
return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
return v
d = incar1.diff(incar2)
output = [['SAME PARAMS','', '']]
output.append(['---------------','', ''])
output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
output.append(['','', ''])
output.append(['DIFFERENT PARAM','', ''])
output.append(['---------------','', ''])
output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
|
<commit_before><commit_msg>Add a script for easy diffing of two Incars.<commit_after>#!/usr/bin/env python
'''
Created on Nov 12, 2011
'''
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"
import sys
import itertools
from pymatgen.io.vaspio import Incar
from pymatgen.util.string_utils import str_aligned
filepath1 = sys.argv[1]
filepath2 = sys.argv[2]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)
def format_lists(v):
if isinstance(v, (tuple, list)):
return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
return v
d = incar1.diff(incar2)
output = [['SAME PARAMS','', '']]
output.append(['---------------','', ''])
output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
output.append(['','', ''])
output.append(['DIFFERENT PARAM','', ''])
output.append(['---------------','', ''])
output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
|
|
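An illustrative call of the underlying API the script wraps; the 'Same'/'Different' keys are assumed from the way the script indexes the diff, and the INCAR paths are placeholders.

from pymatgen.io.vaspio import Incar
d = Incar.from_file("run1/INCAR").diff(Incar.from_file("run2/INCAR"))
# parameters whose values disagree between the two files
print(sorted(d['Different']))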
e6d4bc772098fb8a1c0948889c05d51f0cc3a101
|
examples/tests/test_satellites.py
|
examples/tests/test_satellites.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from bdbcontrib.verify_notebook import run_and_verify_notebook
SATELLITES=os.path.join(os.path.dirname(os.path.dirname(__file__)),
"satellites", "Satellites")
def check_satellites(cell):
"""Check pyout cell contents for reasonableness in the Satellites notebook."""
# Should raise on error.
print repr(cell)
def test_satellites():
run_and_verify_notebook(SATELLITES, content_tester=check_satellites)
|
Test the satellites notebook specifically (though no satellite-specific tests yet).
|
Test the satellites notebook specifically (though no satellite-specific tests yet).
|
Python
|
apache-2.0
|
probcomp/bdbcontrib,probcomp/bdbcontrib
|
Test the satellites notebook specifically (though no satellite-specific tests yet).
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from bdbcontrib.verify_notebook import run_and_verify_notebook
SATELLITES=os.path.join(os.path.dirname(os.path.dirname(__file__)),
"satellites", "Satellites")
def check_satellites(cell):
"""Check pyout cell contents for reasonableness in the Satellites notebook."""
# Should raise on error.
print repr(cell)
def test_satellites():
run_and_verify_notebook(SATELLITES, content_tester=check_satellites)
|
<commit_before><commit_msg>Test the satellites notebook specifically (though no satellite-specific tests yet).<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from bdbcontrib.verify_notebook import run_and_verify_notebook
SATELLITES=os.path.join(os.path.dirname(os.path.dirname(__file__)),
"satellites", "Satellites")
def check_satellites(cell):
"""Check pyout cell contents for reasonableness in the Satellites notebook."""
# Should raise on error.
print repr(cell)
def test_satellites():
run_and_verify_notebook(SATELLITES, content_tester=check_satellites)
|
Test the satellites notebook specifically (though no satellite-specific tests yet).# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from bdbcontrib.verify_notebook import run_and_verify_notebook
SATELLITES=os.path.join(os.path.dirname(os.path.dirname(__file__)),
"satellites", "Satellites")
def check_satellites(cell):
"""Check pyout cell contents for reasonableness in the Satellites notebook."""
# Should raise on error.
print repr(cell)
def test_satellites():
run_and_verify_notebook(SATELLITES, content_tester=check_satellites)
|
<commit_before><commit_msg>Test the satellites notebook specifically (though no satellite-specific tests yet).<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from bdbcontrib.verify_notebook import run_and_verify_notebook
SATELLITES=os.path.join(os.path.dirname(os.path.dirname(__file__)),
"satellites", "Satellites")
def check_satellites(cell):
"""Check pyout cell contents for reasonableness in the Satellites notebook."""
# Should raise on error.
print repr(cell)
def test_satellites():
run_and_verify_notebook(SATELLITES, content_tester=check_satellites)
|
|
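A sketch of how the content_tester hook could later grow satellite-specific checks; the traceback assertion is a hypothetical example, not project policy.

def check_satellites(cell):
    """Fail fast if a notebook cell contains an error traceback."""
    text = repr(cell)
    assert 'Traceback' not in text, "notebook cell raised an error"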
59399420a415c3cb890936dcc499ddf81ef0f438
|
features/memberships/querysets.py
|
features/memberships/querysets.py
|
from django.db import models
from django.db.models import Case, When, Value, IntegerField, Sum
from django.utils.timezone import now, timedelta
class MembershipQuerySet(models.QuerySet):
def order_by_gestalt_activity(self, gestalt):
a_week_ago = now() - timedelta(days=7)
a_month_ago = now() - timedelta(days=30)
three_months_ago = now() - timedelta(days=90)
six_months_ago = now() - timedelta(days=180)
content_score = Case(*[
When(
group__associations__content__versions__author=gestalt,
group__associations__content__versions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 48),
(a_month_ago, 32),
(three_months_ago, 16),
(six_months_ago, 8)
)], default=1, output_field=IntegerField())
content_contrib_score = Case(*[
When(
group__associations__content__contributions__author=gestalt,
group__associations__content__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
conversation_contrib_score = Case(*[
When(
group__associations__conversation__contributions__author=gestalt,
group__associations__conversation__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
return self \
.annotate(activity=Sum(
content_score
+ content_contrib_score
+ conversation_contrib_score,
output_field=IntegerField())) \
.order_by('-activity')
|
Add queryset for ordering memberships by activity
|
Add queryset for ordering memberships by activity
|
Python
|
agpl-3.0
|
stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten
|
Add queryset for ordering memberships by activity
|
from django.db import models
from django.db.models import Case, When, Value, IntegerField, Sum
from django.utils.timezone import now, timedelta
class MembershipQuerySet(models.QuerySet):
def order_by_gestalt_activity(self, gestalt):
a_week_ago = now() - timedelta(days=7)
a_month_ago = now() - timedelta(days=30)
three_months_ago = now() - timedelta(days=90)
six_months_ago = now() - timedelta(days=180)
content_score = Case(*[
When(
group__associations__content__versions__author=gestalt,
group__associations__content__versions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 48),
(a_month_ago, 32),
(three_months_ago, 16),
(six_months_ago, 8)
)], default=1, output_field=IntegerField())
content_contrib_score = Case(*[
When(
group__associations__content__contributions__author=gestalt,
group__associations__content__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
conversation_contrib_score = Case(*[
When(
group__associations__conversation__contributions__author=gestalt,
group__associations__conversation__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
return self \
.annotate(activity=Sum(
content_score
+ content_contrib_score
+ conversation_contrib_score,
output_field=IntegerField())) \
.order_by('-activity')
|
<commit_before><commit_msg>Add queryset for ordering memberships by activity<commit_after>
|
from django.db import models
from django.db.models import Case, When, Value, IntegerField, Sum
from django.utils.timezone import now, timedelta
class MembershipQuerySet(models.QuerySet):
def order_by_gestalt_activity(self, gestalt):
a_week_ago = now() - timedelta(days=7)
a_month_ago = now() - timedelta(days=30)
three_months_ago = now() - timedelta(days=90)
six_months_ago = now() - timedelta(days=180)
content_score = Case(*[
When(
group__associations__content__versions__author=gestalt,
group__associations__content__versions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 48),
(a_month_ago, 32),
(three_months_ago, 16),
(six_months_ago, 8)
)], default=1, output_field=IntegerField())
content_contrib_score = Case(*[
When(
group__associations__content__contributions__author=gestalt,
group__associations__content__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
conversation_contrib_score = Case(*[
When(
group__associations__conversation__contributions__author=gestalt,
group__associations__conversation__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
return self \
.annotate(activity=Sum(
content_score
+ content_contrib_score
+ conversation_contrib_score,
output_field=IntegerField())) \
.order_by('-activity')
|
Add queryset for ordering memberships by activityfrom django.db import models
from django.db.models import Case, When, Value, IntegerField, Sum
from django.utils.timezone import now, timedelta
class MembershipQuerySet(models.QuerySet):
def order_by_gestalt_activity(self, gestalt):
a_week_ago = now() - timedelta(days=7)
a_month_ago = now() - timedelta(days=30)
three_months_ago = now() - timedelta(days=90)
six_months_ago = now() - timedelta(days=180)
content_score = Case(*[
When(
group__associations__content__versions__author=gestalt,
group__associations__content__versions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 48),
(a_month_ago, 32),
(three_months_ago, 16),
(six_months_ago, 8)
)], default=1, output_field=IntegerField())
content_contrib_score = Case(*[
When(
group__associations__content__contributions__author=gestalt,
group__associations__content__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
conversation_contrib_score = Case(*[
When(
group__associations__conversation__contributions__author=gestalt,
group__associations__conversation__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
return self \
.annotate(activity=Sum(
content_score
+ content_contrib_score
+ conversation_contrib_score,
output_field=IntegerField())) \
.order_by('-activity')
|
<commit_before><commit_msg>Add queryset for ordering memberships by activity<commit_after>from django.db import models
from django.db.models import Case, When, Value, IntegerField, Sum
from django.utils.timezone import now, timedelta
class MembershipQuerySet(models.QuerySet):
def order_by_gestalt_activity(self, gestalt):
a_week_ago = now() - timedelta(days=7)
a_month_ago = now() - timedelta(days=30)
three_months_ago = now() - timedelta(days=90)
six_months_ago = now() - timedelta(days=180)
content_score = Case(*[
When(
group__associations__content__versions__author=gestalt,
group__associations__content__versions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 48),
(a_month_ago, 32),
(three_months_ago, 16),
(six_months_ago, 8)
)], default=1, output_field=IntegerField())
content_contrib_score = Case(*[
When(
group__associations__content__contributions__author=gestalt,
group__associations__content__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
conversation_contrib_score = Case(*[
When(
group__associations__conversation__contributions__author=gestalt,
group__associations__conversation__contributions__time_created__gte=time,
then=Value(modifier)
) for time, modifier in (
(a_week_ago, 12),
(a_month_ago, 8),
(three_months_ago, 4),
(six_months_ago, 2)
)], default=0, output_field=IntegerField())
return self \
.annotate(activity=Sum(
content_score
+ content_contrib_score
+ conversation_contrib_score,
output_field=IntegerField())) \
.order_by('-activity')
|
|
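A hypothetical call site for the queryset above; the import path and the assumption that MembershipQuerySet is attached to the model via .as_manager() are guesses about the surrounding app, not facts from the commit.

from features.memberships.models import Membership  # assumed module layout
memberships = Membership.objects.order_by_gestalt_activity(gestalt)
for membership in memberships[:10]:
    # 'activity' is the annotation added by order_by_gestalt_activity
    print(membership.group, membership.activity)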
3c2f0786d7d092c2e1a57036c93e92ea6d67fe7c
|
gen_homebrew_formula.py
|
gen_homebrew_formula.py
|
#!/usr/bin/env python
# encoding: utf-8
'''
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
'''
from __future__ import print_function, unicode_literals
import io
import os
import sys
from textwrap import indent
import sqlitebiter
from subprocrunner import SubprocessRunner
def main():
formula_body = []
with io.open(os.path.join("docs", "pages", "introduction", "summary.txt"), encoding="utf8") as f:
formula_body.append('desc "{}"'.format(f.read().strip()))
base_url = "https://github.com/thombashi/{pkg}/releases/download/v{version}".format(
pkg=sqlitebiter.__name__, version=sqlitebiter.__version__)
proc = SubprocessRunner("wget {base}/{pkg}_macos_sha256.txt -O -".format(
base=base_url, pkg=sqlitebiter.__name__))
if proc.run() != 0:
print(proc.stderr, file=sys.stderr)
return proc.returncode
formula_body.extend([
'homepage "https://github.com/thombashi/{}"'.format(sqlitebiter.__name__),
'url "{bin_url}"'.format(bin_url="{base}/{pkg}_macos_amd64.tar.gz".format(
base=base_url, pkg=sqlitebiter.__name__)),
'version "{}"'.format(sqlitebiter.__version__),
'sha256 "{sha256}"'.format(sha256=proc.stdout.split()[0]),
'',
'def install',
' bin.install "{}"'.format(sqlitebiter.__name__),
'end',
])
print("class Sqlitebiter < Formula")
print(indent("\n".join(formula_body), " "))
print("end")
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a script to create homebrew formula
|
Add a script to create homebrew formula
|
Python
|
mit
|
thombashi/sqlitebiter,thombashi/sqlitebiter
|
Add a script to create homebrew formula
|
#!/usr/bin/env python
# encoding: utf-8
'''
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
'''
from __future__ import print_function, unicode_literals
import io
import os
import sys
from textwrap import indent
import sqlitebiter
from subprocrunner import SubprocessRunner
def main():
formula_body = []
with io.open(os.path.join("docs", "pages", "introduction", "summary.txt"), encoding="utf8") as f:
formula_body.append('desc "{}"'.format(f.read().strip()))
base_url = "https://github.com/thombashi/{pkg}/releases/download/v{version}".format(
pkg=sqlitebiter.__name__, version=sqlitebiter.__version__)
proc = SubprocessRunner("wget {base}/{pkg}_macos_sha256.txt -O -".format(
base=base_url, pkg=sqlitebiter.__name__))
if proc.run() != 0:
print(proc.stderr, file=sys.stderr)
return proc.returncode
formula_body.extend([
'homepage "https://github.com/thombashi/{}"'.format(sqlitebiter.__name__),
'url "{bin_url}"'.format(bin_url="{base}/{pkg}_macos_amd64.tar.gz".format(
base=base_url, pkg=sqlitebiter.__name__)),
'version "{}"'.format(sqlitebiter.__version__),
'sha256 "{sha256}"'.format(sha256=proc.stdout.split()[0]),
'',
'def install',
' bin.install "{}"'.format(sqlitebiter.__name__),
'end',
])
print("class Sqlitebiter < Formula")
print(indent("\n".join(formula_body), " "))
print("end")
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script to create homebrew formula<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
'''
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
'''
from __future__ import print_function, unicode_literals
import io
import os
import sys
from textwrap import indent
import sqlitebiter
from subprocrunner import SubprocessRunner
def main():
formula_body = []
with io.open(os.path.join("docs", "pages", "introduction", "summary.txt"), encoding="utf8") as f:
formula_body.append('desc "{}"'.format(f.read().strip()))
base_url = "https://github.com/thombashi/{pkg}/releases/download/v{version}".format(
pkg=sqlitebiter.__name__, version=sqlitebiter.__version__)
proc = SubprocessRunner("wget {base}/{pkg}_macos_sha256.txt -O -".format(
base=base_url, pkg=sqlitebiter.__name__))
if proc.run() != 0:
print(proc.stderr, file=sys.stderr)
return proc.returncode
formula_body.extend([
'homepage "https://github.com/thombashi/{}"'.format(sqlitebiter.__name__),
'url "{bin_url}"'.format(bin_url="{base}/{pkg}_macos_amd64.tar.gz".format(
base=base_url, pkg=sqlitebiter.__name__)),
'version "{}"'.format(sqlitebiter.__version__),
'sha256 "{sha256}"'.format(sha256=proc.stdout.split()[0]),
'',
'def install',
' bin.install "{}"'.format(sqlitebiter.__name__),
'end',
])
print("class Sqlitebiter < Formula")
print(indent("\n".join(formula_body), " "))
print("end")
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a script to create homebrew formula#!/usr/bin/env python
# encoding: utf-8
'''
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
'''
from __future__ import print_function, unicode_literals
import io
import os
import sys
from textwrap import indent
import sqlitebiter
from subprocrunner import SubprocessRunner
def main():
formula_body = []
with io.open(os.path.join("docs", "pages", "introduction", "summary.txt"), encoding="utf8") as f:
formula_body.append('desc "{}"'.format(f.read().strip()))
base_url = "https://github.com/thombashi/{pkg}/releases/download/v{version}".format(
pkg=sqlitebiter.__name__, version=sqlitebiter.__version__)
proc = SubprocessRunner("wget {base}/{pkg}_macos_sha256.txt -O -".format(
base=base_url, pkg=sqlitebiter.__name__))
if proc.run() != 0:
print(proc.stderr, file=sys.stderr)
return proc.returncode
formula_body.extend([
'homepage "https://github.com/thombashi/{}"'.format(sqlitebiter.__name__),
'url "{bin_url}"'.format(bin_url="{base}/{pkg}_macos_amd64.tar.gz".format(
base=base_url, pkg=sqlitebiter.__name__)),
'version "{}"'.format(sqlitebiter.__version__),
'sha256 "{sha256}"'.format(sha256=proc.stdout.split()[0]),
'',
'def install',
' bin.install "{}"'.format(sqlitebiter.__name__),
'end',
])
print("class Sqlitebiter < Formula")
print(indent("\n".join(formula_body), " "))
print("end")
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script to create homebrew formula<commit_after>#!/usr/bin/env python
# encoding: utf-8
'''
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
'''
from __future__ import print_function, unicode_literals
import io
import os
import sys
from textwrap import indent
import sqlitebiter
from subprocrunner import SubprocessRunner
def main():
formula_body = []
with io.open(os.path.join("docs", "pages", "introduction", "summary.txt"), encoding="utf8") as f:
formula_body.append('desc "{}"'.format(f.read().strip()))
base_url = "https://github.com/thombashi/{pkg}/releases/download/v{version}".format(
pkg=sqlitebiter.__name__, version=sqlitebiter.__version__)
proc = SubprocessRunner("wget {base}/{pkg}_macos_sha256.txt -O -".format(
base=base_url, pkg=sqlitebiter.__name__))
if proc.run() != 0:
print(proc.stderr, file=sys.stderr)
return proc.returncode
formula_body.extend([
'homepage "https://github.com/thombashi/{}"'.format(sqlitebiter.__name__),
'url "{bin_url}"'.format(bin_url="{base}/{pkg}_macos_amd64.tar.gz".format(
base=base_url, pkg=sqlitebiter.__name__)),
'version "{}"'.format(sqlitebiter.__version__),
'sha256 "{sha256}"'.format(sha256=proc.stdout.split()[0]),
'',
'def install',
' bin.install "{}"'.format(sqlitebiter.__name__),
'end',
])
print("class Sqlitebiter < Formula")
print(indent("\n".join(formula_body), " "))
print("end")
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
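A minimal sketch of the checksum parsing the script relies on: the published *_macos_sha256.txt file is assumed to follow the usual sha256sum layout of "<hexdigest>  <filename>", so the first whitespace-separated token is the digest that ends up in the formula's sha256 field.

# example content of the downloaded checksum file (digest shortened)
stdout = "9f86d081884c7d65...  sqlitebiter_macos_amd64.tar.gz"
sha256 = stdout.split()[0]
print(sha256)  # -> "9f86d081884c7d65..."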
338217886caebbc34150b1c7575aa3ee845ed5cd
|
IPython/lib/tests/test_pretty.py
|
IPython/lib/tests/test_pretty.py
|
"""Tests for IPython.lib.pretty.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import nose.tools as nt
# Our own imports
from IPython.lib import pretty
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class MyList(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for (i, child) in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
def test_indentation():
"""Test correct indentation in groups"""
count = 40
gotoutput = pretty.pretty(MyList(range(count)))
expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
nt.assert_equals(gotoutput, expectedoutput)
|
Add test for the indentation fix.
|
Add test for the indentation fix.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add test for the indentation fix.
|
"""Tests for IPython.lib.pretty.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import nose.tools as nt
# Our own imports
from IPython.lib import pretty
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class MyList(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for (i, child) in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
def test_indentation():
"""Test correct indentation in groups"""
count = 40
gotoutput = pretty.pretty(MyList(range(count)))
expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
nt.assert_equals(gotoutput, expectedoutput)
|
<commit_before><commit_msg>Add test for the indentation fix.<commit_after>
|
"""Tests for IPython.lib.pretty.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import nose.tools as nt
# Our own imports
from IPython.lib import pretty
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class MyList(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for (i, child) in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
def test_indentation():
"""Test correct indentation in groups"""
count = 40
gotoutput = pretty.pretty(MyList(range(count)))
expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
nt.assert_equals(gotoutput, expectedoutput)
|
Add test for the indentation fix."""Tests for IPython.lib.pretty.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import nose.tools as nt
# Our own imports
from IPython.lib import pretty
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class MyList(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for (i, child) in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
def test_indentation():
"""Test correct indentation in groups"""
count = 40
gotoutput = pretty.pretty(MyList(range(count)))
expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
nt.assert_equals(gotoutput, expectedoutput)
|
<commit_before><commit_msg>Add test for the indentation fix.<commit_after>"""Tests for IPython.lib.pretty.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2011, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import nose.tools as nt
# Our own imports
from IPython.lib import pretty
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class MyList(object):
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for (i, child) in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
def test_indentation():
"""Test correct indentation in groups"""
count = 40
gotoutput = pretty.pretty(MyList(range(count)))
expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
nt.assert_equals(gotoutput, expectedoutput)
|
|
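To make the expected indentation concrete: breakables only break once the group exceeds the line width, so a short list stays on one line while count=40 wraps with the three-space hanging indent that group(3, ...) requests (the "..." below elides the middle elements).

print(pretty.pretty(MyList(range(3))))    # fits the line: MyList(0, 1, 2)
print(pretty.pretty(MyList(range(40))))   # exceeds the width, so it wraps:
# MyList(
#    0,
#    1,
#    ...
#    39)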
2571453a54b195adc6a961b20d798e95ab885f67
|
selfdrive/debug/filter_log_message.py
|
selfdrive/debug/filter_log_message.py
|
#!/usr/bin/env python3
import os
import argparse
import json
import cereal.messaging as messaging
LEVELS = {
"DEBUG": 10,
"INFO": 20,
"WARNING": 30,
"ERROR": 40,
"CRITICAL": 50,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--level', default='DEBUG')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
sock = messaging.sub_sock("logMessage", poller, addr=args.addr)
min_level = LEVELS[args.level]
while True:
polld = poller.poll(1000)
for sock in polld:
evt = messaging.recv_one(sock)
log = json.loads(evt.logMessage)
if log['levelnum'] >= min_level:
print(f"{log['filename']}:{log.get('lineno', '')} - {log.get('funcname', '')}: {log['msg']}")
|
Add script to nicely print logMessages
|
Add script to nicely print logMessages
|
Python
|
mit
|
commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot
|
Add script to nicely print logMessages
|
#!/usr/bin/env python3
import os
import argparse
import json
import cereal.messaging as messaging
LEVELS = {
"DEBUG": 10,
"INFO": 20,
"WARNING": 30,
"ERROR": 40,
"CRITICAL": 50,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--level', default='DEBUG')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
sock = messaging.sub_sock("logMessage", poller, addr=args.addr)
min_level = LEVELS[args.level]
while True:
polld = poller.poll(1000)
for sock in polld:
evt = messaging.recv_one(sock)
log = json.loads(evt.logMessage)
if log['levelnum'] >= min_level:
print(f"{log['filename']}:{log.get('lineno', '')} - {log.get('funcname', '')}: {log['msg']}")
|
<commit_before><commit_msg>Add script to nicely print logMessages<commit_after>
|
#!/usr/bin/env python3
import os
import argparse
import json
import cereal.messaging as messaging
LEVELS = {
"DEBUG": 10,
"INFO": 20,
"WARNING": 30,
"ERROR": 40,
"CRITICAL": 50,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--level', default='DEBUG')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
sock = messaging.sub_sock("logMessage", poller, addr=args.addr)
min_level = LEVELS[args.level]
while True:
polld = poller.poll(1000)
for sock in polld:
evt = messaging.recv_one(sock)
log = json.loads(evt.logMessage)
if log['levelnum'] >= min_level:
print(f"{log['filename']}:{log.get('lineno', '')} - {log.get('funcname', '')}: {log['msg']}")
|
Add script to nicely print logMessages#!/usr/bin/env python3
import os
import argparse
import json
import cereal.messaging as messaging
LEVELS = {
"DEBUG": 10,
"INFO": 20,
"WARNING": 30,
"ERROR": 40,
"CRITICAL": 50,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--level', default='DEBUG')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
sock = messaging.sub_sock("logMessage", poller, addr=args.addr)
min_level = LEVELS[args.level]
while True:
polld = poller.poll(1000)
for sock in polld:
evt = messaging.recv_one(sock)
log = json.loads(evt.logMessage)
if log['levelnum'] >= min_level:
print(f"{log['filename']}:{log.get('lineno', '')} - {log.get('funcname', '')}: {log['msg']}")
|
<commit_before><commit_msg>Add script to nicely print logMessages<commit_after>#!/usr/bin/env python3
import os
import argparse
import json
import cereal.messaging as messaging
LEVELS = {
"DEBUG": 10,
"INFO": 20,
"WARNING": 30,
"ERROR": 40,
"CRITICAL": 50,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--level', default='DEBUG')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
sock = messaging.sub_sock("logMessage", poller, addr=args.addr)
min_level = LEVELS[args.level]
while True:
polld = poller.poll(1000)
for sock in polld:
evt = messaging.recv_one(sock)
log = json.loads(evt.logMessage)
if log['levelnum'] >= min_level:
print(f"{log['filename']}:{log.get('lineno', '')} - {log.get('funcname', '')}: {log['msg']}")
|
|
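The filtering step in isolation, with a made-up log record; the printed format mirrors the f-string at the end of the script.

log = {"levelnum": 30, "filename": "controlsd.py", "lineno": 123,
       "funcname": "update", "msg": "example message"}  # hypothetical record
if log['levelnum'] >= LEVELS['WARNING']:
    print(f"{log['filename']}:{log.get('lineno', '')} - {log.get('funcname', '')}: {log['msg']}")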
737dd2fdcfc097ed367fecbd83ded37807f2d1d7
|
tools/parse_shadertoy_json.py
|
tools/parse_shadertoy_json.py
|
# parse_shadertoy_json.py
from __future__ import print_function
import json
import os
from PIL import Image
jsonfile = "ldXXDj.txt"
#jsonfile = "4lX3RB.txt"
j = json.loads(open(jsonfile).read())
#print(json.dumps(j,indent=1))
info = j['Shader']['info']
print("Title: " + info['name'])
print("Author: " + info['username'])
print("Description: " + info['description'])
print("Tags: " + ', '.join(info['tags']))
renderpass = j['Shader']['renderpass']
for r in renderpass:
print(r['type'])
# Pull out textures
for t in r['inputs']:
texfile = os.path.basename(t['src'])
print(" tex" + str(t['channel']) + ": " + texfile, end='')
img = Image.open(texfile)
px = img.load()
print(" size: ", img.size, end='')
for i in range(5):
print(" " + str(px[i,0]), end='')
print("")
# TODO: Save to header
print(" code: " + str(len(r['code'])) + " bytes")
#print(r['code'])
# TODO: Save to header
|
Add a python script to pull relevant fields out of Shadertoy json.
|
Add a python script to pull relevant fields out of Shadertoy json.
|
Python
|
mit
|
jimbo00000/kinderegg
|
Add a python script to pull relevant fields out of Shadertoy json.
|
# parse_shadertoy_json.py
from __future__ import print_function
import json
import os
from PIL import Image
jsonfile = "ldXXDj.txt"
#jsonfile = "4lX3RB.txt"
j = json.loads(open(jsonfile).read())
#print(json.dumps(j,indent=1))
info = j['Shader']['info']
print("Title: " + info['name'])
print("Author: " + info['username'])
print("Description: " + info['description'])
print("Tags: " + ', '.join(info['tags']))
renderpass = j['Shader']['renderpass']
for r in renderpass:
print(r['type'])
# Pull out textures
for t in r['inputs']:
texfile = os.path.basename(t['src'])
print(" tex" + str(t['channel']) + ": " + texfile, end='')
img = Image.open(texfile)
px = img.load()
print(" size: ", img.size, end='')
for i in range(5):
print(" " + str(px[i,0]), end='')
print("")
# TODO: Save to header
print(" code: " + str(len(r['code'])) + " bytes")
#print(r['code'])
# TODO: Save to header
|
<commit_before><commit_msg>Add a python script to pull relevant fields out of Shadertoy json.<commit_after>
|
# parse_shadertoy_json.py
from __future__ import print_function
import json
import os
from PIL import Image
jsonfile = "ldXXDj.txt"
#jsonfile = "4lX3RB.txt"
j = json.loads(open(jsonfile).read())
#print(json.dumps(j,indent=1))
info = j['Shader']['info']
print("Title: " + info['name'])
print("Author: " + info['username'])
print("Description: " + info['description'])
print("Tags: " + ', '.join(info['tags']))
renderpass = j['Shader']['renderpass']
for r in renderpass:
print(r['type'])
# Pull out textures
for t in r['inputs']:
texfile = os.path.basename(t['src'])
print(" tex" + str(t['channel']) + ": " + texfile, end='')
img = Image.open(texfile)
px = img.load()
print(" size: ", img.size, end='')
for i in range(5):
print(" " + str(px[i,0]), end='')
print("")
# TODO: Save to header
print(" code: " + str(len(r['code'])) + " bytes")
#print(r['code'])
# TODO: Save to header
|
Add a python script to pull relevant fields out of Shadertoy json.# parse_shadertoy_json.py
from __future__ import print_function
import json
import os
from PIL import Image
jsonfile = "ldXXDj.txt"
#jsonfile = "4lX3RB.txt"
j = json.loads(open(jsonfile).read())
#print(json.dumps(j,indent=1))
info = j['Shader']['info']
print("Title: " + info['name'])
print("Author: " + info['username'])
print("Description: " + info['description'])
print("Tags: " + ', '.join(info['tags']))
renderpass = j['Shader']['renderpass']
for r in renderpass:
print(r['type'])
# Pull out textures
for t in r['inputs']:
texfile = os.path.basename(t['src'])
print(" tex" + str(t['channel']) + ": " + texfile, end='')
img = Image.open(texfile)
px = img.load()
print(" size: ", img.size, end='')
for i in range(5):
print(" " + str(px[i,0]), end='')
print("")
# TODO: Save to header
print(" code: " + str(len(r['code'])) + " bytes")
#print(r['code'])
# TODO: Save to header
|
<commit_before><commit_msg>Add a python script to pull relevant fields out of Shadertoy json.<commit_after># parse_shadertoy_json.py
from __future__ import print_function
import json
import os
from PIL import Image
jsonfile = "ldXXDj.txt"
#jsonfile = "4lX3RB.txt"
j = json.loads(open(jsonfile).read())
#print(json.dumps(j,indent=1))
info = j['Shader']['info']
print("Title: " + info['name'])
print("Author: " + info['username'])
print("Description: " + info['description'])
print("Tags: " + ', '.join(info['tags']))
renderpass = j['Shader']['renderpass']
for r in renderpass:
print(r['type'])
# Pull out textures
for t in r['inputs']:
texfile = os.path.basename(t['src'])
print(" tex" + str(t['channel']) + ": " + texfile, end='')
img = Image.open(texfile)
px = img.load()
print(" size: ", img.size, end='')
for i in range(5):
print(" " + str(px[i,0]), end='')
print("")
# TODO: Save to header
print(" code: " + str(len(r['code'])) + " bytes")
#print(r['code'])
# TODO: Save to header
|
|
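A note on the two "Save to header" TODOs in the script above: a minimal sketch for finishing them could escape each render pass's code and emit it as a C string literal. The function name, header name, and escaping rules below are assumptions for illustration, not part of the original script:

def save_code_to_header(code, varname="g_shader_src", out="shadertoy_gen.h"):
    # naive escaping so the GLSL source survives as a C string literal
    escaped = code.replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
    with open(out, "w") as f:
        f.write('const char* %s = "%s";\n' % (varname, escaped))

Calling save_code_to_header(r['code']) inside the render pass loop would cover the second TODO; the texture metadata could be written out the same way.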
51b38b4b8d56f5d8fca186646b30831c12e92c7a
|
lbuild/buildlog.py
|
lbuild/buildlog.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import logging
from .exception import BlobException
LOGGER = logging.getLogger('lbuild.buildlog')
class Operation:
def __init__(self, module, filename_in: str, filename_out: str):
self.module = module.fullname
self.modulepath = module.path
self.filename_in = filename_in
self.filename_out = filename_out
class BuildLog:
def __init__(self):
self.operations = {}
def log(self, module, filename_in: str, filename_out: str):
operation = Operation(module, filename_in, filename_out)
previous = self.operations.get(filename_out, None)
if previous is not None:
raise BlobException("Overwrite file '{}' with template '{}'. Previously "
"generated from template '{}'.".format(filename_out,
filename_in,
previous.filename_in))
self.operations[filename_out] = operation
|
Prepare class for collecting build operations.
|
Prepare class for collecting build operations.
|
Python
|
bsd-2-clause
|
dergraaf/library-builder,dergraaf/library-builder
|
Prepare class for collecting build operations.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import logging
from .exception import BlobException
LOGGER = logging.getLogger('lbuild.buildlog')
class Operation:
def __init__(self, module, filename_in: str, filename_out: str):
self.module = module.fullname
self.modulepath = module.path
self.filename_in = filename_in
self.filename_out = filename_out
class BuildLog:
def __init__(self):
self.operations = {}
def log(self, module, filename_in: str, filename_out: str):
operation = Operation(module, filename_in, filename_out)
previous = self.operations.get(filename_out, None)
if previous is not None:
raise BlobException("Overwrite file '{}' with template '{}'. Previously "
"generated from template '{}'.".format(filename_out,
filename_in,
previous.filename_in))
self.operations[filename_out] = operation
|
<commit_before><commit_msg>Prepare class for collecting build operations.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import logging
from .exception import BlobException
LOGGER = logging.getLogger('lbuild.buildlog')
class Operation:
def __init__(self, module, filename_in: str, filename_out: str):
self.module = module.fullname
self.modulepath = module.path
self.filename_in = filename_in
self.filename_out = filename_out
class BuildLog:
def __init__(self):
self.operations = {}
def log(self, module, filename_in: str, filename_out: str):
operation = Operation(module, filename_in, filename_out)
previous = self.operations.get(filename_out, None)
if previous is not None:
raise BlobException("Overwrite file '{}' with template '{}'. Previously "
"generated from template '{}'.".format(filename_out,
filename_in,
previous.filename_in))
self.operations[filename_out] = operation
|
Prepare class for collecting build operations.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import logging
from .exception import BlobException
LOGGER = logging.getLogger('lbuild.buildlog')
class Operation:
def __init__(self, module, filename_in: str, filename_out: str):
self.module = module.fullname
self.modulepath = module.path
self.filename_in = filename_in
self.filename_out = filename_out
class BuildLog:
def __init__(self):
self.operations = {}
def log(self, module, filename_in: str, filename_out: str):
operation = Operation(module, filename_in, filename_out)
previous = self.operations.get(filename_out, None)
if previous is not None:
raise BlobException("Overwrite file '{}' with template '{}'. Previously "
"generated from template '{}'.".format(filename_out,
filename_in,
previous.filename_in))
self.operations[filename_out] = operation
|
<commit_before><commit_msg>Prepare class for collecting build operations.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import logging
from .exception import BlobException
LOGGER = logging.getLogger('lbuild.buildlog')
class Operation:
def __init__(self, module, filename_in: str, filename_out: str):
self.module = module.fullname
self.modulepath = module.path
self.filename_in = filename_in
self.filename_out = filename_out
class BuildLog:
def __init__(self):
self.operations = {}
def log(self, module, filename_in: str, filename_out: str):
operation = Operation(module, filename_in, filename_out)
previous = self.operations.get(filename_out, None)
if previous is not None:
raise BlobException("Overwrite file '{}' with template '{}'. Previously "
"generated from template '{}'.".format(filename_out,
filename_in,
previous.filename_in))
self.operations[filename_out] = operation
|
|
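A quick usage sketch for the BuildLog class above; the stand-in module object is invented for illustration. Registering two templates against the same output file triggers the BlobException guard, which is the point of the class:

class FakeModule:
    fullname = "repo:module"
    path = "repo/module"

log = BuildLog()
log.log(FakeModule(), "main.cpp.in", "out/main.cpp")   # first write is recorded
log.log(FakeModule(), "other.cpp.in", "out/main.cpp")  # raises BlobException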
fd060b61a61b0918ff2e7ebb978f2210f9e0678b
|
ctypeslib/test/test_toolchain.py
|
ctypeslib/test/test_toolchain.py
|
import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
Test the complete h2xml and xml2py toolchain on Windows by running it over 'windows.h'.
|
Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.
git-svn-id: ac2c3632cb6543e7ab5fafd132c7fe15057a1882@60459 6015fed2-1504-0410-9fe1-9d1591cc4771
|
Python
|
mit
|
luzfcb/ctypeslib,trolldbois/ctypeslib,trolldbois/ctypeslib,luzfcb/ctypeslib,luzfcb/ctypeslib,trolldbois/ctypeslib
|
Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.
git-svn-id: ac2c3632cb6543e7ab5fafd132c7fe15057a1882@60459 6015fed2-1504-0410-9fe1-9d1591cc4771
|
import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
<commit_before><commit_msg>Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.
git-svn-id: ac2c3632cb6543e7ab5fafd132c7fe15057a1882@60459 6015fed2-1504-0410-9fe1-9d1591cc4771<commit_after>
|
import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.
git-svn-id: ac2c3632cb6543e7ab5fafd132c7fe15057a1882@60459 6015fed2-1504-0410-9fe1-9d1591cc4771import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
<commit_before><commit_msg>Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.
git-svn-id: ac2c3632cb6543e7ab5fafd132c7fe15057a1882@60459 6015fed2-1504-0410-9fe1-9d1591cc4771<commit_after>import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
|
04211395630cd42c0f9033b162791515bdf918dd
|
karspexet/venue/management/commands/build_seats.py
|
karspexet/venue/management/commands/build_seats.py
|
import csv
from django.core.management.base import BaseCommand
from karspexet.venue.models import Seat, SeatingGroup, Venue
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("venue-id", type=int)
parser.add_argument("file")
def handle(self, *args, **options):
venue = Venue.objects.get(pk=options["venue_id"])
with open(options["file"]) as csv_file:
reader = csv.reader(csv_file, delimiter="\t")
groupings = {}
for group_name, seat_name, x_pos, y_pos in reader:
group = groupings.get(group_name, None)
if group is None:
group = SeatingGroup.objects.create(
name=group_name,
venue=venue
)
groupings[group_name] = group
Seat.objects.create(
group=group,
name=seat_name,
x_pos=x_pos,
y_pos=y_pos,
)
|
Add a management command for adding seats based on a seatmap file
|
Add a management command for adding seats based on a seatmap file
This can be used to import seats and pricing groups from text-files
|
Python
|
mit
|
Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet,Karspexet/Karspexet
|
Add a management command for adding seats based on a seatmap file
This can be used to import seats and pricing groups from text-files
|
import csv
from django.core.management.base import BaseCommand
from karspexet.venue.models import Seat, SeatingGroup, Venue
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("venue-id", type=int)
parser.add_argument("file")
def handle(self, *args, **options):
venue = Venue.objects.get(pk=options["venue_id"])
with open(options["file"]) as csv_file:
reader = csv.reader(csv_file, delimiter="\t")
groupings = {}
for group_name, seat_name, x_pos, y_pos in reader:
group = groupings.get(group_name, None)
if group is None:
group = SeatingGroup.objects.create(
name=group_name,
venue=venue
)
groupings[group_name] = group
Seat.objects.create(
group=group,
name=seat_name,
x_pos=x_pos,
y_pos=y_pos,
)
|
<commit_before><commit_msg>Add a management command for adding seats based on a seatmap file
This can be used to import seats and pricing groups from text-files<commit_after>
|
import csv
from django.core.management.base import BaseCommand
from karspexet.venue.models import Seat, SeatingGroup, Venue
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("venue-id", type=int)
parser.add_argument("file")
def handle(self, *args, **options):
venue = Venue.objects.get(pk=options["venue_id"])
with open(options["file"]) as csv_file:
reader = csv.reader(csv_file, delimiter="\t")
groupings = {}
for group_name, seat_name, x_pos, y_pos in reader:
group = groupings.get(group_name, None)
if group is None:
group = SeatingGroup.objects.create(
name=group_name,
venue=venue
)
groupings[group_name] = group
Seat.objects.create(
group=group,
name=seat_name,
x_pos=x_pos,
y_pos=y_pos,
)
|
Add a management command for adding seats based on a seatmap file
This can be used to import seats and pricing groups from text-filesimport csv
from django.core.management.base import BaseCommand
from karspexet.venue.models import Seat, SeatingGroup, Venue
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("venue-id", type=int)
parser.add_argument("file")
def handle(self, *args, **options):
venue = Venue.objects.get(pk=options["venue_id"])
with open(options["file"]) as csv_file:
reader = csv.reader(csv_file, delimiter="\t")
groupings = {}
for group_name, seat_name, x_pos, y_pos in reader:
group = groupings.get(group_name, None)
if group is None:
group = SeatingGroup.objects.create(
name=group_name,
venue=venue
)
groupings[group_name] = group
Seat.objects.create(
group=group,
name=seat_name,
x_pos=x_pos,
y_pos=y_pos,
)
|
<commit_before><commit_msg>Add a management command for adding seats based on a seatmap file
This can be used to import seats and pricing groups from text-files<commit_after>import csv
from django.core.management.base import BaseCommand
from karspexet.venue.models import Seat, SeatingGroup, Venue
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("venue-id", type=int)
parser.add_argument("file")
def handle(self, *args, **options):
venue = Venue.objects.get(pk=options["venue_id"])
with open(options["file"]) as csv_file:
reader = csv.reader(csv_file, delimiter="\t")
groupings = {}
for group_name, seat_name, x_pos, y_pos in reader:
group = groupings.get(group_name, None)
if group is None:
group = SeatingGroup.objects.create(
name=group_name,
venue=venue
)
groupings[group_name] = group
Seat.objects.create(
group=group,
name=seat_name,
x_pos=x_pos,
y_pos=y_pos,
)
|
|
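For the build_seats command above, a hypothetical seatmap file would carry one tab-separated row per seat: pricing-group name, seat name, x position, y position. Group names and coordinates here are invented:

parkett	A1	100	40
parkett	A2	130	40
balkong	B1	100	200

It would then be imported with something like python manage.py build_seats 1 seatmap.tsv, where 1 is the primary key of an existing Venue.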
ca427f926e7298442aa9a1481d59aa003cd7f0bb
|
indra/tests/test_reading_files.py
|
indra/tests/test_reading_files.py
|
from os import path
from indra.tools.reading.read_files import read_files, get_readers
from nose.plugins.attrib import attr
@attr('slow', 'nonpublic')
def test_read_files():
"Test that the system can read files."
# Create the test files.
example_files = []
# Get txt content
abstract_txt = ("This is a paper that contains the phrase: MEK "
"phosphorylates ERK.")
with open('test_abstract.txt', 'w') as f:
f.write(abstract_txt)
example_files.append('test_abstract.txt')
# Get nxml content
pmc_test_fpath = path.join(path.dirname(path.abspath(__file__)),
'pmc_cont_example.nxml')
if path.exists(pmc_test_fpath):
example_files.append(pmc_test_fpath)
assert len(example_files), "No content available to test."
# Now read them.
readers = get_readers()
outputs = read_files(example_files, readers)
N_out = len(outputs)
N_exp = 2*len(example_files)
assert N_out == N_exp, "Expected %d outputs, got %d." % (N_exp, N_out)
|
Add test for read_files, removed from indra_db.
|
Add test for read_files, removed from indra_db.
|
Python
|
bsd-2-clause
|
johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,pvtodorov/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy,sorgerlab/belpy,johnbachman/belpy,johnbachman/indra,pvtodorov/indra,johnbachman/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra
|
Add test for read_files, removed from indra_db.
|
from os import path
from indra.tools.reading.read_files import read_files, get_readers
from nose.plugins.attrib import attr
@attr('slow', 'nonpublic')
def test_read_files():
"Test that the system can read files."
# Create the test files.
example_files = []
# Get txt content
abstract_txt = ("This is a paper that contains the phrase: MEK "
"phosphorylates ERK.")
with open('test_abstract.txt', 'w') as f:
f.write(abstract_txt)
example_files.append('test_abstract.txt')
# Get nxml content
pmc_test_fpath = path.join(path.dirname(path.abspath(__file__)),
'pmc_cont_example.nxml')
if path.exists(pmc_test_fpath):
example_files.append(pmc_test_fpath)
assert len(example_files), "No content available to test."
# Now read them.
readers = get_readers()
outputs = read_files(example_files, readers)
N_out = len(outputs)
N_exp = 2*len(example_files)
assert N_out == N_exp, "Expected %d outputs, got %d." % (N_exp, N_out)
|
<commit_before><commit_msg>Add test for read_files, removed from indra_db.<commit_after>
|
from os import path
from indra.tools.reading.read_files import read_files, get_readers
from nose.plugins.attrib import attr
@attr('slow', 'nonpublic')
def test_read_files():
"Test that the system can read files."
# Create the test files.
example_files = []
# Get txt content
abstract_txt = ("This is a paper that contains the phrase: MEK "
"phosphorylates ERK.")
with open('test_abstract.txt', 'w') as f:
f.write(abstract_txt)
example_files.append('test_abstract.txt')
# Get nxml content
pmc_test_fpath = path.join(path.dirname(path.abspath(__file__)),
'pmc_cont_example.nxml')
if path.exists(pmc_test_fpath):
example_files.append(pmc_test_fpath)
assert len(example_files), "No content available to test."
# Now read them.
readers = get_readers()
outputs = read_files(example_files, readers)
N_out = len(outputs)
N_exp = 2*len(example_files)
assert N_out == N_exp, "Expected %d outputs, got %d." % (N_exp, N_out)
|
Add test for read_files, removed from indra_db.from os import path
from indra.tools.reading.read_files import read_files, get_readers
from nose.plugins.attrib import attr
@attr('slow', 'nonpublic')
def test_read_files():
"Test that the system can read files."
# Create the test files.
example_files = []
# Get txt content
abstract_txt = ("This is a paper that contains the phrase: MEK "
"phosphorylates ERK.")
with open('test_abstract.txt', 'w') as f:
f.write(abstract_txt)
example_files.append('test_abstract.txt')
# Get nxml content
pmc_test_fpath = path.join(path.dirname(path.abspath(__file__)),
'pmc_cont_example.nxml')
if path.exists(pmc_test_fpath):
example_files.append(pmc_test_fpath)
assert len(example_files), "No content available to test."
# Now read them.
readers = get_readers()
outputs = read_files(example_files, readers)
N_out = len(outputs)
N_exp = 2*len(example_files)
assert N_out == N_exp, "Expected %d outputs, got %d." % (N_exp, N_out)
|
<commit_before><commit_msg>Add test for read_files, removed from indra_db.<commit_after>from os import path
from indra.tools.reading.read_files import read_files, get_readers
from nose.plugins.attrib import attr
@attr('slow', 'nonpublic')
def test_read_files():
"Test that the system can read files."
# Create the test files.
example_files = []
# Get txt content
abstract_txt = ("This is a paper that contains the phrase: MEK "
"phosphorylates ERK.")
with open('test_abstract.txt', 'w') as f:
f.write(abstract_txt)
example_files.append('test_abstract.txt')
# Get nxml content
pmc_test_fpath = path.join(path.dirname(path.abspath(__file__)),
'pmc_cont_example.nxml')
if path.exists(pmc_test_fpath):
example_files.append(pmc_test_fpath)
assert len(example_files), "No content available to test."
# Now read them.
readers = get_readers()
outputs = read_files(example_files, readers)
N_out = len(outputs)
N_exp = 2*len(example_files)
assert N_out == N_exp, "Expected %d outputs, got %d." % (N_exp, N_out)
|
|
b6ac7ed0f8318cb708b3e49a7019891f702d7f4a
|
h2o-py/tests/testdir_algos/deepwater/pyunit_multiclass_deepwater.py
|
h2o-py/tests/testdir_algos/deepwater/pyunit_multiclass_deepwater.py
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_multi():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=50, rate=1e-3)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_multi)
else:
deepwater_multi()
|
Add PyUnit for DeepWater cat/dog/mouse image classification.
|
Add PyUnit for DeepWater cat/dog/mouse image classification.
|
Python
|
apache-2.0
|
h2oai/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,spennihana/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,jangorecki/h2o-3,jangorecki/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,mathemage/h2o-3,spennihana/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,spennihana/h2o-3,h2oai/h2o-3,h2oai/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,spennihana/h2o-3,h2oai/h2o-dev
|
Add PyUnit for DeepWater cat/dog/mouse image classification.
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_multi():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=50, rate=1e-3)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_multi)
else:
deepwater_multi()
|
<commit_before><commit_msg>Add PyUnit for DeepWater cat/dog/mouse image classification.<commit_after>
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_multi():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=50, rate=1e-3)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_multi)
else:
deepwater_multi()
|
Add PyUnit for DeepWater cat/dog/mouse image classification.from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_multi():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=50, rate=1e-3)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_multi)
else:
deepwater_multi()
|
<commit_before><commit_msg>Add PyUnit for DeepWater cat/dog/mouse image classification.<commit_after>from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_multi():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=50, rate=1e-3)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_multi)
else:
deepwater_multi()
|
|
2f41b1b9441700eb68331927ded7ef6f25e192bb
|
test/test_orphaned.py
|
test/test_orphaned.py
|
#!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
class TestOrphaned(ShinkenTest):
#Uncomment this if you want to use a specific configuration
#for your test
#def setUp(self):
# self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
#Change ME :)
def test_orphaned(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
#self.assert_(host.state == 'UP')
#self.assert_(host.state_type == 'HARD')
svc.schedule()
print svc.actions
self.sched.get_new_actions()
for c in self.sched.checks.values():
print c
# simulate an orphaned situation
c.t_to_go = now - 301
c.status = 'inpoller'
self.sched.check_orphaned()
# Should be available to poller now :)
for c in self.sched.checks.values():
self.assert_(c.status == 'scheduled')
if __name__ == '__main__':
unittest.main()
|
Add : first orphaned test.
|
Add : first orphaned test.
|
Python
|
agpl-3.0
|
tal-nino/shinken,Aimage/shinken,staute/shinken_package,KerkhoffTechnologies/shinken,savoirfairelinux/shinken,tal-nino/shinken,lets-software/shinken,Simage/shinken,fpeyre/shinken,fpeyre/shinken,h4wkmoon/shinken,Aimage/shinken,rledisez/shinken,claneys/shinken,baloo/shinken,Alignak-monitoring/alignak,fpeyre/shinken,Simage/shinken,titilambert/alignak,savoirfairelinux/shinken,xorpaul/shinken,Simage/shinken,claneys/shinken,Aimage/shinken,baloo/shinken,h4wkmoon/shinken,titilambert/alignak,savoirfairelinux/shinken,naparuba/shinken,baloo/shinken,Alignak-monitoring/alignak,dfranco/shinken,naparuba/shinken,geektophe/shinken,gst/alignak,gst/alignak,xorpaul/shinken,rednach/krill,claneys/shinken,mohierf/shinken,staute/shinken_package,naparuba/shinken,h4wkmoon/shinken,mohierf/shinken,kaji-project/shinken,dfranco/shinken,ddurieux/alignak,staute/shinken_package,Aimage/shinken,staute/shinken_deb,staute/shinken_deb,peeyush-tm/shinken,rledisez/shinken,claneys/shinken,h4wkmoon/shinken,fpeyre/shinken,lets-software/shinken,xorpaul/shinken,xorpaul/shinken,rednach/krill,ddurieux/alignak,Simage/shinken,staute/shinken_deb,mohierf/shinken,geektophe/shinken,peeyush-tm/shinken,peeyush-tm/shinken,dfranco/shinken,mohierf/shinken,tal-nino/shinken,Simage/shinken,staute/shinken_package,rledisez/shinken,lets-software/shinken,lets-software/shinken,rledisez/shinken,geektophe/shinken,savoirfairelinux/shinken,Aimage/shinken,titilambert/alignak,baloo/shinken,KerkhoffTechnologies/shinken,ddurieux/alignak,kaji-project/shinken,geektophe/shinken,staute/shinken_deb,naparuba/shinken,staute/shinken_package,mohierf/shinken,h4wkmoon/shinken,kaji-project/shinken,rednach/krill,rednach/krill,naparuba/shinken,Aimage/shinken,kaji-project/shinken,titilambert/alignak,rledisez/shinken,h4wkmoon/shinken,xorpaul/shinken,savoirfairelinux/shinken,rednach/krill,staute/shinken_deb,claneys/shinken,tal-nino/shinken,kaji-project/shinken,tal-nino/shinken,ddurieux/alignak,geektophe/shinken,peeyush-tm/shinken,KerkhoffTechnologies/shinken,dfranco/shinken,dfranco/shinken,gst/alignak,peeyush-tm/shinken,lets-software/shinken,kaji-project/shinken,fpeyre/shinken,staute/shinken_deb,fpeyre/shinken,Simage/shinken,KerkhoffTechnologies/shinken,KerkhoffTechnologies/shinken,rednach/krill,h4wkmoon/shinken,geektophe/shinken,gst/alignak,KerkhoffTechnologies/shinken,claneys/shinken,peeyush-tm/shinken,dfranco/shinken,xorpaul/shinken,naparuba/shinken,ddurieux/alignak,savoirfairelinux/shinken,baloo/shinken,staute/shinken_package,kaji-project/shinken,mohierf/shinken,xorpaul/shinken,lets-software/shinken,tal-nino/shinken,baloo/shinken,rledisez/shinken,ddurieux/alignak,h4wkmoon/shinken,xorpaul/shinken
|
Add : first orphaned test.
|
#!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
class TestOrphaned(ShinkenTest):
#Uncomment this if you want to use a specific configuration
#for your test
#def setUp(self):
# self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
#Change ME :)
def test_orphaned(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
#self.assert_(host.state == 'UP')
#self.assert_(host.state_type == 'HARD')
svc.schedule()
print svc.actions
self.sched.get_new_actions()
for c in self.sched.checks.values():
print c
# simulate an orphaned situation
c.t_to_go = now - 301
c.status = 'inpoller'
self.sched.check_orphaned()
# Should be available to poller now :)
for c in self.sched.checks.values():
self.assert_(c.status == 'scheduled')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add : first orphaned test.<commit_after>
|
#!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
class TestOrphaned(ShinkenTest):
#Uncomment this if you want to use a specific configuration
#for your test
#def setUp(self):
# self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
#Change ME :)
def test_orphaned(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
#self.assert_(host.state == 'UP')
#self.assert_(host.state_type == 'HARD')
svc.schedule()
print svc.actions
self.sched.get_new_actions()
for c in self.sched.checks.values():
print c
# simulate an orphaned situation
c.t_to_go = now - 301
c.status = 'inpoller'
self.sched.check_orphaned()
# Should be available to poller now :)
for c in self.sched.checks.values():
self.assert_(c.status == 'scheduled')
if __name__ == '__main__':
unittest.main()
|
Add : first orphaned test.#!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
class TestOrphaned(ShinkenTest):
#Uncomment this if you want to use a specific configuration
#for your test
#def setUp(self):
# self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
#Change ME :)
def test_orphaned(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
#self.assert_(host.state == 'UP')
#self.assert_(host.state_type == 'HARD')
svc.schedule()
print svc.actions
self.sched.get_new_actions()
for c in self.sched.checks.values():
print c
# simulate an orphaned situation
c.t_to_go = now - 301
c.status = 'inpoller'
self.sched.check_orphaned()
# Should be available to poller now :)
for c in self.sched.checks.values():
self.assert_(c.status == 'scheduled')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add : first orphaned test.<commit_after>#!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
from shinken_test import *
class TestOrphaned(ShinkenTest):
#Uncomment this if you want to use a specific configuration
#for your test
#def setUp(self):
# self.setup_with_file('etc/nagios_1r_1h_1s.cfg')
#Change ME :)
def test_orphaned(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
#self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
#self.assert_(host.state == 'UP')
#self.assert_(host.state_type == 'HARD')
svc.schedule()
print svc.actions
self.sched.get_new_actions()
for c in self.sched.checks.values():
print c
# simulate an orphaned situation
c.t_to_go = now - 301
c.status = 'inpoller'
self.sched.check_orphaned()
# Should be available to poller now :)
for c in self.sched.checks.values():
self.assert_(c.status == 'scheduled')
if __name__ == '__main__':
unittest.main()
|
|
812402088f8df8b3ba20fb8ee041b6779f2b5171
|
Orange/tests/test_widgets_outputs.py
|
Orange/tests/test_widgets_outputs.py
|
import re
import unittest
import importlib.util
from Orange.canvas.registry import global_registry
class TestWidgetOutputs(unittest.TestCase):
def test_outputs(self):
re_send = re.compile('\\n\s+self.send\("([^"]*)"')
registry = global_registry()
errors = []
for desc in registry.widgets():
signal_names = {output.name for output in desc.outputs}
module_name, class_name = desc.qualified_name.rsplit(".", 1)
fname = importlib.util.find_spec(module_name).origin
widget_code = open(fname).read()
used = set(re_send.findall(widget_code))
undeclared = used - signal_names
if undeclared:
errors.append("- {} ({})".
format(desc.name, ", ".join(undeclared)))
if errors:
self.fail("Some widgets send to undeclared outputs:\n"+"\n".
join(errors))
|
Add tests for declarations of widget outputs
|
Add tests for declarations of widget outputs
|
Python
|
bsd-2-clause
|
qPCR4vir/orange3,cheral/orange3,qPCR4vir/orange3,qPCR4vir/orange3,kwikadi/orange3,marinkaz/orange3,kwikadi/orange3,kwikadi/orange3,cheral/orange3,cheral/orange3,cheral/orange3,kwikadi/orange3,marinkaz/orange3,qPCR4vir/orange3,kwikadi/orange3,cheral/orange3,marinkaz/orange3,marinkaz/orange3,qPCR4vir/orange3,marinkaz/orange3,kwikadi/orange3,qPCR4vir/orange3,cheral/orange3,marinkaz/orange3
|
Add tests for declarations of widget outputs
|
import re
import unittest
import importlib.util
from Orange.canvas.registry import global_registry
class TestWidgetOutputs(unittest.TestCase):
def test_outputs(self):
re_send = re.compile('\\n\s+self.send\("([^"]*)"')
registry = global_registry()
errors = []
for desc in registry.widgets():
signal_names = {output.name for output in desc.outputs}
module_name, class_name = desc.qualified_name.rsplit(".", 1)
fname = importlib.util.find_spec(module_name).origin
widget_code = open(fname).read()
used = set(re_send.findall(widget_code))
undeclared = used - signal_names
if undeclared:
errors.append("- {} ({})".
format(desc.name, ", ".join(undeclared)))
if errors:
self.fail("Some widgets send to undeclared outputs:\n"+"\n".
join(errors))
|
<commit_before><commit_msg>Add tests for declarations of widget outputs<commit_after>
|
import re
import unittest
import importlib.util
from Orange.canvas.registry import global_registry
class TestWidgetOutputs(unittest.TestCase):
def test_outputs(self):
re_send = re.compile('\\n\s+self.send\("([^"]*)"')
registry = global_registry()
errors = []
for desc in registry.widgets():
signal_names = {output.name for output in desc.outputs}
module_name, class_name = desc.qualified_name.rsplit(".", 1)
fname = importlib.util.find_spec(module_name).origin
widget_code = open(fname).read()
used = set(re_send.findall(widget_code))
undeclared = used - signal_names
if undeclared:
errors.append("- {} ({})".
format(desc.name, ", ".join(undeclared)))
if errors:
self.fail("Some widgets send to undeclared outputs:\n"+"\n".
join(errors))
|
Add tests for declarations of widget outputsimport re
import unittest
import importlib.util
from Orange.canvas.registry import global_registry
class TestWidgetOutputs(unittest.TestCase):
def test_outputs(self):
re_send = re.compile('\\n\s+self.send\("([^"]*)"')
registry = global_registry()
errors = []
for desc in registry.widgets():
signal_names = {output.name for output in desc.outputs}
module_name, class_name = desc.qualified_name.rsplit(".", 1)
fname = importlib.util.find_spec(module_name).origin
widget_code = open(fname).read()
used = set(re_send.findall(widget_code))
undeclared = used - signal_names
if undeclared:
errors.append("- {} ({})".
format(desc.name, ", ".join(undeclared)))
if errors:
self.fail("Some widgets send to undeclared outputs:\n"+"\n".
join(errors))
|
<commit_before><commit_msg>Add tests for declarations of widget outputs<commit_after>import re
import unittest
import importlib.util
from Orange.canvas.registry import global_registry
class TestWidgetOutputs(unittest.TestCase):
def test_outputs(self):
re_send = re.compile('\\n\s+self.send\("([^"]*)"')
registry = global_registry()
errors = []
for desc in registry.widgets():
signal_names = {output.name for output in desc.outputs}
module_name, class_name = desc.qualified_name.rsplit(".", 1)
fname = importlib.util.find_spec(module_name).origin
widget_code = open(fname).read()
used = set(re_send.findall(widget_code))
undeclared = used - signal_names
if undeclared:
errors.append("- {} ({})".
format(desc.name, ", ".join(undeclared)))
if errors:
self.fail("Some widgets send to undeclared outputs:\n"+"\n".
join(errors))
|
|
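The failure mode the test above catches looks roughly like this hypothetical widget; the old-style outputs declaration and import paths are assumptions for illustration. The string passed to self.send() is exactly what the regex extracts and compares against the declared output names:

from Orange.data import Table
from Orange.widgets.widget import OWWidget

class OWExample(OWWidget):
    name = "Example"
    outputs = [("Data", Table)]
    def commit(self):
        self.send("Selected Data", None)  # "Selected Data" is not declared above, so the test reports it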
337aacb65af7db9fe5f80ac0058560d465fbe103
|
planetstack/model_policies/model_policy_Network.py
|
planetstack/model_policies/model_policy_Network.py
|
from core.models import *
from collections import defaultdict
def handle(network):
# network deployments are not visible to users. We must ensure
# networks are deployed at all deployments available to their slices.
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
network_deployments = NetworkDeployments.objects.all()
network_deploy_lookup = defaultdict(list)
for network_deployment in network_deployments:
network_deploy_lookup[network_deployment.network].append(network_deployment.deployment)
expected_deployments = slice_deploy_lookup[network.owner]
for expected_deployment in expected_deployments:
if network not in network_deploy_lookup or \
expected_deployment not in network_deploy_lookup[network]:
nd = NetworkDeployments(network=network, deployment=expected_deployment)
nd.save()
|
Add new network objects to all deployments
|
Policy: Add new network objects to all deployments
|
Python
|
apache-2.0
|
opencord/xos,cboling/xos,zdw/xos,xmaruto/mcord,xmaruto/mcord,cboling/xos,jermowery/xos,open-cloud/xos,zdw/xos,wathsalav/xos,xmaruto/mcord,zdw/xos,jermowery/xos,open-cloud/xos,cboling/xos,jermowery/xos,wathsalav/xos,jermowery/xos,opencord/xos,cboling/xos,xmaruto/mcord,wathsalav/xos,open-cloud/xos,wathsalav/xos,cboling/xos,opencord/xos,zdw/xos
|
Policy: Add new network objects to all deployments
|
from core.models import *
from collections import defaultdict
def handle(network):
# network deployments are not visible to users. We must ensure
# networks are deployed at all deployments available to their slices.
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
network_deployments = NetworkDeployments.objects.all()
network_deploy_lookup = defaultdict(list)
for network_deployment in network_deployments:
network_deploy_lookup[network_deployment.network].append(network_deployment.deployment)
expected_deployments = slice_deploy_lookup[network.owner]
for expected_deployment in expected_deployments:
if network not in network_deploy_lookup or \
expected_deployment not in network_deploy_lookup[network]:
nd = NetworkDeployments(network=network, deployment=expected_deployment)
nd.save()
|
<commit_before><commit_msg>Policy: Add new network objects to all deployments<commit_after>
|
from core.models import *
from collections import defaultdict
def handle(network):
# network deployments are not visible to users. We must ensure
# networks are deployed at all deployments available to their slices.
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
network_deployments = NetworkDeployments.objects.all()
network_deploy_lookup = defaultdict(list)
for network_deployment in network_deployments:
network_deploy_lookup[network_deployment.network].append(network_deployment.deployment)
expected_deployments = slice_deploy_lookup[network.owner]
for expected_deployment in expected_deployments:
if network not in network_deploy_lookup or \
expected_deployment not in network_deploy_lookup[network]:
nd = NetworkDeployments(network=network, deployment=expected_deployment)
nd.save()
|
Policy: Add new network objects to all deploymentsfrom core.models import *
from collections import defaultdict
def handle(network):
# network deployments are not visible to users. We must ensure
# networks are deployed at all deployments available to their slices.
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
network_deployments = NetworkDeployments.objects.all()
network_deploy_lookup = defaultdict(list)
for network_deployment in network_deployments:
network_deploy_lookup[network_deployment.network].append(network_deployment.deployment)
expected_deployments = slice_deploy_lookup[network.owner]
for expected_deployment in expected_deployments:
if network not in network_deploy_lookup or \
expected_deployment not in network_deploy_lookup[network]:
nd = NetworkDeployments(network=network, deployment=expected_deployment)
nd.save()
|
<commit_before><commit_msg>Policy: Add new network objects to all deployments<commit_after>from core.models import *
from collections import defaultdict
def handle(network):
# network deployments are not visible to users. We must ensure
# networks are deployed at all deployments available to their slices.
slice_deployments = SliceDeployments.objects.all()
slice_deploy_lookup = defaultdict(list)
for slice_deployment in slice_deployments:
slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
network_deployments = NetworkDeployments.objects.all()
network_deploy_lookup = defaultdict(list)
for network_deployment in network_deployments:
network_deploy_lookup[network_deployment.network].append(network_deployment.deployment)
expected_deployments = slice_deploy_lookup[network.owner]
for expected_deployment in expected_deployments:
if network not in network_deploy_lookup or \
expected_deployment not in network_deploy_lookup[network]:
nd = NetworkDeployments(network=network, deployment=expected_deployment)
nd.save()
|
|
799ee0bcc52a345116375650da86e714dea612cf
|
bin/MongoInsert.py
|
bin/MongoInsert.py
|
from py2neo import Graph
import os.path
from flask import Flask
app = Flask(__name__)
from pymongo import MongoClient
#TODO: delete document before entry
graph = Graph(os.environ['neoURL'])
MONGO_URL = os.environ['connectURL']
connection = MongoClient(MONGO_URL)
db = connection.githublive.pusheventCapped
def SimilarRepositories(Inputrepo):
output = []
outputString =""
mongo_record_insert = {}
path1 = "<a href=\"/?q=repository "
path2 = "&action=Search\">"
path3 = "</a>"
query1= """MATCH (me)-[r1:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]->(stuff)<-[r2:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]-(repo) """
query2="WHERE me.url = " + "\"" + Inputrepo + "\""
query3=""" AND type (r1) = type (r2)
RETURN repo.name as reponame, repo.url as url, count(stuff) as count
ORDER BY count(stuff) DESC LIMIT 5"""
query = query1 + query2 + query3
result = graph.cypher.execute(query)
for entries in result:
output.append(path1 + entries.url.encode('utf-8').strip() + path2 + entries.reponame.encode('utf-8').strip() + path3)
if len(output) !=0 :
outputString += ", ".join(output)
#Delete old entry
#db.remove({'type':'similarrepositories','url': Inputrepo})
#Insert to MongoDB
mongo_record_insert = {'type': 'similarrepositories', 'url': Inputrepo, 'similar': outputString}
db.insert(mongo_record_insert)
def FindAllRepositories():
query = "MATCH (n:`Repository`) RETURN n.url as url"
result = graph.cypher.execute(query)
for entries in result:
print "Processing ..... ", entries.url
SimilarRepositories(entries.url)
FindAllRepositories()
|
Insert similar repositories in Mongo
|
Insert similar repositories in Mongo
|
Python
|
mit
|
harishvc/githubanalytics,harishvc/githubanalytics,harishvc/githubanalytics
|
Insert similar repositories in Mongo
|
from py2neo import Graph
import os.path
from flask import Flask
app = Flask(__name__)
from pymongo import MongoClient
#TODO: delete document before entry
graph = Graph(os.environ['neoURL'])
MONGO_URL = os.environ['connectURL']
connection = MongoClient(MONGO_URL)
db = connection.githublive.pusheventCapped
def SimilarRepositories(Inputrepo):
output = []
outputString =""
mongo_record_insert = {}
path1 = "<a href=\"/?q=repository "
path2 = "&action=Search\">"
path3 = "</a>"
query1= """MATCH (me)-[r1:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]->(stuff)<-[r2:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]-(repo) """
query2="WHERE me.url = " + "\"" + Inputrepo + "\""
query3=""" AND type (r1) = type (r2)
RETURN repo.name as reponame, repo.url as url, count(stuff) as count
ORDER BY count(stuff) DESC LIMIT 5"""
query = query1 + query2 + query3
result = graph.cypher.execute(query)
for entries in result:
output.append(path1 + entries.url.encode('utf-8').strip() + path2 + entries.reponame.encode('utf-8').strip() + path3)
if len(output) !=0 :
outputString += ", ".join(output)
#Delete old entry
#db.remove({'type':'similarrepositories','url': Inputrepo})
#Insert to MongoDB
mongo_record_insert = {'type': 'similarrepositories', 'url': Inputrepo, 'similar': outputString}
db.insert(mongo_record_insert)
def FindAllRepositories():
query = "MATCH (n:`Repository`) RETURN n.url as url"
result = graph.cypher.execute(query)
for entries in result:
print "Processing ..... ", entries.url
SimilarRepositories(entries.url)
FindAllRepositories()
|
<commit_before><commit_msg>Insert similar repositories in Mongo<commit_after>
|
from py2neo import Graph
import os.path
from flask import Flask
app = Flask(__name__)
from pymongo import MongoClient
#TODO: delete document before entry
graph = Graph(os.environ['neoURL'])
MONGO_URL = os.environ['connectURL']
connection = MongoClient(MONGO_URL)
db = connection.githublive.pusheventCapped
def SimilarRepositories(Inputrepo):
output = []
outputString =""
mongo_record_insert = {}
path1 = "<a href=\"/?q=repository "
path2 = "&action=Search\">"
path3 = "</a>"
query1= """MATCH (me)-[r1:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]->(stuff)<-[r2:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]-(repo) """
query2="WHERE me.url = " + "\"" + Inputrepo + "\""
query3=""" AND type (r1) = type (r2)
RETURN repo.name as reponame, repo.url as url, count(stuff) as count
ORDER BY count(stuff) DESC LIMIT 5"""
query = query1 + query2 + query3
result = graph.cypher.execute(query)
for entries in result:
output.append(path1 + entries.url.encode('utf-8').strip() + path2 + entries.reponame.encode('utf-8').strip() + path3)
if len(output) !=0 :
outputString += ", ".join(output)
#Delete old entry
#db.remove({'type':'similarrepositories','url': Inputrepo})
#Insert to MongoDB
mongo_record_insert = {'type': 'similarrepositories', 'url': Inputrepo, 'similar': outputString}
db.insert(mongo_record_insert)
def FindAllRepositories():
query = "MATCH (n:`Repository`) RETURN n.url as url"
result = graph.cypher.execute(query)
for entries in result:
print "Processing ..... ", entries.url
SimilarRepositories(entries.url)
FindAllRepositories()
|
Insert similar repositories in Mongofrom py2neo import Graph
import os.path
from flask import Flask
app = Flask(__name__)
from pymongo import MongoClient
#TODO: delete document before entry
graph = Graph(os.environ['neoURL'])
MONGO_URL = os.environ['connectURL']
connection = MongoClient(MONGO_URL)
db = connection.githublive.pusheventCapped
def SimilarRepositories(Inputrepo):
output = []
outputString =""
mongo_record_insert = {}
path1 = "<a href=\"/?q=repository "
path2 = "&action=Search\">"
path3 = "</a>"
query1= """MATCH (me)-[r1:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]->(stuff)<-[r2:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]-(repo) """
query2="WHERE me.url = " + "\"" + Inputrepo + "\""
query3=""" AND type (r1) = type (r2)
RETURN repo.name as reponame, repo.url as url, count(stuff) as count
ORDER BY count(stuff) DESC LIMIT 5"""
query = query1 + query2 + query3
result = graph.cypher.execute(query)
for entries in result:
output.append(path1 + entries.url.encode('utf-8').strip() + path2 + entries.reponame.encode('utf-8').strip() + path3)
if len(output) !=0 :
outputString += ", ".join(output)
#Delete old entry
#db.remove({'type':'similarrepositories','url': Inputrepo})
#Insert to MongoDB
mongo_record_insert = {'type': 'similarrepositories', 'url': Inputrepo, 'similar': outputString}
db.insert(mongo_record_insert)
def FindAllRepositories():
query = "MATCH (n:`Repository`) RETURN n.url as url"
result = graph.cypher.execute(query)
for entries in result:
print "Processing ..... ", entries.url
SimilarRepositories(entries.url)
FindAllRepositories()
|
<commit_before><commit_msg>Insert similar repositories in Mongo<commit_after>from py2neo import Graph
import os.path
from flask import Flask
app = Flask(__name__)
from pymongo import MongoClient
#TODO: delete document before entry
graph = Graph(os.environ['neoURL'])
MONGO_URL = os.environ['connectURL']
connection = MongoClient(MONGO_URL)
db = connection.githublive.pusheventCapped
def SimilarRepositories(Inputrepo):
output = []
outputString =""
mongo_record_insert = {}
path1 = "<a href=\"/?q=repository "
path2 = "&action=Search\">"
path3 = "</a>"
query1= """MATCH (me)-[r1:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]->(stuff)<-[r2:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]-(repo) """
query2="WHERE me.url = " + "\"" + Inputrepo + "\""
query3=""" AND type (r1) = type (r2)
RETURN repo.name as reponame, repo.url as url, count(stuff) as count
ORDER BY count(stuff) DESC LIMIT 5"""
query = query1 + query2 + query3
result = graph.cypher.execute(query)
for entries in result:
output.append(path1 + entries.url.encode('utf-8').strip() + path2 + entries.reponame.encode('utf-8').strip() + path3)
if len(output) !=0 :
outputString += ", ".join(output)
#Delete old entry
#db.remove({'type':'similarrepositories','url': Inputrepo})
#Insert to MongoDB
mongo_record_insert = {'type': 'similarrepositories', 'url': Inputrepo, 'similar': outputString}
db.insert(mongo_record_insert)
def FindAllRepositories():
query = "MATCH (n:`Repository`) RETURN n.url as url"
result = graph.cypher.execute(query)
for entries in result:
print "Processing ..... ", entries.url
SimilarRepositories(entries.url)
FindAllRepositories()
|
|
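The SimilarRepositories query above splices Inputrepo straight into the Cypher text, so a quote inside a repository URL would break the statement. A minimal sketch of the same lookup with a bound parameter instead; this assumes py2neo 2.x, where Graph.cypher.execute accepts a parameters dict, and the function name here is illustrative, not from the repository.
import os
from py2neo import Graph
graph = Graph(os.environ['neoURL'])
def similar_repositories(input_repo):
    # {url} is bound server-side by Neo4j, so characters in the repository
    # URL can never alter the query text itself.
    query = """
    MATCH (me)-[r1:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]->(stuff)
          <-[r2:IS_LANGUAGE|IS_OWNER|IN_ORGANIZATION|IS_ACTOR]-(repo)
    WHERE me.url = {url} AND type(r1) = type(r2)
    RETURN repo.name AS reponame, repo.url AS url, count(stuff) AS count
    ORDER BY count(stuff) DESC LIMIT 5
    """
    return graph.cypher.execute(query, {'url': input_repo})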
07b6af42f90958e95b570a31d5e9cd3ecc8e2901
|
examples/widgets/errors_table.py
|
examples/widgets/errors_table.py
|
"""
Show the use of the ErrorsTable widget.
"""
import sys
from pyqode.qt import QtWidgets
from pyqode.core.modes import CheckerMessage, CheckerMessages
from pyqode.core.widgets import ErrorsTable
app = QtWidgets.QApplication(sys.argv)
table = ErrorsTable()
table.add_message(CheckerMessage(
'A fake error message', CheckerMessages.ERROR, 10, path=__file__))
table.add_message(CheckerMessage(
'A fake warning message', CheckerMessages.WARNING, 5, path=__file__))
table.show()
app.exec_()
|
Add an example for ErrorsTable
|
Add an example for ErrorsTable
|
Python
|
mit
|
pyQode/pyqode.core,pyQode/pyqode.core,zwadar/pyqode.core
|
Add an example for ErrorsTable
|
"""
Show the use of the ErrorsTable widget.
"""
import sys
from pyqode.qt import QtWidgets
from pyqode.core.modes import CheckerMessage, CheckerMessages
from pyqode.core.widgets import ErrorsTable
app = QtWidgets.QApplication(sys.argv)
table = ErrorsTable()
table.add_message(CheckerMessage(
'A fake error message', CheckerMessages.ERROR, 10, path=__file__))
table.add_message(CheckerMessage(
'A fake warning message', CheckerMessages.WARNING, 5, path=__file__))
table.show()
app.exec_()
|
<commit_before><commit_msg>Add an example for ErrorsTable<commit_after>
|
"""
Show the use of the ErrorsTable widget.
"""
import sys
from pyqode.qt import QtWidgets
from pyqode.core.modes import CheckerMessage, CheckerMessages
from pyqode.core.widgets import ErrorsTable
app = QtWidgets.QApplication(sys.argv)
table = ErrorsTable()
table.add_message(CheckerMessage(
'A fake error message', CheckerMessages.ERROR, 10, path=__file__))
table.add_message(CheckerMessage(
'A fake warning message', CheckerMessages.WARNING, 5, path=__file__))
table.show()
app.exec_()
|
Add an example for ErrorsTable"""
Show the use of the ErrorsTable widget.
"""
import sys
from pyqode.qt import QtWidgets
from pyqode.core.modes import CheckerMessage, CheckerMessages
from pyqode.core.widgets import ErrorsTable
app = QtWidgets.QApplication(sys.argv)
table = ErrorsTable()
table.add_message(CheckerMessage(
'A fake error message', CheckerMessages.ERROR, 10, path=__file__))
table.add_message(CheckerMessage(
'A fake warning message', CheckerMessages.WARNING, 5, path=__file__))
table.show()
app.exec_()
|
<commit_before><commit_msg>Add an example for ErrorsTable<commit_after>"""
Show the use of the ErrorsTable widget.
"""
import sys
from pyqode.qt import QtWidgets
from pyqode.core.modes import CheckerMessage, CheckerMessages
from pyqode.core.widgets import ErrorsTable
app = QtWidgets.QApplication(sys.argv)
table = ErrorsTable()
table.add_message(CheckerMessage(
'A fake error message', CheckerMessages.ERROR, 10, path=__file__))
table.add_message(CheckerMessage(
'A fake warning message', CheckerMessages.WARNING, 5, path=__file__))
table.show()
app.exec_()
|
|
ec0105955da05dce211be720b1d68479b5b7ed30
|
config_files.py
|
config_files.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import argparse
import glob
import os
def rewrite(fobj):
text = json.load(fobj)
clean = json.dumps(text, indent=4, sort_keys=True, separators=(',', ': ')) + '\n'
fobj.seek(0)
fobj.write(clean)
fobj.truncate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Test files")
parser.add_argument('-c', '--config', help='Path to the home directory.')
args = parser.parse_args()
config_file_path = args.config
for fn in glob.glob(os.path.join(config_file_path, 'intelmq/etc/*.conf')):
with open(fn, 'r+') as f:
rewrite(f)
with open(os.path.join(config_file_path, 'intelmq/bots/BOTS'), 'r+') as f:
rewrite(f)
|
Add script to rewrite config files so travis is happy.
|
Add script to rewrite config files so travis is happy.
|
Python
|
agpl-3.0
|
certtools/intelmq,aaronkaplan/intelmq,aaronkaplan/intelmq,certtools/intelmq,aaronkaplan/intelmq,certtools/intelmq
|
Add script to rewrite config files so travis is happy.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import argparse
import glob
import os
def rewrite(fobj):
text = json.load(fobj)
clean = json.dumps(text, indent=4, sort_keys=True, separators=(',', ': ')) + '\n'
fobj.seek(0)
fobj.write(clean)
fobj.truncate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Test files")
parser.add_argument('-c', '--config', help='Path to the home directory.')
args = parser.parse_args()
config_file_path = args.config
for fn in glob.glob(os.path.join(config_file_path, 'intelmq/etc/*.conf')):
with open(fn, 'r+') as f:
rewrite(f)
with open(os.path.join(config_file_path, 'intelmq/bots/BOTS'), 'r+') as f:
rewrite(f)
|
<commit_before><commit_msg>Add script to rewrite config files so travis is happy.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import argparse
import glob
import os
def rewrite(fobj):
text = json.load(fobj)
clean = json.dumps(text, indent=4, sort_keys=True, separators=(',', ': ')) + '\n'
fobj.seek(0)
fobj.write(clean)
fobj.truncate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Test files")
parser.add_argument('-c', '--config', help='Path to the home directory.')
args = parser.parse_args()
config_file_path = args.config
for fn in glob.glob(os.path.join(config_file_path, 'intelmq/etc/*.conf')):
with open(fn, 'r+') as f:
rewrite(f)
with open(os.path.join(config_file_path, 'intelmq/bots/BOTS'), 'r+') as f:
rewrite(f)
|
Add script to rewrite config files so travis is happy.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import argparse
import glob
import os
def rewrite(fobj):
text = json.load(fobj)
clean = json.dumps(text, indent=4, sort_keys=True, separators=(',', ': ')) + '\n'
fobj.seek(0)
fobj.write(clean)
fobj.truncate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Test files")
parser.add_argument('-c', '--config', help='Path to the home directory.')
args = parser.parse_args()
config_file_path = args.config
for fn in glob.glob(os.path.join(config_file_path, 'intelmq/etc/*.conf')):
with open(fn, 'r+') as f:
rewrite(f)
with open(os.path.join(config_file_path, 'intelmq/bots/BOTS'), 'r+') as f:
rewrite(f)
|
<commit_before><commit_msg>Add script to rewrite config files so travis is happy.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import argparse
import glob
import os
def rewrite(fobj):
text = json.load(fobj)
clean = json.dumps(text, indent=4, sort_keys=True, separators=(',', ': ')) + '\n'
fobj.seek(0)
fobj.write(clean)
fobj.truncate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Test files")
parser.add_argument('-c', '--config', help='Path to the home directory.')
args = parser.parse_args()
config_file_path = args.config
for fn in glob.glob(os.path.join(config_file_path, 'intelmq/etc/*.conf')):
with open(fn, 'r+') as f:
rewrite(f)
with open(os.path.join(config_file_path, 'intelmq/bots/BOTS'), 'r+') as f:
rewrite(f)
|
|
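For reference, the normalization that rewrite() applies is easy to see on a small in-memory example (the JSON below is made up, not an actual intelmq config):
import json
raw = '{"b": 1, "a": {"y": 2, "x": 3}}'
clean = json.dumps(json.loads(raw), indent=4, sort_keys=True,
                   separators=(',', ': ')) + '\n'
print(clean)
# {
#     "a": {
#         "x": 3,
#         "y": 2
#     },
#     "b": 1
# }
Keys are sorted and indentation is fixed at four spaces, so running the script a second time over already-clean files changes nothing, which is what keeps the CI check stable.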
1f0b6333d5b5f3c29e377904d4a2f2a30ed5a787
|
src/waldur_mastermind/marketplace_openstack/migrations/0012_drop_offering_components.py
|
src/waldur_mastermind/marketplace_openstack/migrations/0012_drop_offering_components.py
|
from django.db import migrations
TENANT_TYPE = 'Packages.Template'
STORAGE_MODE_FIXED = 'fixed'
def drop_offering_components(apps, schema_editor):
"""
Drop offering components for volume types if storage mode is fixed.
"""
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
OfferingComponent.objects.filter(
offering__type=TENANT_TYPE,
offering__plugin_options__storage_mode=STORAGE_MODE_FIXED,
type__startswith='gigabytes_',
).delete()
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0050_offering_project'),
('marketplace_openstack', '0011_limit_components'),
]
operations = [migrations.RunPython(drop_offering_components)]
|
Drop offering components related to volume types if storage mode is fixed.
|
Drop offering components related to volume types if storage mode is fixed.
|
Python
|
mit
|
opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/waldur-mastermind
|
Drop offering components related to volume types if storage mode is fixed.
|
from django.db import migrations
TENANT_TYPE = 'Packages.Template'
STORAGE_MODE_FIXED = 'fixed'
def drop_offering_components(apps, schema_editor):
"""
Drop offering components for volume types if storage mode is fixed.
"""
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
OfferingComponent.objects.filter(
offering__type=TENANT_TYPE,
offering__plugin_options__storage_mode=STORAGE_MODE_FIXED,
type__startswith='gigabytes_',
).delete()
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0050_offering_project'),
('marketplace_openstack', '0011_limit_components'),
]
operations = [migrations.RunPython(drop_offering_components)]
|
<commit_before><commit_msg>Drop offering components related to volume types if storage mode is fixed.<commit_after>
|
from django.db import migrations
TENANT_TYPE = 'Packages.Template'
STORAGE_MODE_FIXED = 'fixed'
def drop_offering_components(apps, schema_editor):
"""
Drop offering components for volume types if storage mode is fixed.
"""
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
OfferingComponent.objects.filter(
offering__type=TENANT_TYPE,
offering__plugin_options__storage_mode=STORAGE_MODE_FIXED,
type__startswith='gigabytes_',
).delete()
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0050_offering_project'),
('marketplace_openstack', '0011_limit_components'),
]
operations = [migrations.RunPython(drop_offering_components)]
|
Drop offering components related to volume types if storage mode is fixed.from django.db import migrations
TENANT_TYPE = 'Packages.Template'
STORAGE_MODE_FIXED = 'fixed'
def drop_offering_components(apps, schema_editor):
"""
Drop offering components for volume types if storage mode is fixed.
"""
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
OfferingComponent.objects.filter(
offering__type=TENANT_TYPE,
offering__plugin_options__storage_mode=STORAGE_MODE_FIXED,
type__startswith='gigabytes_',
).delete()
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0050_offering_project'),
('marketplace_openstack', '0011_limit_components'),
]
operations = [migrations.RunPython(drop_offering_components)]
|
<commit_before><commit_msg>Drop offering components related to volume types if storage mode is fixed.<commit_after>from django.db import migrations
TENANT_TYPE = 'Packages.Template'
STORAGE_MODE_FIXED = 'fixed'
def drop_offering_components(apps, schema_editor):
"""
Drop offering components for volume types if storage mode is fixed.
"""
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
OfferingComponent.objects.filter(
offering__type=TENANT_TYPE,
offering__plugin_options__storage_mode=STORAGE_MODE_FIXED,
type__startswith='gigabytes_',
).delete()
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0050_offering_project'),
('marketplace_openstack', '0011_limit_components'),
]
operations = [migrations.RunPython(drop_offering_components)]
|
|
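One caveat about the migration above: RunPython with only a forward function has no backward step, so migrating below 0012 raises IrreversibleError. A hedged variant, not in the repository, that reuses drop_offering_components and declares the rollback an explicit no-op (RunPython.noop has existed since Django 1.8):
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('marketplace', '0050_offering_project'),
        ('marketplace_openstack', '0011_limit_components'),
    ]
    operations = [
        # Deleted components are not resurrected on rollback; noop merely
        # lets `migrate` walk back past this migration without raising.
        migrations.RunPython(drop_offering_components, migrations.RunPython.noop),
    ]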
2426ef33097a8d50148ba473bb131e98ee6879eb
|
migrations/versions/460_add_audit_events_for_g4_and_g5_frameworks.py
|
migrations/versions/460_add_audit_events_for_g4_and_g5_frameworks.py
|
"""Add additional indexes for audit_events
Revision ID: 460
Revises: 450
Create Date: 2016-01-13 16:45:18.621169
"""
revision = '460'
down_revision = '450'
from alembic import op
def upgrade():
# G-Cloud 4
op.execute("""
INSERT INTO audit_events
("type", "created_at", "user", "data", "object_type", "object_id", "acknowledged")
VALUES
('framework_update', '2015-01-31T14:00:00', 'migration', '{"update": {"status": "expired"}}', 'Framework', 2, FALSE)
""")
def downgrade():
op.execute("""
DELETE FROM audit_events WHERE
"type"='framework_update' AND "created_at"='2015-01-31T14:00:00' AND "object_type"='Framework' AND "object_id"=2
""")
|
Add migration for G4 expiry
|
Add migration for G4 expiry
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add migration for G4 expiry
|
"""Add additional indexes for audit_events
Revision ID: 460
Revises: 450
Create Date: 2016-01-13 16:45:18.621169
"""
revision = '460'
down_revision = '450'
from alembic import op
def upgrade():
# G-Cloud 4
op.execute("""
INSERT INTO audit_events
("type", "created_at", "user", "data", "object_type", "object_id", "acknowledged")
VALUES
('framework_update', '2015-01-31T14:00:00', 'migration', '{"update": {"status": "expired"}}', 'Framework', 2, FALSE)
""")
def downgrade():
op.execute("""
DELETE FROM audit_events WHERE
"type"='framework_update' AND "created_at"='2015-01-31T14:00:00' AND "object_type"='Framework' AND "object_id"=2
""")
|
<commit_before><commit_msg>Add migration for G4 expiry<commit_after>
|
"""Add additional indexes for audit_events
Revision ID: 460
Revises: 450
Create Date: 2016-01-13 16:45:18.621169
"""
revision = '460'
down_revision = '450'
from alembic import op
def upgrade():
# G-Cloud 4
op.execute("""
INSERT INTO audit_events
("type", "created_at", "user", "data", "object_type", "object_id", "acknowledged")
VALUES
('framework_update', '2015-01-31T14:00:00', 'migration', '{"update": {"status": "expired"}}', 'Framework', 2, FALSE)
""")
def downgrade():
op.execute("""
DELETE FROM audit_events WHERE
"type"='framework_update' AND "created_at"='2015-01-31T14:00:00' AND "object_type"='Framework' AND "object_id"=2
""")
|
Add migration for G4 expiry"""Add additional indexes for audit_events
Revision ID: 460
Revises: 450
Create Date: 2016-01-13 16:45:18.621169
"""
revision = '460'
down_revision = '450'
from alembic import op
def upgrade():
# G-Cloud 4
op.execute("""
INSERT INTO audit_events
("type", "created_at", "user", "data", "object_type", "object_id", "acknowledged")
VALUES
('framework_update', '2015-01-31T14:00:00', 'migration', '{"update": {"status": "expired"}}', 'Framework', 2, FALSE)
""")
def downgrade():
op.execute("""
DELETE FROM audit_events WHERE
"type"='framework_update' AND "created_at"='2015-01-31T14:00:00' AND "object_type"='Framework' AND "object_id"=2
""")
|
<commit_before><commit_msg>Add migration for G4 expiry<commit_after>"""Add additional indexes for audit_events
Revision ID: 460
Revises: 450
Create Date: 2016-01-13 16:45:18.621169
"""
revision = '460'
down_revision = '450'
from alembic import op
def upgrade():
# G-Cloud 4
op.execute("""
INSERT INTO audit_events
("type", "created_at", "user", "data", "object_type", "object_id", "acknowledged")
VALUES
('framework_update', '2015-01-31T14:00:00', 'migration', '{"update": {"status": "expired"}}', 'Framework', 2, FALSE)
""")
def downgrade():
op.execute("""
DELETE FROM audit_events WHERE
"type"='framework_update' AND "created_at"='2015-01-31T14:00:00' AND "object_type"='Framework' AND "object_id"=2
""")
|
|
6d8796f3ed68c03010fc87468e40530d35402d91
|
samples/veh_segv.py
|
samples/veh_segv.py
|
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_READWRITE)
exc[0].ContextRecord[0].EEFlags.TF = 1
return windef.EXCEPTION_CONTINUE_EXECUTION
else:
print("HAHAH {0}".format(exc[0].ExceptionRecord[0].ExceptionCode))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
return windef.EXCEPTION_CONTINUE_EXECUTION
windows.winproxy.AddVectoredExceptionHandler(0, handler)
target_page = windows.current_process.virtual_alloc(0x1000)
print("Protected page is at {0}".format(hex(target_page)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
v = ctypes.c_uint.from_address(target_page).value
print("POINT1")
v = ctypes.c_uint.from_address(target_page + 0x10).value
print("POINT2")
# (cmd) python.exe samples\veh_segv.py
#Protected page is at 0x3f0000
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0000
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT1
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0010
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT2
|
Add LdrLoadDLL + Fix winproxy.VirtualProtect + add a first draft on veh sample
|
Add LdrLoadDLL + Fix winproxy.VirtualProtect + add a first draft on veh sample
|
Python
|
bsd-3-clause
|
hakril/PythonForWindows
|
Add LdrLoadDLL + Fix winproxy.VirtualProtect + add a first draft on veh sample
|
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_READWRITE)
exc[0].ContextRecord[0].EEFlags.TF = 1
return windef.EXCEPTION_CONTINUE_EXECUTION
else:
print("HAHAH {0}".format(exc[0].ExceptionRecord[0].ExceptionCode))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
return windef.EXCEPTION_CONTINUE_EXECUTION
windows.winproxy.AddVectoredExceptionHandler(0, handler)
target_page = windows.current_process.virtual_alloc(0x1000)
print("Protected page is at {0}".format(hex(target_page)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
v = ctypes.c_uint.from_address(target_page).value
print("POINT1")
v = ctypes.c_uint.from_address(target_page + 0x10).value
print("POINT2")
# (cmd) python.exe samples\veh_segv.py
#Protected page is at 0x3f0000
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0000
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT1
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0010
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT2
|
<commit_before><commit_msg>Add LdrLoadDLL + Fix winproxy.VirtualProtect + add a first draft on veh sample<commit_after>
|
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_READWRITE)
exc[0].ContextRecord[0].EEFlags.TF = 1
return windef.EXCEPTION_CONTINUE_EXECUTION
else:
print("HAHAH {0}".format(exc[0].ExceptionRecord[0].ExceptionCode))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
return windef.EXCEPTION_CONTINUE_EXECUTION
windows.winproxy.AddVectoredExceptionHandler(0, handler)
target_page = windows.current_process.virtual_alloc(0x1000)
print("Protected page is at {0}".format(hex(target_page)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
v = ctypes.c_uint.from_address(target_page).value
print("POINT1")
v = ctypes.c_uint.from_address(target_page + 0x10).value
print("POINT2")
# (cmd) python.exe samples\veh_segv.py
#Protected page is at 0x3f0000
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0000
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT1
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0010
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT2
|
Add LdrLoadDLL + Fix winproxy.VirtualProtect + add a first draft on veh sampleimport ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_READWRITE)
exc[0].ContextRecord[0].EEFlags.TF = 1
return windef.EXCEPTION_CONTINUE_EXECUTION
else:
print("HAHAH {0}".format(exc[0].ExceptionRecord[0].ExceptionCode))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
return windef.EXCEPTION_CONTINUE_EXECUTION
windows.winproxy.AddVectoredExceptionHandler(0, handler)
target_page = windows.current_process.virtual_alloc(0x1000)
print("Protected page is at {0}".format(hex(target_page)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
v = ctypes.c_uint.from_address(target_page).value
print("POINT1")
v = ctypes.c_uint.from_address(target_page + 0x10).value
print("POINT2")
# (cmd) python.exe samples\veh_segv.py
#Protected page is at 0x3f0000
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0000
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT1
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0010
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT2
|
<commit_before><commit_msg>Add LdrLoadDLL + Fix winproxy.VirtualProtect + add a first draft on veh sample<commit_after>import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_READWRITE)
exc[0].ContextRecord[0].EEFlags.TF = 1
return windef.EXCEPTION_CONTINUE_EXECUTION
else:
print("HAHAH {0}".format(exc[0].ExceptionRecord[0].ExceptionCode))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
return windef.EXCEPTION_CONTINUE_EXECUTION
windows.winproxy.AddVectoredExceptionHandler(0, handler)
target_page = windows.current_process.virtual_alloc(0x1000)
print("Protected page is at {0}".format(hex(target_page)))
windows.winproxy.VirtualProtect(target_page, 0x1000, windef.PAGE_NOACCESS)
v = ctypes.c_uint.from_address(target_page).value
print("POINT1")
v = ctypes.c_uint.from_address(target_page + 0x10).value
print("POINT2")
# (cmd) python.exe samples\veh_segv.py
#Protected page is at 0x3f0000
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0000
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT1
#POUET
#Instr at 0x1d1ab5f4 accessed to addr 0x3f0010
#POUET
#HAHAH EXCEPTION_SINGLE_STEP(0x80000004L)
#POINT2
|
|
e9d84efc328107e51129ed71686c8ee08b09fb99
|
apps/front/migrations/0002_auto_20200119_1707.py
|
apps/front/migrations/0002_auto_20200119_1707.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-19 17:07
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
Add migration for Django 1.11
|
Add migration for Django 1.11
|
Python
|
agpl-3.0
|
studentenportal/web,studentenportal/web,studentenportal/web,studentenportal/web,studentenportal/web
|
Add migration for Django 1.11
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-19 17:07
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add migration for Django 1.11<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-19 17:07
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
Add migration for Django 1.11# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-19 17:07
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add migration for Django 1.11<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-19 17:07
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('front', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
|
d2ef0ef1ffc5d5bba495c18d070ee953e25cd176
|
scripts/clean_failed_archives.py
|
scripts/clean_failed_archives.py
|
# -*- coding: utf-8 -*-
"""One-off script to clear out a few registrations that failed during archiving."""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.archiver import ARCHIVER_FAILURE, ARCHIVER_INITIATED
from website.archiver.model import ArchiveJob
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
FAILED_ARCHIVE_JOBS = [
'56a8d29e9ad5a10179f77bd6',
]
def clean(reg, dry):
logger.info('Cleaning registration: {}'.format(reg))
if not reg.registered_from:
logger.info('Node {0} had registered_from == None'.format(reg._id))
return
if not reg.archive_job: # Be extra sure not to delete legacy registrations
logger.info('Skipping legacy registration: {0}'.format(reg._id))
return
if not dry:
reg.archive_job.status = ARCHIVER_FAILURE
reg.archive_job.sent = True
reg.archive_job.save()
reg.root.sanction.forcibly_reject()
reg.root.sanction.save()
reg.root.delete_registration_tree(save=True)
logger.info('Done.')
def main(dry):
if dry:
logger.info('[DRY MODE]')
init_app(routes=False)
for _id in FAILED_ARCHIVE_JOBS:
archive_job = ArchiveJob.load(_id)
assert archive_job.status == ARCHIVER_INITIATED
root_node = archive_job.dst_node.root
with TokuTransaction():
clean(reg=root_node, dry=dry)
if __name__ == "__main__":
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Add script to clean out failed archive
|
Add script to clean out failed archive
[skip ci]
OSF-5632
|
Python
|
apache-2.0
|
monikagrabowska/osf.io,DanielSBrown/osf.io,rdhyee/osf.io,sloria/osf.io,emetsger/osf.io,leb2dg/osf.io,acshi/osf.io,mluke93/osf.io,mattclark/osf.io,billyhunt/osf.io,cslzchen/osf.io,mfraezz/osf.io,cwisecarver/osf.io,sloria/osf.io,doublebits/osf.io,zamattiac/osf.io,HalcyonChimera/osf.io,mluo613/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,adlius/osf.io,hmoco/osf.io,TomHeatwole/osf.io,RomanZWang/osf.io,hmoco/osf.io,TomHeatwole/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,adlius/osf.io,caneruguz/osf.io,saradbowman/osf.io,kwierman/osf.io,acshi/osf.io,billyhunt/osf.io,Nesiehr/osf.io,GageGaskins/osf.io,brianjgeiger/osf.io,jnayak1/osf.io,SSJohns/osf.io,SSJohns/osf.io,binoculars/osf.io,chennan47/osf.io,GageGaskins/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,kch8qx/osf.io,TomBaxter/osf.io,aaxelb/osf.io,RomanZWang/osf.io,brandonPurvis/osf.io,zachjanicki/osf.io,chrisseto/osf.io,cwisecarver/osf.io,brandonPurvis/osf.io,alexschiller/osf.io,mluo613/osf.io,Nesiehr/osf.io,cslzchen/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,TomBaxter/osf.io,pattisdr/osf.io,RomanZWang/osf.io,brandonPurvis/osf.io,zachjanicki/osf.io,binoculars/osf.io,baylee-d/osf.io,GageGaskins/osf.io,mluke93/osf.io,wearpants/osf.io,KAsante95/osf.io,crcresearch/osf.io,zamattiac/osf.io,mfraezz/osf.io,leb2dg/osf.io,saradbowman/osf.io,KAsante95/osf.io,abought/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,zamattiac/osf.io,rdhyee/osf.io,crcresearch/osf.io,chrisseto/osf.io,leb2dg/osf.io,sloria/osf.io,monikagrabowska/osf.io,adlius/osf.io,brianjgeiger/osf.io,amyshi188/osf.io,leb2dg/osf.io,wearpants/osf.io,mfraezz/osf.io,KAsante95/osf.io,rdhyee/osf.io,felliott/osf.io,chennan47/osf.io,asanfilippo7/osf.io,caseyrollins/osf.io,monikagrabowska/osf.io,kch8qx/osf.io,baylee-d/osf.io,RomanZWang/osf.io,chrisseto/osf.io,TomBaxter/osf.io,pattisdr/osf.io,icereval/osf.io,hmoco/osf.io,laurenrevere/osf.io,acshi/osf.io,laurenrevere/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,mluo613/osf.io,pattisdr/osf.io,brandonPurvis/osf.io,amyshi188/osf.io,mattclark/osf.io,doublebits/osf.io,TomHeatwole/osf.io,acshi/osf.io,RomanZWang/osf.io,crcresearch/osf.io,zachjanicki/osf.io,emetsger/osf.io,kwierman/osf.io,jnayak1/osf.io,cwisecarver/osf.io,Nesiehr/osf.io,alexschiller/osf.io,brandonPurvis/osf.io,mattclark/osf.io,Johnetordoff/osf.io,KAsante95/osf.io,Johnetordoff/osf.io,samchrisinger/osf.io,billyhunt/osf.io,asanfilippo7/osf.io,asanfilippo7/osf.io,mluo613/osf.io,Johnetordoff/osf.io,samchrisinger/osf.io,GageGaskins/osf.io,erinspace/osf.io,doublebits/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,zamattiac/osf.io,acshi/osf.io,chennan47/osf.io,doublebits/osf.io,caseyrollins/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,abought/osf.io,erinspace/osf.io,GageGaskins/osf.io,billyhunt/osf.io,rdhyee/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,felliott/osf.io,aaxelb/osf.io,DanielSBrown/osf.io,abought/osf.io,alexschiller/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,icereval/osf.io,mluke93/osf.io,icereval/osf.io,kwierman/osf.io,kwierman/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,jnayak1/osf.io,chrisseto/osf.io,zachjanicki/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,alexschiller/osf.io,billyhunt/osf.io,kch8qx/osf.io,Johnetordoff/osf.io,caneruguz/osf.io,aaxelb/osf.io,mfraezz/osf.io,amyshi188/osf.io,samchrisinger/osf.io
|
Add script to clean out failed archive
[skip ci]
OSF-5632
|
# -*- coding: utf-8 -*-
"""One-off script to clear out a few registrations that failed during archiving."""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.archiver import ARCHIVER_FAILURE, ARCHIVER_INITIATED
from website.archiver.model import ArchiveJob
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
FAILED_ARCHIVE_JOBS = [
'56a8d29e9ad5a10179f77bd6',
]
def clean(reg, dry):
logger.info('Cleaning registration: {}'.format(reg))
if not reg.registered_from:
logger.info('Node {0} had registered_from == None'.format(reg._id))
return
if not reg.archive_job: # Be extra sure not to delete legacy registrations
logger.info('Skipping legacy registration: {0}'.format(reg._id))
return
if not dry:
reg.archive_job.status = ARCHIVER_FAILURE
reg.archive_job.sent = True
reg.archive_job.save()
reg.root.sanction.forcibly_reject()
reg.root.sanction.save()
reg.root.delete_registration_tree(save=True)
logger.info('Done.')
def main(dry):
if dry:
logger.info('[DRY MODE]')
init_app(routes=False)
for _id in FAILED_ARCHIVE_JOBS:
archive_job = ArchiveJob.load(_id)
assert archive_job.status == ARCHIVER_INITIATED
root_node = archive_job.dst_node.root
with TokuTransaction():
clean(reg=root_node, dry=dry)
if __name__ == "__main__":
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Add script to clean out failed archive
[skip ci]
OSF-5632<commit_after>
|
# -*- coding: utf-8 -*-
"""One-off script to clear out a few registrations that failed during archiving."""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.archiver import ARCHIVER_FAILURE, ARCHIVER_INITIATED
from website.archiver.model import ArchiveJob
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
FAILED_ARCHIVE_JOBS = [
'56a8d29e9ad5a10179f77bd6',
]
def clean(reg, dry):
logger.info('Cleaning registration: {}'.format(reg))
if not reg.registered_from:
logger.info('Node {0} had registered_from == None'.format(reg._id))
return
if not reg.archive_job: # Be extra sure not to delete legacy registrations
logger.info('Skipping legacy registration: {0}'.format(reg._id))
return
if not dry:
reg.archive_job.status = ARCHIVER_FAILURE
reg.archive_job.sent = True
reg.archive_job.save()
reg.root.sanction.forcibly_reject()
reg.root.sanction.save()
reg.root.delete_registration_tree(save=True)
logger.info('Done.')
def main(dry):
if dry:
logger.info('[DRY MODE]')
init_app(routes=False)
for _id in FAILED_ARCHIVE_JOBS:
archive_job = ArchiveJob.load(_id)
assert archive_job.status == ARCHIVER_INITIATED
root_node = archive_job.dst_node.root
with TokuTransaction():
clean(reg=root_node, dry=dry)
if __name__ == "__main__":
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Add script to clean out failed archive
[skip ci]
OSF-5632# -*- coding: utf-8 -*-
"""One-off script to clear out a few registrations that failed during archiving."""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.archiver import ARCHIVER_FAILURE, ARCHIVER_INITIATED
from website.archiver.model import ArchiveJob
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
FAILED_ARCHIVE_JOBS = [
'56a8d29e9ad5a10179f77bd6',
]
def clean(reg, dry):
logger.info('Cleaning registration: {}'.format(reg))
if not reg.registered_from:
logger.info('Node {0} had registered_from == None'.format(reg._id))
return
if not reg.archive_job: # Be extra sure not to delete legacy registrations
logger.info('Skipping legacy registration: {0}'.format(reg._id))
return
if not dry:
reg.archive_job.status = ARCHIVER_FAILURE
reg.archive_job.sent = True
reg.archive_job.save()
reg.root.sanction.forcibly_reject()
reg.root.sanction.save()
reg.root.delete_registration_tree(save=True)
logger.info('Done.')
def main(dry):
if dry:
logger.info('[DRY MODE]')
init_app(routes=False)
for _id in FAILED_ARCHIVE_JOBS:
archive_job = ArchiveJob.load(_id)
assert archive_job.status == ARCHIVER_INITIATED
root_node = archive_job.dst_node.root
with TokuTransaction():
clean(reg=root_node, dry=dry)
if __name__ == "__main__":
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Add script to clean out failed archive
[skip ci]
OSF-5632<commit_after># -*- coding: utf-8 -*-
"""One-off script to clear out a few registrations that failed during archiving."""
import logging
import sys
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.archiver import ARCHIVER_FAILURE, ARCHIVER_INITIATED
from website.archiver.model import ArchiveJob
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
FAILED_ARCHIVE_JOBS = [
'56a8d29e9ad5a10179f77bd6',
]
def clean(reg, dry):
logger.info('Cleaning registration: {}'.format(reg))
if not reg.registered_from:
logger.info('Node {0} had registered_from == None'.format(reg._id))
return
if not reg.archive_job: # Be extra sure not to delete legacy registrations
logger.info('Skipping legacy registration: {0}'.format(reg._id))
return
if not dry:
reg.archive_job.status = ARCHIVER_FAILURE
reg.archive_job.sent = True
reg.archive_job.save()
reg.root.sanction.forcibly_reject()
reg.root.sanction.save()
reg.root.delete_registration_tree(save=True)
logger.info('Done.')
def main(dry):
if dry:
logger.info('[DRY MODE]')
init_app(routes=False)
for _id in FAILED_ARCHIVE_JOBS:
archive_job = ArchiveJob.load(_id)
assert archive_job.status == ARCHIVER_INITIATED
root_node = archive_job.dst_node.root
with TokuTransaction():
clean(reg=root_node, dry=dry)
if __name__ == "__main__":
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
|
966000605c76be9d9c2a9e931e298785b5adbb76
|
contentcuration/contentcuration/tests/test_public_api.py
|
contentcuration/contentcuration/tests/test_public_api.py
|
from base import BaseAPITestCase
from django.core.urlresolvers import reverse
class PublicAPITestCase(BaseAPITestCase):
"""
IMPORTANT: These tests are to never be changed. They are enforcing a
public API contract. If the tests fail, then the implementation needs
to be changed, and not the tests themselves.
"""
def setUp(self):
super(PublicAPITestCase, self).setUp()
def test_info_endpoint(self):
response = self.client.get(reverse('info'))
self.assertEqual(response.data['application'], 'studio')
self.assertEqual(response.data['device_name'], 'Kolibri Studio')
|
Add minimal test for info endpoint
|
Add minimal test for info endpoint
|
Python
|
mit
|
DXCanas/content-curation,fle-internal/content-curation,fle-internal/content-curation,DXCanas/content-curation,jayoshih/content-curation,jayoshih/content-curation,fle-internal/content-curation,DXCanas/content-curation,jayoshih/content-curation,DXCanas/content-curation,fle-internal/content-curation,jayoshih/content-curation
|
Add minimal test for info endpoint
|
from base import BaseAPITestCase
from django.core.urlresolvers import reverse
class PublicAPITestCase(BaseAPITestCase):
"""
IMPORTANT: These tests are to never be changed. They are enforcing a
public API contract. If the tests fail, then the implementation needs
to be changed, and not the tests themselves.
"""
def setUp(self):
super(PublicAPITestCase, self).setUp()
def test_info_endpoint(self):
response = self.client.get(reverse('info'))
self.assertEqual(response.data['application'], 'studio')
self.assertEqual(response.data['device_name'], 'Kolibri Studio')
|
<commit_before><commit_msg>Add minimal test for info endpoint<commit_after>
|
from base import BaseAPITestCase
from django.core.urlresolvers import reverse
class PublicAPITestCase(BaseAPITestCase):
"""
IMPORTANT: These tests are to never be changed. They are enforcing a
public API contract. If the tests fail, then the implementation needs
to be changed, and not the tests themselves.
"""
def setUp(self):
super(PublicAPITestCase, self).setUp()
def test_info_endpoint(self):
response = self.client.get(reverse('info'))
self.assertEqual(response.data['application'], 'studio')
self.assertEqual(response.data['device_name'], 'Kolibri Studio')
|
Add minimal test for info endpointfrom base import BaseAPITestCase
from django.core.urlresolvers import reverse
class PublicAPITestCase(BaseAPITestCase):
"""
IMPORTANT: These tests are to never be changed. They are enforcing a
public API contract. If the tests fail, then the implementation needs
to be changed, and not the tests themselves.
"""
def setUp(self):
super(PublicAPITestCase, self).setUp()
def test_info_endpoint(self):
response = self.client.get(reverse('info'))
self.assertEqual(response.data['application'], 'studio')
self.assertEqual(response.data['device_name'], 'Kolibri Studio')
|
<commit_before><commit_msg>Add minimal test for info endpoint<commit_after>from base import BaseAPITestCase
from django.core.urlresolvers import reverse
class PublicAPITestCase(BaseAPITestCase):
"""
IMPORTANT: These tests are to never be changed. They are enforcing a
public API contract. If the tests fail, then the implementation needs
to be changed, and not the tests themselves.
"""
def setUp(self):
super(PublicAPITestCase, self).setUp()
def test_info_endpoint(self):
response = self.client.get(reverse('info'))
self.assertEqual(response.data['application'], 'studio')
self.assertEqual(response.data['device_name'], 'Kolibri Studio')
|
|
8ac37d84cf01f879652cb455c925bb75bee0bc34
|
tools/heapcheck/PRESUBMIT.py
|
tools/heapcheck/PRESUBMIT.py
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
Add presubmit checks for suppressions.
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
|
Python
|
bsd-3-clause
|
wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser,wistoch/meego-app-browser
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
<commit_before><commit_msg>Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c<commit_after>
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
<commit_before><commit_msg>Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c<commit_after># Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
|
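The acceptance rule in CheckChange is easiest to see against concrete lines. A quick illustration with made-up suppression frames, showing what func_re admits and what falls through to the prefix checks instead:
import re
# Same pattern as the presubmit, written as a raw string.
func_re = re.compile(r'[a-z_.]+\(.+\)$')
assert func_re.match('some_namespace.some_func(int, char*)')  # demangled frame
assert not func_re.match('fun:MallocHook')    # handled by startswith('fun:')
assert not func_re.match('Heapcheck:Leak')    # whitelisted as a literal line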
45f4e85065c9d0fcd3638ff95ae46cd4231c106c
|
Python/002_AC_AddTwoNumbers.py
|
Python/002_AC_AddTwoNumbers.py
|
# Author: Jerry C. Wang <jcpwang@gmail.com>
# File: AC_AddTwoNumbers.py
class Solution(object):
def addTwoNumbers(self, l1, l2):
tmp = ListNode(0)
head = tmp
flag = 0
while flag or l1 or l2:
newNode = ListNode(flag)
if l1:
newNode.val += l1.val
l1 = l1.next
if l2:
newNode.val += l2.val
l2 = l2.next
flag = newNode.val / 10
newNode.val %= 10
tmp.next = newNode
tmp = tmp.next
return head.next
|
Add Solution to Add Two Numbers
|
[Python] LeetCode: Add Solution to Add Two Numbers
Python Solution for 02 Add Two Numbers
[Link]: https://leetcode.com/problems/add-two-numbers
[Complexity]:
Signed-off-by: Jerry C Wang <398b9cc4b6d7225db629d423b2c3e64586a4df74@gmail.com>
|
Python
|
mit
|
jcpwang/LeetCode
|
[Python] LeetCode: Add Solution to Add Two Numbers
Python Solution for 02 Add Two Numbers
[Link]: https://leetcode.com/problems/add-two-numbers
[Complexity]:
Signed-off-by: Jerry C Wang <398b9cc4b6d7225db629d423b2c3e64586a4df74@gmail.com>
|
# Author: Jerry C. Wang <jcpwang@gmail.com>
# File: AC_AddTwoNumbers.py
class Solution(object):
def addTwoNumbers(self, l1, l2):
tmp = ListNode(0)
head = tmp
flag = 0
while flag or l1 or l2:
newNode = ListNode(flag)
if l1:
newNode.val += l1.val
l1 = l1.next
if l2:
newNode.val += l2.val
l2 = l2.next
flag = newNode.val / 10
newNode.val %= 10
tmp.next = newNode
tmp = tmp.next
return head.next
|
<commit_before><commit_msg>[Python] LeetCode: Add Solution to Add Two Numbers
Python Solution for 02 Add Two Numbers
[Link]: https://leetcode.com/problems/add-two-numbers
[Complexity]:
Signed-off-by: Jerry C Wang <398b9cc4b6d7225db629d423b2c3e64586a4df74@gmail.com><commit_after>
|
# Author: Jerry C. Wang <jcpwang@gmail.com>
# File: AC_AddTwoNumbers.py
class Solution(object):
def addTwoNumbers(self, l1, l2):
tmp = ListNode(0)
head = tmp
flag = 0
while flag or l1 or l2:
newNode = ListNode(flag)
if l1:
newNode.val += l1.val
l1 = l1.next
if l2:
newNode.val += l2.val
l2 = l2.next
flag = newNode.val / 10
newNode.val %= 10
tmp.next = newNode
tmp = tmp.next
return head.next
|
[Python] LeetCode: Add Solution to Add Two Numbers
Python Solution for 02 Add Two Numbers
[Link]: https://leetcode.com/problems/add-two-numbers
[Complexity]:
Signed-off-by: Jerry C Wang <398b9cc4b6d7225db629d423b2c3e64586a4df74@gmail.com># Author: Jerry C. Wang <jcpwang@gmail.com>
# File: AC_AddTwoNumbers.py
class Solution(object):
def addTwoNumbers(self, l1, l2):
tmp = ListNode(0)
head = tmp
flag = 0
while flag or l1 or l2:
newNode = ListNode(flag)
if l1:
newNode.val += l1.val
l1 = l1.next
if l2:
newNode.val += l2.val
l2 = l2.next
flag = newNode.val / 10
newNode.val %= 10
tmp.next = newNode
tmp = tmp.next
return head.next
|
<commit_before><commit_msg>[Python] LeetCode: Add Solution to Add Two Numbers
Python Solution for 02 Add Two Numbers
[Link]: https://leetcode.com/problems/add-two-numbers
[Complexity]:
Signed-off-by: Jerry C Wang <398b9cc4b6d7225db629d423b2c3e64586a4df74@gmail.com><commit_after># Author: Jerry C. Wang <jcpwang@gmail.com>
# File: AC_AddTwoNumbers.py
class Solution(object):
def addTwoNumbers(self, l1, l2):
tmp = ListNode(0)
head = tmp
flag = 0
while flag or l1 or l2:
newNode = ListNode(flag)
if l1:
newNode.val += l1.val
l1 = l1.next
if l2:
newNode.val += l2.val
l2 = l2.next
flag = newNode.val / 10
newNode.val %= 10
tmp.next = newNode
tmp = tmp.next
return head.next
|
|
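The solution relies on LeetCode's ListNode class, which the judge supplies and this file never defines. A small hypothetical Python 2 harness for running it locally; note the carry depends on Python 2's integer division in flag = newNode.val / 10:
class ListNode(object):
    def __init__(self, val):
        self.val = val
        self.next = None
def build(digits):
    # Digits arrive least-significant first: 342 is [2, 4, 3].
    head = tail = ListNode(digits[0])
    for d in digits[1:]:
        tail.next = ListNode(d)
        tail = tail.next
    return head
# 342 + 465 = 807, so the reversed answer is 7 -> 0 -> 8.
node = Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))
while node:
    print node.val,   # prints: 7 0 8
    node = node.next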
66b17fbc9666b150c71bb94f2492fd880b2641e4
|
numpy/typing/tests/test_isfile.py
|
numpy/typing/tests/test_isfile.py
|
import os
from pathlib import Path
import numpy as np
from numpy.testing import assert_
ROOT = Path(np.__file__).parents[0]
FILES = [
ROOT / "py.typed",
ROOT / "__init__.pyi",
ROOT / "char.pyi",
ROOT / "ctypeslib.pyi",
ROOT / "emath.pyi",
ROOT / "rec.pyi",
ROOT / "version.pyi",
ROOT / "core" / "__init__.pyi",
ROOT / "distutils" / "__init__.pyi",
ROOT / "f2py" / "__init__.pyi",
ROOT / "fft" / "__init__.pyi",
ROOT / "lib" / "__init__.pyi",
ROOT / "linalg" / "__init__.pyi",
ROOT / "ma" / "__init__.pyi",
ROOT / "matrixlib" / "__init__.pyi",
ROOT / "polynomial" / "__init__.pyi",
ROOT / "random" / "__init__.pyi",
ROOT / "testing" / "__init__.pyi",
]
class TestIsFile:
def test_isfile(self):
"""Test if all ``.pyi`` files are properly installed."""
for file in FILES:
assert_(os.path.isfile(file))
|
Validate the existence of `.pyi` stub files
|
TST: Validate the existence of `.pyi` stub files
|
Python
|
bsd-3-clause
|
anntzer/numpy,jakirkham/numpy,simongibbons/numpy,madphysicist/numpy,charris/numpy,mattip/numpy,grlee77/numpy,jakirkham/numpy,anntzer/numpy,grlee77/numpy,rgommers/numpy,seberg/numpy,grlee77/numpy,pdebuyl/numpy,rgommers/numpy,simongibbons/numpy,pdebuyl/numpy,anntzer/numpy,simongibbons/numpy,pbrod/numpy,charris/numpy,numpy/numpy,mattip/numpy,pbrod/numpy,mhvk/numpy,charris/numpy,madphysicist/numpy,endolith/numpy,simongibbons/numpy,numpy/numpy,jakirkham/numpy,madphysicist/numpy,numpy/numpy,pbrod/numpy,jakirkham/numpy,mattip/numpy,pdebuyl/numpy,pbrod/numpy,pbrod/numpy,rgommers/numpy,grlee77/numpy,mhvk/numpy,seberg/numpy,rgommers/numpy,anntzer/numpy,jakirkham/numpy,seberg/numpy,mhvk/numpy,numpy/numpy,seberg/numpy,endolith/numpy,madphysicist/numpy,simongibbons/numpy,madphysicist/numpy,grlee77/numpy,pdebuyl/numpy,mattip/numpy,charris/numpy,endolith/numpy,mhvk/numpy,mhvk/numpy,endolith/numpy
|
TST: Validate the existence of `.pyi` stub files
|
import os
from pathlib import Path
import numpy as np
from numpy.testing import assert_
ROOT = Path(np.__file__).parents[0]
FILES = [
ROOT / "py.typed",
ROOT / "__init__.pyi",
ROOT / "char.pyi",
ROOT / "ctypeslib.pyi",
ROOT / "emath.pyi",
ROOT / "rec.pyi",
ROOT / "version.pyi",
ROOT / "core" / "__init__.pyi",
ROOT / "distutils" / "__init__.pyi",
ROOT / "f2py" / "__init__.pyi",
ROOT / "fft" / "__init__.pyi",
ROOT / "lib" / "__init__.pyi",
ROOT / "linalg" / "__init__.pyi",
ROOT / "ma" / "__init__.pyi",
ROOT / "matrixlib" / "__init__.pyi",
ROOT / "polynomial" / "__init__.pyi",
ROOT / "random" / "__init__.pyi",
ROOT / "testing" / "__init__.pyi",
]
class TestIsFile:
def test_isfile(self):
"""Test if all ``.pyi`` files are properly installed."""
for file in FILES:
assert_(os.path.isfile(file))
|
<commit_before><commit_msg>TST: Validate the existence of `.pyi` stub files<commit_after>
|
import os
from pathlib import Path
import numpy as np
from numpy.testing import assert_
ROOT = Path(np.__file__).parents[0]
FILES = [
ROOT / "py.typed",
ROOT / "__init__.pyi",
ROOT / "char.pyi",
ROOT / "ctypeslib.pyi",
ROOT / "emath.pyi",
ROOT / "rec.pyi",
ROOT / "version.pyi",
ROOT / "core" / "__init__.pyi",
ROOT / "distutils" / "__init__.pyi",
ROOT / "f2py" / "__init__.pyi",
ROOT / "fft" / "__init__.pyi",
ROOT / "lib" / "__init__.pyi",
ROOT / "linalg" / "__init__.pyi",
ROOT / "ma" / "__init__.pyi",
ROOT / "matrixlib" / "__init__.pyi",
ROOT / "polynomial" / "__init__.pyi",
ROOT / "random" / "__init__.pyi",
ROOT / "testing" / "__init__.pyi",
]
class TestIsFile:
def test_isfile(self):
"""Test if all ``.pyi`` files are properly installed."""
for file in FILES:
assert_(os.path.isfile(file))
|
TST: Validate the existence of `.pyi` stub filesimport os
from pathlib import Path
import numpy as np
from numpy.testing import assert_
ROOT = Path(np.__file__).parents[0]
FILES = [
ROOT / "py.typed",
ROOT / "__init__.pyi",
ROOT / "char.pyi",
ROOT / "ctypeslib.pyi",
ROOT / "emath.pyi",
ROOT / "rec.pyi",
ROOT / "version.pyi",
ROOT / "core" / "__init__.pyi",
ROOT / "distutils" / "__init__.pyi",
ROOT / "f2py" / "__init__.pyi",
ROOT / "fft" / "__init__.pyi",
ROOT / "lib" / "__init__.pyi",
ROOT / "linalg" / "__init__.pyi",
ROOT / "ma" / "__init__.pyi",
ROOT / "matrixlib" / "__init__.pyi",
ROOT / "polynomial" / "__init__.pyi",
ROOT / "random" / "__init__.pyi",
ROOT / "testing" / "__init__.pyi",
]
class TestIsFile:
def test_isfile(self):
"""Test if all ``.pyi`` files are properly installed."""
for file in FILES:
assert_(os.path.isfile(file))
|
<commit_before><commit_msg>TST: Validate the existence of `.pyi` stub files<commit_after>import os
from pathlib import Path
import numpy as np
from numpy.testing import assert_
ROOT = Path(np.__file__).parents[0]
FILES = [
ROOT / "py.typed",
ROOT / "__init__.pyi",
ROOT / "char.pyi",
ROOT / "ctypeslib.pyi",
ROOT / "emath.pyi",
ROOT / "rec.pyi",
ROOT / "version.pyi",
ROOT / "core" / "__init__.pyi",
ROOT / "distutils" / "__init__.pyi",
ROOT / "f2py" / "__init__.pyi",
ROOT / "fft" / "__init__.pyi",
ROOT / "lib" / "__init__.pyi",
ROOT / "linalg" / "__init__.pyi",
ROOT / "ma" / "__init__.pyi",
ROOT / "matrixlib" / "__init__.pyi",
ROOT / "polynomial" / "__init__.pyi",
ROOT / "random" / "__init__.pyi",
ROOT / "testing" / "__init__.pyi",
]
class TestIsFile:
def test_isfile(self):
"""Test if all ``.pyi`` files are properly installed."""
for file in FILES:
assert_(os.path.isfile(file))
|
|
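The test above checks all of FILES inside one test function, so the first missing stub aborts the loop and hides the rest. A hedged sketch of a parametrized variant follows, assuming pytest; this is an illustration of the idea, not numpy's actual test, and FILES is abbreviated here.

import os
from pathlib import Path

import pytest
import numpy as np

ROOT = Path(np.__file__).parents[0]
FILES = [ROOT / "py.typed", ROOT / "__init__.pyi"]  # abbreviated; full list as in the record

@pytest.mark.parametrize("path", FILES, ids=lambda p: str(p.relative_to(ROOT)))
def test_stub_exists(path):
    # One test case per stub file, so a single missing file does not mask the rest.
    assert os.path.isfile(path)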
0db9864c62de4cfa556c9c8f6915750d23a4b2c8
|
scripts/user_groups_geolocator.py
|
scripts/user_groups_geolocator.py
|
from os import path
import geocoder
from ruamel import yaml
data_folder = path.join(path.dirname(__file__), "../data")
data_file = path.join(data_folder, "user-groups.yml")
with open(data_file, encoding="utf-8") as points_file:
sections = yaml.load(points_file, yaml.RoundTripLoader)
for n, section in enumerate(sections):
for n, user_group in enumerate(section['groups']):
city = user_group.get("name").replace("Kotlin", "").replace("User", "").replace("Group", "").strip()
location = city + ", " + user_group.get("country")
if 'position' in user_group:
print(location + " (saved previous)")
continue
print("Process %s..." % location)
response = geocoder.google(location)
coordinates = response.latlng
if coordinates is None or len(coordinates) == 0:
raise Exception("Location not found: " + location)
if response.ok is not True:
raise Exception("Location not resolved: ", location)
new_geo = {
"lat": coordinates[0],
"lng": coordinates[1],
}
print("Will coordinates for \"%s\":\nhttps://www.google.com/maps/search/?api=1&query=%s,%s""" % (
location, new_geo['lat'], new_geo['lng']
))
user_group['position'] = new_geo
with open(data_file, 'w') as points_file:
yaml.dump(sections, stream=points_file, Dumper=yaml.RoundTripDumper, allow_unicode=True)
|
Add a script for filling user group locations
|
KT-47068: Add a script for filling user group locations
|
Python
|
apache-2.0
|
hltj/kotlin-web-site-cn,JetBrains/kotlin-web-site,JetBrains/kotlin-web-site,JetBrains/kotlin-web-site,hltj/kotlin-web-site-cn,JetBrains/kotlin-web-site,hltj/kotlin-web-site-cn,hltj/kotlin-web-site-cn,hltj/kotlin-web-site-cn,JetBrains/kotlin-web-site,JetBrains/kotlin-web-site,hltj/kotlin-web-site-cn
|
KT-47068: Add a script for filling user group locations
|
from os import path
import geocoder
from ruamel import yaml
data_folder = path.join(path.dirname(__file__), "../data")
data_file = path.join(data_folder, "user-groups.yml")
with open(data_file, encoding="utf-8") as points_file:
sections = yaml.load(points_file, yaml.RoundTripLoader)
for n, section in enumerate(sections):
for n, user_group in enumerate(section['groups']):
city = user_group.get("name").replace("Kotlin", "").replace("User", "").replace("Group", "").strip()
location = city + ", " + user_group.get("country")
if 'position' in user_group:
print(location + " (saved previous)")
continue
print("Process %s..." % location)
response = geocoder.google(location)
coordinates = response.latlng
if coordinates is None or len(coordinates) == 0:
raise Exception("Location not found: " + location)
if response.ok is not True:
raise Exception("Location not resolved: ", location)
new_geo = {
"lat": coordinates[0],
"lng": coordinates[1],
}
print("Will coordinates for \"%s\":\nhttps://www.google.com/maps/search/?api=1&query=%s,%s""" % (
location, new_geo['lat'], new_geo['lng']
))
user_group['position'] = new_geo
with open(data_file, 'w') as points_file:
yaml.dump(sections, stream=points_file, Dumper=yaml.RoundTripDumper, allow_unicode=True)
|
<commit_before><commit_msg>KT-47068: Add a script for filling user group locations<commit_after>
|
from os import path
import geocoder
from ruamel import yaml
data_folder = path.join(path.dirname(__file__), "../data")
data_file = path.join(data_folder, "user-groups.yml")
with open(data_file, encoding="utf-8") as points_file:
sections = yaml.load(points_file, yaml.RoundTripLoader)
for n, section in enumerate(sections):
for n, user_group in enumerate(section['groups']):
city = user_group.get("name").replace("Kotlin", "").replace("User", "").replace("Group", "").strip()
location = city + ", " + user_group.get("country")
if 'position' in user_group:
print(location + " (saved previous)")
continue
print("Process %s..." % location)
response = geocoder.google(location)
coordinates = response.latlng
if coordinates is None or len(coordinates) == 0:
raise Exception("Location not found: " + location)
if response.ok is not True:
raise Exception("Location not resolved: ", location)
new_geo = {
"lat": coordinates[0],
"lng": coordinates[1],
}
print("Will coordinates for \"%s\":\nhttps://www.google.com/maps/search/?api=1&query=%s,%s""" % (
location, new_geo['lat'], new_geo['lng']
))
user_group['position'] = new_geo
with open(data_file, 'w') as points_file:
yaml.dump(sections, stream=points_file, Dumper=yaml.RoundTripDumper, allow_unicode=True)
|
KT-47068: Add a script for filling user group locationsfrom os import path
import geocoder
from ruamel import yaml
data_folder = path.join(path.dirname(__file__), "../data")
data_file = path.join(data_folder, "user-groups.yml")
with open(data_file, encoding="utf-8") as points_file:
sections = yaml.load(points_file, yaml.RoundTripLoader)
for n, section in enumerate(sections):
for n, user_group in enumerate(section['groups']):
city = user_group.get("name").replace("Kotlin", "").replace("User", "").replace("Group", "").strip()
location = city + ", " + user_group.get("country")
if 'position' in user_group:
print(location + " (saved previous)")
continue
print("Process %s..." % location)
response = geocoder.google(location)
coordinates = response.latlng
if coordinates is None or len(coordinates) == 0:
raise Exception("Location not found: " + location)
if response.ok is not True:
raise Exception("Location not resolved: ", location)
new_geo = {
"lat": coordinates[0],
"lng": coordinates[1],
}
print("Will coordinates for \"%s\":\nhttps://www.google.com/maps/search/?api=1&query=%s,%s""" % (
location, new_geo['lat'], new_geo['lng']
))
user_group['position'] = new_geo
with open(data_file, 'w') as points_file:
yaml.dump(sections, stream=points_file, Dumper=yaml.RoundTripDumper, allow_unicode=True)
|
<commit_before><commit_msg>KT-47068: Add a script for filling user group locations<commit_after>from os import path
import geocoder
from ruamel import yaml
data_folder = path.join(path.dirname(__file__), "../data")
data_file = path.join(data_folder, "user-groups.yml")
with open(data_file, encoding="utf-8") as points_file:
sections = yaml.load(points_file, yaml.RoundTripLoader)
for n, section in enumerate(sections):
for n, user_group in enumerate(section['groups']):
city = user_group.get("name").replace("Kotlin", "").replace("User", "").replace("Group", "").strip()
location = city + ", " + user_group.get("country")
if 'position' in user_group:
print(location + " (saved previous)")
continue
print("Process %s..." % location)
response = geocoder.google(location)
coordinates = response.latlng
if coordinates is None or len(coordinates) == 0:
raise Exception("Location not found: " + location)
if response.ok is not True:
raise Exception("Location not resolved: ", location)
new_geo = {
"lat": coordinates[0],
"lng": coordinates[1],
}
print("Will coordinates for \"%s\":\nhttps://www.google.com/maps/search/?api=1&query=%s,%s""" % (
location, new_geo['lat'], new_geo['lng']
))
user_group['position'] = new_geo
with open(data_file, 'w') as points_file:
yaml.dump(sections, stream=points_file, Dumper=yaml.RoundTripDumper, allow_unicode=True)
|
|
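The script above mutates the parsed YAML in place and skips any group that already carries a position. A minimal sketch of that skip-or-fill loop with the geocoding stubbed out, showing the data shape the script expects; the sample structure and fake_geocode are assumptions for illustration.

def fill_positions(sections, geocode):
    # Same pattern as the script: only geocode groups without a saved position.
    for section in sections:
        for user_group in section['groups']:
            if 'position' in user_group:
                continue  # the "(saved previous)" branch in the script
            city = user_group['name'].replace('Kotlin', '').replace('User', '').replace('Group', '').strip()
            lat, lng = geocode(city + ', ' + user_group['country'])
            user_group['position'] = {'lat': lat, 'lng': lng}
    return sections

sections = [{'groups': [
    {'name': 'Kotlin User Group Example', 'country': 'Nowhere'},                        # will be geocoded
    {'name': 'Another Group', 'country': 'Nowhere', 'position': {'lat': 0, 'lng': 0}},  # skipped
]}]
fake_geocode = lambda location: (1.0, 2.0)  # stands in for geocoder.google(...).latlng
print(fill_positions(sections, fake_geocode))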
533b4c090547389054934ea88388512399b568c9
|
filter_plugins/custom_plugins.py
|
filter_plugins/custom_plugins.py
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
from ansible import errors
(out, err) = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE).communicate()
if (err != None):
raise errors.AnsibleFilterError("Unable to decrypt, aborting. Error: {error}".format(error = err))
else:
return out
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
Use simpler invocation that actually fails. Leave it to @thijskh to use Popen-type of invocation
|
Use simpler invocation that actually fails. Leave it to @thijskh to use Popen-type of invocation
|
Python
|
apache-2.0
|
baszoetekouw/OpenConext-deploy,remold/OpenConext-deploy,OpenConext/OpenConext-deploy,baszoetekouw/OpenConext-deploy,baszoetekouw/OpenConext-deploy,OpenConext/OpenConext-deploy,OpenConext/OpenConext-deploy,remold/OpenConext-deploy,remold/OpenConext-deploy,baszoetekouw/OpenConext-deploy,OpenConext/OpenConext-deploy,baszoetekouw/OpenConext-deploy,OpenConext/OpenConext-deploy
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
from ansible import errors
(out, err) = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE).communicate()
if (err != None):
raise errors.AnsibleFilterError("Unable to decrypt, aborting. Error: {error}".format(error = err))
else:
return out
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
Use simpler invocation that actually fails. Leave it to @thijskh to use Popen-type of invocation
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
<commit_before>#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
from ansible import errors
(out, err) = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE).communicate()
if (err != None):
raise errors.AnsibleFilterError("Unable to decrypt, aborting. Error: {error}".format(error = err))
else:
return out
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
<commit_msg>Use simpler invocation that actually fails. Leave it to @thijskh to use Popen-type of invocation<commit_after>
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
from ansible import errors
(out, err) = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE).communicate()
if (err != None):
raise errors.AnsibleFilterError("Unable to decrypt, aborting. Error: {error}".format(error = err))
else:
return out
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
Use simpler invocation that actually fails. Leave it to @thijskh to use Popen-type of invocation#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
<commit_before>#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
from ansible import errors
(out, err) = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE).communicate()
if (err != None):
raise errors.AnsibleFilterError("Unable to decrypt, aborting. Error: {error}".format(error = err))
else:
return out
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
<commit_msg>Use simpler invocation that actually fails. Leave it to @thijskh to use Popen-type of invocation<commit_after>#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
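The rationale for the change above: with stderr not piped, Popen(...).communicate() always returns err=None, so the old error branch could never fire, whereas check_output raises on a non-zero exit status. A small demonstration of the difference, assuming a python executable on PATH (as the filter itself does); the failing child command is arbitrary.

import subprocess

# Old style: stderr is not piped, so err is always None and failures pass silently.
out, err = subprocess.Popen(
    ['python', '-c', 'raise SystemExit(1)'], stdout=subprocess.PIPE
).communicate()
print(err)  # None, even though the child exited with status 1

# New style: check_output raises CalledProcessError on a non-zero exit status.
try:
    subprocess.check_output(['python', '-c', 'raise SystemExit(1)'])
except subprocess.CalledProcessError as exc:
    print('decryption helper failed with status', exc.returncode)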
4a5dd598f689425aa89541ce890ec15aa7592543
|
dragonfire/tts/__init__.py
|
dragonfire/tts/__init__.py
|
import csv
class Synthesizer():
def __init__(self):
self.word_map = {}
filename = "../../dictionaries/VoxForgeDict"
for line in csv.reader(open(filename), delimiter=' ', skipinitialspace=True):
if len(line) > 2:
self.word_map[line[0]] = line[2:]
print len(self.word_map)
def string_to_phonemes(self, string):
string = string.upper()
string = string.replace('.','')
string = string.replace(',','')
words = string.split()
result = ""
for word in words:
print self.word_map[word]
|
Add the function for parsing strings to phonemes
|
Add the function for parsing strings to phonemes
|
Python
|
mit
|
DragonComputer/Dragonfire,DragonComputer/Dragonfire,DragonComputer/Dragonfire,mertyildiran/Dragonfire,mertyildiran/Dragonfire
|
Add the function for parsing strings to phonemes
|
import csv
class Synthesizer():
def __init__(self):
self.word_map = {}
filename = "../../dictionaries/VoxForgeDict"
for line in csv.reader(open(filename), delimiter=' ', skipinitialspace=True):
if len(line) > 2:
self.word_map[line[0]] = line[2:]
print len(self.word_map)
def string_to_phonemes(self, string):
string = string.upper()
string = string.replace('.','')
string = string.replace(',','')
words = string.split()
result = ""
for word in words:
print self.word_map[word]
|
<commit_before><commit_msg>Add the function for parsing strings to phonemes<commit_after>
|
import csv
class Synthesizer():
def __init__(self):
self.word_map = {}
filename = "../../dictionaries/VoxForgeDict"
for line in csv.reader(open(filename), delimiter=' ', skipinitialspace=True):
if len(line) > 2:
self.word_map[line[0]] = line[2:]
print len(self.word_map)
def string_to_phonemes(self, string):
string = string.upper()
string = string.replace('.','')
string = string.replace(',','')
words = string.split()
result = ""
for word in words:
print self.word_map[word]
|
Add the function for parsing strings to phonemesimport csv
class Synthesizer():
def __init__(self):
self.word_map = {}
filename = "../../dictionaries/VoxForgeDict"
for line in csv.reader(open(filename), delimiter=' ', skipinitialspace=True):
if len(line) > 2:
self.word_map[line[0]] = line[2:]
print len(self.word_map)
def string_to_phonemes(self, string):
string = string.upper()
string = string.replace('.','')
string = string.replace(',','')
words = string.split()
result = ""
for word in words:
print self.word_map[word]
|
<commit_before><commit_msg>Add the function for parsing strings to phonemes<commit_after>import csv
class Synthesizer():
def __init__(self):
self.word_map = {}
filename = "../../dictionaries/VoxForgeDict"
for line in csv.reader(open(filename), delimiter=' ', skipinitialspace=True):
if len(line) > 2:
self.word_map[line[0]] = line[2:]
print len(self.word_map)
def string_to_phonemes(self, string):
string = string.upper()
string = string.replace('.','')
string = string.replace(',','')
words = string.split()
result = ""
for word in words:
print self.word_map[word]
|
|
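string_to_phonemes above initializes result but only prints lookups, and a word missing from the map would raise KeyError; the dictionary path is also relative to the working directory. A hedged sketch of the same lookup technique with an inline word map that returns the phoneme sequence; the tiny dictionary is an assumption for illustration.

class Synthesizer(object):
    def __init__(self, word_map):
        # word_map: WORD -> list of phonemes, as parsed from a VoxForge-style dict.
        self.word_map = word_map

    def string_to_phonemes(self, string):
        string = string.upper().replace('.', '').replace(',', '')
        phonemes = []
        for word in string.split():
            phonemes.extend(self.word_map.get(word, ['<unk>']))
        return phonemes

synth = Synthesizer({'HELLO': ['hh', 'ax', 'l', 'ow'], 'WORLD': ['w', 'er', 'l', 'd']})
print(synth.string_to_phonemes('Hello, world.'))  # ['hh', 'ax', 'l', 'ow', 'w', 'er', 'l', 'd']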
e5a46876e55344f54e205a76b5f16db07d099fa0
|
examples/4-resistors.py
|
examples/4-resistors.py
|
#!/usr/bin/env python3
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A simple example, placing 4 resistors in a loop on a 2x4 strip board.
Three of the resistors have a maximum length of 1, whereas the other has a
maximum length of 3. There should be 4 solutions in total.
"""
import component
import placer
r1 = component.LeadedComponent(3)
r2 = component.LeadedComponent(1)
r3 = component.LeadedComponent(1)
r4 = component.LeadedComponent(1)
board = component.StripBoard((2, 4))
nets = (
(r1.terminals[1], r2.terminals[0]),
(r2.terminals[1], r3.terminals[0]),
(r3.terminals[1], r4.terminals[0]),
(r4.terminals[1], r1.terminals[0]),
)
for placement in placer.place(board, (r1, r2, r3, r4), nets):
print("R1: {}".format(placement[r1]))
print("R2: {}".format(placement[r2]))
print("R3: {}".format(placement[r3]))
print("R4: {}".format(placement[r4]))
print()
|
Add a 4 resistors example
|
Add a 4 resistors example
|
Python
|
mit
|
matthewearl/strippy
|
Add a 4 resistors example
|
#!/usr/bin/env python3
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A simple example, placing 4 resistors in a loop on a 2x4 strip board.
Three of the resistors have a maximum length of 1, whereas the other has a
maximum length of 3. There should be 4 solutions in total.
"""
import component
import placer
r1 = component.LeadedComponent(3)
r2 = component.LeadedComponent(1)
r3 = component.LeadedComponent(1)
r4 = component.LeadedComponent(1)
board = component.StripBoard((2, 4))
nets = (
(r1.terminals[1], r2.terminals[0]),
(r2.terminals[1], r3.terminals[0]),
(r3.terminals[1], r4.terminals[0]),
(r4.terminals[1], r1.terminals[0]),
)
for placement in placer.place(board, (r1, r2, r3, r4), nets):
print("R1: {}".format(placement[r1]))
print("R2: {}".format(placement[r2]))
print("R3: {}".format(placement[r3]))
print("R4: {}".format(placement[r4]))
print()
|
<commit_before><commit_msg>Add a 4 resistors example<commit_after>
|
#!/usr/bin/env python3
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A simple example, placing 4 resistors in a loop on a 2x4 strip board.
Three of the resistors have a maximum length of 1, whereas the other has a
maximum length of 3. There should be 4 solutions in total.
"""
import component
import placer
r1 = component.LeadedComponent(3)
r2 = component.LeadedComponent(1)
r3 = component.LeadedComponent(1)
r4 = component.LeadedComponent(1)
board = component.StripBoard((2, 4))
nets = (
(r1.terminals[1], r2.terminals[0]),
(r2.terminals[1], r3.terminals[0]),
(r3.terminals[1], r4.terminals[0]),
(r4.terminals[1], r1.terminals[0]),
)
for placement in placer.place(board, (r1, r2, r3, r4), nets):
print("R1: {}".format(placement[r1]))
print("R2: {}".format(placement[r2]))
print("R3: {}".format(placement[r3]))
print("R4: {}".format(placement[r4]))
print()
|
Add a 4 resistors example#!/usr/bin/env python3
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A simple example, placing 4 resistors in a loop on a 2x4 strip board.
Three of the resistors have a maximum length of 1, whereas the other has a
maximum length of 3. There should be 4 solutions in total.
"""
import component
import placer
r1 = component.LeadedComponent(3)
r2 = component.LeadedComponent(1)
r3 = component.LeadedComponent(1)
r4 = component.LeadedComponent(1)
board = component.StripBoard((2, 4))
nets = (
(r1.terminals[1], r2.terminals[0]),
(r2.terminals[1], r3.terminals[0]),
(r3.terminals[1], r4.terminals[0]),
(r4.terminals[1], r1.terminals[0]),
)
for placement in placer.place(board, (r1, r2, r3, r4), nets):
print("R1: {}".format(placement[r1]))
print("R2: {}".format(placement[r2]))
print("R3: {}".format(placement[r3]))
print("R4: {}".format(placement[r4]))
print()
|
<commit_before><commit_msg>Add a 4 resistors example<commit_after>#!/usr/bin/env python3
# Copyright (c) 2015 Matthew Earl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
A simple example, placing 4 resistors in a loop on a 2x4 strip board.
Three of the resistors have a maximum length of 1, whereas the other has a
maximum length of 3. There should be 4 solutions in total.
"""
import component
import placer
r1 = component.LeadedComponent(3)
r2 = component.LeadedComponent(1)
r3 = component.LeadedComponent(1)
r4 = component.LeadedComponent(1)
board = component.StripBoard((2, 4))
nets = (
(r1.terminals[1], r2.terminals[0]),
(r2.terminals[1], r3.terminals[0]),
(r3.terminals[1], r4.terminals[0]),
(r4.terminals[1], r1.terminals[0]),
)
for placement in placer.place(board, (r1, r2, r3, r4), nets):
print("R1: {}".format(placement[r1]))
print("R2: {}".format(placement[r2]))
print("R3: {}".format(placement[r3]))
print("R4: {}".format(placement[r4]))
print()
|
|
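Since placer.place is consumed as a generator above, the docstring's claim of exactly 4 solutions can be checked by exhausting it. A short sketch, assuming the r1..r4, board and nets objects defined in the example:

solutions = list(placer.place(board, (r1, r2, r3, r4), nets))
print("found %d placements" % len(solutions))
assert len(solutions) == 4, "docstring promises exactly 4 solutions"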
25494622a88f172fb14abf10eb5936246d475066
|
other/wrapping-cpp/swig/cpointerproblem/test_examples.py
|
other/wrapping-cpp/swig/cpointerproblem/test_examples.py
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
call_make('all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
call_make('alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
call_make('clean')
|
Modify testing code to work if executed from above its own directory
|
Modify testing code to work if executed from above its own directory
|
Python
|
bsd-2-clause
|
ryanpepper/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,fangohr/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
Modify testing code to work if executed from above its own directory
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
call_make('all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
call_make('alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
call_make('clean')
|
<commit_before>"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
<commit_msg>Modify testing code to work if executed from above its own directory<commit_after>
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
call_make('all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
call_make('alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
call_make('clean')
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
Modify testing code to work if executed from above its own directory"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
call_make('all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
call_make('alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
call_make('clean')
|
<commit_before>"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
<commit_msg>Modify testing code to work if executed from above its own directory<commit_after>"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
call_make('all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
call_make('alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
call_make('clean')
|
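Chaining 'cd DIR; make TARGET' through os.system relies on a POSIX shell and silently ignores make's exit status. A hedged alternative sketch using subprocess with cwd=, which drops the shell and raises when make fails; this is one possible hardening of the same idea, not the repo's code.

import os
import subprocess

def call_make(target):
    # Run make in this file's directory, regardless of the caller's cwd.
    this_dir = os.path.dirname(os.path.realpath(__file__))
    subprocess.check_call(['make', target], cwd=this_dir)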
10641e60bf3e99efdc919f122ee911c05da1c873
|
src/copyListWithRandomPointer.py
|
src/copyListWithRandomPointer.py
|
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
if head is None:
return None
cur = head
while cur:
n_node = RandomListNode(cur.label)
n_node.next, cur.next, cur = cur.next, n_node, cur.next
cur = head
while cur:
if cur.random:
cur.next.random = cur.random.next
cur = cur.next.next
cur, n_node, res = head, head.next, head.next
while cur:
cur.next, cur = n_node.next, n_node.next
if cur and cur.next:
n_node.next, n_node = cur.next, cur.next
return res
|
Copy List with Random Pointer
|
Copy List with Random Pointer
|
Python
|
mit
|
zhyu/leetcode,zhyu/leetcode
|
Copy List with Random Pointer
|
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
if head is None:
return None
cur = head
while cur:
n_node = RandomListNode(cur.label)
n_node.next, cur.next, cur = cur.next, n_node, cur.next
cur = head
while cur:
if cur.random:
cur.next.random = cur.random.next
cur = cur.next.next
cur, n_node, res = head, head.next, head.next
while cur:
cur.next, cur = n_node.next, n_node.next
if cur and cur.next:
n_node.next, n_node = cur.next, cur.next
return res
|
<commit_before><commit_msg>Copy List with Random Pointer<commit_after>
|
# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
if head is None:
return None
cur = head
while cur:
n_node = RandomListNode(cur.label)
n_node.next, cur.next, cur = cur.next, n_node, cur.next
cur = head
while cur:
if cur.random:
cur.next.random = cur.random.next
cur = cur.next.next
cur, n_node, res = head, head.next, head.next
while cur:
cur.next, cur = n_node.next, n_node.next
if cur and cur.next:
n_node.next, n_node = cur.next, cur.next
return res
|
Copy List with Random Pointer# Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
if head is None:
return None
cur = head
while cur:
n_node = RandomListNode(cur.label)
n_node.next, cur.next, cur = cur.next, n_node, cur.next
cur = head
while cur:
if cur.random:
cur.next.random = cur.random.next
cur = cur.next.next
cur, n_node, res = head, head.next, head.next
while cur:
cur.next, cur = n_node.next, n_node.next
if cur and cur.next:
n_node.next, n_node = cur.next, cur.next
return res
|
<commit_before><commit_msg>Copy List with Random Pointer<commit_after># Definition for singly-linked list with a random pointer.
# class RandomListNode:
# def __init__(self, x):
# self.label = x
# self.next = None
# self.random = None
class Solution:
# @param head, a RandomListNode
# @return a RandomListNode
def copyRandomList(self, head):
if head is None:
return None
cur = head
while cur:
n_node = RandomListNode(cur.label)
n_node.next, cur.next, cur = cur.next, n_node, cur.next
cur = head
while cur:
if cur.random:
cur.next.random = cur.random.next
cur = cur.next.next
cur, n_node, res = head, head.next, head.next
while cur:
cur.next, cur = n_node.next, n_node.next
if cur and cur.next:
n_node.next, n_node = cur.next, cur.next
return res
|
|
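The solution above is the classic O(1)-extra-space interleaving technique: pass one splices a copy after each original node, pass two sets cur.next.random = cur.random.next, pass three unzips the two lists. A short driver exercising it, assuming the Solution class from the record; the RandomListNode definition and the sample list are illustrative assumptions.

class RandomListNode(object):
    def __init__(self, x):
        self.label = x
        self.next = None
        self.random = None

a, b = RandomListNode(1), RandomListNode(2)
a.next = b
a.random = b   # 1's random points at 2
b.random = b   # 2's random points at itself

copy = Solution().copyRandomList(a)
print(copy.label, copy.random.label)            # 1 2
print(copy.next.label, copy.next.random.label)  # 2 2
assert copy is not a and copy.random is copy.next  # a genuine deep copy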
c59b10aa48640d445082b2951fc3bd80cae5816c
|
examples/feature_select.py
|
examples/feature_select.py
|
"""
An example showing feature selection.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn.datasets.iris import load
SP, SW, PL, PW, LABELS = load()
# Some noisy data not correlated
E1, E2 = np.random.normal(size=(2, len(SP)))
x = np.c_[SP, SW, PL, PW, E1, E2]
y = LABELS
################################################################################
pl.figure(1)
pl.clf()
################################################################################
# Univariate feature selection
from scikits.learn.feature_select import univ_selection
selector = univ_selection.UnivSelection(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector.p_values_)
pl.plot(scores/scores.max(), label='Univariate score (p values)')
################################################################################
# Compare to the weights of an SVM
from scikits.learn.svm import SVM
svm = SVM(kernel_type='linear')
svm.fit(x, y)
svm_weights = (svm.support_**2).sum(axis=0)
pl.plot(svm_weights/svm_weights.max(), label='SVM weight')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.legend()
pl.show()
|
Add an example of feature selection.
|
DOC: Add an example of feature selection.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@458 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
Python
|
bsd-3-clause
|
ogrisel/scikit-learn,altairpearl/scikit-learn,beepee14/scikit-learn,ashhher3/scikit-learn,jm-begon/scikit-learn,hsiaoyi0504/scikit-learn,huobaowangxi/scikit-learn,jkarnows/scikit-learn,spallavolu/scikit-learn,zaxtax/scikit-learn,hugobowne/scikit-learn,nesterione/scikit-learn,Vimos/scikit-learn,adamgreenhall/scikit-learn,maheshakya/scikit-learn,nmayorov/scikit-learn,yyjiang/scikit-learn,waterponey/scikit-learn,alvarofierroclavero/scikit-learn,lesteve/scikit-learn,h2educ/scikit-learn,BiaDarkia/scikit-learn,mhue/scikit-learn,tomlof/scikit-learn,mhdella/scikit-learn,madjelan/scikit-learn,dsullivan7/scikit-learn,Myasuka/scikit-learn,MatthieuBizien/scikit-learn,3manuek/scikit-learn,Sentient07/scikit-learn,yanlend/scikit-learn,ishanic/scikit-learn,tosolveit/scikit-learn,cdegroc/scikit-learn,rexshihaoren/scikit-learn,appapantula/scikit-learn,elkingtonmcb/scikit-learn,jmetzen/scikit-learn,mysociety/scikit-learn,ChanderG/scikit-learn,Fireblend/scikit-learn,neavouli/scikit-learn,imaculate/scikit-learn,Adai0808/scikit-learn,aewhatley/scikit-learn,ngoix/OCRF,jorik041/scikit-learn,openstate/scikit-learn,btabibian/scikit-learn,manhhomienbienthuy/scikit-learn,DemocracyClub/scikit-learn,etkirsch/scikit-learn,pythonvietnam/scikit-learn,B3AU/waveTree,jereze/scikit-learn,rahul-c1/scikit-learn,MartinDelzant/scikit-learn,aflaxman/scikit-learn,PatrickChrist/scikit-learn,fyffyt/scikit-learn,henrykironde/scikit-learn,thientu/scikit-learn,roxyboy/scikit-learn,LohithBlaze/scikit-learn,lbishal/scikit-learn,kashif/scikit-learn,ivannz/scikit-learn,Achuth17/scikit-learn,hainm/scikit-learn,AlexandreAbraham/scikit-learn,davidgbe/scikit-learn,giorgiop/scikit-learn,abhishekgahlot/scikit-learn,marcocaccin/scikit-learn,yask123/scikit-learn,ilo10/scikit-learn,olologin/scikit-learn,lazywei/scikit-learn,dhruv13J/scikit-learn,appapantula/scikit-learn,PatrickChrist/scikit-learn,joshloyal/scikit-learn,alexeyum/scikit-learn,devanshdalal/scikit-learn,costypetrisor/scikit-learn,ominux/scikit-learn,jakobworldpeace/scikit-learn,ashhher3/scikit-learn,aminert/scikit-learn,pompiduskus/scikit-learn,aetilley/scikit-learn,samuel1208/scikit-learn,jorge2703/scikit-learn,marcocaccin/scikit-learn,cl4rke/scikit-learn,clemkoa/scikit-learn,fyffyt/scikit-learn,RayMick/scikit-learn,IndraVikas/scikit-learn,CVML/scikit-learn,betatim/scikit-learn,tmhm/scikit-learn,mhdella/scikit-learn,kmike/scikit-learn,0x0all/scikit-learn,466152112/scikit-learn,nomadcube/scikit-learn,lucidfrontier45/scikit-learn,waterponey/scikit-learn,Obus/scikit-learn,mrshu/scikit-learn,RomainBrault/scikit-learn,anirudhjayaraman/scikit-learn,victorbergelin/scikit-learn,rahuldhote/scikit-learn,lenovor/scikit-learn,mattilyra/scikit-learn,dsquareindia/scikit-learn,mhue/scikit-learn,arahuja/scikit-learn,jkarnows/scikit-learn,nomadcube/scikit-learn,lbishal/scikit-learn,mugizico/scikit-learn,billy-inn/scikit-learn,jaidevd/scikit-learn,Lawrence-Liu/scikit-learn,OshynSong/scikit-learn,arahuja/scikit-learn,fredhusser/scikit-learn,victorbergelin/scikit-learn,jm-begon/scikit-learn,sergeyf/scikit-learn,massmutual/scikit-learn,depet/scikit-learn,larsmans/scikit-learn,AlexRobson/scikit-learn,equialgo/scikit-learn,Obus/scikit-learn,bigdataelephants/scikit-learn,joshloyal/scikit-learn,abimannans/scikit-learn,Barmaley-exe/scikit-learn,gclenaghan/scikit-learn,ChanChiChoi/scikit-learn,zaxtax/scikit-learn,jakirkham/scikit-learn,joernhees/scikit-learn,MartinDelzant/scikit-learn,Jimmy-Morzaria/scikit-learn,betatim/scikit-learn,r-mart/scikit-learn,Barmaley-exe/scikit-learn,jayflo/scikit-learn,kevin-intel/scikit-learn,AIML/scikit-learn,aflaxman/scikit-learn,zhenv5/scikit-learn
,trankmichael/scikit-learn,mjudsp/Tsallis,vivekmishra1991/scikit-learn,kmike/scikit-learn,xwolf12/scikit-learn,terkkila/scikit-learn,xuewei4d/scikit-learn,pythonvietnam/scikit-learn,aewhatley/scikit-learn,pkruskal/scikit-learn,thilbern/scikit-learn,xyguo/scikit-learn,nelson-liu/scikit-learn,rexshihaoren/scikit-learn,zhenv5/scikit-learn,TomDLT/scikit-learn,glouppe/scikit-learn,PatrickChrist/scikit-learn,ephes/scikit-learn,jpautom/scikit-learn,siutanwong/scikit-learn,cauchycui/scikit-learn,jkarnows/scikit-learn,potash/scikit-learn,Nyker510/scikit-learn,jereze/scikit-learn,xiaoxiamii/scikit-learn,ssaeger/scikit-learn,liangz0707/scikit-learn,larsmans/scikit-learn,Akshay0724/scikit-learn,dsullivan7/scikit-learn,yyjiang/scikit-learn,UNR-AERIAL/scikit-learn,mehdidc/scikit-learn,aabadie/scikit-learn,andrewnc/scikit-learn,0asa/scikit-learn,pompiduskus/scikit-learn,ky822/scikit-learn,roxyboy/scikit-learn,btabibian/scikit-learn,mlyundin/scikit-learn,MohammedWasim/scikit-learn,amueller/scikit-learn,xwolf12/scikit-learn,zuku1985/scikit-learn,qifeigit/scikit-learn,cwu2011/scikit-learn,evgchz/scikit-learn,stylianos-kampakis/scikit-learn,depet/scikit-learn,pnedunuri/scikit-learn,imaculate/scikit-learn,procoder317/scikit-learn,ningchi/scikit-learn,jayflo/scikit-learn,ZENGXH/scikit-learn,aabadie/scikit-learn,abhishekgahlot/scikit-learn,JosmanPS/scikit-learn,giorgiop/scikit-learn,belltailjp/scikit-learn,jzt5132/scikit-learn,roxyboy/scikit-learn,robbymeals/scikit-learn,costypetrisor/scikit-learn,meduz/scikit-learn,maheshakya/scikit-learn,frank-tancf/scikit-learn,robin-lai/scikit-learn,hsuantien/scikit-learn,raghavrv/scikit-learn,toastedcornflakes/scikit-learn,alexsavio/scikit-learn,Achuth17/scikit-learn,Fireblend/scikit-learn,mhdella/scikit-learn,wanggang3333/scikit-learn,theoryno3/scikit-learn,raghavrv/scikit-learn,CforED/Machine-Learning,arahuja/scikit-learn,RomainBrault/scikit-learn,AlexanderFabisch/scikit-learn,jpautom/scikit-learn,jakirkham/scikit-learn,LiaoPan/scikit-learn,espg/scikit-learn,amueller/scikit-learn,madjelan/scikit-learn,rishikksh20/scikit-learn,michigraber/scikit-learn,aflaxman/scikit-learn,IshankGulati/scikit-learn,kaichogami/scikit-learn,sinhrks/scikit-learn,vinayak-mehta/scikit-learn,shikhardb/scikit-learn,petosegan/scikit-learn,costypetrisor/scikit-learn,idlead/scikit-learn,jakobworldpeace/scikit-learn,dhruv13J/scikit-learn,ZenDevelopmentSystems/scikit-learn,rohanp/scikit-learn,mjgrav2001/scikit-learn,potash/scikit-learn,pianomania/scikit-learn,ElDeveloper/scikit-learn,meduz/scikit-learn,mwv/scikit-learn,henrykironde/scikit-learn,depet/scikit-learn,chrsrds/scikit-learn,mblondel/scikit-learn,IshankGulati/scikit-learn,justincassidy/scikit-learn,ChanderG/scikit-learn,MartinSavc/scikit-learn,mjudsp/Tsallis,cauchycui/scikit-learn,mrshu/scikit-learn,JosmanPS/scikit-learn,pnedunuri/scikit-learn,hlin117/scikit-learn,andaag/scikit-learn,mjudsp/Tsallis,themrmax/scikit-learn,devanshdalal/scikit-learn,bnaul/scikit-learn,phdowling/scikit-learn,adamgreenhall/scikit-learn,vybstat/scikit-learn,chrisburr/scikit-learn,yunfeilu/scikit-learn,pratapvardhan/scikit-learn,herilalaina/scikit-learn,ogrisel/scikit-learn,cainiaocome/scikit-learn,theoryno3/scikit-learn,ClimbsRocks/scikit-learn,vibhorag/scikit-learn,alvarofierroclavero/scikit-learn,Akshay0724/scikit-learn,shikhardb/scikit-learn,xzh86/scikit-learn,glouppe/scikit-learn,3manuek/scikit-learn,fengzhyuan/scikit-learn,mjudsp/Tsallis,khkaminska/scikit-learn,bthirion/scikit-learn,mojoboss/scikit-learn,MechCoder/scikit-learn,ndingwall/scikit-learn,jaidevd/sci
kit-learn,RomainBrault/scikit-learn,mayblue9/scikit-learn,shusenl/scikit-learn,henrykironde/scikit-learn,arjoly/scikit-learn,Srisai85/scikit-learn,quheng/scikit-learn,larsmans/scikit-learn,djgagne/scikit-learn,giorgiop/scikit-learn,jlegendary/scikit-learn,ChanChiChoi/scikit-learn,nmayorov/scikit-learn,moutai/scikit-learn,YinongLong/scikit-learn,Windy-Ground/scikit-learn,shangwuhencc/scikit-learn,nelson-liu/scikit-learn,vinayak-mehta/scikit-learn,rahul-c1/scikit-learn,scikit-learn/scikit-learn,glennq/scikit-learn,plissonf/scikit-learn,lenovor/scikit-learn,vivekmishra1991/scikit-learn,RomainBrault/scikit-learn,hitszxp/scikit-learn,dsquareindia/scikit-learn,JsNoNo/scikit-learn,AnasGhrab/scikit-learn,tdhopper/scikit-learn,gotomypc/scikit-learn,vigilv/scikit-learn,meduz/scikit-learn,harshaneelhg/scikit-learn,untom/scikit-learn,kjung/scikit-learn,clemkoa/scikit-learn,ahoyosid/scikit-learn,lucidfrontier45/scikit-learn,NunoEdgarGub1/scikit-learn,aflaxman/scikit-learn,carrillo/scikit-learn,deepesch/scikit-learn,ishanic/scikit-learn,sanketloke/scikit-learn,dhruv13J/scikit-learn,lazywei/scikit-learn,lucidfrontier45/scikit-learn,chrsrds/scikit-learn,r-mart/scikit-learn,Aasmi/scikit-learn,jm-begon/scikit-learn,huobaowangxi/scikit-learn,MartinSavc/scikit-learn,frank-tancf/scikit-learn,vivekmishra1991/scikit-learn,kashif/scikit-learn,ningchi/scikit-learn,walterreade/scikit-learn,tomlof/scikit-learn,zaxtax/scikit-learn,evgchz/scikit-learn,marcocaccin/scikit-learn,RayMick/scikit-learn,mjgrav2001/scikit-learn,cdegroc/scikit-learn,ominux/scikit-learn,AnasGhrab/scikit-learn,fbagirov/scikit-learn,mwv/scikit-learn,huzq/scikit-learn,kjung/scikit-learn,xubenben/scikit-learn,jorge2703/scikit-learn,icdishb/scikit-learn,Djabbz/scikit-learn,djgagne/scikit-learn,466152112/scikit-learn,fzalkow/scikit-learn,CVML/scikit-learn,jakobworldpeace/scikit-learn,betatim/scikit-learn,jjx02230808/project0223,jaidevd/scikit-learn,massmutual/scikit-learn,vortex-ape/scikit-learn,samzhang111/scikit-learn,ankurankan/scikit-learn,OshynSong/scikit-learn,yask123/scikit-learn,qifeigit/scikit-learn,equialgo/scikit-learn,JeanKossaifi/scikit-learn,cainiaocome/scikit-learn,mugizico/scikit-learn,nesterione/scikit-learn,beepee14/scikit-learn,simon-pepin/scikit-learn,yonglehou/scikit-learn,trankmichael/scikit-learn,mugizico/scikit-learn,MartinDelzant/scikit-learn,akionakamura/scikit-learn,vybstat/scikit-learn,amueller/scikit-learn,zihua/scikit-learn,IssamLaradji/scikit-learn,ssaeger/scikit-learn,nvoron23/scikit-learn,MechCoder/scikit-learn,mrshu/scikit-learn,joernhees/scikit-learn,yunfeilu/scikit-learn,altairpearl/scikit-learn,anntzer/scikit-learn,ldirer/scikit-learn,aabadie/scikit-learn,shenzebang/scikit-learn,wzbozon/scikit-learn,arabenjamin/scikit-learn,cwu2011/scikit-learn,iismd17/scikit-learn,ldirer/scikit-learn,sgenoud/scikit-learn,mayblue9/scikit-learn,loli/sklearn-ensembletrees,idlead/scikit-learn,Nyker510/scikit-learn,hlin117/scikit-learn,poryfly/scikit-learn,rrohan/scikit-learn,Myasuka/scikit-learn,kevin-intel/scikit-learn,ZenDevelopmentSystems/scikit-learn,loli/semisupervisedforests,Nyker510/scikit-learn,cl4rke/scikit-learn,henridwyer/scikit-learn,h2educ/scikit-learn,r-mart/scikit-learn,YinongLong/scikit-learn,waterponey/scikit-learn,xwolf12/scikit-learn,Nyker510/scikit-learn,iismd17/scikit-learn,ycaihua/scikit-learn,zorojean/scikit-learn,Jimmy-Morzaria/scikit-learn,macks22/scikit-learn,sanketloke/scikit-learn,ltiao/scikit-learn,herilalaina/scikit-learn,bhargav/scikit-learn,abhishekkrthakur/scikit-learn,murali-munna/scikit-learn,pythonv
ietnam/scikit-learn,poryfly/scikit-learn,IshankGulati/scikit-learn,tosolveit/scikit-learn,quheng/scikit-learn,lin-credible/scikit-learn,PatrickOReilly/scikit-learn,russel1237/scikit-learn,arjoly/scikit-learn,vshtanko/scikit-learn,rrohan/scikit-learn,kmike/scikit-learn,mikebenfield/scikit-learn,Garrett-R/scikit-learn,jlegendary/scikit-learn,jorik041/scikit-learn,clemkoa/scikit-learn,abimannans/scikit-learn,deepesch/scikit-learn,jblackburne/scikit-learn,mjudsp/Tsallis,UNR-AERIAL/scikit-learn,jzt5132/scikit-learn,khkaminska/scikit-learn,RayMick/scikit-learn,pv/scikit-learn,moutai/scikit-learn,rahuldhote/scikit-learn,anntzer/scikit-learn,pianomania/scikit-learn,cdegroc/scikit-learn,ky822/scikit-learn,aewhatley/scikit-learn,sarahgrogan/scikit-learn,CforED/Machine-Learning,mehdidc/scikit-learn,Djabbz/scikit-learn,tdhopper/scikit-learn,nesterione/scikit-learn,rishikksh20/scikit-learn,q1ang/scikit-learn,themrmax/scikit-learn,herilalaina/scikit-learn,Jimmy-Morzaria/scikit-learn,mrshu/scikit-learn,hsuantien/scikit-learn,untom/scikit-learn,kylerbrown/scikit-learn,anurag313/scikit-learn,aminert/scikit-learn,fengzhyuan/scikit-learn,466152112/scikit-learn,dingocuster/scikit-learn,qifeigit/scikit-learn,AlexRobson/scikit-learn,rohanp/scikit-learn,xyguo/scikit-learn,ivannz/scikit-learn,glouppe/scikit-learn,mrshu/scikit-learn,ngoix/OCRF,Garrett-R/scikit-learn,3manuek/scikit-learn,andaag/scikit-learn,JosmanPS/scikit-learn,Myasuka/scikit-learn,sonnyhu/scikit-learn,xzh86/scikit-learn,siutanwong/scikit-learn,alexeyum/scikit-learn,mblondel/scikit-learn,TomDLT/scikit-learn,manashmndl/scikit-learn,arahuja/scikit-learn,Akshay0724/scikit-learn,ominux/scikit-learn,ChanderG/scikit-learn,mattilyra/scikit-learn,Obus/scikit-learn,nhejazi/scikit-learn,plissonf/scikit-learn,kashif/scikit-learn,mxjl620/scikit-learn,AIML/scikit-learn,ltiao/scikit-learn,HolgerPeters/scikit-learn,mehdidc/scikit-learn,potash/scikit-learn,jorge2703/scikit-learn,ominux/scikit-learn,huobaowangxi/scikit-learn,nvoron23/scikit-learn,davidgbe/scikit-learn,loli/semisupervisedforests,aabadie/scikit-learn,dsquareindia/scikit-learn,potash/scikit-learn,treycausey/scikit-learn,wzbozon/scikit-learn,rajat1994/scikit-learn,MatthieuBizien/scikit-learn,hitszxp/scikit-learn,aetilley/scikit-learn,YinongLong/scikit-learn,trankmichael/scikit-learn,q1ang/scikit-learn,etkirsch/scikit-learn,khkaminska/scikit-learn,beepee14/scikit-learn,bikong2/scikit-learn,nelson-liu/scikit-learn,bigdataelephants/scikit-learn,glemaitre/scikit-learn,MohammedWasim/scikit-learn,hdmetor/scikit-learn,Barmaley-exe/scikit-learn,rrohan/scikit-learn,yonglehou/scikit-learn,rvraghav93/scikit-learn,yask123/scikit-learn,kagayakidan/scikit-learn,ZENGXH/scikit-learn,kaichogami/scikit-learn,petosegan/scikit-learn,Titan-C/scikit-learn,imaculate/scikit-learn,shahankhatch/scikit-learn,dsullivan7/scikit-learn,Adai0808/scikit-learn,fbagirov/scikit-learn,Sentient07/scikit-learn,larsmans/scikit-learn,devanshdalal/scikit-learn,nomadcube/scikit-learn,robbymeals/scikit-learn,billy-inn/scikit-learn,hlin117/scikit-learn,rajat1994/scikit-learn,sumspr/scikit-learn,cdegroc/scikit-learn,cwu2011/scikit-learn,glennq/scikit-learn,fredhusser/scikit-learn,jmetzen/scikit-learn,ZenDevelopmentSystems/scikit-learn,ClimbsRocks/scikit-learn,wlamond/scikit-learn,madjelan/scikit-learn,jkarnows/scikit-learn,ssaeger/scikit-learn,vortex-ape/scikit-learn,jaidevd/scikit-learn,henrykironde/scikit-learn,manhhomienbienthuy/scikit-learn,B3AU/waveTree,ankurankan/scikit-learn,kylerbrown/scikit-learn,jmetzen/scikit-learn,h2educ/scikit-learn,P
rashntS/scikit-learn,PatrickOReilly/scikit-learn,roxyboy/scikit-learn,jakirkham/scikit-learn,abhishekgahlot/scikit-learn,deepesch/scikit-learn,plissonf/scikit-learn,jorik041/scikit-learn,liyu1990/sklearn,thilbern/scikit-learn,mlyundin/scikit-learn,jorik041/scikit-learn,DSLituiev/scikit-learn,manhhomienbienthuy/scikit-learn,kmike/scikit-learn,LiaoPan/scikit-learn,pypot/scikit-learn,jpautom/scikit-learn,michigraber/scikit-learn,DonBeo/scikit-learn,rvraghav93/scikit-learn,themrmax/scikit-learn,jmschrei/scikit-learn,TomDLT/scikit-learn,jseabold/scikit-learn,evgchz/scikit-learn,ningchi/scikit-learn,liangz0707/scikit-learn,jayflo/scikit-learn,thilbern/scikit-learn,mfjb/scikit-learn,justincassidy/scikit-learn,RachitKansal/scikit-learn,MatthieuBizien/scikit-learn,olologin/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jseabold/scikit-learn,shyamalschandra/scikit-learn,xwolf12/scikit-learn,mblondel/scikit-learn,xubenben/scikit-learn,Jimmy-Morzaria/scikit-learn,Srisai85/scikit-learn,nikitasingh981/scikit-learn,stylianos-kampakis/scikit-learn,Windy-Ground/scikit-learn,sergeyf/scikit-learn,arjoly/scikit-learn,3manuek/scikit-learn,saiwing-yeung/scikit-learn,chrsrds/scikit-learn,yyjiang/scikit-learn,ssaeger/scikit-learn,mhue/scikit-learn,nrhine1/scikit-learn,fbagirov/scikit-learn,ChanderG/scikit-learn,sgenoud/scikit-learn,Myasuka/scikit-learn,icdishb/scikit-learn,fabioticconi/scikit-learn,PrashntS/scikit-learn,Lawrence-Liu/scikit-learn,kmike/scikit-learn,eg-zhang/scikit-learn,rishikksh20/scikit-learn,hsuantien/scikit-learn,hitszxp/scikit-learn,bikong2/scikit-learn,tmhm/scikit-learn,TomDLT/scikit-learn,belltailjp/scikit-learn,smartscheduling/scikit-learn-categorical-tree,shyamalschandra/scikit-learn,mayblue9/scikit-learn,ycaihua/scikit-learn,liberatorqjw/scikit-learn,PrashntS/scikit-learn,lbishal/scikit-learn,kagayakidan/scikit-learn,JeanKossaifi/scikit-learn,heli522/scikit-learn,abimannans/scikit-learn,rohanp/scikit-learn,carrillo/scikit-learn,luo66/scikit-learn,walterreade/scikit-learn,hrjn/scikit-learn,bhargav/scikit-learn,hrjn/scikit-learn,dsullivan7/scikit-learn,IndraVikas/scikit-learn,ephes/scikit-learn,thientu/scikit-learn,mattilyra/scikit-learn,larsmans/scikit-learn,rvraghav93/scikit-learn,pypot/scikit-learn,wlamond/scikit-learn,deepesch/scikit-learn,frank-tancf/scikit-learn,vibhorag/scikit-learn,bnaul/scikit-learn,DSLituiev/scikit-learn,bthirion/scikit-learn,marcocaccin/scikit-learn,wlamond/scikit-learn,sonnyhu/scikit-learn,yanlend/scikit-learn,victorbergelin/scikit-learn,DSLituiev/scikit-learn,abimannans/scikit-learn,shangwuhencc/scikit-learn,michigraber/scikit-learn,loli/sklearn-ensembletrees,pkruskal/scikit-learn,fbagirov/scikit-learn,huzq/scikit-learn,Djabbz/scikit-learn,kagayakidan/scikit-learn,hugobowne/scikit-learn,JsNoNo/scikit-learn,anurag313/scikit-learn,lesteve/scikit-learn,billy-inn/scikit-learn,RPGOne/scikit-learn,UNR-AERIAL/scikit-learn,petosegan/scikit-learn,robin-lai/scikit-learn,rvraghav93/scikit-learn,terkkila/scikit-learn,samuel1208/scikit-learn,f3r/scikit-learn,Garrett-R/scikit-learn,JPFrancoia/scikit-learn,rsivapr/scikit-learn,samzhang111/scikit-learn,henridwyer/scikit-learn,ngoix/OCRF,xuewei4d/scikit-learn,pratapvardhan/scikit-learn,billy-inn/scikit-learn,xuewei4d/scikit-learn,tdhopper/scikit-learn,mojoboss/scikit-learn,mojoboss/scikit-learn,MechCoder/scikit-learn,ky822/scikit-learn,mlyundin/scikit-learn,phdowling/scikit-learn,MohammedWasim/scikit-learn,dsquareindia/scikit-learn,btabibian/scikit-learn,arabenjamin/scikit-learn,Fireblend/scikit-learn,carrillo/sciki
t-learn,spallavolu/scikit-learn,rsivapr/scikit-learn,icdishb/scikit-learn,DonBeo/scikit-learn,massmutual/scikit-learn,HolgerPeters/scikit-learn,yanlend/scikit-learn,sonnyhu/scikit-learn,MartinDelzant/scikit-learn,cl4rke/scikit-learn,alexsavio/scikit-learn,nrhine1/scikit-learn,waterponey/scikit-learn,raghavrv/scikit-learn,madjelan/scikit-learn,Lawrence-Liu/scikit-learn,meduz/scikit-learn,terkkila/scikit-learn,Clyde-fare/scikit-learn,Aasmi/scikit-learn,elkingtonmcb/scikit-learn,luo66/scikit-learn,macks22/scikit-learn,trungnt13/scikit-learn,saiwing-yeung/scikit-learn,lin-credible/scikit-learn,voxlol/scikit-learn,pkruskal/scikit-learn,joshloyal/scikit-learn,harshaneelhg/scikit-learn,xiaoxiamii/scikit-learn,ycaihua/scikit-learn,sinhrks/scikit-learn,jpautom/scikit-learn,phdowling/scikit-learn,aetilley/scikit-learn,cybernet14/scikit-learn,lucidfrontier45/scikit-learn,nomadcube/scikit-learn,pypot/scikit-learn,ClimbsRocks/scikit-learn,jblackburne/scikit-learn,stylianos-kampakis/scikit-learn,MechCoder/scikit-learn,sanketloke/scikit-learn,jseabold/scikit-learn,hainm/scikit-learn,phdowling/scikit-learn,mblondel/scikit-learn,JeanKossaifi/scikit-learn,zhenv5/scikit-learn,trungnt13/scikit-learn,akionakamura/scikit-learn,B3AU/waveTree,rajat1994/scikit-learn,sarahgrogan/scikit-learn,pompiduskus/scikit-learn,CVML/scikit-learn,jereze/scikit-learn,simon-pepin/scikit-learn,Titan-C/scikit-learn,Achuth17/scikit-learn,mwv/scikit-learn,etkirsch/scikit-learn,sgenoud/scikit-learn,xiaoxiamii/scikit-learn,pianomania/scikit-learn,Titan-C/scikit-learn,saiwing-yeung/scikit-learn,zorroblue/scikit-learn,nikitasingh981/scikit-learn,abhishekkrthakur/scikit-learn,maheshakya/scikit-learn,henridwyer/scikit-learn,xzh86/scikit-learn,adamgreenhall/scikit-learn,siutanwong/scikit-learn,shikhardb/scikit-learn,ChanChiChoi/scikit-learn,vshtanko/scikit-learn,Garrett-R/scikit-learn,aewhatley/scikit-learn
|
DOC: Add an example of feature selection.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@458 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
"""
An example showing feature selection.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn.datasets.iris import load
SP, SW, PL, PW, LABELS = load()
# Some noisy data not correlated
E1, E2 = np.random.normal(size=(2, len(SP)))
x = np.c_[SP, SW, PL, PW, E1, E2]
y = LABELS
################################################################################
pl.figure(1)
pl.clf()
################################################################################
# Univariate feature selection
from scikits.learn.feature_select import univ_selection
selector = univ_selection.UnivSelection(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector.p_values_)
pl.plot(scores/scores.max(), label='Univariate score (p values)')
################################################################################
# Compare to the weights of an SVM
from scikits.learn.svm import SVM
svm = SVM(kernel_type='linear')
svm.fit(x, y)
svm_weights = (svm.support_**2).sum(axis=0)
pl.plot(svm_weights/svm_weights.max(), label='SVM weight')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.legend()
pl.show()
|
<commit_before><commit_msg>DOC: Add an example of feature selection.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@458 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>
|
"""
An example showing feature selection.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn.datasets.iris import load
SP, SW, PL, PW, LABELS = load()
# Some noisy data not correlated
E1, E2 = np.random.normal(size=(2, len(SP)))
x = np.c_[SP, SW, PL, PW, E1, E2]
y = LABELS
################################################################################
pl.figure(1)
pl.clf()
################################################################################
# Univariate feature selection
from scikits.learn.feature_select import univ_selection
selector = univ_selection.UnivSelection(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector.p_values_)
pl.plot(scores/scores.max(), label='Univariate score (p values)')
################################################################################
# Compare to the weights of an SVM
from scikits.learn.svm import SVM
svm = SVM(kernel_type='linear')
svm.fit(x, y)
svm_weights = (svm.support_**2).sum(axis=0)
pl.plot(svm_weights/svm_weights.max(), label='SVM weight')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.legend()
pl.show()
|
DOC: Add an example of feature selection.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@458 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8"""
An example showing feature selection.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn.datasets.iris import load
SP, SW, PL, PW, LABELS = load()
# Some noisy data not correlated
E1, E2 = np.random.normal(size=(2, len(SP)))
x = np.c_[SP, SW, PL, PW, E1, E2]
y = LABELS
################################################################################
pl.figure(1)
pl.clf()
################################################################################
# Univariate feature selection
from scikits.learn.feature_select import univ_selection
selector = univ_selection.UnivSelection(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector.p_values_)
pl.plot(scores/scores.max(), label='Univariate score (p values)')
################################################################################
# Compare to the weights of an SVM
from scikits.learn.svm import SVM
svm = SVM(kernel_type='linear')
svm.fit(x, y)
svm_weights = (svm.support_**2).sum(axis=0)
pl.plot(svm_weights/svm_weights.max(), label='SVM weight')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.legend()
pl.show()
|
<commit_before><commit_msg>DOC: Add an example of feature selection.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@458 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>"""
An example showing feature selection.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn.datasets.iris import load
SP, SW, PL, PW, LABELS = load()
# Some noisy data not correlated
E1, E2 = np.random.normal(size=(2, len(SP)))
x = np.c_[SP, SW, PL, PW, E1, E2]
y = LABELS
################################################################################
pl.figure(1)
pl.clf()
################################################################################
# Univariate feature selection
from scikits.learn.feature_select import univ_selection
selector = univ_selection.UnivSelection(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector.p_values_)
pl.plot(scores/scores.max(), label='Univariate score (p values)')
################################################################################
# Compare to the weights of an SVM
from scikits.learn.svm import SVM
svm = SVM(kernel_type='linear')
svm.fit(x, y)
svm_weights = (svm.support_**2).sum(axis=0)
pl.plot(svm_weights/svm_weights.max(), label='SVM weight')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.legend()
pl.show()
|
|
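A side note on the record above: the scikits.learn import paths and SVM class predate the scikit-learn rename and no longer exist. A minimal sketch of the same noisy-iris comparison against the current API, assuming a modern scikit-learn and matplotlib install (module and class names below are today's, not the commit's):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.svm import SVC

# The iris features plus two uncorrelated noise columns, as in the original example
iris = load_iris()
rng = np.random.default_rng(0)
X = np.hstack([iris.data, rng.normal(size=(len(iris.data), 2))])
y = iris.target

# Univariate ANOVA F-test scores
selector = SelectKBest(f_classif, k='all').fit(X, y)
scores = -np.log10(selector.pvalues_)
plt.plot(scores / scores.max(), label='Univariate score (p values)')

# Compare against the squared weights of a linear SVM
svm = SVC(kernel='linear').fit(X, y)
svm_weights = (svm.coef_ ** 2).sum(axis=0)
plt.plot(svm_weights / svm_weights.max(), label='SVM weight')
plt.title('Comparing feature selection')
plt.xlabel('Feature number')
plt.legend()
plt.show()

For a linear kernel svm.coef_ is a dense per-class weight matrix, so (coef_ ** 2).sum(axis=0) plays the role of the old support_-based weight.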
76b75ac9ae8a1456962ed9cf70024628be27d371
|
corehq/apps/reminders/management/commands/find_ivr_usage.py
|
corehq/apps/reminders/management/commands/find_ivr_usage.py
|
from __future__ import absolute_import
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, METHOD_IVR_SURVEY
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
active_ivr_handlers = defaultdict(lambda: 0)
inactive_ivr_handlers = defaultdict(lambda: 0)
for handler in handlers:
if handler.method == METHOD_IVR_SURVEY:
if handler.active:
active_ivr_handlers[handler.domain] += 1
else:
inactive_ivr_handlers[handler.domain] += 1
print("============ Inactive IVR Handlers ============")
for domain, count in inactive_ivr_handlers.items():
print("%s: %s" % (domain, count))
print("============ Active IVR Handlers ============")
for domain, count in active_ivr_handlers.items():
print("%s: %s" % (domain, count))
|
Add script to find projects using IVR
|
Add script to find projects using IVR
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add script to find projects using IVR
|
from __future__ import absolute_import
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, METHOD_IVR_SURVEY
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
active_ivr_handlers = defaultdict(lambda: 0)
inactive_ivr_handlers = defaultdict(lambda: 0)
for handler in handlers:
if handler.method == METHOD_IVR_SURVEY:
if handler.active:
active_ivr_handlers[handler.domain] += 1
else:
inactive_ivr_handlers[handler.domain] += 1
print("============ Inactive IVR Handlers ============")
for domain, count in inactive_ivr_handlers.items():
print("%s: %s" % (domain, count))
print("============ Active IVR Handlers ============")
for domain, count in active_ivr_handlers.items():
print("%s: %s" % (domain, count))
|
<commit_before><commit_msg>Add script to find projects using IVR<commit_after>
|
from __future__ import absolute_import
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, METHOD_IVR_SURVEY
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
active_ivr_handlers = defaultdict(lambda: 0)
inactive_ivr_handlers = defaultdict(lambda: 0)
for handler in handlers:
if handler.method == METHOD_IVR_SURVEY:
if handler.active:
active_ivr_handlers[handler.domain] += 1
else:
inactive_ivr_handlers[handler.domain] += 1
print("============ Inactive IVR Handlers ============")
for domain, count in inactive_ivr_handlers.items():
print("%s: %s" % (domain, count))
print("============ Active IVR Handlers ============")
for domain, count in active_ivr_handlers.items():
print("%s: %s" % (domain, count))
|
Add script to find projects using IVRfrom __future__ import absolute_import
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, METHOD_IVR_SURVEY
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
active_ivr_handlers = defaultdict(lambda: 0)
inactive_ivr_handlers = defaultdict(lambda: 0)
for handler in handlers:
if handler.method == METHOD_IVR_SURVEY:
if handler.active:
active_ivr_handlers[handler.domain] += 1
else:
inactive_ivr_handlers[handler.domain] += 1
print("============ Inactive IVR Handlers ============")
for domain, count in inactive_ivr_handlers.items():
print("%s: %s" % (domain, count))
print("============ Active IVR Handlers ============")
for domain, count in active_ivr_handlers.items():
print("%s: %s" % (domain, count))
|
<commit_before><commit_msg>Add script to find projects using IVR<commit_after>from __future__ import absolute_import
from collections import defaultdict
from corehq.apps.reminders.models import CaseReminderHandler, METHOD_IVR_SURVEY
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, **options):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
include_docs=True
).all()
active_ivr_handlers = defaultdict(lambda: 0)
inactive_ivr_handlers = defaultdict(lambda: 0)
for handler in handlers:
if handler.method == METHOD_IVR_SURVEY:
if handler.active:
active_ivr_handlers[handler.domain] += 1
else:
inactive_ivr_handlers[handler.domain] += 1
print("============ Inactive IVR Handlers ============")
for domain, count in inactive_ivr_handlers.items():
print("%s: %s" % (domain, count))
print("============ Active IVR Handlers ============")
for domain, count in active_ivr_handlers.items():
print("%s: %s" % (domain, count))
|
|
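The per-domain tallying in this script is the standard defaultdict counting pattern; collections.Counter expresses the same idea without the lambda. A sketch under the same handler shape (the 'IVR_SURVEY' default below stands in for METHOD_IVR_SURVEY and is an assumption):

from collections import Counter

def count_ivr_handlers(handlers, ivr_method='IVR_SURVEY'):
    # Tally handlers into active/inactive Counters keyed by domain
    active, inactive = Counter(), Counter()
    for handler in handlers:
        if handler.method == ivr_method:
            bucket = active if handler.active else inactive
            bucket[handler.domain] += 1
    return active, inactive

Counter also gives most_common() for free, which is handy if the report should list the busiest domains first.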
2e713f4f2f5cc206c5029bb00db0fa10f428fafc
|
geolang/__init__.py
|
geolang/__init__.py
|
from geolang.geolang import (
__author__,
__version__,
KA2LAT,
LAT2KA,
UNI2LAT,
_2KA,
_2LAT,
encode_slugify,
GeoLangToolKit,
unicode,
)
# from geolang.geolang import *
from .uni2lat import *
|
Initialize the geolang toolkit package
|
Initialize the geolang toolkit package
|
Python
|
mit
|
Lh4cKg/simple-geolang-toolkit
|
Initialize the geolang toolkit package
|
from geolang.geolang import (
__author__,
__version__,
KA2LAT,
LAT2KA,
UNI2LAT,
_2KA,
_2LAT,
encode_slugify,
GeoLangToolKit,
unicode,
)
# from geolang.geolang import *
from .uni2lat import *
|
<commit_before><commit_msg>Initialize the geolang toolkit package<commit_after>
|
from geolang.geolang import (
__author__,
__version__,
KA2LAT,
LAT2KA,
UNI2LAT,
_2KA,
_2LAT,
encode_slugify,
GeoLangToolKit,
unicode,
)
# from geolang.geolang import *
from .uni2lat import *
|
Initialize the geolang toolkit packagefrom geolang.geolang import (
__author__,
__version__,
KA2LAT,
LAT2KA,
UNI2LAT,
_2KA,
_2LAT,
encode_slugify,
GeoLangToolKit,
unicode,
)
# from geolang.geolang import *
from .uni2lat import *
|
<commit_before><commit_msg>Initialize the geolang toolkit package<commit_after>from geolang.geolang import (
__author__,
__version__,
KA2LAT,
LAT2KA,
UNI2LAT,
_2KA,
_2LAT,
encode_slugify,
GeoLangToolKit,
unicode,
)
# from geolang.geolang import *
from .uni2lat import *
|
|
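With these re-exports in place, consumers import the toolkit's public names straight from the package root rather than from geolang.geolang. A usage sketch (the call signature of encode_slugify is assumed from its name; this record only shows that it is exported):

from geolang import KA2LAT, encode_slugify, GeoLangToolKit

# Constructor arguments, if any, are not shown in this record
toolkit = GeoLangToolKit()
# Assumed signature: encode_slugify takes a single text argument
slug = encode_slugify(u'ქართული ენა')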
41caf18a4885f3e53078ff4d0f6efb570ab8c239
|
fileshack/management.py
|
fileshack/management.py
|
"""
Creates the default Store object.
"""
# Modelled after django.contrib.sites.management.
from django.db.models import signals
from django.db import router
import models as fileshack_app
from models import Store
def create_default_store(app, created_models, verbosity, db, **kwargs):
    # Only create the default store in databases where Django created the table.
    if Store in created_models and router.allow_syncdb(db, Store):
if verbosity >= 2:
print "Creating default Store object"
Store().save(using=db)
signals.post_syncdb.connect(create_default_store, sender=fileshack_app)
|
Create a default store on syncdb.
|
Create a default store on syncdb.
This is done by a hook to post_syncdb.
|
Python
|
mit
|
peterkuma/fileshackproject,peterkuma/fileshackproject,peterkuma/fileshackproject
|
Create a default store on syncdb.
This is done by a hook to post_syncdb.
|
"""
Creates the default Store object.
"""
# Modelled after django.contrib.sites.management.
from django.db.models import signals
from django.db import router
import models as fileshack_app
from models import Store
def create_default_store(app, created_models, verbosity, db, **kwargs):
    # Only create the default store in databases where Django created the table.
    if Store in created_models and router.allow_syncdb(db, Store):
if verbosity >= 2:
print "Creating default Store object"
Store().save(using=db)
signals.post_syncdb.connect(create_default_store, sender=fileshack_app)
|
<commit_before><commit_msg>Create a default store on syncdb.
This is done by a hook to post_syncdb.<commit_after>
|
"""
Creates the default Store object.
"""
# Modelled after django.contrib.sites.management.
from django.db.models import signals
from django.db import router
import models as fileshack_app
from models import Store
def create_default_store(app, created_models, verbosity, db, **kwargs):
    # Only create the default store in databases where Django created the table.
    if Store in created_models and router.allow_syncdb(db, Store):
if verbosity >= 2:
print "Creating default Store object"
Store().save(using=db)
signals.post_syncdb.connect(create_default_store, sender=fileshack_app)
|
Create a default store on syncdb.
This is done by a hook to post_syncdb."""
Creates the default Store object.
"""
# Modelled after django.contrib.sites.management.
from django.db.models import signals
from django.db import router
import models as fileshack_app
from models import Store
def create_default_store(app, created_models, verbosity, db, **kwargs):
    # Only create the default store in databases where Django created the table.
    if Store in created_models and router.allow_syncdb(db, Store):
if verbosity >= 2:
print "Creating default Store object"
Store().save(using=db)
signals.post_syncdb.connect(create_default_store, sender=fileshack_app)
|
<commit_before><commit_msg>Create a default store on syncdb.
This is done by a hook to post_syncdb.<commit_after>"""
Creates the default Store object.
"""
# Modelled after django.contrib.sites.management.
from django.db.models import signals
from django.db import router
import models as fileshack_app
from models import Store
def create_default_store(app, created_models, verbosity, db, **kwargs):
    # Only create the default store in databases where Django created the table.
    if Store in created_models and router.allow_syncdb(db, Store):
if verbosity >= 2:
print "Creating default Store object"
Store().save(using=db)
signals.post_syncdb.connect(create_default_store, sender=fileshack_app)
|
|
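The post_syncdb signal used here was removed in later Django releases. A rough modern port, assuming Django 1.7+'s post_migrate signal and that the models live at fileshack.models (registration would move into an AppConfig.ready()):

from django.db.models.signals import post_migrate

def create_default_store(sender, verbosity, using, **kwargs):
    from fileshack.models import Store
    # Create the default Store only once, while the table is still empty
    if not Store.objects.using(using).exists():
        if verbosity >= 2:
            print('Creating default Store object')
        Store().save(using=using)

# e.g. inside FileshackConfig.ready():
#     post_migrate.connect(create_default_store, sender=self)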
c2961fbe1746ba61707fb9fc9a0a9873a4abbf33
|
folium/elements.py
|
folium/elements.py
|
from branca.element import Figure, Element, JavascriptLink, CssLink
class JSCSSMixin(Element):
"""Render links to external Javascript and CSS resources."""
default_js = []
default_css = []
def render(self, **kwargs):
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
for name, url in self.default_js:
figure.header.add_child(JavascriptLink(url), name=name)
for name, url in self.default_css:
figure.header.add_child(CssLink(url), name=name)
super().render(**kwargs)
|
Add mixin to render JS and CSS links
|
Add mixin to render JS and CSS links
|
Python
|
mit
|
python-visualization/folium,ocefpaf/folium,python-visualization/folium,ocefpaf/folium
|
Add mixin to render JS and CSS links
|
from branca.element import Figure, Element, JavascriptLink, CssLink
class JSCSSMixin(Element):
"""Render links to external Javascript and CSS resources."""
default_js = []
default_css = []
def render(self, **kwargs):
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
for name, url in self.default_js:
figure.header.add_child(JavascriptLink(url), name=name)
for name, url in self.default_css:
figure.header.add_child(CssLink(url), name=name)
super().render(**kwargs)
|
<commit_before><commit_msg>Add mixin to render JS and CSS links<commit_after>
|
from branca.element import Figure, Element, JavascriptLink, CssLink
class JSCSSMixin(Element):
"""Render links to external Javascript and CSS resources."""
default_js = []
default_css = []
def render(self, **kwargs):
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
for name, url in self.default_js:
figure.header.add_child(JavascriptLink(url), name=name)
for name, url in self.default_css:
figure.header.add_child(CssLink(url), name=name)
super().render(**kwargs)
|
Add mixin to render JS and CSS linksfrom branca.element import Figure, Element, JavascriptLink, CssLink
class JSCSSMixin(Element):
"""Render links to external Javascript and CSS resources."""
default_js = []
default_css = []
def render(self, **kwargs):
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
for name, url in self.default_js:
figure.header.add_child(JavascriptLink(url), name=name)
for name, url in self.default_css:
figure.header.add_child(CssLink(url), name=name)
super().render(**kwargs)
|
<commit_before><commit_msg>Add mixin to render JS and CSS links<commit_after>from branca.element import Figure, Element, JavascriptLink, CssLink
class JSCSSMixin(Element):
"""Render links to external Javascript and CSS resources."""
default_js = []
default_css = []
def render(self, **kwargs):
figure = self.get_root()
assert isinstance(figure, Figure), ('You cannot render this Element '
'if it is not in a Figure.')
for name, url in self.default_js:
figure.header.add_child(JavascriptLink(url), name=name)
for name, url in self.default_css:
figure.header.add_child(CssLink(url), name=name)
super().render(**kwargs)
|
|
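A subclass consumes the mixin simply by declaring the two class attributes; render() then pushes each (name, url) pair into the figure header. A hypothetical plugin sketch (the class name and URLs are illustrative, not real folium resources):

class HeatPlugin(JSCSSMixin):
    # Each entry is a (name, url) pair added to the Figure's header
    default_js = [
        ('leaflet_heat', 'https://example.com/leaflet-heat.js'),
    ]
    default_css = [
        ('leaflet_heat_css', 'https://example.com/leaflet-heat.css'),
    ]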
d9818f1fd05a1b308ef0b4675e4ce4553f1d2291
|
third_party/chromium_browser_clang/get_latest.py
|
third_party/chromium_browser_clang/get_latest.py
|
#!/usr/bin/python3 -u
'''Download the prebuilt clang binary built by chromium and is used by chromium.'''
import os
import os.path
import subprocess
UPDATE_SH_URL = 'https://chromium.googlesource.com/chromium/src/+/master/tools/clang/scripts/update.sh'
CLANG_REVISION = 238013
CLANG_SUB_REVISION = 1
CDS_URL = 'https://commondatastorage.googleapis.com/chromium-browser-clang'
def get_revision():
    '''Get the revision encoded in update.sh.'''
# TODO: we need to get the raw output or try to parse the HTML output
update_sh = subprocess.check_output(['curl', UPDATE_SH_URL],
stderr=subprocess.DEVNULL,
universal_newlines=True)
revision = None
sub_revision = None
for line in update_sh.split('\n'):
print('==' + line)
if line.find('CLANG_REVISION') == 0:
            revision = line.split('=')[1]
        elif line.find('CLANG_SUB_REVISION') == 0:
            sub_revision = line.split('=')[1]
break
return revision, sub_revision
def main():
url = CDS_URL + '/Linux_x64/clang-%d-%d.tgz' % (CLANG_REVISION, CLANG_SUB_REVISION)
output_dir = os.path.join(os.path.dirname(__file__), 'Linux_x64')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('Downloading %s to %s' % (url, output_dir))
subprocess.check_call(['curl %s |tar zx -C %s' % (url, output_dir)],
shell=True)
if __name__ == '__main__':
main()
|
Add script to download chromium prebuilt clang for Linux.
|
Add script to download chromium prebuilt clang for Linux.
|
Python
|
apache-2.0
|
duanguoxue/trunk,duanguoxue/trunk,mzhaom/trunk,bazelment/trunk,bazelment/trunk,mzhaom/trunk,bazelment/trunk,bazelment/trunk,mzhaom/trunk,duanguoxue/trunk
|
Add script to download chromium prebuilt clang for Linux.
|
#!/usr/bin/python3 -u
'''Download the prebuilt clang binary built by chromium and is used by chromium.'''
import os
import os.path
import subprocess
UPDATE_SH_URL = 'https://chromium.googlesource.com/chromium/src/+/master/tools/clang/scripts/update.sh'
CLANG_REVISION = 238013
CLANG_SUB_REVISION = 1
CDS_URL = 'https://commondatastorage.googleapis.com/chromium-browser-clang'
def get_revision():
    '''Get the revision encoded in update.sh.'''
# TODO: we need to get the raw output or try to parse the HTML output
update_sh = subprocess.check_output(['curl', UPDATE_SH_URL],
stderr=subprocess.DEVNULL,
universal_newlines=True)
revision = None
sub_revision = None
for line in update_sh.split('\n'):
print('==' + line)
if line.find('CLANG_REVISION') == 0:
            revision = line.split('=')[1]
        elif line.find('CLANG_SUB_REVISION') == 0:
            sub_revision = line.split('=')[1]
break
return revision, sub_revision
def main():
url = CDS_URL + '/Linux_x64/clang-%d-%d.tgz' % (CLANG_REVISION, CLANG_SUB_REVISION)
output_dir = os.path.join(os.path.dirname(__file__), 'Linux_x64')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('Downloading %s to %s' % (url, output_dir))
subprocess.check_call(['curl %s |tar zx -C %s' % (url, output_dir)],
shell=True)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to download chromium prebuilt clang for Linux.<commit_after>
|
#!/usr/bin/python3 -u
'''Download the prebuilt clang binary built by chromium and is used by chromium.'''
import os
import os.path
import subprocess
UPDATE_SH_URL = 'https://chromium.googlesource.com/chromium/src/+/master/tools/clang/scripts/update.sh'
CLANG_REVISION = 238013
CLANG_SUB_REVISION = 1
CDS_URL = 'https://commondatastorage.googleapis.com/chromium-browser-clang'
def get_revision():
    '''Get the revision encoded in update.sh.'''
# TODO: we need to get the raw output or try to parse the HTML output
update_sh = subprocess.check_output(['curl', UPDATE_SH_URL],
stderr=subprocess.DEVNULL,
universal_newlines=True)
revision = None
sub_revision = None
for line in update_sh.split('\n'):
print('==' + line)
if line.find('CLANG_REVISION') == 0:
            revision = line.split('=')[1]
        elif line.find('CLANG_SUB_REVISION') == 0:
            sub_revision = line.split('=')[1]
break
return revision, sub_revision
def main():
url = CDS_URL + '/Linux_x64/clang-%d-%d.tgz' % (CLANG_REVISION, CLANG_SUB_REVISION)
output_dir = os.path.join(os.path.dirname(__file__), 'Linux_x64')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('Downloading %s to %s' % (url, output_dir))
subprocess.check_call(['curl %s |tar zx -C %s' % (url, output_dir)],
shell=True)
if __name__ == '__main__':
main()
|
Add script to download chromium prebuilt clang for Linux.#!/usr/bin/python3 -u
'''Download the prebuilt clang binary built by chromium and is used by chromium.'''
import os
import os.path
import subprocess
UPDATE_SH_URL = 'https://chromium.googlesource.com/chromium/src/+/master/tools/clang/scripts/update.sh'
CLANG_REVISION = 238013
CLANG_SUB_REVISION = 1
CDS_URL = 'https://commondatastorage.googleapis.com/chromium-browser-clang'
def get_revision():
    '''Get the revision encoded in update.sh.'''
# TODO: we need to get the raw output or try to parse the HTML output
update_sh = subprocess.check_output(['curl', UPDATE_SH_URL],
stderr=subprocess.DEVNULL,
universal_newlines=True)
revision = None
sub_revision = None
for line in update_sh.split('\n'):
print('==' + line)
if line.find('CLANG_REVISION') == 0:
            revision = line.split('=')[1]
        elif line.find('CLANG_SUB_REVISION') == 0:
            sub_revision = line.split('=')[1]
break
return revision, sub_revision
def main():
url = CDS_URL + '/Linux_x64/clang-%d-%d.tgz' % (CLANG_REVISION, CLANG_SUB_REVISION)
output_dir = os.path.join(os.path.dirname(__file__), 'Linux_x64')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('Downloading %s to %s' % (url, output_dir))
subprocess.check_call(['curl %s |tar zx -C %s' % (url, output_dir)],
shell=True)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to download chromium prebuilt clang for Linux.<commit_after>#!/usr/bin/python3 -u
'''Download the prebuilt clang binary built by chromium and is used by chromium.'''
import os
import os.path
import subprocess
UPDATE_SH_URL = 'https://chromium.googlesource.com/chromium/src/+/master/tools/clang/scripts/update.sh'
CLANG_REVISION = 238013
CLANG_SUB_REVISION = 1
CDS_URL = 'https://commondatastorage.googleapis.com/chromium-browser-clang'
def get_revision():
    '''Get the revision encoded in update.sh.'''
# TODO: we need to get the raw output or try to parse the HTML output
update_sh = subprocess.check_output(['curl', UPDATE_SH_URL],
stderr=subprocess.DEVNULL,
universal_newlines=True)
revision = None
sub_revision = None
for line in update_sh.split('\n'):
print('==' + line)
if line.find('CLANG_REVISION') == 0:
            revision = line.split('=')[1]
        elif line.find('CLANG_SUB_REVISION') == 0:
            sub_revision = line.split('=')[1]
break
return revision, sub_revision
def main():
url = CDS_URL + '/Linux_x64/clang-%d-%d.tgz' % (CLANG_REVISION, CLANG_SUB_REVISION)
output_dir = os.path.join(os.path.dirname(__file__), 'Linux_x64')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('Downloading %s to %s' % (url, output_dir))
subprocess.check_call(['curl %s |tar zx -C %s' % (url, output_dir)],
shell=True)
if __name__ == '__main__':
main()
|
|
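The curl | tar pipeline in main() runs through a shell; the standard library can do the same streaming download-and-extract without spawning processes. A sketch, assuming the same tarball layout:

import tarfile
import urllib.request

def download_and_extract(url, output_dir):
    # Stream the gzipped tarball straight from the HTTP response into tarfile
    with urllib.request.urlopen(url) as response:
        with tarfile.open(fileobj=response, mode='r|gz') as archive:
            archive.extractall(output_dir)

The 'r|gz' stream mode matters here: it lets tarfile read from the non-seekable HTTP response, mirroring what the curl | tar pipe does.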
ab92b41d0bbb1a6befbc7d34225a9cf84d088e30
|
bin/set-paypal-email.py
|
bin/set-paypal-email.py
|
#!/usr/bin/env python
"""Set the PayPal email address for a user.
Usage:
[gittip] $ heroku config -s -a gittip | foreman run -e /dev/stdin ./env/bin/python ./bin/set-paypal-email.py username user@example.com [first-eight-of-api-key] [overwrite]
"""
from __future__ import print_function
import sys
from gittip import wireup
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " username user@example.com [first-eight-of-api-key] [overwrite]")
sys.exit(1)
username = sys.argv[1] # safe: the argument-count check above guarantees it
email = sys.argv[2]
if len(sys.argv) < 4:
first_eight = "unknown!"
else:
first_eight = sys.argv[3]
db = wireup.db()
FIELDS = """
SELECT username, api_key, paypal_email
FROM participants
WHERE username = %s
"""
fields = db.one(FIELDS, (username,))
print(fields)
if fields is None:
    print("No Gittip participant found with username '" + username + "'")
    sys.exit(2)
if fields.paypal_email is not None:
    print("PayPal email is already set to: " + fields.paypal_email)
    if len(sys.argv) < 5 or sys.argv[4] != "overwrite":
        print("Not overwriting existing PayPal email.")
        sys.exit(3)
if fields.api_key is None:
    assert first_eight == "None"
else:
    assert fields.api_key[0:8] == first_eight
print("Setting PayPal email for " + username + " to " + email)
SET_EMAIL = """
UPDATE participants
SET paypal_email=%s
WHERE username=%s;
"""
print(SET_EMAIL % (email, username))
db.run(SET_EMAIL, (email, username))
print("All done.")
|
Add a script to setup a PayPal email address
|
Add a script to setup a PayPal email address
|
Python
|
mit
|
gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,studio666/gratipay.com,mccolgst/www.gittip.com,eXcomm/gratipay.com,studio666/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com,eXcomm/gratipay.com,studio666/gratipay.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com
|
Add a script to setup a PayPal email address
|
#!/usr/bin/env python
"""Set the PayPal email address for a user.
Usage:
[gittip] $ heroku config -s -a gittip | foreman run -e /dev/stdin ./env/bin/python ./bin/set-paypal-email.py username user@example.com [first-eight-of-api-key] [overwrite]
"""
from __future__ import print_function
import sys
from gittip import wireup
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " username user@example.com [first-eight-of-api-key] [overwrite]")
sys.exit(1)
username = sys.argv[1] # safe: the argument-count check above guarantees it
email = sys.argv[2]
if len(sys.argv) < 4:
first_eight = "unknown!"
else:
first_eight = sys.argv[3]
db = wireup.db()
FIELDS = """
SELECT username, api_key, paypal_email
FROM participants
WHERE username = %s
"""
fields = db.one(FIELDS, (username,))
print(fields)
if fields is None:
    print("No Gittip participant found with username '" + username + "'")
    sys.exit(2)
if fields.paypal_email is not None:
    print("PayPal email is already set to: " + fields.paypal_email)
    if len(sys.argv) < 5 or sys.argv[4] != "overwrite":
        print("Not overwriting existing PayPal email.")
        sys.exit(3)
if fields.api_key is None:
    assert first_eight == "None"
else:
    assert fields.api_key[0:8] == first_eight
print("Setting PayPal email for " + username + " to " + email)
SET_EMAIL = """
UPDATE participants
SET paypal_email=%s
WHERE username=%s;
"""
print(SET_EMAIL % (email, username))
db.run(SET_EMAIL, (email, username))
print("All done.")
|
<commit_before><commit_msg>Add a script to setup a PayPal email address<commit_after>
|
#!/usr/bin/env python
"""Set the PayPal email address for a user.
Usage:
[gittip] $ heroku config -s -a gittip | foreman run -e /dev/stdin ./env/bin/python ./bin/set-paypal-email.py username user@example.com [first-eight-of-api-key] [overwrite]
"""
from __future__ import print_function
import sys
from gittip import wireup
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " username user@example.com [first-eight-of-api-key] [overwrite]")
sys.exit(1)
username = sys.argv[1] # safe: the argument-count check above guarantees it
email = sys.argv[2]
if len(sys.argv) < 4:
first_eight = "unknown!"
else:
first_eight = sys.argv[3]
db = wireup.db()
FIELDS = """
SELECT username, api_key, paypal_email
FROM participants
WHERE username = %s
"""
fields = db.one(FIELDS, (username,))
print(fields)
if fields is None:
    print("No Gittip participant found with username '" + username + "'")
    sys.exit(2)
if fields.paypal_email is not None:
    print("PayPal email is already set to: " + fields.paypal_email)
    if len(sys.argv) < 5 or sys.argv[4] != "overwrite":
        print("Not overwriting existing PayPal email.")
        sys.exit(3)
if fields.api_key is None:
    assert first_eight == "None"
else:
    assert fields.api_key[0:8] == first_eight
print("Setting PayPal email for " + username + " to " + email)
SET_EMAIL = """
UPDATE participants
SET paypal_email=%s
WHERE username=%s;
"""
print(SET_EMAIL % (email, username))
db.run(SET_EMAIL, (email, username))
print("All done.")
|
Add a script to setup a PayPal email address#!/usr/bin/env python
"""Set the PayPal email address for a user.
Usage:
[gittip] $ heroku config -s -a gittip | foreman run -e /dev/stdin ./env/bin/python ./bin/set-paypal-email.py username user@example.com [first-eight-of-api-key] [overwrite]
"""
from __future__ import print_function
import sys
from gittip import wireup
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " username user@example.com [first-eight-of-api-key] [overwrite]")
sys.exit(1)
username = sys.argv[1] # safe: the argument-count check above guarantees it
email = sys.argv[2]
if len(sys.argv) < 4:
first_eight = "unknown!"
else:
first_eight = sys.argv[3]
db = wireup.db()
FIELDS = """
SELECT username, api_key, paypal_email
FROM participants
WHERE username = %s
"""
fields = db.one(FIELDS, (username,))
print(fields)
if fields is None:
    print("No Gittip participant found with username '" + username + "'")
    sys.exit(2)
if fields.paypal_email is not None:
    print("PayPal email is already set to: " + fields.paypal_email)
    if len(sys.argv) < 5 or sys.argv[4] != "overwrite":
        print("Not overwriting existing PayPal email.")
        sys.exit(3)
if fields.api_key is None:
    assert first_eight == "None"
else:
    assert fields.api_key[0:8] == first_eight
print("Setting PayPal email for " + username + " to " + email)
SET_EMAIL = """
UPDATE participants
SET paypal_email=%s
WHERE username=%s;
"""
print(SET_EMAIL % (email, username))
db.run(SET_EMAIL, (email, username))
print("All done.")
|
<commit_before><commit_msg>Add a script to setup a PayPal email address<commit_after>#!/usr/bin/env python
"""Set the PayPal email address for a user.
Usage:
[gittip] $ heroku config -s -a gittip | foreman run -e /dev/stdin ./env/bin/python ./bin/set-paypal-email.py username user@example.com [first-eight-of-api-key] [overwrite]
"""
from __future__ import print_function
import sys
from gittip import wireup
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " username user@example.com [first-eight-of-api-key] [overwrite]")
sys.exit(1)
username = sys.argv[1] # safe: the argument-count check above guarantees it
email = sys.argv[2]
if len(sys.argv) < 4:
first_eight = "unknown!"
else:
first_eight = sys.argv[3]
db = wireup.db()
FIELDS = """
SELECT username, api_key, paypal_email
FROM participants
WHERE username = %s
"""
fields = db.one(FIELDS, (username,))
print(fields)
if fields is None:
    print("No Gittip participant found with username '" + username + "'")
    sys.exit(2)
if fields.paypal_email is not None:
    print("PayPal email is already set to: " + fields.paypal_email)
    if len(sys.argv) < 5 or sys.argv[4] != "overwrite":
        print("Not overwriting existing PayPal email.")
        sys.exit(3)
if fields.api_key is None:
    assert first_eight == "None"
else:
    assert fields.api_key[0:8] == first_eight
print("Setting PayPal email for " + username + " to " + email)
SET_EMAIL = """
UPDATE participants
SET paypal_email=%s
WHERE username=%s;
"""
print(SET_EMAIL % (email, username))
db.run(SET_EMAIL, (email, username))
print("All done.")
|
|
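The positional sys.argv handling above works but is easy to misuse; argparse can express the same four-argument interface self-documentingly. A sketch (behaviour matched by reading the script, not tested against it):

import argparse

def parse_args():
    parser = argparse.ArgumentParser(
        description='Set the PayPal email address for a user.')
    parser.add_argument('username')
    parser.add_argument('email')
    parser.add_argument('first_eight', nargs='?', default='unknown!',
                        help='first eight characters of the API key')
    parser.add_argument('overwrite', nargs='?', choices=['overwrite'],
                        help="pass the literal word 'overwrite' to replace an existing address")
    return parser.parse_args()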
6f8ea15058161bd9735c37cda5f09d4bd9db7514
|
doc/deployer/set_prior_nodes_from_collection_set.py
|
doc/deployer/set_prior_nodes_from_collection_set.py
|
def set_prior_nodes_from_collection_set(node_obj):
    print "\n Name : ", node_obj.name, " -- ", node_obj.member_of_names_list, " --- ", node_obj._id
if node_obj.collection_set:
for each in node_obj.collection_set:
each_obj = node_collection.one({'_id': ObjectId(each)})
# if "Page" in each_obj.member_of_names_list:
# print "\n\n Page -- ", each_obj.prior_node
if node_obj._id not in each_obj.prior_node:
each_obj.prior_node.append(node_obj._id)
each_obj.save()
set_prior_nodes_from_collection_set(each_obj)
ce_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseEventGroup"})
course_event_grps = node_collection.find({'member_of': ce_gst._id})
for eachce in course_event_grps:
set_prior_nodes_from_collection_set(eachce)
|
Add prior node i.e. unit id in course's resources
|
Add prior node i.e. unit id in course's resources
|
Python
|
agpl-3.0
|
gnowledge/gstudio,gnowledge/gstudio,AvadootNachankar/gstudio,gnowledge/gstudio,AvadootNachankar/gstudio,AvadootNachankar/gstudio,gnowledge/gstudio,AvadootNachankar/gstudio,gnowledge/gstudio
|
Add prior node i.e. unit id in course's resources
|
def set_prior_nodes_from_collection_set(node_obj):
    print "\n Name : ", node_obj.name, " -- ", node_obj.member_of_names_list, " --- ", node_obj._id
if node_obj.collection_set:
for each in node_obj.collection_set:
each_obj = node_collection.one({'_id': ObjectId(each)})
# if "Page" in each_obj.member_of_names_list:
# print "\n\n Page -- ", each_obj.prior_node
if node_obj._id not in each_obj.prior_node:
each_obj.prior_node.append(node_obj._id)
each_obj.save()
set_prior_nodes_from_collection_set(each_obj)
ce_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseEventGroup"})
course_event_grps = node_collection.find({'member_of': ce_gst._id})
for eachce in course_event_grps:
set_prior_nodes_from_collection_set(eachce)
|
<commit_before><commit_msg>Add prior node i.e. unit id in course's resources<commit_after>
|
def set_prior_nodes_from_collection_set(node_obj):
    print "\n Name : ", node_obj.name, " -- ", node_obj.member_of_names_list, " --- ", node_obj._id
if node_obj.collection_set:
for each in node_obj.collection_set:
each_obj = node_collection.one({'_id': ObjectId(each)})
# if "Page" in each_obj.member_of_names_list:
# print "\n\n Page -- ", each_obj.prior_node
if node_obj._id not in each_obj.prior_node:
each_obj.prior_node.append(node_obj._id)
each_obj.save()
set_prior_nodes_from_collection_set(each_obj)
ce_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseEventGroup"})
course_event_grps = node_collection.find({'member_of': ce_gst._id})
for eachce in course_event_grps:
set_prior_nodes_from_collection_set(eachce)
|
Add prior node i.e. unit id in course's resourcesdef set_prior_nodes_from_collection_set(node_obj):
print "\n Name : ", node_obj.name, " -- ", node_obj.member_of_names_list , " --- ", node_obj._id
if node_obj.collection_set:
for each in node_obj.collection_set:
each_obj = node_collection.one({'_id': ObjectId(each)})
# if "Page" in each_obj.member_of_names_list:
# print "\n\n Page -- ", each_obj.prior_node
if node_obj._id not in each_obj.prior_node:
each_obj.prior_node.append(node_obj._id)
each_obj.save()
set_prior_nodes_from_collection_set(each_obj)
ce_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseEventGroup"})
course_event_grps = node_collection.find({'member_of': ce_gst._id})
for eachce in course_event_grps:
set_prior_nodes_from_collection_set(eachce)
|
<commit_before><commit_msg>Add prior node, i.e. unit id, in course's resources<commit_after>def set_prior_nodes_from_collection_set(node_obj):
print "\n Name : ", node_obj.name, " -- ", node_obj.member_of_names_list , " --- ", node_obj._id
if node_obj.collection_set:
for each in node_obj.collection_set:
each_obj = node_collection.one({'_id': ObjectId(each)})
# if "Page" in each_obj.member_of_names_list:
# print "\n\n Page -- ", each_obj.prior_node
if node_obj._id not in each_obj.prior_node:
each_obj.prior_node.append(node_obj._id)
each_obj.save()
set_prior_nodes_from_collection_set(each_obj)
ce_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseEventGroup"})
course_event_grps = node_collection.find({'member_of': ce_gst._id})
for eachce in course_event_grps:
set_prior_nodes_from_collection_set(eachce)
|
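A caveat on the record above: the recursion re-descends into every child even when the link already exists, so a cycle in collection_set would recurse without bound. A hedged sketch of the same prior_node linking over plain dicts, with a visited-set guard; the gstudio node_collection/ObjectId API is mocked, not used:

nodes = {
    "unit": {"collection_set": ["page"], "prior_node": []},
    "page": {"collection_set": ["unit"], "prior_node": []},  # deliberate cycle
}

def link_prior_nodes(node_id, seen=None):
    seen = set() if seen is None else seen
    if node_id in seen:  # the guard stops the cycle
        return
    seen.add(node_id)
    for child_id in nodes[node_id]["collection_set"]:
        child = nodes[child_id]
        if node_id not in child["prior_node"]:
            child["prior_node"].append(node_id)
        link_prior_nodes(child_id, seen)

link_prior_nodes("unit")
print(nodes["page"]["prior_node"])  # ['unit']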
|
ab12cb56a0d91384c1e80f20618025b3ec3e94a3
|
kimochiconsumer/kimochi.py
|
kimochiconsumer/kimochi.py
|
import requests
class Kimochi:
def __init__(self, url, api_key, site_key = None):
if not url.endswith('/'):
url += '/'
if site_key:
self.url = url + 'sites/' + site_key + '/'
self.api_key = api_key
def page(self, page_id):
return self._get('pages/' + str(page_id))
def image(self, image_id):
return self._get('images/' + str(image_id))
def gallery(self, gallery_id):
return self._get('galleries/' + str(gallery_id))
def _get(self, path):
r = requests.get(self.url + path, {'api_key': self.api_key})
return r.json()
|
Add initial framework for Kimochi client
|
Add initial framework for Kimochi client
|
Python
|
mit
|
matslindh/kimochi-consumer
|
Add initial framework for Kimochi client
|
import requests
class Kimochi:
def __init__(self, url, api_key, site_key = None):
if not url.endswith('/'):
url += '/'
if site_key:
self.url = url + 'sites/' + site_key + '/'
self.api_key = api_key
def page(self, page_id):
return self._get('pages/' + str(page_id))
def image(self, image_id):
return self._get('images/' + str(image_id))
def gallery(self, gallery_id):
return self._get('galleries/' + str(gallery_id))
def _get(self, path):
r = requests.get(self.url + path, {'api_key': self.api_key})
return r.json()
|
<commit_before><commit_msg>Add initial framework for Kimochi client<commit_after>
|
import requests
class Kimochi:
def __init__(self, url, api_key, site_key = None):
if not url.endswith('/'):
url += '/'
if site_key:
self.url = url + 'sites/' + site_key + '/'
self.api_key = api_key
def page(self, page_id):
return self._get('pages/' + str(page_id))
def image(self, image_id):
return self._get('images/' + str(image_id))
def gallery(self, gallery_id):
return self._get('galleries/' + str(gallery_id))
def _get(self, path):
r = requests.get(self.url + path, {'api_key': self.api_key})
return r.json()
|
Add initial framework for Kimochi clientimport requests
class Kimochi:
def __init__(self, url, api_key, site_key = None):
if not url.endswith('/'):
url += '/'
if site_key:
self.url = url + 'sites/' + site_key + '/'
self.api_key = api_key
def page(self, page_id):
return self._get('pages/' + str(page_id))
def image(self, image_id):
return self._get('images/' + str(image_id))
def gallery(self, gallery_id):
return self._get('galleries/' + str(gallery_id))
def _get(self, path):
r = requests.get(self.url + path, {'api_key': self.api_key})
return r.json()
|
<commit_before><commit_msg>Add initial framework for Kimochi client<commit_after>import requests
class Kimochi:
def __init__(self, url, api_key, site_key = None):
if not url.endswith('/'):
url += '/'
if site_key:
self.url = url + 'sites/' + site_key + '/'
self.api_key = api_key
def page(self, page_id):
return self._get('pages/' + str(page_id))
def image(self, image_id):
return self._get('images/' + str(image_id))
def gallery(self, gallery_id):
return self._get('galleries/' + str(gallery_id))
def _get(self, path):
r = requests.get(self.url + path, {'api_key': self.api_key})
return r.json()
|
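A hedged usage sketch for the client above: the URL and keys are placeholders, and note that as committed __init__ only assigns self.url when site_key is given, so omitting site_key raises AttributeError on the first request. No request is sent here; the Kimochi class is assumed to be importable:

client = Kimochi("https://api.example.com", api_key="secret", site_key="mysite")
print(client.url)  # https://api.example.com/sites/mysite/
# client.page(1) would GET .../sites/mysite/pages/1 with api_key attached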
|
88e5f659e12a6be46246bbf36b59b73bad167b0b
|
syntacticframes_project/syntacticframes/settings/test.py
|
syntacticframes_project/syntacticframes/settings/test.py
|
from __future__ import absolute_import
from .base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
|
Test using sqlite in production
|
Test using sqlite in production
This lets Django create a new database for every test run, which should be done
via the admin interface for PostgreSQL databases
|
Python
|
mit
|
aymara/verbenet-editor,aymara/verbenet-editor,aymara/verbenet-editor
|
Test using sqlite in production
This lets Django create a new database for every test run, which should be done
via the admin interface for PostgreSQL databases
|
from __future__ import absolute_import
from .base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
|
<commit_before><commit_msg>Test using sqlite in production
This lets Django create a new database for every test run, which should be done
via the admin interface for PostgreSQL databases<commit_after>
|
from __future__ import absolute_import
from .base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
|
Test using sqlite in production
This lets Django create a new database for every test run, which should be done
via the admin interface for PostgreSQL databasesfrom __future__ import absolute_import
from .base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
|
<commit_before><commit_msg>Test using sqlite in production
This lets Django create a new database for every test run, which should be done
via the admin interface for PostgreSQL databases<commit_after>from __future__ import absolute_import
from .base import *
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
|
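A hedged sketch of pointing a runner at these settings from Python; the module path mirrors the record's layout and assumes a configured, importable project:

import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "syntacticframes.settings.test")

import django
django.setup()  # from here, tests hit the in-memory sqlite database above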
|
32fe5e192ed7b74812a2d117cc8f6374d139948d
|
sherlock.stanford.edu.run_gpaw.py
|
sherlock.stanford.edu.run_gpaw.py
|
#!/usr/bin/env python
from sys import argv
import os
job = argv[1]
nodes = argv[2]
time = argv[3] + ":00"
if len(argv) > 4:
gpaw_options = ' '.join(argv[4:])
else:
gpaw_options = ' '
#options = '-l nodes=' + nodes +':ppn=2' + ' -l' +' walltime=' + time + ' -m abe'
#options = '-N ' + nodes +' -t ' + time + ' -J ' + job
options = ' -J ' + job
#dir = os.getcwd()
f = open('tmp.sh', 'w')
f.write("""\
#!/bin/bash
#SBATCH -n %s
#SBATCH -t %s
#SBATCH -p iric,normal
#SBATCH --exclusive
# Add nodes that always fail
#SBATCH -x gpu-14-1,sh-20-35
# send email about job status changes
##SBATCH --mail-type=ALL
#Set an open-mpi parameter to suppress "fork()" warnings
# GPAW is written to use fork calls
export OMPI_MCA_mpi_warn_on_fork=0
#This next line decides which version of gpaw will be used
source $HOME/environment_scripts/set_paths_gpaw-trunk_scalapack_libvdwxc.sh # Gpaw trunk with mBEEF-vdW fixed for libvdwxc
srun `which gpaw-python` %s %s
""" % (nodes,time,job,gpaw_options))
f.close()
os.system('sbatch ' + options + ' tmp.sh')
|
Add the submission script for GPAW on Sherlock at Stanford
|
Add the submission script for GPAW on Sherlock at Stanford
|
Python
|
mit
|
RKBK/gpaw-customize-files,RKBK/gpaw-customize-files
|
Add the submission script for GPAW on Sherlock at Stanford
|
#!/usr/bin/env python
from sys import argv
import os
job = argv[1]
nodes = argv[2]
time = argv[3] + ":00"
if len(argv) > 4:
gpaw_options = ' '.join(argv[4:])
else:
gpaw_options = ' '
#options = '-l nodes=' + nodes +':ppn=2' + ' -l' +' walltime=' + time + ' -m abe'
#options = '-N ' + nodes +' -t ' + time + ' -J ' + job
options = ' -J ' + job
#dir = os.getcwd()
f = open('tmp.sh', 'w')
f.write("""\
#!/bin/bash
#SBATCH -n %s
#SBATCH -t %s
#SBATCH -p iric,normal
#SBATCH --exclusive
# Add nodes that always fail
#SBATCH -x gpu-14-1,sh-20-35
# send email about job status changes
##SBATCH --mail-type=ALL
#Set an open-mpi parameter to suppress "fork()" warnings
# GPAW is written to use fork calls
export OMPI_MCA_mpi_warn_on_fork=0
#This next line decides which version of gpaw will be used
source $HOME/environment_scripts/set_paths_gpaw-trunk_scalapack_libvdwxc.sh # Gpaw trunk with mBEEF-vdW fixed for libvdwxc
srun `which gpaw-python` %s %s
""" % (nodes,time,job,gpaw_options))
f.close()
os.system('sbatch ' + options + ' tmp.sh')
|
<commit_before><commit_msg>Add the submission script for GPAW on Sherlock at Stanford<commit_after>
|
#!/usr/bin/env python
from sys import argv
import os
job = argv[1]
nodes = argv[2]
time = argv[3] + ":00"
if len(argv) > 4:
gpaw_options = ' '.join(argv[4:])
else:
gpaw_options = ' '
#options = '-l nodes=' + nodes +':ppn=2' + ' -l' +' walltime=' + time + ' -m abe'
#options = '-N ' + nodes +' -t ' + time + ' -J ' + job
options = ' -J ' + job
#dir = os.getcwd()
f = open('tmp.sh', 'w')
f.write("""\
#!/bin/bash
#SBATCH -n %s
#SBATCH -t %s
#SBATCH -p iric,normal
#SBATCH --exclusive
# Add nodes that always fail
#SBATCH -x gpu-14-1,sh-20-35
# send email about job status changes
##SBATCH --mail-type=ALL
#Set an open-mpi parameter to suppress "fork()" warnings
# GPAW is written to use fork calls
export OMPI_MCA_mpi_warn_on_fork=0
#This next line decides which version of gpaw will be used
source $HOME/environment_scripts/set_paths_gpaw-trunk_scalapack_libvdwxc.sh # Gpaw trunk with mBEEF-vdW fixed for libvdwxc
srun `which gpaw-python` %s %s
""" % (nodes,time,job,gpaw_options))
f.close()
os.system('sbatch ' + options + ' tmp.sh')
|
Add the submission script for GPAW on Sherlock at Stanford#!/usr/bin/env python
from sys import argv
import os
job = argv[1]
nodes = argv[2]
time = argv[3] + ":00"
if len(argv) > 4:
gpaw_options = ' '.join(argv[4:])
else:
gpaw_options = ' '
#options = '-l nodes=' + nodes +':ppn=2' + ' -l' +' walltime=' + time + ' -m abe'
#options = '-N ' + nodes +' -t ' + time + ' -J ' + job
options = ' -J ' + job
#dir = os.getcwd()
f = open('tmp.sh', 'w')
f.write("""\
#!/bin/bash
#SBATCH -n %s
#SBATCH -t %s
#SBATCH -p iric,normal
#SBATCH --exclusive
# Add nodes that always fail
#SBATCH -x gpu-14-1,sh-20-35
# send email about job status changes
##SBATCH --mail-type=ALL
#Set an open-mpi parameter to suppress "fork()" warnings
# GPAW is written to use fork calls
export OMPI_MCA_mpi_warn_on_fork=0
#This next line decides which version of gpaw will be used
source $HOME/environment_scripts/set_paths_gpaw-trunk_scalapack_libvdwxc.sh # Gpaw trunk with mBEEF-vdW fixed for libvdwxc
srun `which gpaw-python` %s %s
""" % (nodes,time,job,gpaw_options))
f.close()
os.system('sbatch ' + options + ' tmp.sh')
|
<commit_before><commit_msg>Add the submission script for GPAW on Sherlock at Stanford<commit_after>#!/usr/bin/env python
from sys import argv
import os
job = argv[1]
nodes = argv[2]
time = argv[3] + ":00"
if len(argv) > 4:
gpaw_options = ' '.join(argv[4:])
else:
gpaw_options = ' '
#options = '-l nodes=' + nodes +':ppn=2' + ' -l' +' walltime=' + time + ' -m abe'
#options = '-N ' + nodes +' -t ' + time + ' -J ' + job
options = ' -J ' + job
#dir = os.getcwd()
f = open('tmp.sh', 'w')
f.write("""\
#!/bin/bash
#SBATCH -n %s
#SBATCH -t %s
#SBATCH -p iric,normal
#SBATCH --exclusive
# Add nodes that always fail
#SBATCH -x gpu-14-1,sh-20-35
# send email about job status changes
##SBATCH --mail-type=ALL
#Set an open-mpi parameter to suppress "fork()" warnings
# GPAW is written to use fork calls
export OMPI_MCA_mpi_warn_on_fork=0
#This next line decides which version of gpaw will be used
source $HOME/environment_scripts/set_paths_gpaw-trunk_scalapack_libvdwxc.sh # Gpaw trunk with mBEEF-vdW fixed for libvdwxc
srun `which gpaw-python` %s %s
""" % (nodes,time,job,gpaw_options))
f.close()
os.system('sbatch ' + options + ' tmp.sh')
|
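For the wrapper above, the argv order is script, task count, then walltime in hours:minutes (the wrapper appends ":00"); any further arguments are forwarded after the script name. A hedged sketch with placeholder values, plus an argv guard the wrapper itself omits:

# Hypothetical invocation:
#   python sherlock.stanford.edu.run_gpaw.py relax.py 16 48:00
# expands to: sbatch -J relax.py tmp.sh, with 16 tasks and a 48:00:00 limit.
import sys

if len(sys.argv) < 4:
    sys.exit("usage: run_gpaw.py <script> <ntasks> <hh:mm> [extra args...]")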
|
acead95a46db9f7228fd44f439e50eaa37f0a288
|
kirppuauth/migrations/0005_alter_user_first_name.py
|
kirppuauth/migrations/0005_alter_user_first_name.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kirppuauth', '0004_auto_20180703_1615'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
Add missing Django 3.1 migration.
|
Add missing Django 3.1 migration.
Missing from d9890245e30bf7036c05f2359cc48d89b5361ba5.
|
Python
|
mit
|
jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu
|
Add missing Django 3.1 migration.
Missing from d9890245e30bf7036c05f2359cc48d89b5361ba5.
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kirppuauth', '0004_auto_20180703_1615'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
<commit_before><commit_msg>Add missing Django 3.1 migration.
Missing from d9890245e30bf7036c05f2359cc48d89b5361ba5.<commit_after>
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kirppuauth', '0004_auto_20180703_1615'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
Add missing Django 3.1 migration.
Missing from d9890245e30bf7036c05f2359cc48d89b5361ba5.from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kirppuauth', '0004_auto_20180703_1615'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
<commit_before><commit_msg>Add missing Django 3.1 migration.
Missing from d9890245e30bf7036c05f2359cc48d89b5361ba5.<commit_after>from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kirppuauth', '0004_auto_20180703_1615'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
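Context for the record above: Django 3.1 raised AbstractUser.first_name from 30 to 150 characters, which is the change this migration tracks. A hedged sketch for catching such missing migrations in CI; the flags exist in recent Django, and the manage.py location is assumed:

import subprocess

subprocess.run(
    ["python", "manage.py", "makemigrations", "kirppuauth",
     "--check", "--dry-run"],
    check=True,  # non-zero exit means a migration like 0005 is missing
)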
|
f89ba25232d9a8e5f47a2e5cbc210afeda40210e
|
submissions/generate_all_fours.py
|
submissions/generate_all_fours.py
|
"""Generate all_threes.dta submission file with every rating as 3.0
.. moduleauthor:: Quinn Osha stolen from Jan Van Bruggen <jancvanbruggen@gmail.com>
"""
def run():
fours = ['4.0\n'] * 2749898
with open('all_fours.dta', 'w+') as all_fours_submission_file:
all_fours_submission_file.writelines(fours)
if __name__ == '__main__':
run()
|
Add all fours submission generation
|
Add all fours submission generation
|
Python
|
mit
|
jvanbrug/netflix,jvanbrug/netflix
|
Add all fours submission generation
|
"""Generate all_threes.dta submission file with every rating as 3.0
.. moduleauthor:: Quinn Osha stolen from Jan Van Bruggen <jancvanbruggen@gmail.com>
"""
def run():
fours = ['4.0\n'] * 2749898
with open('all_fours.dta', 'w+') as all_fours_submission_file:
all_fours_submission_file.writelines(fours)
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Add all fours submission generation<commit_after>
|
"""Generate all_threes.dta submission file with every rating as 3.0
.. moduleauthor:: Quinn Osha stolen from Jan Van Bruggen <jancvanbruggen@gmail.com>
"""
def run():
fours = ['4.0\n'] * 2749898
with open('all_fours.dta', 'w+') as all_fours_submission_file:
all_fours_submission_file.writelines(fours)
if __name__ == '__main__':
run()
|
Add all fours submission generation"""Generate all_fours.dta submission file with every rating as 4.0
.. moduleauthor:: Quinn Osha stolen from Jan Van Bruggen <jancvanbruggen@gmail.com>
"""
def run():
fours = ['4.0\n'] * 2749898
with open('all_fours.dta', 'w+') as all_fours_submission_file:
all_fours_submission_file.writelines(fours)
if __name__ == '__main__':
run()
|
<commit_before><commit_msg>Add all fours submission generation<commit_after>"""Generate all_fours.dta submission file with every rating as 4.0
.. moduleauthor:: Quinn Osha stolen from Jan Van Bruggen <jancvanbruggen@gmail.com>
"""
def run():
fours = ['4.0\n'] * 2749898
with open('all_fours.dta', 'w+') as all_fours_submission_file:
all_fours_submission_file.writelines(fours)
if __name__ == '__main__':
run()
|
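A design note on the record above: it materialises a list of roughly 2.75 million strings before writing. A hedged streaming variant with the same file contents and constant memory:

def run_streaming():
    with open('all_fours.dta', 'w') as f:  # 'w' suffices; 'w+' only adds unused read access
        for _ in range(2749898):
            f.write('4.0\n')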
|
19ea035e68fbec2b39420b5bfd3ab340b3d14d78
|
i3pystatus/dpms.py
|
i3pystatus/dpms.py
|
from i3pystatus import IntervalModule
from i3pystatus.core.command import run_through_shell
class DPMS(IntervalModule):
"""
Shows and toggles status of DPMS which prevents screen from blanking.
.. rubric:: Available formatters
* `{status}` — the current status of DPMS
@author Georg Sieber <g.sieber AT gmail.com>
"""
interval = 5
settings = (
"format",
"color",
"color_disabled",
)
color_disabled = "#AAAAAA"
color = "#FFFFFF"
format = "DPMS: {status}"
on_leftclick = "toggle_dpms"
status = False
def run(self):
self.status = run_through_shell("xset -q | grep -q 'DPMS is Enabled'", True).rc == 0
self.output = {
"full_text": self.format.format(status='on' if self.status else 'off'),
"color": self.color if self.status else self.color_disabled
}
def toggle_dpms(self):
if self.status:
run_through_shell("xset -dpms s off", True)
else:
run_through_shell("xset +dpms s on", True)
|
Add module for DPMS state
|
Add module for DPMS state
|
Python
|
mit
|
fmarchenko/i3pystatus,yang-ling/i3pystatus,drwahl/i3pystatus,eBrnd/i3pystatus,juliushaertl/i3pystatus,drwahl/i3pystatus,onkelpit/i3pystatus,richese/i3pystatus,Elder-of-Ozone/i3pystatus,opatut/i3pystatus,eBrnd/i3pystatus,richese/i3pystatus,m45t3r/i3pystatus,claria/i3pystatus,facetoe/i3pystatus,juliushaertl/i3pystatus,paulollivier/i3pystatus,asmikhailov/i3pystatus,schroeji/i3pystatus,teto/i3pystatus,enkore/i3pystatus,yang-ling/i3pystatus,plumps/i3pystatus,facetoe/i3pystatus,enkore/i3pystatus,fmarchenko/i3pystatus,ismaelpuerto/i3pystatus,Elder-of-Ozone/i3pystatus,schroeji/i3pystatus,Arvedui/i3pystatus,Arvedui/i3pystatus,opatut/i3pystatus,onkelpit/i3pystatus,m45t3r/i3pystatus,paulollivier/i3pystatus,asmikhailov/i3pystatus,teto/i3pystatus,plumps/i3pystatus,ncoop/i3pystatus,claria/i3pystatus,ismaelpuerto/i3pystatus,ncoop/i3pystatus
|
Add module for DPMS state
|
from i3pystatus import IntervalModule
from i3pystatus.core.command import run_through_shell
class DPMS(IntervalModule):
"""
Shows and toggles status of DPMS which prevents screen from blanking.
.. rubric:: Available formatters
* `{status}` — the current status of DPMS
@author Georg Sieber <g.sieber AT gmail.com>
"""
interval = 5
settings = (
"format",
"color",
"color_disabled",
)
color_disabled = "#AAAAAA"
color = "#FFFFFF"
format = "DPMS: {status}"
on_leftclick = "toggle_dpms"
status = False
def run(self):
self.status = run_through_shell("xset -q | grep -q 'DPMS is Enabled'", True).rc == 0
self.output = {
"full_text": self.format.format(status='on' if self.status else 'off'),
"color": self.color if self.status else self.color_disabled
}
def toggle_dpms(self):
if self.status:
run_through_shell("xset -dpms s off", True)
else:
run_through_shell("xset +dpms s on", True)
|
<commit_before><commit_msg>Add module for DPMS state<commit_after>
|
from i3pystatus import IntervalModule
from i3pystatus.core.command import run_through_shell
class DPMS(IntervalModule):
"""
Shows and toggles status of DPMS which prevents screen from blanking.
.. rubric:: Available formatters
* `{status}` — the current status of DPMS
@author Georg Sieber <g.sieber AT gmail.com>
"""
interval = 5
settings = (
"format",
"color",
"color_disabled",
)
color_disabled = "#AAAAAA"
color = "#FFFFFF"
format = "DPMS: {status}"
on_leftclick = "toggle_dpms"
status = False
def run(self):
self.status = run_through_shell("xset -q | grep -q 'DPMS is Enabled'", True).rc == 0
self.output = {
"full_text": self.format.format(status='on' if self.status else 'off'),
"color": self.color if self.status else self.color_disabled
}
def toggle_dpms(self):
if self.status:
run_through_shell("xset -dpms s off", True)
else:
run_through_shell("xset +dpms s on", True)
|
Add module for DPMS statefrom i3pystatus import IntervalModule
from i3pystatus.core.command import run_through_shell
class DPMS(IntervalModule):
"""
Shows and toggles status of DPMS which prevents screen from blanking.
.. rubric:: Available formatters
* `{status}` — the current status of DPMS
@author Georg Sieber <g.sieber AT gmail.com>
"""
interval = 5
settings = (
"format",
"color",
"color_disabled",
)
color_disabled = "#AAAAAA"
color = "#FFFFFF"
format = "DPMS: {status}"
on_leftclick = "toggle_dpms"
status = False
def run(self):
self.status = run_through_shell("xset -q | grep -q 'DPMS is Enabled'", True).rc == 0
self.output = {
"full_text": self.format.format(status='on' if self.status else 'off'),
"color": self.color if self.status else self.color_disabled
}
def toggle_dpms(self):
if self.status:
run_through_shell("xset -dpms s off", True)
else:
run_through_shell("xset +dpms s on", True)
|
<commit_before><commit_msg>Add module for DPMS state<commit_after>from i3pystatus import IntervalModule
from i3pystatus.core.command import run_through_shell
class DPMS(IntervalModule):
"""
Shows and toggles status of DPMS which prevents screen from blanking.
.. rubric:: Available formatters
* `{status}` — the current status of DPMS
@author Georg Sieber <g.sieber AT gmail.com>
"""
interval = 5
settings = (
"format",
"color",
"color_disabled",
)
color_disabled = "#AAAAAA"
color = "#FFFFFF"
format = "DPMS: {status}"
on_leftclick = "toggle_dpms"
status = False
def run(self):
self.status = run_through_shell("xset -q | grep -q 'DPMS is Enabled'", True).rc == 0
self.output = {
"full_text": self.format.format(status='on' if self.status else 'off'),
"color": self.color if self.status else self.color_disabled
}
def toggle_dpms(self):
if self.status:
run_through_shell("xset -dpms s off", True)
else:
run_through_shell("xset +dpms s on", True)
|
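A hedged configuration sketch for the module above, following the usual i3pystatus pattern; the format and color values are examples, and "dpms" is assumed to resolve to the module file added in this record:

from i3pystatus import Status

status = Status()
status.register("dpms", format="DPMS: {status}", color="#9FBC00")
status.run()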
|
70d8e6a050b3e88de5da47f30d3fb16664cd690c
|
main.py
|
main.py
|
import argparse
from uncertainty.classifier import Classifier
def train(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
classifier.train(args.filepath)
def predict(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
print('{}: {}'.format(args.sentence, classifier.predict(args.sentence)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Command line access to the Linguistic Uncertainty '
'Classifier Interface (LUCI).'
)
subparsers = parser.add_subparsers(title='Commands')
parser_train = subparsers.add_parser(
'train', help='Train uncertainty classifier.'
)
parser_train.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the classifier must be trained. '
'Default is word.'
)
parser_train.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is trained.'
)
parser_train.add_argument(
'filepath',
help='The absolute path to a file containing the training data.'
)
parser_train.set_defaults(handler=train)
parser_predict = subparsers.add_parser(
'predict', help='Predict uncertainty of a sentence.'
)
parser_predict.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the prediction must be made. '
'Default is word.'
)
parser_predict.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is used for '
'prediction.'
)
parser_predict.add_argument(
'sentence',
help='A sentence for which the uncertainty is to be predicted.'
)
parser_predict.set_defaults(handler=predict)
args = parser.parse_args()
args.handler(args)
|
Add command line access to uncertainty classifier
|
Add command line access to uncertainty classifier
|
Python
|
mit
|
meyersbs/uncertainty
|
Add command line access to uncertainty classifier
|
import argparse
from uncertainty.classifier import Classifier
def train(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
classifier.train(args.filepath)
def predict(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
print('{}: {}'.format(args.sentence, classifier.predict(args.sentence)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Command line access to the Linguistic Uncertainty '
'Classifier Interface (LUCI).'
)
subparsers = parser.add_subparsers(title='Commands')
parser_train = subparsers.add_parser(
'train', help='Train uncertainty classifier.'
)
parser_train.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the classifier must be trained. '
'Default is word.'
)
parser_train.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is trained.'
)
parser_train.add_argument(
'filepath',
help='The absolute path to a file containing the training data.'
)
parser_train.set_defaults(handler=train)
parser_predict = subparsers.add_parser(
'predict', help='Predict uncertainty of a sentence.'
)
parser_predict.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the prediction must be made. '
'Default is word.'
)
parser_predict.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is used for '
'prediction.'
)
parser_predict.add_argument(
'sentence',
help='A sentence for which the uncertainty is to be predicted.'
)
parser_predict.set_defaults(handler=predict)
args = parser.parse_args()
args.handler(args)
|
<commit_before><commit_msg>Add command line access to uncertainty classifier<commit_after>
|
import argparse
from uncertainty.classifier import Classifier
def train(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
classifier.train(args.filepath)
def predict(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
print('{}: {}'.format(args.sentence, classifier.predict(args.sentence)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Command line access to the Linguistic Uncertainty '
'Classifier Interface (LUCI).'
)
subparsers = parser.add_subparsers(title='Commands')
parser_train = subparsers.add_parser(
'train', help='Train uncertainty classifier.'
)
parser_train.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the classifier must be trained. '
'Default is word.'
)
parser_train.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is trained.'
)
parser_train.add_argument(
'filepath',
help='The absolute path to a file containing the training data.'
)
parser_train.set_defaults(handler=train)
parser_predict = subparsers.add_parser(
'predict', help='Predict uncertainty of a sentence.'
)
parser_predict.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the prediction must be made. '
'Default is word.'
)
parser_predict.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is used for '
'prediction.'
)
parser_predict.add_argument(
'sentence',
help='A sentence for which the uncertainty is to be predicted.'
)
parser_predict.set_defaults(handler=predict)
args = parser.parse_args()
args.handler(args)
|
Add command line access to uncertainty classifierimport argparse
from uncertainty.classifier import Classifier
def train(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
classifier.train(args.filepath)
def predict(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
print('{}: {}'.format(args.sentence, classifier.predict(args.sentence)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Command line access to the Linguistic Uncertainty '
'Classifier Interface (LUCI).'
)
subparsers = parser.add_subparsers(title='Commands')
parser_train = subparsers.add_parser(
'train', help='Train uncertainty classifier.'
)
parser_train.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the classifier must be trained. '
'Default is word.'
)
parser_train.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is trained.'
)
parser_train.add_argument(
'filepath',
help='The absolute path to a file containing the training data.'
)
parser_train.set_defaults(handler=train)
parser_predict = subparsers.add_parser(
'predict', help='Predict uncertainty of a sentence.'
)
parser_predict.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the prediction must be made. '
'Default is word.'
)
parser_predict.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is used for '
'prediction.'
)
parser_predict.add_argument(
'sentence',
help='A sentence for which the uncertainty is to be predicted.'
)
parser_predict.set_defaults(handler=predict)
args = parser.parse_args()
args.handler(args)
|
<commit_before><commit_msg>Add command line access to uncertainty classifier<commit_after>import argparse
from uncertainty.classifier import Classifier
def train(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
classifier.train(args.filepath)
def predict(args):
classifier = Classifier(
granularity=args.granularity, binary=not args.multiclass
)
print('{}: {}'.format(args.sentence, classifier.predict(args.sentence)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Command line access to the Linguistic Uncertainty '
'Classifier Interface (LUCI).'
)
subparsers = parser.add_subparsers(title='Commands')
parser_train = subparsers.add_parser(
'train', help='Train uncertainty classifier.'
)
parser_train.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the classifier must be trained. '
'Default is word.'
)
parser_train.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is trained.'
)
parser_train.add_argument(
'filepath',
help='The absolute path to a file containing the training data.'
)
parser_train.set_defaults(handler=train)
parser_predict = subparsers.add_parser(
'predict', help='Predict uncertainty of a sentence.'
)
parser_predict.add_argument(
'-g', '--granularity', choices=['word', 'sentence'],
default='word',
help='The granularity at which the prediction must be made. '
'Default is word.'
)
parser_predict.add_argument(
'-m', '--multiclass', action='store_true',
help='When set, the response variable is considered multi-class. '
'Consequently, a multi-class classifier is used for '
'prediction.'
)
parser_predict.add_argument(
'sentence',
help='A sentence for which the uncertainty is to be predicted.'
)
parser_predict.set_defaults(handler=predict)
args = parser.parse_args()
args.handler(args)
|
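A hedged sketch of how the CLI above is driven; the file names and sentences are placeholders:

# Hypothetical invocations:
#   python main.py train -g sentence -m /data/uncertainty_train.tsv
#   python main.py predict "The results may suggest a trend."
import shlex

print(shlex.split('predict -g sentence "It might rain."'))
# -> ['predict', '-g', 'sentence', 'It might rain.'], i.e. what parse_args receives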
|
45229754b77e457866b6c700174a8c4f7dfde58a
|
audio_pipeline/tb_ui/util/Resources.py
|
audio_pipeline/tb_ui/util/Resources.py
|
import uuid
mbid_directory = "Ready To Filewalk"
picard_directory = "Picard Me!"
def has_mbid(track):
"""
Check whether or not the given track has an MBID.
"""
if track.mbid.value:
try:
id = uuid.UUID(track.mbid.value)
good = True
except ValueError as e:
good = False
else:
good = False
return good
|
Add move file after TomatoBanana; ctrl-z for one cell in Meta Entr; updated commands in help
|
Add move file after TomatoBanana; ctrl-z for one cell in Meta Entr; updated commands in help
|
Python
|
mit
|
hidat/audio_pipeline
|
Add move file after TomatoBanana; ctrl-z for one cell in Meta Entr; updated commands in help
|
import uuid
mbid_directory = "Ready To Filewalk"
picard_directory = "Picard Me!"
def has_mbid(track):
"""
Check whether or not the given track has an MBID.
"""
if track.mbid.value:
try:
id = uuid.UUID(track.mbid.value)
good = True
except ValueError as e:
good = False
else:
good = False
return good
|
<commit_before><commit_msg>Add move file after TomatoBanana; ctrl-z for one cell in Meta Entr; updated commands in help<commit_after>
|
import uuid
mbid_directory = "Ready To Filewalk"
picard_directory = "Picard Me!"
def has_mbid(track):
"""
Check whether or not the given track has an MBID.
"""
if track.mbid.value:
try:
id = uuid.UUID(track.mbid.value)
good = True
except ValueError as e:
good = False
else:
good = False
return good
|
Add move file after TomatoBanana; ctrl-z for one cell in Meta Entr; updated commands in helpimport uuid
mbid_directory = "Ready To Filewalk"
picard_directory = "Picard Me!"
def has_mbid(track):
"""
Check whether or not the given track has an MBID.
"""
if track.mbid.value:
try:
id = uuid.UUID(track.mbid.value)
good = True
except ValueError as e:
good = False
else:
good = False
return good
|
<commit_before><commit_msg>Add move file after TomatoBanana; ctrl-z for one cell in Meta Entr; updated commands in help<commit_after>import uuid
mbid_directory = "Ready To Filewalk"
picard_directory = "Picard Me!"
def has_mbid(track):
"""
Check whether or not the given track has an MBID.
"""
if track.mbid.value:
try:
id = uuid.UUID(track.mbid.value)
good = True
except ValueError as e:
good = False
else:
good = False
return good
|
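A hedged sketch exercising has_mbid with minimal stand-ins; the real track and tag classes live elsewhere in audio_pipeline, so the mocks below are hypothetical, and has_mbid from the record is assumed to be in scope:

class _Tag:
    def __init__(self, value): self.value = value

class _Track:
    def __init__(self, mbid): self.mbid = _Tag(mbid)

print(has_mbid(_Track("6ec114ed-ed4e-4a55-be1d-e2aa2d2c2a84")))  # True
print(has_mbid(_Track("not-a-uuid")))                            # False
print(has_mbid(_Track("")))                                      # False (empty value)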
|
30e18ffea7885f5708c751d8a7e8783bfea3260b
|
src/python/parse_csv.py
|
src/python/parse_csv.py
|
import csv
def csv_as_list(csv_file_name, delim = ';'):
with open(csv_file_name, 'rb') as csv_file:
data = csv.DictReader(csv_file, delimiter = delim)
csv_list = []
for item in data:
csv_list.append(item)
return csv_list
|
Implement simple CSV parsing utility
|
Implement simple CSV parsing utility
|
Python
|
mit
|
vjuranek/rg-offline-plotting,vjuranek/rg-offline-plotting
|
Implement simple CSV parsing utility
|
import csv
def csv_as_list(csv_file_name, delim = ';'):
with open(csv_file_name, 'rb') as csv_file:
data = csv.DictReader(csv_file, delimiter = delim)
csv_list = []
for item in data:
csv_list.append(item)
return csv_list
|
<commit_before><commit_msg>Implement simple CSV parsing utility<commit_after>
|
import csv
def csv_as_list(csv_file_name, delim = ';'):
with open(csv_file_name, 'rb') as csv_file:
data = csv.DictReader(csv_file, delimiter = delim)
csv_list = []
for item in data:
csv_list.append(item)
return csv_list
|
Implement simple CSV parsing utilityimport csv
def csv_as_list(csv_file_name, delim = ';'):
with open(csv_file_name, 'rb') as csv_file:
data = csv.DictReader(csv_file, delimiter = delim)
csv_list = []
for item in data:
csv_list.append(item)
return csv_list
|
<commit_before><commit_msg>Implement simple CSV parsing utility<commit_after>import csv
def csv_as_list(csv_file_name, delim = ';'):
with open(csv_file_name, 'rb') as csv_file:
data = csv.DictReader(csv_file, delimiter = delim)
csv_list = []
for item in data:
csv_list.append(item)
return csv_list
|
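A portability note on the record above: 'rb' suits Python 2, but on Python 3 csv.DictReader needs text mode and fails on bytes. A hedged Python 3 variant with the same behaviour; newline='' follows the csv module docs:

import csv

def csv_as_list_py3(csv_file_name, delim=';'):
    with open(csv_file_name, newline='') as csv_file:
        return list(csv.DictReader(csv_file, delimiter=delim))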
|
0af033308873038a8d10d5348a63aa4c6fab3033
|
main_withoutGUI.py
|
main_withoutGUI.py
|
# -*- coding:utf-8 -*-
# This version is mainly used for a test. It doesn't have a GUI yet.
import shelve
from getIntern import get_sxs
# Use shelve to get parameter changing rules.
with shelve.open("shelve/para_change_dict") as slvFile:
city_dict = slvFile["city"]
salary_dict = slvFile["salary"]
degree_dict = slvFile["degree"]
remain_dict = slvFile["remain"]
# Input your requirements for Intern Chances.
keyword = input("Please input A Keyword of Your Interested Intern Chance or Company:\n")
city = input("Please input A City Name for your Internship:\n")
salary = input("Please Input A Number for Salary (¥/Day) ==》\n "
"0: Don't care;\n"
"1: 0-50;\n"
"2: 50-100;\n"
"3: 100-150;\n"
"4: 150-200;\n"
"5: 200-300;\n"
"6: > 300.\n")
degree = input("Please Input A Number for Minimum Degree Requirement.\n"
"0: Don't care;\n"
"1: College;\n"
"2: Bachelor;\n"
"3: Master;\n"
"4: Doctor.\n")
remain = input("Please Input A Number for Whether to Stay or Not After the Internship.\n"
"0: Don't care;\n"
"1: Yes;\n"
"2: No;\n"
"3: Not Sure.\n")
day = input("Please Input A Number for How Many Days A Week Can You Work:\n"
"If You Don't Care, Just Type in 0.\n")
month = input("Please Input A Number for How Many Months Can You Keep On Work:\n"
"If You Don't Care, Just Type in 0.\n")
frequency = input("Please Input A Number for How Often (in hours) Do You Want To Get An Update Reminder.\n")
# Now we change the parameters.
if day == "" or day == "0":
day = None
if month == "" or month == "0":
month = None
if frequency == "":
frequency = 24
city = city_dict[city]
salary = salary_dict[salary]
degree = degree_dict[degree]
remain = remain_dict[remain]
# Now We Check The Inputs.
results = get_sxs(place=city, keyword=keyword, day=day, month=month, salary=salary, degree=degree, remain=remain)
print(results)
|
Add a classification about the page numbers of search results to the get_sxs function.
|
Add a classification about the page numbers of search results to the get_sxs function.
|
Python
|
mit
|
HutchinHuang/New_Intern_Reminder
|
Add a classification about the page numbers of search results to the get_sxs function.
|
# -*- coding:utf-8 -*-
# This version is mainly used for a test. It doesn't have a GUI yet.
import shelve
from getIntern import get_sxs
# Use shelve to get parameter changing rules.
with shelve.open("shelve/para_change_dict") as slvFile:
city_dict = slvFile["city"]
salary_dict = slvFile["salary"]
degree_dict = slvFile["degree"]
remain_dict = slvFile["remain"]
# Input your requirements for Intern Chances.
keyword = input("Please input A Keyword of Your Interested Intern Chance or Company:\n")
city = input("Please input A City Name for your Internship:\n")
salary = input("Please Input A Number for Salary (¥/Day) ==》\n "
"0: Don't care;\n"
"1: 0-50;\n"
"2: 50-100;\n"
"3: 100-150;\n"
"4: 150-200;\n"
"5: 200-300;\n"
"6: > 300.\n")
degree = input("Please Input A Number for Minimum Degree Requirement.\n"
"0: Don't care;\n"
"1: College;\n"
"2: Bachelor;\n"
"3: Master;\n"
"4: Doctor.\n")
remain = input("Please Input A Number for Whether to Stay or Not After the Internship.\n"
"0: Don't care;\n"
"1: Yes;\n"
"2: No;\n"
"3: Not Sure.\n")
day = input("Please Input A Number for How Many Days A Week Can You Work:\n"
"If You Don't Care, Just Type in 0.\n")
month = input("Please Input A Number for How Many Months Can You Keep On Work:\n"
"If You Don't Care, Just Type in 0.\n")
frequency = input("Please Input A Number for How Often (in hours) Do You Want To Get An Update Reminder.\n")
# Now we change the parameters.
if day == "" or day == "0":
day = None
if month == "" or month == "0":
month = None
if frequency == "":
frequency = 24
city = city_dict[city]
salary = salary_dict[salary]
degree = degree_dict[degree]
remain = remain_dict[remain]
# Now We Check The Inputs.
results = get_sxs(place=city, keyword=keyword, day=day, month=month, salary=salary, degree=degree, remain=remain)
print(results)
|
<commit_before><commit_msg>Add a classification about the page numbers of search results to the get_sxs function.<commit_after>
|
# -*- coding:utf-8 -*-
# This version is mainly used for a test. It doesn't have a GUI yet.
import shelve
from getIntern import get_sxs
# Use shelve to get parameter changing rules.
with shelve.open("shelve/para_change_dict") as slvFile:
city_dict = slvFile["city"]
salary_dict = slvFile["salary"]
degree_dict = slvFile["degree"]
remain_dict = slvFile["remain"]
# Input your requirements for Intern Chances.
keyword = input("Please input A Keyword of Your Interested Intern Chance or Company:\n")
city = input("Please input A City Name for your Internship:\n")
salary = input("Please Input A Number for Salary (¥/Day) ==》\n "
"0: Don't care;\n"
"1: 0-50;\n"
"2: 50-100;\n"
"3: 100-150;\n"
"4: 150-200;\n"
"5: 200-300;\n"
"6: > 300.\n")
degree = input("Please Input A Number for Minimum Degree Requirement.\n"
"0: Don't care;\n"
"1: College;\n"
"2: Bachelor;\n"
"3: Master;\n"
"4: Doctor.\n")
remain = input("Please Input A Number for Whether to Stay or Not After the Internship.\n"
"0: Don't care;\n"
"1: Yes;\n"
"2: No;\n"
"3: Not Sure.\n")
day = input("Please Input A Number for How Many Days A Week Can You Work:\n"
"If You Don't Care, Just Type in 0.\n")
month = input("Please Input A Number for How Many Months Can You Keep On Work:\n"
"If You Don't Care, Just Type in 0.\n")
frequency = input("Please Input A Number for How Often (in hours) Do You Want To Get An Update Reminder.\n")
# Now we change the parameters.
if day == "" or day == "0":
day = None
if month == "" or month == "0":
month = None
if frequency == "":
frequency = 24
city = city_dict[city]
salary = salary_dict[salary]
degree = degree_dict[degree]
remain = remain_dict[remain]
# Now We Check The Inputs.
results = get_sxs(place=city, keyword=keyword, day=day, month=month, salary=salary, degree=degree, remain=remain)
print(results)
|
Add a classification about the page numbers of search results to the get_sxs function.# -*- coding:utf-8 -*-
# This version is mainly used for a test. It doesn't have a GUI yet.
import shelve
from getIntern import get_sxs
# Use shelve to get parameter changing rules.
with shelve.open("shelve/para_change_dict") as slvFile:
city_dict = slvFile["city"]
salary_dict = slvFile["salary"]
degree_dict = slvFile["degree"]
remain_dict = slvFile["remain"]
# Input your requirements for Intern Chances.
keyword = input("Please input A Keyword of Your Interested Intern Chance or Company:\n")
city = input("Please input A City Name for your Internship:\n")
salary = input("Please Input A Number for Salary (¥/Day) ==》\n "
"0: Don't care;\n"
"1: 0-50;\n"
"2: 50-100;\n"
"3: 100-150;\n"
"4: 150-200;\n"
"5: 200-300;\n"
"6: > 300.\n")
degree = input("Please Input A Number for Minimum Degree Requirement.\n"
"0: Don't care;\n"
"1: College;\n"
"2: Bachelor;\n"
"3: Master;\n"
"4: Doctor.\n")
remain = input("Please Input A Number for Whether to Stay or Not After the Internship.\n"
"0: Don't care;\n"
"1: Yes;\n"
"2: No;\n"
"3: Not Sure.\n")
day = input("Please Input A Number for How Many Days A Week Can You Work:\n"
"If You Don't Care, Just Type in 0.\n")
month = input("Please Input A Number for How Many Months Can You Keep On Work:\n"
"If You Don't Care, Just Type in 0.\n")
frequency = input("Please Input A Number for How Often (in hours) Do You Want To Get An Update Reminder.\n")
# Now we change the parameters.
if day == "" or day == "0":
day = None
if month == "" or month == "0":
month = None
if frequency == "":
frequency = 24
city = city_dict[city]
salary = salary_dict[salary]
degree = degree_dict[degree]
remain = remain_dict[remain]
# Now We Check The Inputs.
results = get_sxs(place=city, keyword=keyword, day=day, month=month, salary=salary, degree=degree, remain=remain)
print(results)
|
<commit_before><commit_msg>Add a classification about the page numbers of search results to the get_sxs function.<commit_after># -*- coding:utf-8 -*-
# This version is mainly used for a test. It doesn't have a GUI yet.
import shelve
from getIntern import get_sxs
# Use shelve to get parameter changing rules.
with shelve.open("shelve/para_change_dict") as slvFile:
city_dict = slvFile["city"]
salary_dict = slvFile["salary"]
degree_dict = slvFile["degree"]
remain_dict = slvFile["remain"]
# Input your requirements for Intern Chances.
keyword = input("Please input A Keyword of Your Interested Intern Chance or Company:\n")
city = input("Please input A City Name for your Internship:\n")
salary = input("Please Input A Number for Salary (¥/Day) ==》\n "
"0: Don't care;\n"
"1: 0-50;\n"
"2: 50-100;\n"
"3: 100-150;\n"
"4: 150-200;\n"
"5: 200-300;\n"
"6: > 300.\n")
degree = input("Please Input A Number for Minimum Degree Requirement.\n"
"0: Don't care;\n"
"1: College;\n"
"2: Bachelor;\n"
"3: Master;\n"
"4: Doctor.\n")
remain = input("Please Input A Number for Whether to Stay or Not After the Internship.\n"
"0: Don't care;\n"
"1: Yes;\n"
"2: No;\n"
"3: Not Sure.\n")
day = input("Please Input A Number for How Many Days A Week Can You Work:\n"
"If You Don't Care, Just Type in 0.\n")
month = input("Please Input A Number for How Many Months Can You Keep On Work:\n"
"If You Don't Care, Just Type in 0.\n")
frequency = input("Please Input A Number for How Often (in hours) Do You Want To Get An Update Reminder.\n")
# Now we change the parameters.
if day == "" or day == "0":
day = None
if month == "" or month == "0":
month = None
if frequency == "":
frequency = 24
city = city_dict[city]
salary = salary_dict[salary]
degree = degree_dict[degree]
remain = remain_dict[remain]
# Now We Check The Inputs.
results = get_sxs(place=city, keyword=keyword, day=day, month=month, salary=salary, degree=degree, remain=remain)
print(results)
|
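A small caveat on the record above: day and month are normalised, but a non-empty frequency stays a str. A hedged helper sketch applying one rule to all three inputs; the names are illustrative:

def to_int_or_none(raw):
    raw = raw.strip()
    return None if raw in ("", "0") else int(raw)

day = to_int_or_none("5")       # -> 5
month = to_int_or_none("0")     # -> None (don't care)
frequency = to_int_or_none("")  # -> None; the caller can fall back to 24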
|
850a9951580cd21c27f5f992d8c907057b1eb1b1
|
tests/test_wb2k.py
|
tests/test_wb2k.py
|
from wb2k.__main__ import bail
def test_bail():
msg_type = 'fatal'
color = 'red'
text = "It doesn't go beyond 11."
given = bail(msg_type, color, text)
expected = "\x1b[31mfatal\x1b[0m: It doesn't go beyond 11."
assert given == expected
|
Add initial test for bail
|
Add initial test for bail
|
Python
|
isc
|
reillysiemens/wb2k
|
Add initial test for bail
|
from wb2k.__main__ import bail
def test_bail():
msg_type = 'fatal'
color = 'red'
text = "It doesn't go beyond 11."
given = bail(msg_type, color, text)
expected = "\x1b[31mfatal\x1b[0m: It doesn't go beyond 11."
assert given == expected
|
<commit_before><commit_msg>Add initial test for bail<commit_after>
|
from wb2k.__main__ import bail
def test_bail():
msg_type = 'fatal'
color = 'red'
text = "It doesn't go beyond 11."
given = bail(msg_type, color, text)
expected = "\x1b[31mfatal\x1b[0m: It doesn't go beyond 11."
assert given == expected
|
Add initial test for bailfrom wb2k.__main__ import bail
def test_bail():
msg_type = 'fatal'
color = 'red'
text = "It doesn't go beyond 11."
given = bail(msg_type, color, text)
expected = "\x1b[31mfatal\x1b[0m: It doesn't go beyond 11."
assert given == expected
|
<commit_before><commit_msg>Add initial test for bail<commit_after>from wb2k.__main__ import bail
def test_bail():
msg_type = 'fatal'
color = 'red'
text = "It doesn't go beyond 11."
given = bail(msg_type, color, text)
expected = "\x1b[31mfatal\x1b[0m: It doesn't go beyond 11."
assert given == expected
|
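A note on the expected string above: it is an ANSI red label plus the message. Whether bail() actually uses click is an assumption here, but click.style reproduces the exact escape sequence:

import click

expected = click.style("fatal", fg="red") + ": It doesn't go beyond 11."
print(repr(expected))  # "\x1b[31mfatal\x1b[0m: It doesn't go beyond 11."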
|
01e69806d0f0e196e7e832c0473a9f70725911e8
|
tests/test_yaml.py
|
tests/test_yaml.py
|
from . import IpynbTest, RmdTest
yaml_source = """---
title: Test document
author: foobar <foo@bar.tld>
date: 1970-01-01T00:00:00+0000
output:
html_document:
toc: true
ünicode: £¼±å
---
lorem ipsum
```{r}
1+1
```
"""
class TestYAMLHeader(RmdTest):
source = yaml_source
def test_header_in_ipynb(self):
self.assertIn("Rmd_header", self.ipynb.metadata)
header = self.ipynb.metadata.Rmd_header
self.assertIn("title", header)
self.assertIn("author", header)
self.assertIn("date", header)
self.assertIn("output", header)
self.assertIn("ünicode", header)
self.assertEqual("Test document", header.title)
self.assertEqual("foobar <foo@bar.tld>", header.author)
self.assertEqual("1970-01-01T00:00:00+0000", header.date)
self.assertEqual(True, header.output.html_document.toc)
self.assertEqual("£¼±å", header.ünicode)
def test_header_in_rmd(self):
self.assertIn("---", self.roundtrip)
self.assertIn("Test document", self.roundtrip)
self.assertIn("foobar <foo@bar.tld>", self.roundtrip)
self.assertIn("1970-01-01T00:00:00+0000", self.roundtrip)
self.assertIn("£¼±å", self.roundtrip)
self.assertRegexpMatches(self.roundtrip, r"output:\s*html_document:\s*toc:\s*true")
self.assertIn("1+1", self.roundtrip)
self.assertIn("lorem ipsum", self.roundtrip)
spin_yaml_source = """#' ---
#' title: Test document
#' author: foobar <foo@bar.tld>
#' date: 1970-01-01T00:00:00+0000
#' output:
#' html_document:
#' toc: true
#' ünicode: £¼±å
#' ---
#' lorem ipsum
1+1
"""
class TestSpinYAMLHeader(TestYAMLHeader):
source = spin_yaml_source
use_rmd = False
|
Add some tests for yaml header handling
|
Add some tests for yaml header handling
|
Python
|
mit
|
chronitis/ipyrmd
|
Add some tests for yaml header handling
|
from . import IpynbTest, RmdTest
yaml_source = """---
title: Test document
author: foobar <foo@bar.tld>
date: 1970-01-01T00:00:00+0000
output:
html_document:
toc: true
ünicode: £¼±å
---
lorem ipsum
```{r}
1+1
```
"""
class TestYAMLHeader(RmdTest):
source = yaml_source
def test_header_in_ipynb(self):
self.assertIn("Rmd_header", self.ipynb.metadata)
header = self.ipynb.metadata.Rmd_header
self.assertIn("title", header)
self.assertIn("author", header)
self.assertIn("date", header)
self.assertIn("output", header)
self.assertIn("ünicode", header)
self.assertEqual("Test document", header.title)
self.assertEqual("foobar <foo@bar.tld>", header.author)
self.assertEqual("1970-01-01T00:00:00+0000", header.date)
self.assertEqual(True, header.output.html_document.toc)
self.assertEqual("£¼±å", header.ünicode)
def test_header_in_rmd(self):
self.assertIn("---", self.roundtrip)
self.assertIn("Test document", self.roundtrip)
self.assertIn("foobar <foo@bar.tld>", self.roundtrip)
self.assertIn("1970-01-01T00:00:00+0000", self.roundtrip)
self.assertIn("£¼±å", self.roundtrip)
self.assertRegexpMatches(self.roundtrip, r"output:\s*html_document:\s*toc:\s*true")
self.assertIn("1+1", self.roundtrip)
self.assertIn("lorem ipsum", self.roundtrip)
spin_yaml_source = """#' ---
#' title: Test document
#' author: foobar <foo@bar.tld>
#' date: 1970-01-01T00:00:00+0000
#' output:
#' html_document:
#' toc: true
#' ünicode: £¼±å
#' ---
#' lorem ipsum
1+1
"""
class TestSpinYAMLHeader(TestYAMLHeader):
source = spin_yaml_source
use_rmd = False
|
<commit_before><commit_msg>Add some tests for yaml header handling<commit_after>
|
from . import IpynbTest, RmdTest
yaml_source = """---
title: Test document
author: foobar <foo@bar.tld>
date: 1970-01-01T00:00:00+0000
output:
html_document:
toc: true
ünicode: £¼±å
---
lorem ipsum
```{r}
1+1
```
"""
class TestYAMLHeader(RmdTest):
source = yaml_source
def test_header_in_ipynb(self):
self.assertIn("Rmd_header", self.ipynb.metadata)
header = self.ipynb.metadata.Rmd_header
self.assertIn("title", header)
self.assertIn("author", header)
self.assertIn("date", header)
self.assertIn("output", header)
self.assertIn("ünicode", header)
self.assertEqual("Test document", header.title)
self.assertEqual("foobar <foo@bar.tld>", header.author)
self.assertEqual("1970-01-01T00:00:00+0000", header.date)
self.assertEqual(True, header.output.html_document.toc)
self.assertEqual("£¼±å", header.ünicode)
def test_header_in_rmd(self):
self.assertIn("---", self.roundtrip)
self.assertIn("Test document", self.roundtrip)
self.assertIn("foobar <foo@bar.tld>", self.roundtrip)
self.assertIn("1970-01-01T00:00:00+0000", self.roundtrip)
self.assertIn("£¼±å", self.roundtrip)
self.assertRegexpMatches(self.roundtrip, r"output:\s*html_document:\s*toc:\s*true")
self.assertIn("1+1", self.roundtrip)
self.assertIn("lorem ipsum", self.roundtrip)
spin_yaml_source = """#' ---
#' title: Test document
#' author: foobar <foo@bar.tld>
#' date: 1970-01-01T00:00:00+0000
#' output:
#' html_document:
#' toc: true
#' ünicode: £¼±å
#' ---
#' lorem ipsum
1+1
"""
class TestSpinYAMLHeader(TestYAMLHeader):
source = spin_yaml_source
use_rmd = False
|
Add some tests for yaml header handlingfrom . import IpynbTest, RmdTest
yaml_source = """---
title: Test document
author: foobar <foo@bar.tld>
date: 1970-01-01T00:00:00+0000
output:
html_document:
toc: true
ünicode: £¼±å
---
lorem ipsum
```{r}
1+1
```
"""
class TestYAMLHeader(RmdTest):
source = yaml_source
def test_header_in_ipynb(self):
self.assertIn("Rmd_header", self.ipynb.metadata)
header = self.ipynb.metadata.Rmd_header
self.assertIn("title", header)
self.assertIn("author", header)
self.assertIn("date", header)
self.assertIn("output", header)
self.assertIn("ünicode", header)
self.assertEqual("Test document", header.title)
self.assertEqual("foobar <foo@bar.tld>", header.author)
self.assertEqual("1970-01-01T00:00:00+0000", header.date)
self.assertEqual(True, header.output.html_document.toc)
self.assertEqual("£¼±å", header.ünicode)
def test_header_in_rmd(self):
self.assertIn("---", self.roundtrip)
self.assertIn("Test document", self.roundtrip)
self.assertIn("foobar <foo@bar.tld>", self.roundtrip)
self.assertIn("1970-01-01T00:00:00+0000", self.roundtrip)
self.assertIn("£¼±å", self.roundtrip)
self.assertRegexpMatches(self.roundtrip, r"output:\s*html_document:\s*toc:\s*true")
self.assertIn("1+1", self.roundtrip)
self.assertIn("lorem ipsum", self.roundtrip)
spin_yaml_source = """#' ---
#' title: Test document
#' author: foobar <foo@bar.tld>
#' date: 1970-01-01T00:00:00+0000
#' output:
#' html_document:
#' toc: true
#' ünicode: £¼±å
#' ---
#' lorem ipsum
1+1
"""
class TestSpinYAMLHeader(TestYAMLHeader):
source = spin_yaml_source
use_rmd = False
|
<commit_before><commit_msg>Add some tests for yaml header handling<commit_after>from . import IpynbTest, RmdTest
yaml_source = """---
title: Test document
author: foobar <foo@bar.tld>
date: 1970-01-01T00:00:00+0000
output:
html_document:
toc: true
ünicode: £¼±å
---
lorem ipsum
```{r}
1+1
```
"""
class TestYAMLHeader(RmdTest):
source = yaml_source
def test_header_in_ipynb(self):
self.assertIn("Rmd_header", self.ipynb.metadata)
header = self.ipynb.metadata.Rmd_header
self.assertIn("title", header)
self.assertIn("author", header)
self.assertIn("date", header)
self.assertIn("output", header)
self.assertIn("ünicode", header)
self.assertEqual("Test document", header.title)
self.assertEqual("foobar <foo@bar.tld>", header.author)
self.assertEqual("1970-01-01T00:00:00+0000", header.date)
self.assertEqual(True, header.output.html_document.toc)
self.assertEqual("£¼±å", header.ünicode)
def test_header_in_rmd(self):
self.assertIn("---", self.roundtrip)
self.assertIn("Test document", self.roundtrip)
self.assertIn("foobar <foo@bar.tld>", self.roundtrip)
self.assertIn("1970-01-01T00:00:00+0000", self.roundtrip)
self.assertIn("£¼±å", self.roundtrip)
self.assertRegexpMatches(self.roundtrip, r"output:\s*html_document:\s*toc:\s*true")
self.assertIn("1+1", self.roundtrip)
self.assertIn("lorem ipsum", self.roundtrip)
spin_yaml_source = """#' ---
#' title: Test document
#' author: foobar <foo@bar.tld>
#' date: 1970-01-01T00:00:00+0000
#' output:
#' html_document:
#' toc: true
#' ünicode: £¼±å
#' ---
#' lorem ipsum
1+1
"""
class TestSpinYAMLHeader(TestYAMLHeader):
source = spin_yaml_source
use_rmd = False
|
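A compatibility note on the tests above: assertRegexpMatches is a deprecated alias that Python 3.12 removes; assertRegex has the same signature. A minimal runnable sketch:

import unittest

class _Demo(unittest.TestCase):
    def test_toc_regex(self):
        self.assertRegex("output:\n  html_document:\n    toc: true",
                         r"output:\s*html_document:\s*toc:\s*true")

# The record's test needs only the method name swapped; unittest.main() runs this.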
|
f2266ab12794c1035980c9ef7483356ae9036ba8
|
lexgen/utils.py
|
lexgen/utils.py
|
import math
def percentile(values, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
Params:
values (list): Sorted list of values.
percent (float): A value from 0.0 to 1.0.
key (function): Optional key function to compute value from each value on list.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
floor = math.floor(k)
ceil = math.ceil(k)
if floor == ceil:
return key(values[int(k)])
d0 = key(values[int(floor)]) * (ceil - k)
d1 = key(values[int(ceil)]) * (k - floor)
return d0 + d1
def filter_dict_by_iqr(dictionary):
"""
Returns a new dictionary filtering values outside of the interquartile range.
Params:
dictionary (dict): Dictionary to be filtered.
Returns:
A new dictionary without items outside of the interquartile range.
"""
filtered_dict = {}
values = sorted(set(dictionary.values()))
first_quartile = percentile(values, 0.25)
third_quartile = percentile(values, 0.75)
for key in dictionary:
if first_quartile <= dictionary[key] <= third_quartile:
filtered_dict[key] = dictionary[key]
return filtered_dict
|
Add two functions to calculate percentiles and filter a dict using IQR
|
Add two functions to calculate percentiles and filter a dict using IQR
The idea is to get a dictionary with a tweet count for each user and filter out users whose number of tweets falls outside the interquartile range.
|
Python
|
mit
|
davidmogar/lexgen,davidmogar/lexgen
|
Add two functions to calculate percentiles and filter a dict using IQR
The idea is to get a dictionary with a tweet count for each user and filter out users whose number of tweets falls outside the interquartile range.
|
import math
def percentile(values, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
Params:
values (list): Sorted list of values.
percent (float): A value from 0.0 to 1.0.
key (function): Optional key function to compute a value from each element of the list.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
floor = math.floor(k)
ceil = math.ceil(k)
if floor == ceil:
return key(values[int(k)])
d0 = key(values[int(floor)]) * (ceil - k)
d1 = key(values[int(ceil)]) * (k - floor)
return d0 + d1
def filter_dict_by_iqr(dictionary):
"""
Returns a new dictionary filtering values outside of the interquartile range.
Params:
dictionary (dict): Dictionary to be filtered.
Returns:
A new dictionary without items outside of the interquartile range.
"""
filtered_dict = {}
values = sorted(set(dictionary.values()))
first_quartile = percentile(values, 0.25)
third_quartile = percentile(values, 0.75)
for key in dictionary:
if first_quartile <= dictionary[key] <= third_quartile:
filtered_dict[key] = dictionary[key]
return filtered_dict
|
<commit_before><commit_msg>Add two functions to calculate percentiles and filter a dict using IQR
The idea is to get a dictionary with a tweet count for each user and filter out users whose number of tweets falls outside the interquartile range.<commit_after>
|
import math
def percentile(values, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
Params:
values (list): Sorted list of values.
percent (float): A value from 0.0 to 1.0.
key (function): Optional key function to compute a value from each element of the list.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
floor = math.floor(k)
ceil = math.ceil(k)
if floor == ceil:
return key(values[int(k)])
d0 = key(values[int(floor)]) * (ceil - k)
d1 = key(values[int(ceil)]) * (k - floor)
return d0 + d1
def filter_dict_by_iqr(dictionary):
"""
Returns a new dictionary filtering values outside of the interquartile range.
Params:
dictionary (dict): Dictionary to be filtered.
Returns:
A new dictionary without items outside of the interquartile range.
"""
filtered_dict = {}
values = sorted(set(dictionary.values()))
first_quartile = percentile(values, 0.25)
third_quartile = percentile(values, 0.75)
for key in dictionary:
if first_quartile <= dictionary[key] <= third_quartile:
filtered_dict[key] = dictionary[key]
return filtered_dict
|
Add two functions to calculate percentiles and filter a dict using IQR
The idea is to get a dictionary with a tweet count for each user and filter out users whose number of tweets falls outside the interquartile range.import math
def percentile(values, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
Params:
values (list): Sorted list of values.
percent (float): A value from 0.0 to 1.0.
key (function): Optional key function to compute a value from each element of the list.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
floor = math.floor(k)
ceil = math.ceil(k)
if floor == ceil:
return key(values[int(k)])
d0 = key(values[int(floor)]) * (ceil - k)
d1 = key(values[int(ceil)]) * (k - floor)
return d0 + d1
def filter_dict_by_iqr(dictionary):
"""
Returns a new dictionary filtering values outside of the interquartile range.
Params:
dictionary (dict): Dictionary to be filtered.
Returns:
A new dictionary without items outside of the interquartile range.
"""
filtered_dict = {}
values = sorted(set(dictionary.values()))
first_quartile = percentile(values, 0.25)
third_quartile = percentile(values, 0.75)
for key in dictionary:
if first_quartile <= dictionary[key] <= third_quartile:
filtered_dict[key] = dictionary[key]
return filtered_dict
|
<commit_before><commit_msg>Add two functions to calculate percentiles and filter a dict using IQR
The idea is to get a dictionary with a tweet count for each user and filter out users whose number of tweets falls outside the interquartile range.<commit_after>import math
def percentile(values, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
Params:
values (list): Sorted list of values.
percent (float): A value from 0.0 to 1.0.
key (function): Optional key function to compute a value from each element of the list.
Returns:
The percentile of the values.
"""
if not values:
return None
k = (len(values) - 1) * percent
floor = math.floor(k)
ceil = math.ceil(k)
if floor == ceil:
return key(values[int(k)])
d0 = key(values[int(floor)]) * (ceil - k)
d1 = key(values[int(ceil)]) * (k - floor)
return d0 + d1
def filter_dict_by_iqr(dictionary):
"""
Returns a new dictionary filtering values outside of the interquartile range.
Params:
dictionary (dict): Dictionary to be filtered.
Returns:
A new dictionary without items outside of the interquartile range.
"""
filtered_dict = {}
values = sorted(set(dictionary.values()))
first_quartile = percentile(values, 0.25)
third_quartile = percentile(values, 0.75)
for key in dictionary:
if first_quartile <= dictionary[key] <= third_quartile:
filtered_dict[key] = dictionary[key]
return filtered_dict
|
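A minimal usage sketch for the two helpers above; the import path and the per-user tweet counts are illustrative assumptions rather than part of the commit:
from lexgen.utils import percentile, filter_dict_by_iqr  # assumed import path

# Hypothetical per-user tweet counts.
tweet_counts = {"alice": 3, "bob": 12, "carol": 15, "dave": 18, "erin": 90}
values = sorted(set(tweet_counts.values()))
print(percentile(values, 0.25))  # 12 (first quartile)
print(percentile(values, 0.75))  # 18 (third quartile)
# Only users whose counts fall inside the interquartile range survive.
print(filter_dict_by_iqr(tweet_counts))  # {'bob': 12, 'carol': 15, 'dave': 18}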
|
1160b792eb4f6b14cb01680ffadaaa3886098d1c
|
util/test_graph.py
|
util/test_graph.py
|
import urllib2
token = 'test_token'
channel = 'test_channel'
graphtype = 'test'
url = 'http://{}/ocpgraph/{}/{}/{}/'.format('localhost:8000', token, channel, graphtype)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except Exception, e:
raise
|
Test file for Graph code
|
[util] Test file for Graph code
|
Python
|
apache-2.0
|
openconnectome/open-connectome,openconnectome/open-connectome,neurodata/ndstore,neurodata/ndstore,openconnectome/open-connectome,openconnectome/open-connectome,openconnectome/open-connectome,neurodata/ndstore,neurodata/ndstore,openconnectome/open-connectome
|
[util] Test file for Graph code
|
import urllib2
token = 'test_token'
channel = 'test_channel'
graphtype = 'test'
url = 'http://{}/ocpgraph/{}/{}/{}/'.format('localhost:8000', token, channel, graphtype)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except Exception, e:
raise
|
<commit_before><commit_msg>[util] Test file for Graph code<commit_after>
|
import urllib2
token = 'test_token'
channel = 'test_channel'
graphtype = 'test'
url = 'http://{}/ocpgraph/{}/{}/{}/'.format('localhost:8000', token, channel, graphtype)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except Exception, e:
raise
|
[util] Test file for Graph codeimport urllib2
token = 'test_token'
channel = 'test_channel'
graphtype = 'test'
url = 'http://{}/ocpgraph/{}/{}/{}/'.format('localhost:8000', token, channel, graphtype)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except Exception, e:
raise
|
<commit_before><commit_msg>[util] Test file for Graph code<commit_after>import urllib2
token = 'test_token'
channel = 'test_channel'
graphtype = 'test'
url = 'http://{}/ocpgraph/{}/{}/{}/'.format('localhost:8000', token, channel, graphtype)
try:
req = urllib2.Request(url)
response = urllib2.urlopen(req)
except Exception, e:
raise
|
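The script above is Python 2 (urllib2). For reference, a rough Python 3 equivalent of the same request, keeping the placeholder token, channel, and graph type:
from urllib.request import Request, urlopen

token = "test_token"
channel = "test_channel"
graphtype = "test"
url = "http://{}/ocpgraph/{}/{}/{}/".format("localhost:8000", token, channel, graphtype)
# urlopen raises URLError/HTTPError on failure, matching the bare re-raise above.
response = urlopen(Request(url))
print(response.status)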
|
b647505a585a35e5f069d5a58524eaf6e25681d4
|
analysis/opensimulator-stats-analyzer/src/osta.py
|
analysis/opensimulator-stats-analyzer/src/osta.py
|
#!/usr/bin/python
import pprint
import re
import sys
if len(sys.argv) <= 1:
print "Usage: %s <stats-log-path>" % sys.argv[0]
sys.exit(1)
lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
data = {}
with open(sys.argv[1]) as f:
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
value = match.group(3)
# print "%s: %s" % (statFullName, value)
if not statFullName in data:
data[statFullName] = []
data[statFullName].append(float(value))
for stat, values in data.items():
# print "%s: %s" % (stat, ", ".join(values))
print "%s: min %s, max %s" % (stat, min(values), max(values))
|
Add first draft of opensim statistics file analyzer
|
Add first draft of opensim statistics file analyzer
|
Python
|
bsd-3-clause
|
justinccdev/opensimulator-tools,justinccdev/opensimulator-tools,justinccdev/opensimulator-tools,justinccdev/opensimulator-tools
|
Add first draft of opensim statistics file analyzer
|
#!/usr/bin/python
import pprint
import re
import sys
if len(sys.argv) <= 1:
print "Usage: %s <stats-log-path>" % sys.argv[0]
sys.exit(1)
lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
data = {}
with open(sys.argv[1]) as f:
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
value = match.group(3)
# print "%s: %s" % (statFullName, value)
if not statFullName in data:
data[statFullName] = []
data[statFullName].append(float(value))
for stat, values in data.items():
# print "%s: %s" % (stat, ", ".join(values))
print "%s: min %s, max %s" % (stat, min(values), max(values))
|
<commit_before><commit_msg>Add first draft of opensim statistics file analyzer<commit_after>
|
#!/usr/bin/python
import pprint
import re
import sys
if len(sys.argv) <= 1:
print "Usage: %s <stats-log-path>" % sys.argv[0]
sys.exit(1)
lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
data = {}
with open(sys.argv[1]) as f:
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
value = match.group(3)
# print "%s: %s" % (statFullName, value)
if not statFullName in data:
data[statFullName] = []
data[statFullName].append(float(value))
for stat, values in data.items():
# print "%s: %s" % (stat, ", ".join(values))
print "%s: min %s, max %s" % (stat, min(values), max(values))
|
Add first draft of opensim statistics file analyzer#!/usr/bin/python
import pprint
import re
import sys
if len(sys.argv) <= 1:
print "Usage: %s <stats-log-path>" % sys.argv[0]
sys.exit(1)
lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
data = {}
with open(sys.argv[1]) as f:
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
value = match.group(3)
# print "%s: %s" % (statFullName, value)
if not statFullName in data:
data[statFullName] = []
data[statFullName].append(float(value))
for stat, values in data.items():
# print "%s: %s" % (stat, ", ".join(values))
print "%s: min %s, max %s" % (stat, min(values), max(values))
|
<commit_before><commit_msg>Add first draft of opensim statistics file analyzer<commit_after>#!/usr/bin/python
import pprint
import re
import sys
if len(sys.argv) <= 1:
print "Usage: %s <stats-log-path>" % sys.argv[0]
sys.exit(1)
lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
data = {}
with open(sys.argv[1]) as f:
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
value = match.group(3)
# print "%s: %s" % (statFullName, value)
if not statFullName in data:
data[statFullName] = []
data[statFullName].append(float(value))
for stat, values in data.items():
# print "%s: %s" % (stat, ", ".join(values))
print "%s: min %s, max %s" % (stat, min(values), max(values))
|
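To see what the line regex above extracts, it can be exercised on a made-up line shaped like the format it expects; the sample below is an illustration only, not a real OpenSimulator log excerpt:
import re

lineRe = re.compile(r"(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
sample = "2013-01-01 12:00:00 - server.FrameTime : 89 ms"  # hypothetical line
match = lineRe.match(sample)
print(match.group(2))  # server.FrameTime  (stat name)
print(match.group(3))  # 89                (value)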
|
507a52905164d2814b0b43a6d61eb002dfe0662a
|
enerdata/datetime/work_and_holidays.py
|
enerdata/datetime/work_and_holidays.py
|
import calendar
from datetime import timedelta
def get_num_of_workdays_holidays(init_date, end_date, holidays_list):
workdays = 0
holidays = 0
_date = init_date
while _date <= end_date:
if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6)
) or (_date.date() in holidays_list):
holidays += 1
else:
workdays += 1
_date += timedelta(days=1)
return workdays, holidays
|
Add work and holidays get
|
Add work and holidays get
|
Python
|
mit
|
gisce/enerdata
|
Add work and holidays get
|
import calendar
from datetime import timedelta
def get_num_of_workdays_holidays(init_date, end_date, holidays_list):
workdays = 0
holidays = 0
_date = init_date
while _date <= end_date:
if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6)
) or (_date.date() in holidays_list):
holidays += 1
else:
workdays += 1
_date += timedelta(days=1)
return workdays, holidays
|
<commit_before><commit_msg>Add work and holidays get<commit_after>
|
import calendar
from datetime import timedelta
def get_num_of_workdays_holidays(init_date, end_date, holidays_list):
workdays = 0
holidays = 0
_date = init_date
while _date <= end_date:
if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6)
) or (_date.date() in holidays_list):
holidays += 1
else:
workdays += 1
_date += timedelta(days=1)
return workdays, holidays
|
Add work and holidays getimport calendar
from datetime import timedelta
def get_num_of_workdays_holidays(init_date, end_date, holidays_list):
workdays = 0
holidays = 0
_date = init_date
while _date <= end_date:
if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6)
) or (_date.date() in holidays_list):
holidays += 1
else:
workdays += 1
_date += timedelta(days=1)
return workdays, holidays
|
<commit_before><commit_msg>Add work and holidays get<commit_after>import calendar
from datetime import timedelta
def get_num_of_workdays_holidays(init_date, end_date, holidays_list):
workdays = 0
holidays = 0
_date = init_date
while _date <= end_date:
if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6)
) or (_date.date() in holidays_list):
holidays += 1
else:
workdays += 1
_date += timedelta(days=1)
return workdays, holidays
|
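A small usage sketch for the helper above, assuming datetime inputs, a holiday list of plain dates, and the module path below; 2015-01-01 (a Thursday) is supplied as a holiday purely for illustration:
from datetime import date, datetime
from enerdata.datetime.work_and_holidays import get_num_of_workdays_holidays  # assumed path

workdays, holidays = get_num_of_workdays_holidays(
    datetime(2015, 1, 1), datetime(2015, 1, 7), [date(2015, 1, 1)]
)
print(workdays, holidays)  # 4 3  (Fri/Mon/Tue/Wed are workdays; Thu holiday plus the weekend)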
|
59e78c024af6bdf62e9d4e2ed374a727362d3a28
|
doc/add_dash_anchors.py
|
doc/add_dash_anchors.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Add Dash-style anchors to already-generated HTML documentation.
This script iterates over pre-specified HTML files generated via
sphinx-build, finds all of the sections, and adds Dash-style anchors
so that when those HTML files are displayed in the Dash macOS app,
the sections are displayed in a Dash TOC on the right.
:author: Nitin Madnani (nmadnani@ets.org)
:organization: ETS
"""
import argparse
import logging
import re
import pathlib
import unicodedata
from urllib.parse import quote
from bs4 import BeautifulSoup
# pre-define the list of HTML files we want to modify
FILES_TO_MODIFY = ["advanced_usage.html", "api.html", "contributing.html",
"custom_notebooks.html", "evaluation.html",
"getting_started.html", "pipeline.html",
"internal.html", "tutorial.html",
"usage_rsmtool.html", "utilities.html", "who.html"]
PILCROW = unicodedata.lookup('PILCROW SIGN')
def main(): # noqa: D103
# set up an argument parser
parser = argparse.ArgumentParser(prog='add_dash_anchors.py')
parser.add_argument("htmldir",
type=pathlib.Path,
help="path to the already-built HTML documentation")
# parse given command line arguments
args = parser.parse_args()
# set up the logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
# iterate over all the built HTML files
for htmlfile in args.htmldir.glob("**/*.html"):
# we only care about the pre-specified html files
if htmlfile.name in FILES_TO_MODIFY:
logging.info(f'Processing {htmlfile.name} ...')
# parse the file
with open(htmlfile, 'r') as htmlfh:
soup = BeautifulSoup(htmlfh, features='html.parser')
# each HTML file has a main section which we do not need
# but we need _all_ of the other sections
sections = soup.body.div.find_all("div", class_="section")[1:]
for section in sections:
section_title = section.find(re.compile(r'^h[0-9]')).text
section_title = section_title.rstrip(PILCROW)
# convert this title to percent-encoded format which will be
# the name of our entry
entry_name = quote(section_title)
entry_type = 'Section'
anchor_name = f"//apple_ref/cpp/{entry_type}/{entry_name}"
# create a new anchor tag for this subsection
anchor_tag = soup.new_tag('a',
attrs={'name': anchor_name,
'class': "dashAnchor"})
# insert this new tag right before the section
section.insert_before(anchor_tag)
# overwrite the original HTML file
with open(htmlfile, 'w') as outfh:
outfh.write(str(soup))
if __name__ == '__main__':
main()
|
Add script to insert Dash TOC anchors in HTML files.
|
Add script to insert Dash TOC anchors in HTML files.
|
Python
|
apache-2.0
|
EducationalTestingService/rsmtool
|
Add script to insert Dash TOC anchors in HTML files.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Add Dash-style anchors to already-generated HTML documentation.
This script iterates over pre-specified HTML files generated via
sphinx-build, finds all of the sections, and adds Dash-style anchors
so that when those HTML files are displayed in the Dash macOS app,
the sections are displayed in a Dash TOC on the right.
:author: Nitin Madnani (nmadnani@ets.org)
:organization: ETS
"""
import argparse
import logging
import re
import pathlib
import unicodedata
from urllib.parse import quote
from bs4 import BeautifulSoup
# pre-define the list of HTML files we want to modify
FILES_TO_MODIFY = ["advanced_usage.html", "api.html", "contributing.html",
"custom_notebooks.html", "evaluation.html",
"getting_started.html", "pipeline.html",
"internal.html", "tutorial.html",
"usage_rsmtool.html", "utilities.html", "who.html"]
PILCROW = unicodedata.lookup('PILCROW SIGN')
def main(): # noqa: D103
# set up an argument parser
parser = argparse.ArgumentParser(prog='add_dash_anchors.py')
parser.add_argument("htmldir",
type=pathlib.Path,
help="path to the already-built HTML documentation")
# parse given command line arguments
args = parser.parse_args()
# set up the logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
# iterate over all the built HTML files
for htmlfile in args.htmldir.glob("**/*.html"):
# we only care about the pre-specified html files
if htmlfile.name in FILES_TO_MODIFY:
logging.info(f'Processing {htmlfile.name} ...')
# parse the file
with open(htmlfile, 'r') as htmlfh:
soup = BeautifulSoup(htmlfh, features='html.parser')
# each HTML file has a main section which we do not need
# but we need _all_ of the other sections
sections = soup.body.div.find_all("div", class_="section")[1:]
for section in sections:
section_title = section.find(re.compile(r'^h[0-9]')).text
section_title = section_title.rstrip(PILCROW)
# convert this title to percent-encoded format which will be
# the name of our entry
entry_name = quote(section_title)
entry_type = 'Section'
anchor_name = f"//apple_ref/cpp/{entry_type}/{entry_name}"
# create a new anchor tag for this subsection
anchor_tag = soup.new_tag('a',
attrs={'name': anchor_name,
'class': "dashAnchor"})
# insert this new tag right before the section
section.insert_before(anchor_tag)
# overwrite the original HTML file
with open(htmlfile, 'w') as outfh:
outfh.write(str(soup))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to insert Dash TOC anchors in HTML files.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Add Dash-style anchors to already-generated HTML documentation.
This script iterates over pre-specified HTML files generated via
sphinx-build, finds all of the sections, and adds Dash-style anchors
so that when those HTML files are displayed in the Dash macOS app,
the sections are displayed in a Dash TOC on the right.
:author: Nitin Madnani (nmadnani@ets.org)
:organization: ETS
"""
import argparse
import logging
import re
import pathlib
import unicodedata
from urllib.parse import quote
from bs4 import BeautifulSoup
# pre-define the list of HTML files we want to modify
FILES_TO_MODIFY = ["advanced_usage.html", "api.html", "contributing.html",
"custom_notebooks.html", "evaluation.html",
"getting_started.html", "pipeline.html",
"internal.html", "tutorial.html",
"usage_rsmtool.html", "utilities.html", "who.html"]
PILCROW = unicodedata.lookup('PILCROW SIGN')
def main(): # noqa: D103
# set up an argument parser
parser = argparse.ArgumentParser(prog='add_dash_anchors.py')
parser.add_argument("htmldir",
type=pathlib.Path,
help="path to the already-built HTML documentation")
# parse given command line arguments
args = parser.parse_args()
# set up the logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
# iterate over all the built HTML files
for htmlfile in args.htmldir.glob("**/*.html"):
# we only care about the pre-specified html files
if htmlfile.name in FILES_TO_MODIFY:
logging.info(f'Processing {htmlfile.name} ...')
# parse the file
with open(htmlfile, 'r') as htmlfh:
soup = BeautifulSoup(htmlfh, features='html.parser')
# each HTML file has a main section which we do not need
# but we need _all_ of the other sections
sections = soup.body.div.find_all("div", class_="section")[1:]
for section in sections:
section_title = section.find(re.compile(r'^h[0-9]')).text
section_title = section_title.rstrip(PILCROW)
# convert this title to percent-encoded format which will be
# the name of our entry
entry_name = quote(section_title)
entry_type = 'Section'
anchor_name = f"//apple_ref/cpp/{entry_type}/{entry_name}"
# create a new anchor tag for this subsection
anchor_tag = soup.new_tag('a',
attrs={'name': anchor_name,
'class': "dashAnchor"})
# insert this new tag right before the section
section.insert_before(anchor_tag)
# overwrite the original HTML file
with open(htmlfile, 'w') as outfh:
outfh.write(str(soup))
if __name__ == '__main__':
main()
|
Add script to insert Dash TOC anchors in HTML files.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Add Dash-style anchors to already-generated HTML documentation.
This script iterates over pre-specified HTML files generated via
sphinx-build, finds all of the sections, and adds Dash-style anchors
so that when those HTML files are displayed in the Dash macOS app,
the sections are displayed in a Dash TOC on the right.
:author: Nitin Madnani (nmadnani@ets.org)
:organization: ETS
"""
import argparse
import logging
import re
import pathlib
import unicodedata
from urllib.parse import quote
from bs4 import BeautifulSoup
# pre-define the list of HTML files we want to modify
FILES_TO_MODIFY = ["advanced_usage.html", "api.html", "contributing.html",
"custom_notebooks.html", "evaluation.html",
"getting_started.html", "pipeline.html",
"internal.html", "tutorial.html",
"usage_rsmtool.html", "utilities.html", "who.html"]
PILCROW = unicodedata.lookup('PILCROW SIGN')
def main(): # noqa: D103
# set up an argument parser
parser = argparse.ArgumentParser(prog='add_dash_anchors.py')
parser.add_argument("htmldir",
type=pathlib.Path,
help="path to the already-built HTML documentation")
# parse given command line arguments
args = parser.parse_args()
# set up the logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
# iterate over all the built HTML files
for htmlfile in args.htmldir.glob("**/*.html"):
# we only care about the pre-specified html files
if htmlfile.name in FILES_TO_MODIFY:
logging.info(f'Processing {htmlfile.name} ...')
# parse the file
with open(htmlfile, 'r') as htmlfh:
soup = BeautifulSoup(htmlfh, features='html.parser')
# each HTML file has a main section which we do not need
# but we need _all_ of the other sections
sections = soup.body.div.find_all("div", class_="section")[1:]
for section in sections:
section_title = section.find(re.compile(r'^h[0-9]')).text
section_title = section_title.rstrip(PILCROW)
# convert this title to percent-encoded format which will be
# the name of our entry
entry_name = quote(section_title)
entry_type = 'Section'
anchor_name = f"//apple_ref/cpp/{entry_type}/{entry_name}"
# create a new anchor tag for this subsection
anchor_tag = soup.new_tag('a',
attrs={'name': anchor_name,
'class': "dashAnchor"})
# insert this new tag right before the section
section.insert_before(anchor_tag)
# overwrite the original HTML file
with open(htmlfile, 'w') as outfh:
outfh.write(str(soup))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to insert Dash TOC anchors in HTML files.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Add Dash-style anchors to already-generated HTML documentation.
This script iterates over pre-specified HTML files generated via
sphinx-build, finds all of the sections, and adds Dash-style anchors
so that when those HTML files are displayed in the Dash macOS app,
the sections are displayed in a Dash TOC on the right.
:author: Nitin Madnani (nmadnani@ets.org)
:organization: ETS
"""
import argparse
import logging
import re
import pathlib
import unicodedata
from urllib.parse import quote
from bs4 import BeautifulSoup
# pre-define the list of HTML files we want to modify
FILES_TO_MODIFY = ["advanced_usage.html", "api.html", "contributing.html",
"custom_notebooks.html", "evaluation.html",
"getting_started.html", "pipeline.html",
"internal.html", "tutorial.html",
"usage_rsmtool.html", "utilities.html", "who.html"]
PILCROW = unicodedata.lookup('PILCROW SIGN')
def main(): # noqa: D103
# set up an argument parser
parser = argparse.ArgumentParser(prog='add_dash_anchors.py')
parser.add_argument("htmldir",
type=pathlib.Path,
help="path to the already-built HTML documentation")
# parse given command line arguments
args = parser.parse_args()
# set up the logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
# iterate over all the built HTML files
for htmlfile in args.htmldir.glob("**/*.html"):
# we only care about the pre-specified html files
if htmlfile.name in FILES_TO_MODIFY:
logging.info(f'Processing {htmlfile.name} ...')
# parse the file
with open(htmlfile, 'r') as htmlfh:
soup = BeautifulSoup(htmlfh, features='html.parser')
# each HTML file has a main section which we do not need
# but we need _all_ of the other sections
sections = soup.body.div.find_all("div", class_="section")[1:]
for section in sections:
section_title = section.find(re.compile(r'^h[0-9]')).text
section_title = section_title.rstrip(PILCROW)
# convert this title to percent-encoded format which will be
# the name of our entry
entry_name = quote(section_title)
entry_type = 'Section'
anchor_name = f"//apple_ref/cpp/{entry_type}/{entry_name}"
# create a new anchor tag for this subsection
anchor_tag = soup.new_tag('a',
attrs={'name': anchor_name,
'class': "dashAnchor"})
# insert this new tag right before the section
section.insert_before(anchor_tag)
# overwrite the original HTML file
with open(htmlfile, 'w') as outfh:
outfh.write(str(soup))
if __name__ == '__main__':
main()
|
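On a single section, the transformation above reduces to inserting one anchor tag; a minimal sketch of that step on a toy snippet (the section title here is made up):
from urllib.parse import quote
from bs4 import BeautifulSoup

html = '<div class="section"><h2>Getting Started</h2></div>'  # toy input
soup = BeautifulSoup(html, features="html.parser")
section = soup.find("div", class_="section")
title = section.find("h2").text
anchor = soup.new_tag(
    "a", attrs={"name": f"//apple_ref/cpp/Section/{quote(title)}", "class": "dashAnchor"}
)
section.insert_before(anchor)
print(soup)
# <a class="dashAnchor" name="//apple_ref/cpp/Section/Getting%20Started"></a><div class="section">...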
|
42301c24aa05c048e14c0f5c3ec42b13211d1e59
|
tests/unit/test_model_core.py
|
tests/unit/test_model_core.py
|
# -*- coding: UTF-8 -*-
"""
(Additional) Unit tests for :mod:`behave.model_core` module.
"""
from __future__ import print_function
from behave.model_core import Status
import pytest
# -----------------------------------------------------------------------------
# TESTS:
# -----------------------------------------------------------------------------
class TestStatus(object):
"""Test Status enum class.
In addition, checks that it remains partly backward compatible
with the older string-based status.
EXAMPLE::
status = Status.passed
assert status == "passed"
assert status != "failed"
assert status == Status.from_name("passed")
"""
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_string_value(self, enum_value):
"""Ensure that Status enum value can be compared with a string-status"""
assert enum_value == enum_value.name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_unknown_name(self, enum_value):
assert enum_value != "__UNKNOWN__"
assert not (enum_value == "__UNKNOWN__")
@pytest.mark.parametrize("enum_value, similar_name", [
(Status.passed, "Passed"),
(Status.failed, "FAILED"),
(Status.passed, "passed1"),
(Status.failed, "failed2"),
])
def test_equals__with_similar_name(self, enum_value, similar_name):
assert enum_value != similar_name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_from_name__with_known_names(self, enum_value):
assert enum_value == Status.from_name(enum_value.name)
@pytest.mark.parametrize("unknown_name", [
"Passed", "Failed", "passed2", "failed1"
])
def test_from_name__with_unknown_name_raises_lookuperror(self, unknown_name):
with pytest.raises(LookupError):
Status.from_name(unknown_name)
|
Add missing test for Status compatibility
|
Add missing test for Status compatibility
|
Python
|
bsd-2-clause
|
jenisys/behave,Abdoctor/behave,jenisys/behave,Abdoctor/behave
|
Add missing test for Status compatibility
|
# -*- coding: UTF-8 -*-
"""
(Additional) Unit tests for :mod:`behave.model_core` module.
"""
from __future__ import print_function
from behave.model_core import Status
import pytest
# -----------------------------------------------------------------------------
# TESTS:
# -----------------------------------------------------------------------------
class TestStatus(object):
"""Test Status enum class.
In addition, checks that it remains partly backward compatible
with the older string-based status.
EXAMPLE::
status = Status.passed
assert status == "passed"
assert status != "failed"
assert status == Status.from_name("passed")
"""
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_string_value(self, enum_value):
"""Ensure that Status enum value can be compared with a string-status"""
assert enum_value == enum_value.name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_unknown_name(self, enum_value):
assert enum_value != "__UNKNOWN__"
assert not (enum_value == "__UNKNOWN__")
@pytest.mark.parametrize("enum_value, similar_name", [
(Status.passed, "Passed"),
(Status.failed, "FAILED"),
(Status.passed, "passed1"),
(Status.failed, "failed2"),
])
def test_equals__with_similar_name(self, enum_value, similar_name):
assert enum_value != similar_name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_from_name__with_known_names(self, enum_value):
assert enum_value == Status.from_name(enum_value.name)
@pytest.mark.parametrize("unknown_name", [
"Passed", "Failed", "passed2", "failed1"
])
def test_from_name__with_unknown_name_raises_lookuperror(self, unknown_name):
with pytest.raises(LookupError):
Status.from_name(unknown_name)
|
<commit_before><commit_msg>Add missing test for Status compatibility<commit_after>
|
# -*- coding: UTF-8 -*-
"""
(Additional) Unit tests for :mod:`behave.model_core` module.
"""
from __future__ import print_function
from behave.model_core import Status
import pytest
# -----------------------------------------------------------------------------
# TESTS:
# -----------------------------------------------------------------------------
class TestStatus(object):
"""Test Status enum class.
In addition, checks that it remains partly backward compatible
with the older string-based status.
EXAMPLE::
status = Status.passed
assert status == "passed"
assert status != "failed"
assert status == Status.from_name("passed")
"""
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_string_value(self, enum_value):
"""Ensure that Status enum value can be compared with a string-status"""
assert enum_value == enum_value.name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_unknown_name(self, enum_value):
assert enum_value != "__UNKNOWN__"
assert not (enum_value == "__UNKNOWN__")
@pytest.mark.parametrize("enum_value, similar_name", [
(Status.passed, "Passed"),
(Status.failed, "FAILED"),
(Status.passed, "passed1"),
(Status.failed, "failed2"),
])
def test_equals__with_similar_name(self, enum_value, similar_name):
assert enum_value != similar_name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_from_name__with_known_names(self, enum_value):
assert enum_value == Status.from_name(enum_value.name)
@pytest.mark.parametrize("unknown_name", [
"Passed", "Failed", "passed2", "failed1"
])
def test_from_name__with_unknown_name_raises_lookuperror(self, unknown_name):
with pytest.raises(LookupError):
Status.from_name(unknown_name)
|
Add missing test for Status compatibility# -*- coding: UTF-8 -*-
"""
(Additional) Unit tests for :mod:`behave.model_core` module.
"""
from __future__ import print_function
from behave.model_core import Status
import pytest
# -----------------------------------------------------------------------------
# TESTS:
# -----------------------------------------------------------------------------
class TestStatus(object):
"""Test Status enum class.
In addition, checks that it remains partly backward compatible
with the older string-based status.
EXAMPLE::
status = Status.passed
assert status == "passed"
assert status != "failed"
assert status == Status.from_name("passed")
"""
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_string_value(self, enum_value):
"""Ensure that Status enum value can be compared with a string-status"""
assert enum_value == enum_value.name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_unknown_name(self, enum_value):
assert enum_value != "__UNKNOWN__"
assert not (enum_value == "__UNKNOWN__")
@pytest.mark.parametrize("enum_value, similar_name", [
(Status.passed, "Passed"),
(Status.failed, "FAILED"),
(Status.passed, "passed1"),
(Status.failed, "failed2"),
])
def test_equals__with_similar_name(self, enum_value, similar_name):
assert enum_value != similar_name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_from_name__with_known_names(self, enum_value):
assert enum_value == Status.from_name(enum_value.name)
@pytest.mark.parametrize("unknown_name", [
"Passed", "Failed", "passed2", "failed1"
])
def test_from_name__with_unknown_name_raises_lookuperror(self, unknown_name):
with pytest.raises(LookupError):
Status.from_name(unknown_name)
|
<commit_before><commit_msg>Add missing test for Status compatibility<commit_after># -*- coding: UTF-8 -*-
"""
(Additional) Unit tests for :mod:`behave.model_core` module.
"""
from __future__ import print_function
from behave.model_core import Status
import pytest
# -----------------------------------------------------------------------------
# TESTS:
# -----------------------------------------------------------------------------
class TestStatus(object):
"""Test Status enum class.
In addition, checks that it remains partly backward compatible
with the older string-based status.
EXAMPLE::
status = Status.passed
assert status == "passed"
assert status != "failed"
assert status == Status.from_name("passed")
"""
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_string_value(self, enum_value):
"""Ensure that Status enum value can be compared with a string-status"""
assert enum_value == enum_value.name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_equals__with_unknown_name(self, enum_value):
assert enum_value != "__UNKNOWN__"
assert not (enum_value == "__UNKNOWN__")
@pytest.mark.parametrize("enum_value, similar_name", [
(Status.passed, "Passed"),
(Status.failed, "FAILED"),
(Status.passed, "passed1"),
(Status.failed, "failed2"),
])
def test_equals__with_similar_name(self, enum_value, similar_name):
assert enum_value != similar_name
@pytest.mark.parametrize("enum_value", list(Status.__members__.values()))
def test_from_name__with_known_names(self, enum_value):
assert enum_value == Status.from_name(enum_value.name)
@pytest.mark.parametrize("unknown_name", [
"Passed", "Failed", "passed2", "failed1"
])
def test_from_name__with_unknown_name_raises_lookuperror(self, unknown_name):
with pytest.raises(LookupError):
Status.from_name(unknown_name)
|
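For context, a minimal sketch of the kind of enum these tests exercise, with string equality implemented via __eq__; this illustrates the tested contract, not behave's actual Status implementation:
from enum import Enum

class Status(Enum):
    passed = 1
    failed = 2

    def __eq__(self, other):
        # Backward compatibility: allow comparison against plain status strings.
        if isinstance(other, str):
            return self.name == other
        return super().__eq__(other)

    __hash__ = Enum.__hash__  # overriding __eq__ would otherwise drop hashability

    @classmethod
    def from_name(cls, name):
        # KeyError is a LookupError subclass, matching the tests above.
        return cls.__members__[name]

assert Status.passed == "passed"
assert Status.passed != "FAILED"
assert Status.from_name("failed") is Status.failed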
|
1721d2badb2168f79587d8c018ca65d89733da88
|
tests/test_keras.py
|
tests/test_keras.py
|
from __future__ import print_function
import numpy as np
np.random.seed(1337)
import keras.backend as K
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
# wrapper of ctc_cost
def ctc_cost(y_true, y_pred):
'''
CTC cost:
a theano wrapper for warp-ctc
Arguments:
y_true : label
y_pred : acts
'''
from theano_ctc import ctc_cost as warp_ctc_cost
# convert (batch size, timestep, target) to (timestep, batch size, target)
acts = K.permute_dimensions(y_pred, (1, 0, 2))
labels = K.cast(K.squeeze(y_true, axis=2), 'int32')
return warp_ctc_cost(acts, labels)
batch_size = 16
frame_len = 80
nb_feat = 120
nb_class = 36
nb_output = nb_class + 1 # add output for blank
inner_dim = 512
nb_cell = 1024
print("Building model...")
model = Sequential()
model.add(LSTM(inner_dim, input_shape = (frame_len, nb_feat), return_sequences = True))
model.add(BatchNormalization())
model.add(TimeDistributed(Dense(nb_output)))
model.summary()
# Compiling
opt = SGD(lr = 1e-5, momentum = 0.9, nesterov = True)
model.compile(optimizer = opt, loss = ctc_cost, sample_weight_mode = None)
# Generate dummy data
data = np.random.uniform(low = -5, high = 5, size = (batch_size, frame_len, nb_feat))
# Dummy labels in range [1,nb_class]. 0 = <blank>
label = 1 + np.random.randint(nb_class, size = (batch_size, frame_len, 1))
# Training
model.fit(data, label, nb_epoch = 5, batch_size = batch_size)
|
Add simple test for Keras
|
Add simple test for Keras
|
Python
|
bsd-3-clause
|
mcf06/theano_ctc
|
Add simple test for Keras
|
from __future__ import print_function
import numpy as np
np.random.seed(1337)
import keras.backend as K
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
# wrapper of ctc_cost
def ctc_cost(y_true, y_pred):
'''
CTC cost:
a theano wrapper for warp-ctc
Arguments:
y_true : label
y_pred : acts
'''
from theano_ctc import ctc_cost as warp_ctc_cost
# convert (batch size, timestep, target) to (timestep, batch size, target)
acts = K.permute_dimensions(y_pred, (1, 0, 2))
labels = K.cast(K.squeeze(y_true, axis=2), 'int32')
return warp_ctc_cost(acts, labels)
batch_size = 16
frame_len = 80
nb_feat = 120
nb_class = 36
nb_output = nb_class + 1 # add output for blank
inner_dim = 512
nb_cell = 1024
print("Building model...")
model = Sequential()
model.add(LSTM(inner_dim, input_shape = (frame_len, nb_feat), return_sequences = True))
model.add(BatchNormalization())
model.add(TimeDistributed(Dense(nb_output)))
model.summary()
# Compiling
opt = SGD(lr = 1e-5, momentum = 0.9, nesterov = True)
model.compile(optimizer = opt, loss = ctc_cost, sample_weight_mode = None)
# Generate dummy data
data = np.random.uniform(low = -5, high = 5, size = (batch_size, frame_len, nb_feat))
# Dummy labels in range [1,nb_class]. 0 = <blank>
label = 1 + np.random.randint(nb_class, size = (batch_size, frame_len, 1))
# Training
model.fit(data, label, nb_epoch = 5, batch_size = batch_size)
|
<commit_before><commit_msg>Add simple test for Keras<commit_after>
|
from __future__ import print_function
import numpy as np
np.random.seed(1337)
import keras.backend as K
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
# wrapper of ctc_cost
def ctc_cost(y_true, y_pred):
'''
CTC cost:
a theano wrapper for warp-ctc
Arguments:
y_true : label
y_pred : acts
'''
from theano_ctc import ctc_cost as warp_ctc_cost
# convert (batch size, timestep, target) to (timestep, batch size, target)
acts = K.permute_dimensions(y_pred, (1, 0, 2))
labels = K.cast(K.squeeze(y_true, axis=2), 'int32')
return warp_ctc_cost(acts, labels)
batch_size = 16
frame_len = 80
nb_feat = 120
nb_class = 36
nb_output = nb_class + 1 # add output for blank
inner_dim = 512
nb_cell = 1024
print("Building model...")
model = Sequential()
model.add(LSTM(inner_dim, input_shape = (frame_len, nb_feat), return_sequences = True))
model.add(BatchNormalization())
model.add(TimeDistributed(Dense(nb_output)))
model.summary()
# Compiling
opt = SGD(lr = 1e-5, momentum = 0.9, nesterov = True)
model.compile(optimizer = opt, loss = ctc_cost, sample_weight_mode = None)
# Generate dummy data
data = np.random.uniform(low = -5, high = 5, size = (batch_size, frame_len, nb_feat))
# Dummy labels in range [1,nb_class]. 0 = <blank>
label = 1 + np.random.randint(nb_class, size = (batch_size, frame_len, 1))
# Training
model.fit(data, label, nb_epoch = 5, batch_size = batch_size)
|
Add simple test for Kerasfrom __future__ import print_function
import numpy as np
np.random.seed(1337)
import keras.backend as K
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
# wrapper of ctc_cost
def ctc_cost(y_true, y_pred):
'''
CTC cost:
a theano wrapper for warp-ctc
Arguments:
y_true : label
y_pred : acts
'''
from theano_ctc import ctc_cost as warp_ctc_cost
# convert (batch size, timestep, target) to (timestep, batch size, target)
acts = K.permute_dimensions(y_pred, (1, 0, 2))
labels = K.cast(K.squeeze(y_true, axis=2), 'int32')
return warp_ctc_cost(acts, labels)
batch_size = 16
frame_len = 80
nb_feat = 120
nb_class = 36
nb_output = nb_class + 1 # add output for blank
inner_dim = 512
nb_cell = 1024
print("Building model...")
model = Sequential()
model.add(LSTM(inner_dim, input_shape = (frame_len, nb_feat), return_sequences = True))
model.add(BatchNormalization())
model.add(TimeDistributed(Dense(nb_output)))
model.summary()
# Compiling
opt = SGD(lr = 1e-5, momentum = 0.9, nesterov = True)
model.compile(optimizer = opt, loss = ctc_cost, sample_weight_mode = None)
# Generate dummy data
data = np.random.uniform(low = -5, high = 5, size = (batch_size, frame_len, nb_feat))
# Dummy labels in range [1,nb_class]. 0 = <blank>
label = 1 + np.random.randint(nb_class, size = (batch_size, frame_len, 1))
# Training
model.fit(data, label, nb_epoch = 5, batch_size = batch_size)
|
<commit_before><commit_msg>Add simple test for Keras<commit_after>from __future__ import print_function
import numpy as np
np.random.seed(1337)
import keras.backend as K
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
# wrapper of ctc_cost
def ctc_cost(y_true, y_pred):
'''
CTC cost:
a theano wrapper for warp-ctc
Arguments:
y_true : label
y_pred : acts
'''
from theano_ctc import ctc_cost as warp_ctc_cost
# convert (batch size, timestep, target) to (timestep, batch size, target)
acts = K.permute_dimensions(y_pred, (1, 0, 2))
labels = K.cast(K.squeeze(y_true, axis=2), 'int32')
return warp_ctc_cost(acts, labels)
batch_size = 16
frame_len = 80
nb_feat = 120
nb_class = 36
nb_output = nb_class + 1 # add output for blank
inner_dim = 512
nb_cell = 1024
print("Building model...")
model = Sequential()
model.add(LSTM(inner_dim, input_shape = (frame_len, nb_feat), return_sequences = True))
model.add(BatchNormalization())
model.add(TimeDistributed(Dense(nb_output)))
model.summary()
# Compiling
opt = SGD(lr = 1e-5, momentum = 0.9, nesterov = True)
model.compile(optimizer = opt, loss = ctc_cost, sample_weight_mode = None)
# Generate dummy data
data = np.random.uniform(low = -5, high = 5, size = (batch_size, frame_len, nb_feat))
# Dummy labels in range [1,nb_class]. 0 = <blank>
label = 1 + np.random.randint(nb_class, size = (batch_size, frame_len, 1))
# Training
model.fit(data, label, nb_epoch = 5, batch_size = batch_size)
|
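The axis reorder inside ctc_cost above, (batch, time, target) to (time, batch, target), can be sanity-checked with plain numpy; this sketch only mirrors the permutation, not the cost itself:
import numpy as np

batch_size, frame_len, nb_output = 16, 80, 37
acts_btc = np.zeros((batch_size, frame_len, nb_output))
# Same reordering as K.permute_dimensions(y_pred, (1, 0, 2)).
acts_tbc = np.transpose(acts_btc, (1, 0, 2))
print(acts_tbc.shape)  # (80, 16, 37): timestep first, as the comment above expects for warp-ctc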
|
9548c4411938397b4f2d8a7b49b46cdc6aca0a3b
|
powerline/segments/i3wm.py
|
powerline/segments/i3wm.py
|
# vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = ["workspace"]
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
if w['focused']: return "w_focused"
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
|
# vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = []
if w['focused']: group.append( 'w_focused' )
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
group.append( 'workspace' )
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
|
Fix highlighting groups for workspaces segment
|
Fix highlighting groups for workspaces segment
|
Python
|
mit
|
Luffin/powerline,Luffin/powerline,areteix/powerline,cyrixhero/powerline,darac/powerline,russellb/powerline,bezhermoso/powerline,QuLogic/powerline,xfumihiro/powerline,prvnkumar/powerline,lukw00/powerline,bartvm/powerline,wfscheper/powerline,xxxhycl2010/powerline,s0undt3ch/powerline,blindFS/powerline,areteix/powerline,kenrachynski/powerline,blindFS/powerline,IvanAli/powerline,s0undt3ch/powerline,seanfisk/powerline,magus424/powerline,s0undt3ch/powerline,EricSB/powerline,IvanAli/powerline,Liangjianghao/powerline,russellb/powerline,wfscheper/powerline,xxxhycl2010/powerline,xxxhycl2010/powerline,QuLogic/powerline,wfscheper/powerline,bezhermoso/powerline,seanfisk/powerline,darac/powerline,prvnkumar/powerline,cyrixhero/powerline,junix/powerline,kenrachynski/powerline,Luffin/powerline,bezhermoso/powerline,blindFS/powerline,firebitsbr/powerline,dragon788/powerline,S0lll0s/powerline,dragon788/powerline,russellb/powerline,kenrachynski/powerline,Liangjianghao/powerline,magus424/powerline,cyrixhero/powerline,EricSB/powerline,dragon788/powerline,keelerm84/powerline,firebitsbr/powerline,lukw00/powerline,bartvm/powerline,lukw00/powerline,EricSB/powerline,junix/powerline,S0lll0s/powerline,bartvm/powerline,DoctorJellyface/powerline,xfumihiro/powerline,seanfisk/powerline,keelerm84/powerline,firebitsbr/powerline,junix/powerline,Liangjianghao/powerline,S0lll0s/powerline,DoctorJellyface/powerline,areteix/powerline,QuLogic/powerline,DoctorJellyface/powerline,magus424/powerline,IvanAli/powerline,prvnkumar/powerline,xfumihiro/powerline,darac/powerline
|
# vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = ["workspace"]
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
if w['focused']: return "w_focused"
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
Fix highlighting groups for workspaces segment
|
# vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = []
if w['focused']: group.append( 'w_focused' )
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
group.append( 'workspace' )
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
|
<commit_before># vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = ["workspace"]
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
if w['focused']: return "w_focused"
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
<commit_msg>Fix highlighting groups for workspaces segment<commit_after>
|
# vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = []
if w['focused']: group.append( 'w_focused' )
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
group.append( 'workspace' )
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
|
# vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = ["workspace"]
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
if w['focused']: return "w_focused"
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
Fix highlighting groups for workspaces segment# vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = []
if w['focused']: group.append( 'w_focused' )
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
group.append( 'workspace' )
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
|
<commit_before># vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = ["workspace"]
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
if w['focused']: return "w_focused"
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
<commit_msg>Fix highlighting groups for workspaces segment<commit_after># vim:fileencoding=utf-8:noet
from powerline.theme import requires_segment_info
import i3
def calcgrp( w ):
group = []
if w['focused']: group.append( 'w_focused' )
if w['urgent']: group.append( 'w_urgent' )
if w['visible']: group.append( 'w_visible' )
group.append( 'workspace' )
return group
def workspaces( pl ):
'''Return workspace list
Highlight groups used: ``workspace``, ``w_visible``, ``w_focused``, ``w_urgent``
'''
return [ {'contents': w['name'], 'highlight_group': calcgrp( w )} for w in i3.get_workspaces() ]
|
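A standalone check of the corrected ordering (the function is copied from the segment above so the snippet runs without i3): the most specific group comes first and the generic workspace fallback last, which matters because powerline tries highlight groups in order.
def calcgrp(w):
    # Copied from the segment above for a standalone check.
    group = []
    if w['focused']: group.append('w_focused')
    if w['urgent']: group.append('w_urgent')
    if w['visible']: group.append('w_visible')
    group.append('workspace')
    return group

print(calcgrp({'focused': True, 'urgent': False, 'visible': True}))
# ['w_focused', 'w_visible', 'workspace']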
b9c2043489541c0eb55c171201f2908068f761fe
|
tests/test_schema_keywords.py
|
tests/test_schema_keywords.py
|
from . import PREFIX, CONN_INFO
import datajoint as dj
from nose.tools import assert_true
schema = dj.schema(PREFIX + '_keywords', locals(), connection=dj.conn(**CONN_INFO))
class A(dj.Manual):
definition = """
a_id: int # a id
"""
class B(dj.Manual):
source = None
definition = """
-> self.source
b_id: int # b id
"""
class C(dj.Part):
definition = """
-> master
name: varchar(128) # name
"""
class D(B):
source = A
def setup():
global A
global D
A = schema(A)
D = schema(D)
def teardown():
schema.drop(force=True)
def test_inherited_part_table():
assert_true('a_id' in D().heading.attributes)
assert_true('b_id' in D().heading.attributes)
assert_true('a_id' in D.C().heading.attributes)
assert_true('b_id' in D.C().heading.attributes)
assert_true('name' in D.C().heading.attributes)
|
Add tests for use of keywords in definition and using inherited Part table
|
Add tests for use of keywords in definition and using inherited Part table
|
Python
|
lgpl-2.1
|
dimitri-yatsenko/datajoint-python,eywalker/datajoint-python,datajoint/datajoint-python,fabiansinz/datajoint-python
|
Add tests for use of keywords in definition and using inherited Part table
|
from . import PREFIX, CONN_INFO
import datajoint as dj
from nose.tools import assert_true
schema = dj.schema(PREFIX + '_keywords', locals(), connection=dj.conn(**CONN_INFO))
class A(dj.Manual):
definition = """
a_id: int # a id
"""
class B(dj.Manual):
source = None
definition = """
-> self.source
b_id: int # b id
"""
class C(dj.Part):
definition = """
-> master
name: varchar(128) # name
"""
class D(B):
source = A
def setup():
global A
global D
A = schema(A)
D = schema(D)
def teardown():
schema.drop(force=True)
def test_inherited_part_table():
assert_true('a_id' in D().heading.attributes)
assert_true('b_id' in D().heading.attributes)
assert_true('a_id' in D.C().heading.attributes)
assert_true('b_id' in D.C().heading.attributes)
assert_true('name' in D.C().heading.attributes)
|
<commit_before><commit_msg>Add tests for use of keywords in definition and using inherited Part table<commit_after>
|
from . import PREFIX, CONN_INFO
import datajoint as dj
from nose.tools import assert_true
schema = dj.schema(PREFIX + '_keywords', locals(), connection=dj.conn(**CONN_INFO))
class A(dj.Manual):
definition = """
a_id: int # a id
"""
class B(dj.Manual):
source = None
definition = """
-> self.source
b_id: int # b id
"""
class C(dj.Part):
definition = """
-> master
name: varchar(128) # name
"""
class D(B):
source = A
def setup():
global A
global D
A = schema(A)
D = schema(D)
def teardown():
schema.drop(force=True)
def test_inherited_part_table():
assert_true('a_id' in D().heading.attributes)
assert_true('b_id' in D().heading.attributes)
assert_true('a_id' in D.C().heading.attributes)
assert_true('b_id' in D.C().heading.attributes)
assert_true('name' in D.C().heading.attributes)
|
Add tests for use of keywords in definition and using inherited Part tablefrom . import PREFIX, CONN_INFO
import datajoint as dj
from nose.tools import assert_true
schema = dj.schema(PREFIX + '_keywords', locals(), connection=dj.conn(**CONN_INFO))
class A(dj.Manual):
definition = """
a_id: int # a id
"""
class B(dj.Manual):
source = None
definition = """
-> self.source
b_id: int # b id
"""
class C(dj.Part):
definition = """
-> master
name: varchar(128) # name
"""
class D(B):
source = A
def setup():
global A
global D
A = schema(A)
D = schema(D)
def teardown():
schema.drop(force=True)
def test_inherited_part_table():
assert_true('a_id' in D().heading.attributes)
assert_true('b_id' in D().heading.attributes)
assert_true('a_id' in D.C().heading.attributes)
assert_true('b_id' in D.C().heading.attributes)
assert_true('name' in D.C().heading.attributes)
|
<commit_before><commit_msg>Add tests for use of keywords in definition and using inherited Part table<commit_after>from . import PREFIX, CONN_INFO
import datajoint as dj
from nose.tools import assert_true
schema = dj.schema(PREFIX + '_keywords', locals(), connection=dj.conn(**CONN_INFO))
class A(dj.Manual):
definition = """
a_id: int # a id
"""
class B(dj.Manual):
source = None
definition = """
-> self.source
b_id: int # b id
"""
class C(dj.Part):
definition = """
-> master
name: varchar(128) # name
"""
class D(B):
source = A
def setup():
global A
global D
A = schema(A)
D = schema(D)
def teardown():
schema.drop(force=True)
def test_inherited_part_table():
assert_true('a_id' in D().heading.attributes)
assert_true('b_id' in D().heading.attributes)
assert_true('a_id' in D.C().heading.attributes)
assert_true('b_id' in D.C().heading.attributes)
assert_true('name' in D.C().heading.attributes)
|
|
75043e0b91fe89d9be064ec65b7870f58f273c3d
|
python/simpleaudio_test.py
|
python/simpleaudio_test.py
|
import simpleaudio as sa
import time
import sys
wave_obj = sa.WaveObject.from_wave_file(sys.argv[1])
#for i in range(1000):
#play_obj = wave_obj.play()
#time.sleep(0.001)
play_obj = wave_obj.play()
play_obj.wait_done()
|
Add simpleaudio test play script
|
Add simpleaudio test play script
|
Python
|
mit
|
aapris/CernWall,aapris/CernWall
|
Add simpleaudio test play script
|
import simpleaudio as sa
import time
import sys
wave_obj = sa.WaveObject.from_wave_file(sys.argv[1])
#for i in range(1000):
#play_obj = wave_obj.play()
#time.sleep(0.001)
play_obj = wave_obj.play()
play_obj.wait_done()
|
<commit_before><commit_msg>Add simpleaudio test play script<commit_after>
|
import simpleaudio as sa
import time
import sys
wave_obj = sa.WaveObject.from_wave_file(sys.argv[1])
#for i in range(1000):
#play_obj = wave_obj.play()
#time.sleep(0.001)
play_obj = wave_obj.play()
play_obj.wait_done()
|
Add simpleaudio test play scriptimport simpleaudio as sa
import time
import sys
wave_obj = sa.WaveObject.from_wave_file(sys.argv[1])
#for i in range(1000):
#play_obj = wave_obj.play()
#time.sleep(0.001)
play_obj = wave_obj.play()
play_obj.wait_done()
|
<commit_before><commit_msg>Add simpleaudio test play script<commit_after>import simpleaudio as sa
import time
import sys
wave_obj = sa.WaveObject.from_wave_file(sys.argv[1])
#for i in range(1000):
#play_obj = wave_obj.play()
#time.sleep(0.001)
play_obj = wave_obj.play()
play_obj.wait_done()
|
|
6c0b0ea9b6e8ecf8ea1b1185ce7d17d12e9d6976
|
samples/scheduled_poweroff.py
|
samples/scheduled_poweroff.py
|
#!/usr/bin/env python
"""
Written by Gaël Berthaud-Müller
Github : https://github.com/blacksponge
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example code for using the task scheduler.
"""
import atexit
import argparse
import getpass
from datetime import datetime
from pyVmomi import vim
from pyVim import connect
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for scheduling a poweroff of a virtual machine')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--date', required=True, action='store',
help='Date and time used to create the scheduled task '
'with the format d/m/Y H:M')
parser.add_argument('-n', '--vmname', required=True, action='store',
help='VM name on which the action will be performed')
args = parser.parse_args()
return args
def main():
args = get_args()
try:
dt = datetime.strptime(args.date, '%d/%m/%Y %H:%M')
except ValueError:
print('Unrecognized date format')
raise
return -1
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
try:
si = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
except vim.fault.InvalidLogin:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(connect.Disconnect, si)
view = si.content.viewManager.CreateContainerView(si.content.rootFolder,
[vim.VirtualMachine],
True)
vms = [vm for vm in view.view if vm.name == args.vmname]
if not vms:
print('VM not found')
connect.Disconnect(si)
return -1
vm = vms[0]
spec = vim.scheduler.ScheduledTaskSpec()
spec.name = 'PowerOff vm %s' % args.vmname
spec.description = ''
spec.scheduler = vim.scheduler.OnceTaskScheduler()
spec.scheduler.runAt = dt
spec.action = vim.action.MethodAction()
spec.action.name = vim.VirtualMachine.PowerOff
spec.enabled = True
si.content.scheduledTaskManager.CreateScheduledTask(vm, spec)
if __name__ == "__main__":
main()
|
Add sample for using the task scheduler
|
Add sample for using the task scheduler
|
Python
|
apache-2.0
|
pathcl/pyvmomi-community-samples,vmware/pyvmomi-community-samples,prziborowski/pyvmomi-community-samples,jm66/pyvmomi-community-samples
|
Add sample for using the task scheduler
|
#!/usr/bin/env python
"""
Written by Gaël Berthaud-Müller
Github : https://github.com/blacksponge
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example code for using the task scheduler.
"""
import atexit
import argparse
import getpass
from datetime import datetime
from pyVmomi import vim
from pyVim import connect
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for scheduling a poweroff of a virtual machine')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--date', required=True, action='store',
help='Date and time used to create the scheduled task '
'with the format d/m/Y H:M')
parser.add_argument('-n', '--vmname', required=True, action='store',
help='VM name on which the action will be performed')
args = parser.parse_args()
return args
def main():
args = get_args()
try:
dt = datetime.strptime(args.date, '%d/%m/%Y %H:%M')
except ValueError:
print('Unrecognized date format')
raise
return -1
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
try:
si = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
except vim.fault.InvalidLogin:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(connect.Disconnect, si)
view = si.content.viewManager.CreateContainerView(si.content.rootFolder,
[vim.VirtualMachine],
True)
vms = [vm for vm in view.view if vm.name == args.vmname]
if not vms:
print('VM not found')
connect.Disconnect(si)
return -1
vm = vms[0]
spec = vim.scheduler.ScheduledTaskSpec()
spec.name = 'PowerOff vm %s' % args.vmname
spec.description = ''
spec.scheduler = vim.scheduler.OnceTaskScheduler()
spec.scheduler.runAt = dt
spec.action = vim.action.MethodAction()
spec.action.name = vim.VirtualMachine.PowerOff
spec.enabled = True
si.content.scheduledTaskManager.CreateScheduledTask(vm, spec)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add sample for using the task scheduler<commit_after>
|
#!/usr/bin/env python
"""
Written by Gaël Berthaud-Müller
Github : https://github.com/blacksponge
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example code for using the task scheduler.
"""
import atexit
import argparse
import getpass
from datetime import datetime
from pyVmomi import vim
from pyVim import connect
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for scheduling a poweroff of a virtual machine')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--date', required=True, action='store',
help='Date and time used to create the scheduled task '
'with the format d/m/Y H:M')
parser.add_argument('-n', '--vmname', required=True, action='store',
help='VM name on which the action will be performed')
args = parser.parse_args()
return args
def main():
args = get_args()
try:
dt = datetime.strptime(args.date, '%d/%m/%Y %H:%M')
except ValueError:
print('Unrecognized date format')
raise
return -1
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
try:
si = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
except vim.fault.InvalidLogin:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(connect.Disconnect, si)
view = si.content.viewManager.CreateContainerView(si.content.rootFolder,
[vim.VirtualMachine],
True)
vms = [vm for vm in view.view if vm.name == args.vmname]
if not vms:
print('VM not found')
connect.Disconnect(si)
return -1
vm = vms[0]
spec = vim.scheduler.ScheduledTaskSpec()
spec.name = 'PowerOff vm %s' % args.vmname
spec.description = ''
spec.scheduler = vim.scheduler.OnceTaskScheduler()
spec.scheduler.runAt = dt
spec.action = vim.action.MethodAction()
spec.action.name = vim.VirtualMachine.PowerOff
spec.enabled = True
si.content.scheduledTaskManager.CreateScheduledTask(vm, spec)
if __name__ == "__main__":
main()
|
Add sample for using the task scheduler#!/usr/bin/env python
"""
Written by Gaël Berthaud-Müller
Github : https://github.com/blacksponge
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example code for using the task scheduler.
"""
import atexit
import argparse
import getpass
from datetime import datetime
from pyVmomi import vim
from pyVim import connect
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for scheduling a poweroff of a virtual machine')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--date', required=True, action='store',
help='Date and time used to create the scheduled task '
'with the format d/m/Y H:M')
parser.add_argument('-n', '--vmname', required=True, action='store',
help='VM name on which the action will be performed')
args = parser.parse_args()
return args
def main():
args = get_args()
try:
dt = datetime.strptime(args.date, '%d/%m/%Y %H:%M')
except ValueError:
print('Unrecognized date format')
raise
return -1
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
try:
si = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
except vim.fault.InvalidLogin:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(connect.Disconnect, si)
view = si.content.viewManager.CreateContainerView(si.content.rootFolder,
[vim.VirtualMachine],
True)
vms = [vm for vm in view.view if vm.name == args.vmname]
if not vms:
print('VM not found')
connect.Disconnect(si)
return -1
vm = vms[0]
spec = vim.scheduler.ScheduledTaskSpec()
spec.name = 'PowerOff vm %s' % args.vmname
spec.description = ''
spec.scheduler = vim.scheduler.OnceTaskScheduler()
spec.scheduler.runAt = dt
spec.action = vim.action.MethodAction()
spec.action.name = vim.VirtualMachine.PowerOff
spec.enabled = True
si.content.scheduledTaskManager.CreateScheduledTask(vm, spec)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add sample for using the task scheduler<commit_after>#!/usr/bin/env python
"""
Written by Gaël Berthaud-Müller
Github : https://github.com/blacksponge
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Example code for using the task scheduler.
"""
import atexit
import argparse
import getpass
from datetime import datetime
from pyVmomi import vim
from pyVim import connect
def get_args():
parser = argparse.ArgumentParser(
description='Arguments for scheduling a poweroff of a virtual machine')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--date', required=True, action='store',
help='Date and time used to create the scheduled task '
'with the format d/m/Y H:M')
parser.add_argument('-n', '--vmname', required=True, action='store',
help='VM name on which the action will be performed')
args = parser.parse_args()
return args
def main():
args = get_args()
try:
dt = datetime.strptime(args.date, '%d/%m/%Y %H:%M')
except ValueError:
print('Unrecognized date format')
raise
return -1
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
try:
si = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
except vim.fault.InvalidLogin:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(connect.Disconnect, si)
view = si.content.viewManager.CreateContainerView(si.content.rootFolder,
[vim.VirtualMachine],
True)
vms = [vm for vm in view.view if vm.name == args.vmname]
if not vms:
print('VM not found')
connect.Disconnect(si)
return -1
vm = vms[0]
spec = vim.scheduler.ScheduledTaskSpec()
spec.name = 'PowerOff vm %s' % args.vmname
spec.description = ''
spec.scheduler = vim.scheduler.OnceTaskScheduler()
spec.scheduler.runAt = dt
spec.action = vim.action.MethodAction()
spec.action.name = vim.VirtualMachine.PowerOff
spec.enabled = True
si.content.scheduledTaskManager.CreateScheduledTask(vm, spec)
if __name__ == "__main__":
main()
|
|
bae0666e929918923843995d56782baa5d7c5c33
|
overlay/DataManager.py
|
overlay/DataManager.py
|
import sqlite3
from DataRegion import DataRegion
SELECT_DEPTH = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','depth_feet')
"""
SELECT_TEMPERATURE = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','fahrenheit')
"""
class DataManager:
def __init__(self):
self.depth_regions = []
self.temperature_regions = []
def load(self, db_file):
self.connection = sqlite3.connect(db_file)
self.depth_regions = self.process_query(SELECT_DEPTH)
self.temperature_regions = self.process_query(SELECT_TEMPERATURE)
self.connection.close()
def process_query(self, query):
running = False
regions = []
currentRegion = None
for (date, property, value) in self.connection.execute(query):
if property == "running":
running = value == 1.0
if running:
currentRegion = DataRegion()
regions.append(currentRegion)
else:
if currentRegion is not None and len(currentRegion.data) == 0:
regions.pop()
currentRegion = None
elif running:
currentRegion.addTimeData(date, value)
else:
print("hmm, got value, but we're not supposedly running")
return regions
def select_depths(self, start_time, end_time):
result = []
for region in self.depth_regions:
result.extend(region.dataInTimeRegion(start_time, end_time))
return result
|
Create class to manage data regions
|
Create class to manage data regions
This currently manages depth and temperature data only
|
Python
|
mit
|
thelonious/g2x,gizmo-cda/g2x,thelonious/g2x,gizmo-cda/g2x,gizmo-cda/g2x,gizmo-cda/g2x
|
Create class to manage data regions
This currently manages depth and temperature data only
|
import sqlite3
from DataRegion import DataRegion
SELECT_DEPTH = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','depth_feet')
"""
SELECT_TEMPERATURE = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','fahrenheit')
"""
class DataManager:
def __init__(self):
self.depth_regions = []
self.temperature_regions = []
def load(self, db_file):
self.connection = sqlite3.connect(db_file)
self.depth_regions = self.process_query(SELECT_DEPTH)
self.temperature_regions = self.process_query(SELECT_TEMPERATURE)
self.connection.close()
def process_query(self, query):
running = False
regions = []
currentRegion = None
for (date, property, value) in self.connection.execute(query):
if property == "running":
running = value == 1.0
if running:
currentRegion = DataRegion()
regions.append(currentRegion)
else:
if currentRegion is not None and len(currentRegion.data) == 0:
regions.pop()
currentRegion = None
elif running:
currentRegion.addTimeData(date, value)
else:
print("hmm, got value, but we're not supposedly running")
return regions
def select_depths(self, start_time, end_time):
result = []
for region in self.depth_regions:
result.extend(region.dataInTimeRegion(start_time, end_time))
return result
|
<commit_before><commit_msg>Create class to manage data regions
This currently manages depth and temperature data only<commit_after>
|
import sqlite3
from DataRegion import DataRegion
SELECT_DEPTH = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','depth_feet')
"""
SELECT_TEMPERATURE = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','fahrenheit')
"""
class DataManager:
def __init__(self):
self.depth_regions = []
self.temperature_regions = []
def load(self, db_file):
self.connection = sqlite3.connect(db_file)
self.depth_regions = self.process_query(SELECT_DEPTH)
self.temperature_regions = self.process_query(SELECT_TEMPERATURE)
self.connection.close()
def process_query(self, query):
running = False
regions = []
currentRegion = None
for (date, property, value) in self.connection.execute(query):
if property == "running":
running = value == 1.0
if running:
currentRegion = DataRegion()
regions.append(currentRegion)
else:
if currentRegion is not None and len(currentRegion.data) == 0:
regions.pop()
currentRegion = None
elif running:
currentRegion.addTimeData(date, value)
else:
print("hmm, got value, but we're not supposedly running")
return regions
def select_depths(self, start_time, end_time):
result = []
for region in self.depth_regions:
result.extend(region.dataInTimeRegion(start_time, end_time))
return result
|
Create class to manage data regions
This currently manages depth and temperature data onlyimport sqlite3
from DataRegion import DataRegion
SELECT_DEPTH = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','depth_feet')
"""
SELECT_TEMPERATURE = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','fahrenheit')
"""
class DataManager:
def __init__(self):
self.depth_regions = []
self.temperature_regions = []
def load(self, db_file):
self.connection = sqlite3.connect(db_file)
self.depth_regions = self.process_query(SELECT_DEPTH)
self.temperature_regions = self.process_query(SELECT_TEMPERATURE)
self.connection.close()
def process_query(self, query):
running = False
regions = []
currentRegion = None
for (date, property, value) in self.connection.execute(query):
if property == "running":
running = value == 1.0
if running:
currentRegion = DataRegion()
regions.append(currentRegion)
else:
if currentRegion is not None and len(currentRegion.data) == 0:
regions.pop()
currentRegion = None
elif running:
currentRegion.addTimeData(date, value)
else:
print("hmm, got value, but we're not supposedly running")
return regions
def select_depths(self, start_time, end_time):
result = []
for region in self.depth_regions:
result.extend(region.dataInTimeRegion(start_time, end_time))
return result
|
<commit_before><commit_msg>Create class to manage data regions
This currently manages depth and temperature data only<commit_after>import sqlite3
from DataRegion import DataRegion
SELECT_DEPTH = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','depth_feet')
"""
SELECT_TEMPERATURE = """
select
date, property, value
from
readings
where
device='Pressure/Temperature' and
property in ('running','fahrenheit')
"""
class DataManager:
def __init__(self):
self.depth_regions = []
self.temperature_regions = []
def load(self, db_file):
self.connection = sqlite3.connect(db_file)
self.depth_regions = self.process_query(SELECT_DEPTH)
self.temperature_regions = self.process_query(SELECT_TEMPERATURE)
self.connection.close()
def process_query(self, query):
running = False
regions = []
currentRegion = None
for (date, property, value) in self.connection.execute(query):
if property == "running":
running = value == 1.0
if running:
currentRegion = DataRegion()
regions.append(currentRegion)
else:
if currentRegion is not None and len(currentRegion.data) == 0:
regions.pop()
currentRegion = None
elif running:
currentRegion.addTimeData(date, value)
else:
print("hmm, got value, but we're not supposedly running")
return regions
def select_depths(self, start_time, end_time):
result = []
for region in self.depth_regions:
result.extend(region.dataInTimeRegion(start_time, end_time))
return result
|
|
1dcb41ba6444665a661fa425f07f3c1d2882d22f
|
src/python/BasicMapPartitions.py
|
src/python/BasicMapPartitions.py
|
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
>>> fetchCallSigns(b).size()
4
"""
import sys
import urllib3
from pyspark import SparkContext
def processCallSigns(signs):
"""Process call signs"""
http = urllib3.PoolManager()
requests = map(lambda x : http.request('GET', "http://qrzcq.com/call/" + x), signs)
return map(lambda x : x.data, requests)
def fetchCallSigns(input):
"""Fetch call signs"""
return input.mapPartitions(lambda callSigns : processCallSigns(callSigns))
if __name__ == "__main__":
master = "local"
if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "BasicMapPartitions")
input = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
output = sorted(fetchCallSigns(input).collect())
for str in output:
print "%s " % (str)
|
Add a basic map partitions examples for python
|
Add a basic map partitions examples for python
|
Python
|
mit
|
noprom/learning-spark,feynman0825/learning-spark,qingkaikong/learning-spark-examples,jindalcastle/learning-spark,mohitsh/learning-spark,ramyasrigangula/learning-spark,SunGuo/learning-spark,mmirolim/learning-spark,junwucs/learning-spark,tengteng/learning-spark,ellis429/learning-spark,asarraf/learning-spark,shimizust/learning-spark,huydx/learning-spark,concerned3rdparty/learning-spark,dsdinter/learning-spark-examples,kod3r/learning-spark,UsterNes/learning-spark,holdenk/learning-spark-examples,jindalcastle/learning-spark,kpraveen420/learning-spark,noprom/learning-spark,qingkaikong/learning-spark-examples,DINESHKUMARMURUGAN/learning-spark,XiaoqingWang/learning-spark,ramyasrigangula/learning-spark,junwucs/learning-spark,kpraveen420/learning-spark,dsdinter/learning-spark-examples,dsdinter/learning-spark-examples,DINESHKUMARMURUGAN/learning-spark,diogoaurelio/learning-spark,jindalcastle/learning-spark,asarraf/learning-spark,concerned3rdparty/learning-spark,NBSW/learning-spark,mmirolim/learning-spark,junwucs/learning-spark,jaehyuk/learning-spark,NBSW/learning-spark,jindalcastle/learning-spark,ellis429/learning-spark,shimizust/learning-spark,noprom/learning-spark,ellis429/learning-spark-examples,ellis429/learning-spark-examples,qingkaikong/learning-spark-examples,GatsbyNewton/learning-spark,kod3r/learning-spark,anjuncc/learning-spark-examples,huydx/learning-spark,JerryTseng/learning-spark,databricks/learning-spark,holdenk/learning-spark-examples,SunGuo/learning-spark,tengteng/learning-spark,ramyasrigangula/learning-spark,mohitsh/learning-spark,mmirolim/learning-spark,obinsanni/learning-spark,junwucs/learning-spark,kod3r/learning-spark,negokaz/learning-spark,feynman0825/learning-spark,databricks/learning-spark,huydx/learning-spark,bhagatsingh/learning-spark,zaxliu/learning-spark,feynman0825/learning-spark,huixiang/learning-spark,kpraveen420/learning-spark,bhagatsingh/learning-spark,obinsanni/learning-spark,concerned3rdparty/learning-spark,JerryTseng/learning-spark,shimizust/learning-spark,anjuncc/learning-spark-examples,diogoaurelio/learning-spark,UsterNes/learning-spark,JerryTseng/learning-spark,huydx/learning-spark,JerryTseng/learning-spark,diogoaurelio/learning-spark,GatsbyNewton/learning-spark,GatsbyNewton/learning-spark,huixiang/learning-spark,XiaoqingWang/learning-spark,feynman0825/learning-spark,obinsanni/learning-spark,ellis429/learning-spark,UsterNes/learning-spark,huixiang/learning-spark,tengteng/learning-spark,gaoxuesong/learning-spark,baokunguo/learning-spark-examples,mohitsh/learning-spark,mmirolim/learning-spark,bhagatsingh/learning-spark,kod3r/learning-spark,SunGuo/learning-spark,coursera4ashok/learning-spark,gaoxuesong/learning-spark,databricks/learning-spark,negokaz/learning-spark,coursera4ashok/learning-spark
|
Add a basic map partitions examples for python
|
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
>>> fetchCallSigns(b).size()
4
"""
import sys
import urllib3
from pyspark import SparkContext
def processCallSigns(signs):
"""Process call signs"""
http = urllib3.PoolManager()
requests = map(lambda x : http.request('GET', "http://qrzcq.com/call/" + x), signs)
return map(lambda x : x.data, requests)
def fetchCallSigns(input):
"""Fetch call signs"""
return input.mapPartitions(lambda callSigns : processCallSigns(callSigns))
if __name__ == "__main__":
master = "local"
if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "BasicMapPartitions")
input = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
output = sorted(fetchCallSigns(input).collect())
for str in output:
print "%s " % (str)
|
<commit_before><commit_msg>Add a basic map partitions examples for python<commit_after>
|
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
>>> fetchCallSigns(b).size()
4
"""
import sys
import urllib3
from pyspark import SparkContext
def processCallSigns(signs):
"""Process call signs"""
http = urllib3.PoolManager()
requests = map(lambda x : http.request('GET', "http://qrzcq.com/call/" + x), signs)
return map(lambda x : x.data, requests)
def fetchCallSigns(input):
"""Fetch call signs"""
return input.mapPartitions(lambda callSigns : processCallSigns(callSigns))
if __name__ == "__main__":
master = "local"
if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "BasicMapPartitions")
input = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
output = sorted(fetchCallSigns(input).collect())
for str in output:
print "%s " % (str)
|
Add a basic map partitions examples for python"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
>>> fetchCallSigns(b).size()
4
"""
import sys
import urllib3
from pyspark import SparkContext
def processCallSigns(signs):
"""Process call signs"""
http = urllib3.PoolManager()
requests = map(lambda x : http.request('GET', "http://qrzcq.com/call/" + x), signs)
return map(lambda x : x.data, requests)
def fetchCallSigns(input):
"""Fetch call signs"""
return input.mapPartitions(lambda callSigns : processCallSigns(callSigns))
if __name__ == "__main__":
master = "local"
if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "BasicMapPartitions")
input = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
output = sorted(fetchCallSigns(input).collect())
for str in output:
print "%s " % (str)
|
<commit_before><commit_msg>Add a basic map partitions examples for python<commit_after>"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
>>> fetchCallSigns(b).size()
4
"""
import sys
import urllib3
from pyspark import SparkContext
def processCallSigns(signs):
"""Process call signs"""
http = urllib3.PoolManager()
requests = map(lambda x : http.request('GET', "http://qrzcq.com/call/" + x), signs)
return map(lambda x : x.data, requests)
def fetchCallSigns(input):
"""Fetch call signs"""
return input.mapPartitions(lambda callSigns : processCallSigns(callSigns))
if __name__ == "__main__":
master = "local"
if len(sys.argv) == 2:
master = sys.argv[1]
sc = SparkContext(master, "BasicMapPartitions")
input = sc.parallelize(["KK6JKQ", "Ve3UoW", "kk6jlk", "W6BB"])
output = sorted(fetchCallSigns(input).collect())
for str in output:
print "%s " % (str)
|
|
e100e6be59d5c78a600637d89399c55f39242918
|
examples/XArray_Projections.py
|
examples/XArray_Projections.py
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
XArray Projection Handling
==========================
Use MetPy's XArray accessors to simplify opening a data file and plotting
data on a map using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Ensure xarray accessors are available
import metpy.io # noqa: F401
from metpy.testing import get_test_data
ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
data_var = ds.metpy.parse_cf('Temperature')
x = data_var.x
y = data_var.y
im_data = data_var.isel(time=0).sel(isobaric=1000.)
fig = plt.figure(figsize=(14, 14))
ax = fig.add_subplot(1, 1, 1, projection=data_var.metpy.cartopy_crs)
ax.imshow(im_data, extent=(x.min(), x.max(), y.min(), y.max()),
cmap='RdBu', origin='lower' if y[0] < y[-1] else 'upper')
ax.coastlines(color='tab:green', resolution='10m')
ax.add_feature(cfeature.LAKES.with_scale('10m'), facecolor='none', edgecolor='tab:blue')
ax.add_feature(cfeature.RIVERS.with_scale('10m'), edgecolor='tab:blue')
plt.show()
|
Add example of using xarray projection info
|
ENH: Add example of using xarray projection info
|
Python
|
bsd-3-clause
|
Unidata/MetPy,ShawnMurd/MetPy,dopplershift/MetPy,jrleeman/MetPy,dopplershift/MetPy,ahaberlie/MetPy,ahaberlie/MetPy,jrleeman/MetPy,Unidata/MetPy
|
ENH: Add example of using xarray projection info
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
XArray Projection Handling
==========================
Use MetPy's XArray accessors to simplify opening a data file and plotting
data on a map using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Ensure xarray accessors are available
import metpy.io # noqa: F401
from metpy.testing import get_test_data
ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
data_var = ds.metpy.parse_cf('Temperature')
x = data_var.x
y = data_var.y
im_data = data_var.isel(time=0).sel(isobaric=1000.)
fig = plt.figure(figsize=(14, 14))
ax = fig.add_subplot(1, 1, 1, projection=data_var.metpy.cartopy_crs)
ax.imshow(im_data, extent=(x.min(), x.max(), y.min(), y.max()),
cmap='RdBu', origin='lower' if y[0] < y[-1] else 'upper')
ax.coastlines(color='tab:green', resolution='10m')
ax.add_feature(cfeature.LAKES.with_scale('10m'), facecolor='none', edgecolor='tab:blue')
ax.add_feature(cfeature.RIVERS.with_scale('10m'), edgecolor='tab:blue')
plt.show()
|
<commit_before><commit_msg>ENH: Add example of using xarray projection info<commit_after>
|
# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
XArray Projection Handling
==========================
Use MetPy's XArray accessors to simplify opening a data file and plotting
data on a map using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Ensure xarray accessors are available
import metpy.io # noqa: F401
from metpy.testing import get_test_data
ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
data_var = ds.metpy.parse_cf('Temperature')
x = data_var.x
y = data_var.y
im_data = data_var.isel(time=0).sel(isobaric=1000.)
fig = plt.figure(figsize=(14, 14))
ax = fig.add_subplot(1, 1, 1, projection=data_var.metpy.cartopy_crs)
ax.imshow(im_data, extent=(x.min(), x.max(), y.min(), y.max()),
cmap='RdBu', origin='lower' if y[0] < y[-1] else 'upper')
ax.coastlines(color='tab:green', resolution='10m')
ax.add_feature(cfeature.LAKES.with_scale('10m'), facecolor='none', edgecolor='tab:blue')
ax.add_feature(cfeature.RIVERS.with_scale('10m'), edgecolor='tab:blue')
plt.show()
|
ENH: Add example of using xarray projection info# Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
XArray Projection Handling
==========================
Use MetPy's XArray accessors to simplify opening a data file and plotting
data on a map using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Ensure xarray accessors are available
import metpy.io # noqa: F401
from metpy.testing import get_test_data
ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
data_var = ds.metpy.parse_cf('Temperature')
x = data_var.x
y = data_var.y
im_data = data_var.isel(time=0).sel(isobaric=1000.)
fig = plt.figure(figsize=(14, 14))
ax = fig.add_subplot(1, 1, 1, projection=data_var.metpy.cartopy_crs)
ax.imshow(im_data, extent=(x.min(), x.max(), y.min(), y.max()),
cmap='RdBu', origin='lower' if y[0] < y[-1] else 'upper')
ax.coastlines(color='tab:green', resolution='10m')
ax.add_feature(cfeature.LAKES.with_scale('10m'), facecolor='none', edgecolor='tab:blue')
ax.add_feature(cfeature.RIVERS.with_scale('10m'), edgecolor='tab:blue')
plt.show()
|
<commit_before><commit_msg>ENH: Add example of using xarray projection info<commit_after># Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
XArray Projection Handling
==========================
Use MetPy's XArray accessors to simplify opening a data file and plotting
data on a map using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Ensure xarray accessors are available
import metpy.io # noqa: F401
from metpy.testing import get_test_data
ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
data_var = ds.metpy.parse_cf('Temperature')
x = data_var.x
y = data_var.y
im_data = data_var.isel(time=0).sel(isobaric=1000.)
fig = plt.figure(figsize=(14, 14))
ax = fig.add_subplot(1, 1, 1, projection=data_var.metpy.cartopy_crs)
ax.imshow(im_data, extent=(x.min(), x.max(), y.min(), y.max()),
cmap='RdBu', origin='lower' if y[0] < y[-1] else 'upper')
ax.coastlines(color='tab:green', resolution='10m')
ax.add_feature(cfeature.LAKES.with_scale('10m'), facecolor='none', edgecolor='tab:blue')
ax.add_feature(cfeature.RIVERS.with_scale('10m'), edgecolor='tab:blue')
plt.show()
|
|
76a9ffd876a7bd678e64c5c0055a020cf775137d
|
random_walks.py
|
random_walks.py
|
import numpy as np
from matplotlib import pylab as plt
np.random.seed(31415926) # Set a seed for reproducibility
N_STEPS = 1000
random_data = np.random.randint(0, 2, N_STEPS)
# A symmetric random walk
random_walk = np.where(random_data > 0, 1, -1).cumsum()
N_WALKS = 1000
random_data_matrix = np.random.randint(0, 2, size=(N_WALKS, N_STEPS))
# Multiple symmetric random walks
multiple_random_walks = np.where(random_data_matrix > 0, 1, -1).cumsum(axis=1)
|
Add a symmetric random walk script
|
Add a symmetric random walk script
|
Python
|
mit
|
yassineAlouini/ml-experiments,yassineAlouini/ml-experiments
|
Add a symmetric random walk script
|
import numpy as np
from matplotlib import pylab as plt
np.random.seed(31415926) # Set a seed for reproducibility
N_STEPS = 1000
random_data = np.random.randint(0, 2, N_STEPS)
# A symmetric random walk
random_walk = np.where(random_data > 0, 1, -1).cumsum()
N_WALKS = 1000
random_data_matrix = np.random.randint(0, 2, size=(N_WALKS, N_STEPS))
# Multiple symmetric random walks
multiple_random_walks = np.where(random_data_matrix > 0, 1, -1).cumsum(axis=1)
|
<commit_before><commit_msg>Add a symmetric random walk script<commit_after>
|
import numpy as np
from matplotlib import pylab as plt
np.random.seed(31415926) # Set a seed for reproducibility
N_STEPS = 1000
random_data = np.random.randint(0, 2, N_STEPS)
# A symmetric random walk
random_walk = np.where(random_data > 0, 1, -1).cumsum()
N_WALKS = 1000
random_data_matrix = np.random.randint(0, 2, size=(N_WALKS, N_STEPS))
# Multiple symmetric random walks
multiple_random_walks = np.where(random_data_matrix > 0, 1, -1).cumsum(axis=1)
|
Add a symmetric random walk scriptimport numpy as np
from matplotlib import pylab as plt
np.random.seed(31415926) # Set a seed for reproducibility
N_STEPS = 1000
random_data = np.random.randint(0, 2, N_STEPS)
# A symmetric random walk
random_walk = np.where(random_data > 0, 1, -1).cumsum()
N_WALKS = 1000
random_data_matrix = np.random.randint(0, 2, size=(N_WALKS, N_STEPS))
# Multiple symmetric random walks
multiple_random_walks = np.where(random_data_matrix > 0, 1, -1).cumsum(axis=1)
|
<commit_before><commit_msg>Add a symmetric random walk script<commit_after>import numpy as np
from matplotlib import pylab as plt
np.random.seed(31415926) # Set a seed for reproducibility
N_STEPS = 1000
random_data = np.random.randint(0, 2, N_STEPS)
# A symmetric random walk
random_walk = np.where(random_data > 0, 1, -1).cumsum()
N_WALKS = 1000
random_data_matrix = np.random.randint(0, 2, size=(N_WALKS, N_STEPS))
# Multiple symmetric random walks
multiple_random_walks = np.where(random_data_matrix > 0, 1, -1).cumsum(axis=1)
|
|
9aba00c3a2b89170585bb8741da43bf1a1874d2e
|
pydbus/exitable.py
|
pydbus/exitable.py
|
import inspect
class Exitable(object):
__slots__ = ("_at_exit_cbs")
def _at_exit(self, cb):
try:
self._at_exit_cbs
except AttributeError:
self._at_exit_cbs = []
self._at_exit_cbs.append(cb)
def __enter__(self):
return self
def __exit__(self, exc_type = None, exc_value = None, traceback = None):
if self._exited:
return
for cb in reversed(self._at_exit_cbs):
if len(inspect.getargspec(cb).args) == 3:
cb.__exit__(exc_type, exc_value, traceback)
else:
cb()
self._at_exit_cbs = None
@property
def _exited(self):
return self._at_exit_cbs is None
def ExitableWithAliases(*exit_methods):
class CustomExitable(Exitable):
pass
def exit(self):
self.__exit__()
for exit_method_name in exit_methods:
setattr(CustomExitable, exit_method_name, exit)
return CustomExitable
|
Add Exitable - a tool to simplify context managers.
|
Add Exitable - a tool to simplify context managers.
|
Python
|
lgpl-2.1
|
LEW21/pydbus,LEW21/pydbus
|
Add Exitable - a tool to simplify context managers.
|
import inspect
class Exitable(object):
__slots__ = ("_at_exit_cbs")
def _at_exit(self, cb):
try:
self._at_exit_cbs
except AttributeError:
self._at_exit_cbs = []
self._at_exit_cbs.append(cb)
def __enter__(self):
return self
def __exit__(self, exc_type = None, exc_value = None, traceback = None):
if self._exited:
return
for cb in reversed(self._at_exit_cbs):
if len(inspect.getargspec(cb).args) == 3:
cb.__exit__(exc_type, exc_value, traceback)
else:
cb()
self._at_exit_cbs = None
@property
def _exited(self):
return self._at_exit_cbs is None
def ExitableWithAliases(*exit_methods):
class CustomExitable(Exitable):
pass
def exit(self):
self.__exit__()
for exit_method_name in exit_methods:
setattr(CustomExitable, exit_method_name, exit)
return CustomExitable
|
<commit_before><commit_msg>Add Exitable - a tool to simplify context managers.<commit_after>
|
import inspect
class Exitable(object):
__slots__ = ("_at_exit_cbs")
def _at_exit(self, cb):
try:
self._at_exit_cbs
except AttributeError:
self._at_exit_cbs = []
self._at_exit_cbs.append(cb)
def __enter__(self):
return self
def __exit__(self, exc_type = None, exc_value = None, traceback = None):
if self._exited:
return
for cb in reversed(self._at_exit_cbs):
if len(inspect.getargspec(cb).args) == 3:
cb.__exit__(exc_type, exc_value, traceback)
else:
cb()
self._at_exit_cbs = None
@property
def _exited(self):
return self._at_exit_cbs is None
def ExitableWithAliases(*exit_methods):
class CustomExitable(Exitable):
pass
def exit(self):
self.__exit__()
for exit_method_name in exit_methods:
setattr(CustomExitable, exit_method_name, exit)
return CustomExitable
|
Add Exitable - a tool to simplify context managers.import inspect
class Exitable(object):
__slots__ = ("_at_exit_cbs")
def _at_exit(self, cb):
try:
self._at_exit_cbs
except AttributeError:
self._at_exit_cbs = []
self._at_exit_cbs.append(cb)
def __enter__(self):
return self
def __exit__(self, exc_type = None, exc_value = None, traceback = None):
if self._exited:
return
for cb in reversed(self._at_exit_cbs):
if len(inspect.getargspec(cb).args) == 3:
cb.__exit__(exc_type, exc_value, traceback)
else:
cb()
self._at_exit_cbs = None
@property
def _exited(self):
return self._at_exit_cbs is None
def ExitableWithAliases(*exit_methods):
class CustomExitable(Exitable):
pass
def exit(self):
self.__exit__()
for exit_method_name in exit_methods:
setattr(CustomExitable, exit_method_name, exit)
return CustomExitable
|
<commit_before><commit_msg>Add Exitable - a tool to simplify context managers.<commit_after>import inspect
class Exitable(object):
__slots__ = ("_at_exit_cbs")
def _at_exit(self, cb):
try:
self._at_exit_cbs
except AttributeError:
self._at_exit_cbs = []
self._at_exit_cbs.append(cb)
def __enter__(self):
return self
def __exit__(self, exc_type = None, exc_value = None, traceback = None):
if self._exited:
return
for cb in reversed(self._at_exit_cbs):
if len(inspect.getargspec(cb).args) == 3:
cb.__exit__(exc_type, exc_value, traceback)
else:
cb()
self._at_exit_cbs = None
@property
def _exited(self):
return self._at_exit_cbs is None
def ExitableWithAliases(*exit_methods):
class CustomExitable(Exitable):
pass
def exit(self):
self.__exit__()
for exit_method_name in exit_methods:
setattr(CustomExitable, exit_method_name, exit)
return CustomExitable
|