Dataset columns (name, type, and observed value range):

| column | type | length / values |
|---|---|---|
| commit | string | 40–40 chars |
| old_file | string | 4–118 chars |
| new_file | string | 4–118 chars |
| old_contents | string | 0–2.94k chars |
| new_contents | string | 1–4.43k chars |
| subject | string | 15–444 chars |
| message | string | 16–3.45k chars |
| lang | string (categorical) | 1 class |
| license | string (categorical) | 13 classes |
| repos | string | 5–43.2k chars |
| prompt | string | 17–4.58k chars |
| response | string | 1–4.43k chars |
| prompt_tagged | string | 58–4.62k chars |
| response_tagged | string | 1–4.43k chars |
| text | string | 132–7.29k chars |
| text_tagged | string | 173–7.33k chars |
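A minimal sketch of loading and iterating records with this schema, assuming the Hugging Face `datasets` library is used; the dataset identifier `user/commit-dataset` is a placeholder rather than the real path, and the column names are taken from the table above.

```python
from datasets import load_dataset

# Hypothetical dataset path; substitute the actual repository or local files.
ds = load_dataset("user/commit-dataset", split="train")

# Each record pairs a commit with the file contents before and after the
# change, plus prompt/response renderings of the same data for training.
for row in ds.select(range(3)):
    print(row["commit"], row["new_file"])
    print(row["subject"])
    print(row["new_contents"][:200])
```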
59372a01d2178616796379f4fe1cb76d9083eced
|
babybuddy/migrations/0020_update_language_en_to_en_us.py
|
babybuddy/migrations/0020_update_language_en_to_en_us.py
|
# Generated by Django 3.2.9 on 2021-12-13 21:25
from django.db import migrations
def update_language_en_to_en_us(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en':
settings.language = 'en-US'
settings.save()
def update_language_en_us_to_en(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en-US':
settings.language = 'en'
settings.save()
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0019_alter_settings_timezone'),
]
operations = [
migrations.RunPython(update_language_en_to_en_us, reverse_code=update_language_en_us_to_en),
]
|
Add migration for `en` to `en-US` language setting
|
Add migration for `en` to `en-US` language setting
Fixes #337
|
Python
|
bsd-2-clause
|
cdubz/babybuddy,cdubz/babybuddy,cdubz/babybuddy
|
Add migration for `en` to `en-US` language setting
Fixes #337
|
# Generated by Django 3.2.9 on 2021-12-13 21:25
from django.db import migrations
def update_language_en_to_en_us(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en':
settings.language = 'en-US'
settings.save()
def update_language_en_us_to_en(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en-US':
settings.language = 'en'
settings.save()
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0019_alter_settings_timezone'),
]
operations = [
migrations.RunPython(update_language_en_to_en_us, reverse_code=update_language_en_us_to_en),
]
|
<commit_before><commit_msg>Add migration for `en` to `en-US` language setting
Fixes #337<commit_after>
|
# Generated by Django 3.2.9 on 2021-12-13 21:25
from django.db import migrations
def update_language_en_to_en_us(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en':
settings.language = 'en-US'
settings.save()
def update_language_en_us_to_en(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en-US':
settings.language = 'en'
settings.save()
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0019_alter_settings_timezone'),
]
operations = [
migrations.RunPython(update_language_en_to_en_us, reverse_code=update_language_en_us_to_en),
]
|
Add migration for `en` to `en-US` language setting
Fixes #337# Generated by Django 3.2.9 on 2021-12-13 21:25
from django.db import migrations
def update_language_en_to_en_us(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en':
settings.language = 'en-US'
settings.save()
def update_language_en_us_to_en(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en-US':
settings.language = 'en'
settings.save()
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0019_alter_settings_timezone'),
]
operations = [
migrations.RunPython(update_language_en_to_en_us, reverse_code=update_language_en_us_to_en),
]
|
<commit_before><commit_msg>Add migration for `en` to `en-US` language setting
Fixes #337<commit_after># Generated by Django 3.2.9 on 2021-12-13 21:25
from django.db import migrations
def update_language_en_to_en_us(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en':
settings.language = 'en-US'
settings.save()
def update_language_en_us_to_en(apps, schema_editor):
Settings = apps.get_model('babybuddy', 'Settings')
for settings in Settings.objects.all():
if settings.language == 'en-US':
settings.language = 'en'
settings.save()
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0019_alter_settings_timezone'),
]
operations = [
migrations.RunPython(update_language_en_to_en_us, reverse_code=update_language_en_us_to_en),
]
|
|
e4eee76998a6dc957889d4cf6d5750303bca0968
|
numba-plsa/numba-plsa.py
|
numba-plsa/numba-plsa.py
|
import numpy as np
def normalize_basic(p):
p /= p.sum(axis=p.size[-1], keepdims=True)
def plsa(doc_term, n_topics, n_iter, method='basic'):
# Get size
n_docs, n_terms = doc_term.size
# Initialize distributions
topic_doc = np.random.rand(n_docs, n_topics)
normalize_basic(topic_doc)
term_topic = np.random.rand(n_topics, n_terms)
normalize_basic(term_topic)
# Run pLSA algorithm
if method == 'basic':
return plsa_basic(doc_term, topic_doc, term_topic, n_iter)
else:
raise ValueError('Unrecognized method <{0}>'.format(method))
def plsa_basic(doc_term, topic_doc, term_topic, n_iter):
n_docs, n_topics, n_terms = topic_doc.shape + [term_topic.shape[1]]
print """
Running basic pLSA algorithm
============================
Number of iterations: {0}
Number of documents: {1}
Number of terms: {2}
Number of topics: {3}
============================
""".format(n_iter, n_docs, n_terms, n_topics)
for i in range(n_iter):
print "Running iteration {0}".format(i)
### Expectation ###
topic_full = topic_doc[:, np.newaxis, :] * term_topic.T
normalize_basic(topic_full)
### Maximization ###
# Compute full likelihood table
dist_table = doc_term[:, :, np.newaxis] * topic_full
# Marginalize out documents
term_topic = np.sum(dist_table, axis=0).T
normalize_basic(term_topic)
# Marginalize out terms
topic_doc = np.sum(dist_table, axis=1)
normalize_basic(topic_doc)
return topic_full, topic_doc, term_topic
|
Add basic numpy pLSA implementation
|
Add basic numpy pLSA implementation
|
Python
|
mit
|
henryre/numba-plsa
|
Add basic numpy pLSA implementation
|
import numpy as np
def normalize_basic(p):
p /= p.sum(axis=p.size[-1], keepdims=True)
def plsa(doc_term, n_topics, n_iter, method='basic'):
# Get size
n_docs, n_terms = doc_term.size
# Initialize distributions
topic_doc = np.random.rand(n_docs, n_topics)
normalize_basic(topic_doc)
term_topic = np.random.rand(n_topics, n_terms)
normalize_basic(term_topic)
# Run pLSA algorithm
if method == 'basic':
return plsa_basic(doc_term, topic_doc, term_topic, n_iter)
else:
raise ValueError('Unrecognized method <{0}>'.format(method))
def plsa_basic(doc_term, topic_doc, term_topic, n_iter):
n_docs, n_topics, n_terms = topic_doc.shape + [term_topic.shape[1]]
print """
Running basic pLSA algorithm
============================
Number of iterations: {0}
Number of documents: {1}
Number of terms: {2}
Number of topics: {3}
============================
""".format(n_iter, n_docs, n_terms, n_topics)
for i in range(n_iter):
print "Running iteration {0}".format(i)
### Expectation ###
topic_full = topic_doc[:, np.newaxis, :] * term_topic.T
normalize_basic(topic_full)
### Maximization ###
# Compute full likelihood table
dist_table = doc_term[:, :, np.newaxis] * topic_full
# Marginalize out documents
term_topic = np.sum(dist_table, axis=0).T
normalize_basic(term_topic)
# Marginalize out terms
topic_doc = np.sum(dist_table, axis=1)
normalize_basic(topic_doc)
return topic_full, topic_doc, term_topic
|
<commit_before><commit_msg>Add basic numpy pLSA implementation<commit_after>
|
import numpy as np
def normalize_basic(p):
p /= p.sum(axis=p.size[-1], keepdims=True)
def plsa(doc_term, n_topics, n_iter, method='basic'):
# Get size
n_docs, n_terms = doc_term.size
# Initialize distributions
topic_doc = np.random.rand(n_docs, n_topics)
normalize_basic(topic_doc)
term_topic = np.random.rand(n_topics, n_terms)
normalize_basic(term_topic)
# Run pLSA algorithm
if method == 'basic':
return plsa_basic(doc_term, topic_doc, term_topic, n_iter)
else:
raise ValueError('Unrecognized method <{0}>'.format(method))
def plsa_basic(doc_term, topic_doc, term_topic, n_iter):
n_docs, n_topics, n_terms = topic_doc.shape + [term_topic.shape[1]]
print """
Running basic pLSA algorithm
============================
Number of iterations: {0}
Number of documents: {1}
Number of terms: {2}
Number of topics: {3}
============================
""".format(n_iter, n_docs, n_terms, n_topics)
for i in range(n_iter):
print "Running iteration {0}".format(i)
### Expectation ###
topic_full = topic_doc[:, np.newaxis, :] * term_topic.T
normalize_basic(topic_full)
### Maximization ###
# Compute full likelihood table
dist_table = doc_term[:, :, np.newaxis] * topic_full
# Marginalize out documents
term_topic = np.sum(dist_table, axis=0).T
normalize_basic(term_topic)
# Marginalize out terms
topic_doc = np.sum(dist_table, axis=1)
normalize_basic(topic_doc)
return topic_full, topic_doc, term_topic
|
Add basic numpy pLSA implementationimport numpy as np
def normalize_basic(p):
p /= p.sum(axis=p.size[-1], keepdims=True)
def plsa(doc_term, n_topics, n_iter, method='basic'):
# Get size
n_docs, n_terms = doc_term.size
# Initialize distributions
topic_doc = np.random.rand(n_docs, n_topics)
normalize_basic(topic_doc)
term_topic = np.random.rand(n_topics, n_terms)
normalize_basic(term_topic)
# Run pLSA algorithm
if method == 'basic':
return plsa_basic(doc_term, topic_doc, term_topic, n_iter)
else:
raise ValueError('Unrecognized method <{0}>'.format(method))
def plsa_basic(doc_term, topic_doc, term_topic, n_iter):
n_docs, n_topics, n_terms = topic_doc.shape + [term_topic.shape[1]]
print """
Running basic pLSA algorithm
============================
Number of iterations: {0}
Number of documents: {1}
Number of terms: {2}
Number of topics: {3}
============================
""".format(n_iter, n_docs, n_terms, n_topics)
for i in range(n_iter):
print "Running iteration {0}".format(i)
### Expectation ###
topic_full = topic_doc[:, np.newaxis, :] * term_topic.T
normalize_basic(topic_full)
### Maximization ###
# Compute full likelihood table
dist_table = doc_term[:, :, np.newaxis] * topic_full
# Marginalize out documents
term_topic = np.sum(dist_table, axis=0).T
normalize_basic(term_topic)
# Marginalize out terms
topic_doc = np.sum(dist_table, axis=1)
normalize_basic(topic_doc)
return topic_full, topic_doc, term_topic
|
<commit_before><commit_msg>Add basic numpy pLSA implementation<commit_after>import numpy as np
def normalize_basic(p):
p /= p.sum(axis=p.size[-1], keepdims=True)
def plsa(doc_term, n_topics, n_iter, method='basic'):
# Get size
n_docs, n_terms = doc_term.size
# Initialize distributions
topic_doc = np.random.rand(n_docs, n_topics)
normalize_basic(topic_doc)
term_topic = np.random.rand(n_topics, n_terms)
normalize_basic(term_topic)
# Run pLSA algorithm
if method == 'basic':
return plsa_basic(doc_term, topic_doc, term_topic, n_iter)
else:
raise ValueError('Unrecognized method <{0}>'.format(method))
def plsa_basic(doc_term, topic_doc, term_topic, n_iter):
n_docs, n_topics, n_terms = topic_doc.shape + [term_topic.shape[1]]
print """
Running basic pLSA algorithm
============================
Number of iterations: {0}
Number of documents: {1}
Number of terms: {2}
Number of topics: {3}
============================
""".format(n_iter, n_docs, n_terms, n_topics)
for i in range(n_iter):
print "Running iteration {0}".format(i)
### Expectation ###
topic_full = topic_doc[:, np.newaxis, :] * term_topic.T
normalize_basic(topic_full)
### Maximization ###
# Compute full likelihood table
dist_table = doc_term[:, :, np.newaxis] * topic_full
# Marginalize out documents
term_topic = np.sum(dist_table, axis=0).T
normalize_basic(term_topic)
# Marginalize out terms
topic_doc = np.sum(dist_table, axis=1)
normalize_basic(topic_doc)
return topic_full, topic_doc, term_topic
|
|
459cdc5c8f11510bd30f4a6553759bd778ad559f
|
mrequests/examples/get_deflate.py
|
mrequests/examples/get_deflate.py
|
import zlib
import mrequests as requests
host = "http://httpbin.org/"
#host = "http://localhost/"
url = host + "deflate"
r = requests.get(url, headers={"TE": "deflate"})
if r.status_code == 200:
print("Response body length: %i" % len(r.content))
text = zlib.decompress(r.content).decode("utf-8")
print("Deflated response text length: %i" % len(text))
print("Response text:\n")
print(text)
else:
print("Request failed. Status: {}".format(r.status_code))
r.close()
|
Add example script for 'deflate' transfer encoding
|
Add example script for 'deflate' transfer encoding
Signed-off-by: Christopher Arndt <711c73f64afdce07b7e38039a96d2224209e9a6c@chrisarndt.de>
|
Python
|
mit
|
SpotlightKid/micropython-stm-lib
|
Add example script for 'deflate' transfer encoding
Signed-off-by: Christopher Arndt <711c73f64afdce07b7e38039a96d2224209e9a6c@chrisarndt.de>
|
import zlib
import mrequests as requests
host = "http://httpbin.org/"
#host = "http://localhost/"
url = host + "deflate"
r = requests.get(url, headers={"TE": "deflate"})
if r.status_code == 200:
print("Response body length: %i" % len(r.content))
text = zlib.decompress(r.content).decode("utf-8")
print("Deflated response text length: %i" % len(text))
print("Response text:\n")
print(text)
else:
print("Request failed. Status: {}".format(r.status_code))
r.close()
|
<commit_before><commit_msg>Add example script for 'deflate' transfer encoding
Signed-off-by: Christopher Arndt <711c73f64afdce07b7e38039a96d2224209e9a6c@chrisarndt.de><commit_after>
|
import zlib
import mrequests as requests
host = "http://httpbin.org/"
#host = "http://localhost/"
url = host + "deflate"
r = requests.get(url, headers={"TE": "deflate"})
if r.status_code == 200:
print("Response body length: %i" % len(r.content))
text = zlib.decompress(r.content).decode("utf-8")
print("Deflated response text length: %i" % len(text))
print("Response text:\n")
print(text)
else:
print("Request failed. Status: {}".format(r.status_code))
r.close()
|
Add example script for 'deflate' transfer encoding
Signed-off-by: Christopher Arndt <711c73f64afdce07b7e38039a96d2224209e9a6c@chrisarndt.de>import zlib
import mrequests as requests
host = "http://httpbin.org/"
#host = "http://localhost/"
url = host + "deflate"
r = requests.get(url, headers={"TE": "deflate"})
if r.status_code == 200:
print("Response body length: %i" % len(r.content))
text = zlib.decompress(r.content).decode("utf-8")
print("Deflated response text length: %i" % len(text))
print("Response text:\n")
print(text)
else:
print("Request failed. Status: {}".format(r.status_code))
r.close()
|
<commit_before><commit_msg>Add example script for 'deflate' transfer encoding
Signed-off-by: Christopher Arndt <711c73f64afdce07b7e38039a96d2224209e9a6c@chrisarndt.de><commit_after>import zlib
import mrequests as requests
host = "http://httpbin.org/"
#host = "http://localhost/"
url = host + "deflate"
r = requests.get(url, headers={"TE": "deflate"})
if r.status_code == 200:
print("Response body length: %i" % len(r.content))
text = zlib.decompress(r.content).decode("utf-8")
print("Deflated response text length: %i" % len(text))
print("Response text:\n")
print(text)
else:
print("Request failed. Status: {}".format(r.status_code))
r.close()
|
|
baccda23c78fe8d4ec52ed3d912742f62b163aa5
|
osf/migrations/0130_merge_20180913_1438.py
|
osf/migrations/0130_merge_20180913_1438.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-13 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0129_merge_20180910_1926'),
('osf', '0129_merge_20180906_2006'),
]
operations = [
]
|
Add mergemigration for recent update with develop
|
Add mergemigration for recent update with develop
|
Python
|
apache-2.0
|
Johnetordoff/osf.io,felliott/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,baylee-d/osf.io,mattclark/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,felliott/osf.io,mfraezz/osf.io,adlius/osf.io,aaxelb/osf.io,mattclark/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,felliott/osf.io,felliott/osf.io,cslzchen/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,adlius/osf.io,pattisdr/osf.io,mfraezz/osf.io,mfraezz/osf.io,adlius/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,saradbowman/osf.io
|
Add mergemigration for recent update with develop
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-13 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0129_merge_20180910_1926'),
('osf', '0129_merge_20180906_2006'),
]
operations = [
]
|
<commit_before><commit_msg>Add mergemigration for recent update with develop<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-13 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0129_merge_20180910_1926'),
('osf', '0129_merge_20180906_2006'),
]
operations = [
]
|
Add mergemigration for recent update with develop# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-13 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0129_merge_20180910_1926'),
('osf', '0129_merge_20180906_2006'),
]
operations = [
]
|
<commit_before><commit_msg>Add mergemigration for recent update with develop<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-13 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0129_merge_20180910_1926'),
('osf', '0129_merge_20180906_2006'),
]
operations = [
]
|
|
de08545d699d2be9c1ee917bd208364ff902138b
|
wagtail/tests/testapp/migrations/0022_pagewithexcludedcopyfield.py
|
wagtail/tests/testapp/migrations/0022_pagewithexcludedcopyfield.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-02 01:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('tests', '0021_hidden_form_field'),
]
operations = [
migrations.CreateModel(
name='PageWithExcludedCopyField',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
('special_field', models.CharField(blank=True, default='Very Special', max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
Add testapp migratin for PageWithExcludedCopyField
|
Add testapp migratin for PageWithExcludedCopyField
|
Python
|
bsd-3-clause
|
mikedingjan/wagtail,kaedroho/wagtail,mikedingjan/wagtail,wagtail/wagtail,timorieber/wagtail,kaedroho/wagtail,torchbox/wagtail,gasman/wagtail,mixxorz/wagtail,mixxorz/wagtail,thenewguy/wagtail,nealtodd/wagtail,mixxorz/wagtail,gasman/wagtail,wagtail/wagtail,nealtodd/wagtail,mikedingjan/wagtail,thenewguy/wagtail,takeflight/wagtail,FlipperPA/wagtail,kaedroho/wagtail,wagtail/wagtail,takeflight/wagtail,jnns/wagtail,jnns/wagtail,rsalmaso/wagtail,takeflight/wagtail,wagtail/wagtail,jnns/wagtail,kaedroho/wagtail,rsalmaso/wagtail,wagtail/wagtail,FlipperPA/wagtail,nimasmi/wagtail,mixxorz/wagtail,torchbox/wagtail,gasman/wagtail,timorieber/wagtail,torchbox/wagtail,zerolab/wagtail,torchbox/wagtail,timorieber/wagtail,nimasmi/wagtail,zerolab/wagtail,nealtodd/wagtail,zerolab/wagtail,rsalmaso/wagtail,mixxorz/wagtail,nealtodd/wagtail,FlipperPA/wagtail,thenewguy/wagtail,jnns/wagtail,kaedroho/wagtail,thenewguy/wagtail,zerolab/wagtail,rsalmaso/wagtail,gasman/wagtail,rsalmaso/wagtail,timorieber/wagtail,FlipperPA/wagtail,nimasmi/wagtail,zerolab/wagtail,mikedingjan/wagtail,gasman/wagtail,thenewguy/wagtail,takeflight/wagtail,nimasmi/wagtail
|
Add testapp migratin for PageWithExcludedCopyField
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-02 01:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('tests', '0021_hidden_form_field'),
]
operations = [
migrations.CreateModel(
name='PageWithExcludedCopyField',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
('special_field', models.CharField(blank=True, default='Very Special', max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
<commit_before><commit_msg>Add testapp migratin for PageWithExcludedCopyField<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-02 01:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('tests', '0021_hidden_form_field'),
]
operations = [
migrations.CreateModel(
name='PageWithExcludedCopyField',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
('special_field', models.CharField(blank=True, default='Very Special', max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
Add testapp migratin for PageWithExcludedCopyField# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-02 01:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('tests', '0021_hidden_form_field'),
]
operations = [
migrations.CreateModel(
name='PageWithExcludedCopyField',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
('special_field', models.CharField(blank=True, default='Very Special', max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
<commit_before><commit_msg>Add testapp migratin for PageWithExcludedCopyField<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-02 01:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('tests', '0021_hidden_form_field'),
]
operations = [
migrations.CreateModel(
name='PageWithExcludedCopyField',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
('special_field', models.CharField(blank=True, default='Very Special', max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
|
85373313233dfad0183d52ba44235a5131cc0f7d
|
readthedocs/builds/migrations/0016_migrate_protected_versions_to_hidden.py
|
readthedocs/builds/migrations/0016_migrate_protected_versions_to_hidden.py
|
# Generated by Django 2.2.11 on 2020-03-18 18:27
from django.db import migrations
def forwards_func(apps, schema_editor):
"""Migrate all protected versions to be hidden."""
Version = apps.get_model('builds', 'Version')
Version.objects.filter(privacy_level='protected').update(hidden=True)
class Migration(migrations.Migration):
dependencies = [
('builds', '0015_add_hidden_field_to_version'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migrate protected versions to be hidden
|
Migrate protected versions to be hidden
|
Python
|
mit
|
rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org
|
Migrate protected versions to be hidden
|
# Generated by Django 2.2.11 on 2020-03-18 18:27
from django.db import migrations
def forwards_func(apps, schema_editor):
"""Migrate all protected versions to be hidden."""
Version = apps.get_model('builds', 'Version')
Version.objects.filter(privacy_level='protected').update(hidden=True)
class Migration(migrations.Migration):
dependencies = [
('builds', '0015_add_hidden_field_to_version'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
<commit_before><commit_msg>Migrate protected versions to be hidden<commit_after>
|
# Generated by Django 2.2.11 on 2020-03-18 18:27
from django.db import migrations
def forwards_func(apps, schema_editor):
"""Migrate all protected versions to be hidden."""
Version = apps.get_model('builds', 'Version')
Version.objects.filter(privacy_level='protected').update(hidden=True)
class Migration(migrations.Migration):
dependencies = [
('builds', '0015_add_hidden_field_to_version'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migrate protected versions to be hidden# Generated by Django 2.2.11 on 2020-03-18 18:27
from django.db import migrations
def forwards_func(apps, schema_editor):
"""Migrate all protected versions to be hidden."""
Version = apps.get_model('builds', 'Version')
Version.objects.filter(privacy_level='protected').update(hidden=True)
class Migration(migrations.Migration):
dependencies = [
('builds', '0015_add_hidden_field_to_version'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
<commit_before><commit_msg>Migrate protected versions to be hidden<commit_after># Generated by Django 2.2.11 on 2020-03-18 18:27
from django.db import migrations
def forwards_func(apps, schema_editor):
"""Migrate all protected versions to be hidden."""
Version = apps.get_model('builds', 'Version')
Version.objects.filter(privacy_level='protected').update(hidden=True)
class Migration(migrations.Migration):
dependencies = [
('builds', '0015_add_hidden_field_to_version'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
|
3cbd840a96628282e8ab99c2dc2cf4e7e711fa82
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/views.py
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/views.py
|
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
|
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return self.request.user
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
|
Use self.request.user instead of second query
|
Use self.request.user instead of second query
|
Python
|
bsd-3-clause
|
trungdong/cookiecutter-django,pydanny/cookiecutter-django,trungdong/cookiecutter-django,pydanny/cookiecutter-django,pydanny/cookiecutter-django,ryankanno/cookiecutter-django,pydanny/cookiecutter-django,ryankanno/cookiecutter-django,ryankanno/cookiecutter-django,ryankanno/cookiecutter-django,trungdong/cookiecutter-django,trungdong/cookiecutter-django
|
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
Use self.request.user instead of second query
|
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return self.request.user
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
|
<commit_before>from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
<commit_msg>Use self.request.user instead of second query<commit_after>
|
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return self.request.user
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
|
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
Use self.request.user instead of second queryfrom django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return self.request.user
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
|
<commit_before>from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
<commit_msg>Use self.request.user instead of second query<commit_after>from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return self.request.user
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
|
1d1627a98d206f002afaa4595ad6c8f332bc1e31
|
tests/unit/utils/test_utils.py
|
tests/unit/utils/test_utils.py
|
# coding=utf-8
'''
Test case for utils/__init__.py
'''
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
try:
import pytest
except ImportError:
pytest = None
import salt.utils
@skipIf(pytest is None, 'PyTest is missing')
class UtilsTestCase(TestCase):
'''
Test case for utils/__init__.py
'''
def test_get_module_environment(self):
'''
Test for salt.utils.get_module_environment
:return:
'''
_globals = {}
salt.utils.get_module_environment(_globals)
|
Add test case init commit
|
Add test case init commit
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add test case init commit
|
# coding=utf-8
'''
Test case for utils/__init__.py
'''
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
try:
import pytest
except ImportError:
pytest = None
import salt.utils
@skipIf(pytest is None, 'PyTest is missing')
class UtilsTestCase(TestCase):
'''
Test case for utils/__init__.py
'''
def test_get_module_environment(self):
'''
Test for salt.utils.get_module_environment
:return:
'''
_globals = {}
salt.utils.get_module_environment(_globals)
|
<commit_before><commit_msg>Add test case init commit<commit_after>
|
# coding=utf-8
'''
Test case for utils/__init__.py
'''
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
try:
import pytest
except ImportError:
pytest = None
import salt.utils
@skipIf(pytest is None, 'PyTest is missing')
class UtilsTestCase(TestCase):
'''
Test case for utils/__init__.py
'''
def test_get_module_environment(self):
'''
Test for salt.utils.get_module_environment
:return:
'''
_globals = {}
salt.utils.get_module_environment(_globals)
|
Add test case init commit# coding=utf-8
'''
Test case for utils/__init__.py
'''
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
try:
import pytest
except ImportError:
pytest = None
import salt.utils
@skipIf(pytest is None, 'PyTest is missing')
class UtilsTestCase(TestCase):
'''
Test case for utils/__init__.py
'''
def test_get_module_environment(self):
'''
Test for salt.utils.get_module_environment
:return:
'''
_globals = {}
salt.utils.get_module_environment(_globals)
|
<commit_before><commit_msg>Add test case init commit<commit_after># coding=utf-8
'''
Test case for utils/__init__.py
'''
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
try:
import pytest
except ImportError:
pytest = None
import salt.utils
@skipIf(pytest is None, 'PyTest is missing')
class UtilsTestCase(TestCase):
'''
Test case for utils/__init__.py
'''
def test_get_module_environment(self):
'''
Test for salt.utils.get_module_environment
:return:
'''
_globals = {}
salt.utils.get_module_environment(_globals)
|
|
e03bc6a462eca76a0b963a9c01b72aa474d6dd68
|
scripts/different_features.py
|
scripts/different_features.py
|
# different_features.py
# Invoke on the command line like: python common_features.py pbtd aui
# Creates a set of features common to both groups and then outputs the
# difference between these sets.
from tabulate import tabulate
import csv
import sys
import os.path as path
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)
def load_segments(filename):
'''Load a segment feature matrix from a CSV file, returning a list of
dictionaries with information about each segment.
'''
with open(filename, 'r') as f:
return [segment for segment in csv.DictReader(f)]
def feature_names(filename):
'''Load a list of available features from a given feature matrix file.
'''
with open(filename, 'r') as f:
header = list(csv.reader(f))[0]
return [feature for feature in header if feature != 'IPA']
def feature_set(segment_string):
all_segments = load_segments(path.join(base_directory, 'engine', 'data',
'features.csv'))
target_segments = [segment for segment in all_segments if segment['IPA'] in
segment_string]
common_features = {}
for feature, value in target_segments[0].items():
if feature != 'IPA':
if all(segment[feature] == value for segment in target_segments):
common_features[feature] = value
return common_features
def main(first, second):
first_features = feature_set(first)
second_features = feature_set(second)
all_features = feature_names(path.join(base_directory, 'engine', 'data',
'features.csv'))
results = []
for feature in all_features:
if feature in first_features and feature in second_features:
if first_features[feature] != second_features[feature]:
results.append([feature, first_features[feature],
second_features[feature]])
print(tabulate(results, headers=['Feature', f'Group 1 ({first})', f'Group 2 ({second})']))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
Add script to isolate differences between two groups of segments
|
Add script to isolate differences between two groups of segments
|
Python
|
mit
|
kdelwat/LangEvolve,kdelwat/LangEvolve,kdelwat/LangEvolve
|
Add script to isolate differences between two groups of segments
|
# different_features.py
# Invoke on the command line like: python common_features.py pbtd aui
# Creates a set of features common to both groups and then outputs the
# difference between these sets.
from tabulate import tabulate
import csv
import sys
import os.path as path
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)
def load_segments(filename):
'''Load a segment feature matrix from a CSV file, returning a list of
dictionaries with information about each segment.
'''
with open(filename, 'r') as f:
return [segment for segment in csv.DictReader(f)]
def feature_names(filename):
'''Load a list of available features from a given feature matrix file.
'''
with open(filename, 'r') as f:
header = list(csv.reader(f))[0]
return [feature for feature in header if feature != 'IPA']
def feature_set(segment_string):
all_segments = load_segments(path.join(base_directory, 'engine', 'data',
'features.csv'))
target_segments = [segment for segment in all_segments if segment['IPA'] in
segment_string]
common_features = {}
for feature, value in target_segments[0].items():
if feature != 'IPA':
if all(segment[feature] == value for segment in target_segments):
common_features[feature] = value
return common_features
def main(first, second):
first_features = feature_set(first)
second_features = feature_set(second)
all_features = feature_names(path.join(base_directory, 'engine', 'data',
'features.csv'))
results = []
for feature in all_features:
if feature in first_features and feature in second_features:
if first_features[feature] != second_features[feature]:
results.append([feature, first_features[feature],
second_features[feature]])
print(tabulate(results, headers=['Feature', f'Group 1 ({first})', f'Group 2 ({second})']))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add script to isolate differences between two groups of segments<commit_after>
|
# different_features.py
# Invoke on the command line like: python common_features.py pbtd aui
# Creates a set of features common to both groups and then outputs the
# difference between these sets.
from tabulate import tabulate
import csv
import sys
import os.path as path
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)
def load_segments(filename):
'''Load a segment feature matrix from a CSV file, returning a list of
dictionaries with information about each segment.
'''
with open(filename, 'r') as f:
return [segment for segment in csv.DictReader(f)]
def feature_names(filename):
'''Load a list of available features from a given feature matrix file.
'''
with open(filename, 'r') as f:
header = list(csv.reader(f))[0]
return [feature for feature in header if feature != 'IPA']
def feature_set(segment_string):
all_segments = load_segments(path.join(base_directory, 'engine', 'data',
'features.csv'))
target_segments = [segment for segment in all_segments if segment['IPA'] in
segment_string]
common_features = {}
for feature, value in target_segments[0].items():
if feature != 'IPA':
if all(segment[feature] == value for segment in target_segments):
common_features[feature] = value
return common_features
def main(first, second):
first_features = feature_set(first)
second_features = feature_set(second)
all_features = feature_names(path.join(base_directory, 'engine', 'data',
'features.csv'))
results = []
for feature in all_features:
if feature in first_features and feature in second_features:
if first_features[feature] != second_features[feature]:
results.append([feature, first_features[feature],
second_features[feature]])
print(tabulate(results, headers=['Feature', f'Group 1 ({first})', f'Group 2 ({second})']))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
Add script to isolate differences between two groups of segments# different_features.py
# Invoke on the command line like: python common_features.py pbtd aui
# Creates a set of features common to both groups and then outputs the
# difference between these sets.
from tabulate import tabulate
import csv
import sys
import os.path as path
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)
def load_segments(filename):
'''Load a segment feature matrix from a CSV file, returning a list of
dictionaries with information about each segment.
'''
with open(filename, 'r') as f:
return [segment for segment in csv.DictReader(f)]
def feature_names(filename):
'''Load a list of available features from a given feature matrix file.
'''
with open(filename, 'r') as f:
header = list(csv.reader(f))[0]
return [feature for feature in header if feature != 'IPA']
def feature_set(segment_string):
all_segments = load_segments(path.join(base_directory, 'engine', 'data',
'features.csv'))
target_segments = [segment for segment in all_segments if segment['IPA'] in
segment_string]
common_features = {}
for feature, value in target_segments[0].items():
if feature != 'IPA':
if all(segment[feature] == value for segment in target_segments):
common_features[feature] = value
return common_features
def main(first, second):
first_features = feature_set(first)
second_features = feature_set(second)
all_features = feature_names(path.join(base_directory, 'engine', 'data',
'features.csv'))
results = []
for feature in all_features:
if feature in first_features and feature in second_features:
if first_features[feature] != second_features[feature]:
results.append([feature, first_features[feature],
second_features[feature]])
print(tabulate(results, headers=['Feature', f'Group 1 ({first})', f'Group 2 ({second})']))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add script to isolate differences between two groups of segments<commit_after># different_features.py
# Invoke on the command line like: python common_features.py pbtd aui
# Creates a set of features common to both groups and then outputs the
# difference between these sets.
from tabulate import tabulate
import csv
import sys
import os.path as path
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)
def load_segments(filename):
'''Load a segment feature matrix from a CSV file, returning a list of
dictionaries with information about each segment.
'''
with open(filename, 'r') as f:
return [segment for segment in csv.DictReader(f)]
def feature_names(filename):
'''Load a list of available features from a given feature matrix file.
'''
with open(filename, 'r') as f:
header = list(csv.reader(f))[0]
return [feature for feature in header if feature != 'IPA']
def feature_set(segment_string):
all_segments = load_segments(path.join(base_directory, 'engine', 'data',
'features.csv'))
target_segments = [segment for segment in all_segments if segment['IPA'] in
segment_string]
common_features = {}
for feature, value in target_segments[0].items():
if feature != 'IPA':
if all(segment[feature] == value for segment in target_segments):
common_features[feature] = value
return common_features
def main(first, second):
first_features = feature_set(first)
second_features = feature_set(second)
all_features = feature_names(path.join(base_directory, 'engine', 'data',
'features.csv'))
results = []
for feature in all_features:
if feature in first_features and feature in second_features:
if first_features[feature] != second_features[feature]:
results.append([feature, first_features[feature],
second_features[feature]])
print(tabulate(results, headers=['Feature', f'Group 1 ({first})', f'Group 2 ({second})']))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
|
c23c9d562fc7f3fb99b1f57832db2efd2441d992
|
new_equation.py
|
new_equation.py
|
#! /usr/bin/env python
from __future__ import print_function
import datetime
import os
import sys
import json
import uuid
if len(sys.argv) > 1:
sys.stderr.write("Usage: python "+sys.argv[0]+" 'command-invocation'"+'\n')
sys.exit(1)
def get_year():
now = datetime.datetime.now()
return now.year
def get_username():
try:
# POSIX only
import pwd
gecos_field = pwd.getpwuid(os.getuid()).pw_gecos
full_name = gecos_field.split(',')[0]
return full_name
except ImportError:
import getpass
return getpass.getuser()
new_uuid = str(uuid.uuid4())
equajson = \
{
"copying": {
"authors": [
get_username()
],
"license-name": "MIT (Expat) License",
"license-url": "http://opensource.org/licenses/MIT",
"year": get_year(),
},
"description": {
"verbose": "<FIXME>",
"terse": "<FIXME>",
},
"markup-languages": {
"LaTeX": [
{
"markup": "<FIXME>"
}
]
},
"relevant-urls": [
{
"url": "<FIXME>"
}
],
"unicode-pretty-print": {
"multiline": [
"<FIXME>",
"<FIXME>",
"<FIXME>",
],
"one-line": "<FIXME>",
"parameters": {
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
},
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
}
}
},
"uuid": new_uuid
}
root = sys.path[0]
new_filename = new_uuid + '.json'
new_filepath = os.path.join(root, 'equajson', new_filename)
with open(new_filepath, 'w') as new_file:
json.dump(equajson, new_file, indent=4, separators=(',', ': '), sort_keys=True)
print('Created new equation:\n{}'.format(new_filepath))
|
Add an equation generator script.
|
Add an equation generator script.
|
Python
|
mit
|
nbeaver/equajson
|
Add an equation generator script.
|
#! /usr/bin/env python
from __future__ import print_function
import datetime
import os
import sys
import json
import uuid
if len(sys.argv) > 1:
sys.stderr.write("Usage: python "+sys.argv[0]+" 'command-invocation'"+'\n')
sys.exit(1)
def get_year():
now = datetime.datetime.now()
return now.year
def get_username():
try:
# POSIX only
import pwd
gecos_field = pwd.getpwuid(os.getuid()).pw_gecos
full_name = gecos_field.split(',')[0]
return full_name
except ImportError:
import getpass
return getpass.getuser()
new_uuid = str(uuid.uuid4())
equajson = \
{
"copying": {
"authors": [
get_username()
],
"license-name": "MIT (Expat) License",
"license-url": "http://opensource.org/licenses/MIT",
"year": get_year(),
},
"description": {
"verbose": "<FIXME>",
"terse": "<FIXME>",
},
"markup-languages": {
"LaTeX": [
{
"markup": "<FIXME>"
}
]
},
"relevant-urls": [
{
"url": "<FIXME>"
}
],
"unicode-pretty-print": {
"multiline": [
"<FIXME>",
"<FIXME>",
"<FIXME>",
],
"one-line": "<FIXME>",
"parameters": {
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
},
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
}
}
},
"uuid": new_uuid
}
root = sys.path[0]
new_filename = new_uuid + '.json'
new_filepath = os.path.join(root, 'equajson', new_filename)
with open(new_filepath, 'w') as new_file:
json.dump(equajson, new_file, indent=4, separators=(',', ': '), sort_keys=True)
print('Created new equation:\n{}'.format(new_filepath))
|
<commit_before><commit_msg>Add an equation generator script.<commit_after>
|
#! /usr/bin/env python
from __future__ import print_function
import datetime
import os
import sys
import json
import uuid
if len(sys.argv) > 1:
sys.stderr.write("Usage: python "+sys.argv[0]+" 'command-invocation'"+'\n')
sys.exit(1)
def get_year():
now = datetime.datetime.now()
return now.year
def get_username():
try:
# POSIX only
import pwd
gecos_field = pwd.getpwuid(os.getuid()).pw_gecos
full_name = gecos_field.split(',')[0]
return full_name
except ImportError:
import getpass
return getpass.getuser()
new_uuid = str(uuid.uuid4())
equajson = \
{
"copying": {
"authors": [
get_username()
],
"license-name": "MIT (Expat) License",
"license-url": "http://opensource.org/licenses/MIT",
"year": get_year(),
},
"description": {
"verbose": "<FIXME>",
"terse": "<FIXME>",
},
"markup-languages": {
"LaTeX": [
{
"markup": "<FIXME>"
}
]
},
"relevant-urls": [
{
"url": "<FIXME>"
}
],
"unicode-pretty-print": {
"multiline": [
"<FIXME>",
"<FIXME>",
"<FIXME>",
],
"one-line": "<FIXME>",
"parameters": {
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
},
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
}
}
},
"uuid": new_uuid
}
root = sys.path[0]
new_filename = new_uuid + '.json'
new_filepath = os.path.join(root, 'equajson', new_filename)
with open(new_filepath, 'w') as new_file:
json.dump(equajson, new_file, indent=4, separators=(',', ': '), sort_keys=True)
print('Created new equation:\n{}'.format(new_filepath))
|
Add an equation generator script.#! /usr/bin/env python
from __future__ import print_function
import datetime
import os
import sys
import json
import uuid
if len(sys.argv) > 1:
sys.stderr.write("Usage: python "+sys.argv[0]+" 'command-invocation'"+'\n')
sys.exit(1)
def get_year():
now = datetime.datetime.now()
return now.year
def get_username():
try:
# POSIX only
import pwd
gecos_field = pwd.getpwuid(os.getuid()).pw_gecos
full_name = gecos_field.split(',')[0]
return full_name
except ImportError:
import getpass
return getpass.getuser()
new_uuid = str(uuid.uuid4())
equajson = \
{
"copying": {
"authors": [
get_username()
],
"license-name": "MIT (Expat) License",
"license-url": "http://opensource.org/licenses/MIT",
"year": get_year(),
},
"description": {
"verbose": "<FIXME>",
"terse": "<FIXME>",
},
"markup-languages": {
"LaTeX": [
{
"markup": "<FIXME>"
}
]
},
"relevant-urls": [
{
"url": "<FIXME>"
}
],
"unicode-pretty-print": {
"multiline": [
"<FIXME>",
"<FIXME>",
"<FIXME>",
],
"one-line": "<FIXME>",
"parameters": {
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
},
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
}
}
},
"uuid": new_uuid
}
root = sys.path[0]
new_filename = new_uuid + '.json'
new_filepath = os.path.join(root, 'equajson', new_filename)
with open(new_filepath, 'w') as new_file:
json.dump(equajson, new_file, indent=4, separators=(',', ': '), sort_keys=True)
print('Created new equation:\n{}'.format(new_filepath))
|
<commit_before><commit_msg>Add an equation generator script.<commit_after>#! /usr/bin/env python
from __future__ import print_function
import datetime
import os
import sys
import json
import uuid
if len(sys.argv) > 1:
sys.stderr.write("Usage: python "+sys.argv[0]+" 'command-invocation'"+'\n')
sys.exit(1)
def get_year():
now = datetime.datetime.now()
return now.year
def get_username():
try:
# POSIX only
import pwd
gecos_field = pwd.getpwuid(os.getuid()).pw_gecos
full_name = gecos_field.split(',')[0]
return full_name
except ImportError:
import getpass
return getpass.getuser()
new_uuid = str(uuid.uuid4())
equajson = \
{
"copying": {
"authors": [
get_username()
],
"license-name": "MIT (Expat) License",
"license-url": "http://opensource.org/licenses/MIT",
"year": get_year(),
},
"description": {
"verbose": "<FIXME>",
"terse": "<FIXME>",
},
"markup-languages": {
"LaTeX": [
{
"markup": "<FIXME>"
}
]
},
"relevant-urls": [
{
"url": "<FIXME>"
}
],
"unicode-pretty-print": {
"multiline": [
"<FIXME>",
"<FIXME>",
"<FIXME>",
],
"one-line": "<FIXME>",
"parameters": {
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
},
"<FIXME>" : {
"label": "<FIXME>",
"dimensionless": "<FIXME>",
"fixed-constant": "<FIXME>",
"urls": [
"<FIXME>"
]
}
}
},
"uuid": new_uuid
}
root = sys.path[0]
new_filename = new_uuid + '.json'
new_filepath = os.path.join(root, 'equajson', new_filename)
with open(new_filepath, 'w') as new_file:
json.dump(equajson, new_file, indent=4, separators=(',', ': '), sort_keys=True)
print('Created new equation:\n{}'.format(new_filepath))
|
|
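A note on the record above: the generator only stamps out a skeleton, so every "<FIXME>" placeholder still has to be filled in by hand before the file is useful. As a rough companion sketch (not part of the recorded commit; it assumes Python 3 and the equajson/ layout the script creates), a check along these lines could flag files committed with placeholders left in:

#!/usr/bin/env python
# check_fixmes.py -- illustrative companion sketch, not from the recorded repository.
import json
import os
import sys

def find_fixmes(value, path=""):
    """Recursively yield JSON paths whose value still contains '<FIXME>'."""
    if isinstance(value, dict):
        for key, item in value.items():
            for hit in find_fixmes(item, path + "/" + key):
                yield hit
    elif isinstance(value, list):
        for index, item in enumerate(value):
            for hit in find_fixmes(item, "{}[{}]".format(path, index)):
                yield hit
    elif isinstance(value, str) and "<FIXME>" in value:
        yield path

if __name__ == "__main__":
    root = os.path.join(sys.path[0], "equajson")
    for name in sorted(os.listdir(root)):
        if not name.endswith(".json"):
            continue
        with open(os.path.join(root, name)) as handle:
            for hit in find_fixmes(json.load(handle)):
                print("{}: unfilled field at {}".format(name, hit))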
374117742479ce7d0d31a5c059faa94a94a8b398
|
gv.py
|
gv.py
|
#!/usr/bin/python
import sys
import argparse
from graphviz import Digraph
parser = argparse.ArgumentParser(
description="Generates a GraphViz file from *.graph and *.data files."
)
parser.add_argument("--data", type=argparse.FileType("r"),
help="Data input file.")
parser.add_argument("--graph", type=argparse.FileType("r"),
help="Graph input file.")
if __name__ == "__main__":
args = parser.parse_args()
dot = Digraph(
graph_attr={'size':'64,40'},
node_attr={'shape':'circle', 'label':'', 'style':'filled',
'fillcolor':'lightskyblue2', 'nodesep':'1.0', 'ranksep':'1.0'},
edge_attr={'weight':'1.5'}
)
for line in args.data:
node, value = line.split(",", 1)
dot.node(node)
for line in args.graph:
a, b = line.split(",", 1)
dot.edge(a, b)
sys.stdout.write(dot.source)
|
Add script to generate GraphViz files from .graph and .data files.
|
Add script to generate GraphViz files from .graph and .data files.
|
Python
|
mit
|
ucsb-igert/slice-tree,ucsb-igert/slice-tree,ucsb-igert/slice-tree,ucsb-igert/slice-tree
|
Add script to generate GraphViz files from .graph and .data files.
|
#!/usr/bin/python
import sys
import argparse
from graphviz import Digraph
parser = argparse.ArgumentParser(
description="Generates a GraphViz file from *.graph and *.data files."
)
parser.add_argument("--data", type=argparse.FileType("r"),
help="Data input file.")
parser.add_argument("--graph", type=argparse.FileType("r"),
help="Graph input file.")
if __name__ == "__main__":
args = parser.parse_args()
dot = Digraph(
graph_attr={'size':'64,40'},
node_attr={'shape':'circle', 'label':'', 'style':'filled',
'fillcolor':'lightskyblue2', 'nodesep':'1.0', 'ranksep':'1.0'},
edge_attr={'weight':'1.5'}
)
for line in args.data:
node, value = line.split(",", 1)
dot.node(node)
for line in args.graph:
a, b = line.split(",", 1)
dot.edge(a, b)
sys.stdout.write(dot.source)
|
<commit_before><commit_msg>Add script to generate GraphViz files from .graph and .data files.<commit_after>
|
#!/usr/bin/python
import sys
import argparse
from graphviz import Digraph
parser = argparse.ArgumentParser(
description="Generates a GraphViz file from *.graph and *.data files."
)
parser.add_argument("--data", type=argparse.FileType("r"),
help="Data input file.")
parser.add_argument("--graph", type=argparse.FileType("r"),
help="Graph input file.")
if __name__ == "__main__":
args = parser.parse_args()
dot = Digraph(
graph_attr={'size':'64,40'},
node_attr={'shape':'circle', 'label':'', 'style':'filled',
'fillcolor':'lightskyblue2', 'nodesep':'1.0', 'ranksep':'1.0'},
edge_attr={'weight':'1.5'}
)
for line in args.data:
node, value = line.split(",", 1)
dot.node(node)
for line in args.graph:
a, b = line.split(",", 1)
dot.edge(a, b)
sys.stdout.write(dot.source)
|
Add script to generate GraphViz files from .graph and .data files.#!/usr/bin/python
import sys
import argparse
from graphviz import Digraph
parser = argparse.ArgumentParser(
description="Generates a GraphViz file from *.graph and *.data files."
)
parser.add_argument("--data", type=argparse.FileType("r"),
help="Data input file.")
parser.add_argument("--graph", type=argparse.FileType("r"),
help="Graph input file.")
if __name__ == "__main__":
args = parser.parse_args()
dot = Digraph(
graph_attr={'size':'64,40'},
node_attr={'shape':'circle', 'label':'', 'style':'filled',
'fillcolor':'lightskyblue2', 'nodesep':'1.0', 'ranksep':'1.0'},
edge_attr={'weight':'1.5'}
)
for line in args.data:
node, value = line.split(",", 1)
dot.node(node)
for line in args.graph:
a, b = line.split(",", 1)
dot.edge(a, b)
sys.stdout.write(dot.source)
|
<commit_before><commit_msg>Add script to generate GraphViz files from .graph and .data files.<commit_after>#!/usr/bin/python
import sys
import argparse
from graphviz import Digraph
parser = argparse.ArgumentParser(
description="Generates a GraphViz file from *.graph and *.data files."
)
parser.add_argument("--data", type=argparse.FileType("r"),
help="Data input file.")
parser.add_argument("--graph", type=argparse.FileType("r"),
help="Graph input file.")
if __name__ == "__main__":
args = parser.parse_args()
dot = Digraph(
graph_attr={'size':'64,40'},
node_attr={'shape':'circle', 'label':'', 'style':'filled',
'fillcolor':'lightskyblue2', 'nodesep':'1.0', 'ranksep':'1.0'},
edge_attr={'weight':'1.5'}
)
for line in args.data:
node, value = line.split(",", 1)
dot.node(node)
for line in args.graph:
a, b = line.split(",", 1)
dot.edge(a, b)
sys.stdout.write(dot.source)
|
|
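For orientation on the record above (the file names and values below are assumptions; the commit itself ships no usage notes), the script reads plain comma-separated lines, one "node,value" pair per line from the --data file and one "a,b" edge per line from the --graph file, and writes DOT source to stdout. A minimal way to exercise it:

# make_toy_inputs.py -- illustrative only; toy.* names and numbers are made up.
import subprocess

with open("toy.data", "w") as data_file:
    data_file.write("1,0.5\n2,1.0\n3,0.25\n")    # one node,value pair per line
with open("toy.graph", "w") as graph_file:
    graph_file.write("1,2\n2,3\n")                # one a,b edge per line

# gv.py prints DOT source on stdout; capture it and hand it to Graphviz later.
dot_source = subprocess.check_output(
    ["python", "gv.py", "--data", "toy.data", "--graph", "toy.graph"])
with open("toy.dot", "wb") as out_file:
    out_file.write(dot_source)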
cd61764cfd3f8cd188a2650508fe3216a231d5a7
|
plugins/misc.py
|
plugins/misc.py
|
# Copyright (c) 2013-2014 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from plugins.util import admin
@admin("set")
def setcommand(m):
"""Adjust the settings on a command."""
if len(m.line) < 3:
m.bot.private_message(m.location, "Please format the command: !set [command] [setting]")
else:
m.bot.command_settings[m.line[1].lower()] = m.line[2].lower()
print(m.bot.command_settings)
|
Allow command settings to be saved
|
Allow command settings to be saved
|
Python
|
mit
|
molly/GorillaBot,quanticle/GorillaBot,molly/GorillaBot,quanticle/GorillaBot
|
Allow command settings to be saved
|
# Copyright (c) 2013-2014 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from plugins.util import admin
@admin("set")
def setcommand(m):
"""Adjust the settings on a command."""
if len(m.line) < 3:
m.bot.private_message(m.location, "Please format the command: !set [command] [setting]")
else:
m.bot.command_settings[m.line[1].lower()] = m.line[2].lower()
print(m.bot.command_settings)
|
<commit_before><commit_msg>Allow command settings to be saved<commit_after>
|
# Copyright (c) 2013-2014 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from plugins.util import admin
@admin("set")
def setcommand(m):
"""Adjust the settings on a command."""
if len(m.line) < 3:
m.bot.private_message(m.location, "Please format the command: !set [command] [setting]")
else:
m.bot.command_settings[m.line[1].lower()] = m.line[2].lower()
print(m.bot.command_settings)
|
Allow command settings to be saved# Copyright (c) 2013-2014 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from plugins.util import admin
@admin("set")
def setcommand(m):
"""Adjust the settings on a command."""
if len(m.line) < 3:
m.bot.private_message(m.location, "Please format the command: !set [command] [setting]")
else:
m.bot.command_settings[m.line[1].lower()] = m.line[2].lower()
print(m.bot.command_settings)
|
<commit_before><commit_msg>Allow command settings to be saved<commit_after># Copyright (c) 2013-2014 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from plugins.util import admin
@admin("set")
def setcommand(m):
"""Adjust the settings on a command."""
if len(m.line) < 3:
m.bot.private_message(m.location, "Please format the command: !set [command] [setting]")
else:
m.bot.command_settings[m.line[1].lower()] = m.line[2].lower()
print(m.bot.command_settings)
|
|
da71a95586f17de48cb1067a8809da1e583b42cf
|
other/wrapping-cpp/swig/cpointerproblem/test_examples.py
|
other/wrapping-cpp/swig/cpointerproblem/test_examples.py
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
|
Add pytest.raises for test that fails on purpose.
|
Add pytest.raises for test that fails on purpose.
|
Python
|
bsd-2-clause
|
ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
Add pytest.raises for test that fails on purpose.
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
|
<commit_before>"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
<commit_msg>Add pytest.raises for test that fails on purpose.<commit_after>
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
|
"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
Add pytest.raises for test that fails on purpose."""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
|
<commit_before>"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
<commit_msg>Add pytest.raises for test that fails on purpose.<commit_after>"""
The code this example is all based on is from http://tinyurl.com/pmmnbxv
Some notes on this in the oommf-devnotes repo
"""
import os
import pytest
#print("pwd:")
#os.system('pwd')
#import subprocess
#subprocess.check_output('pwd')
os.system('make all')
import example1
def test_f():
assert example1.f(1) - 1 <= 10 ** -7
def test_myfun():
"""Demonstrate that calling code with wrong object type results
in TypeError exception."""
with pytest.raises(TypeError):
assert example1.myfun(example1.f, 2.0) - 4.0 <= 10 ** -7
os.system('make alternate')
import example2
def test2_f():
assert example2.f(1) - 1 <= 10 ** -7
def test2_myfun():
assert example2.myfun(example2.f, 2.0) - 4.0 <= 10 ** -7
os.system('make clean')
|
644038ee51fa4219b96ef7a8edbebe9e6310cedf
|
plot_scores.py
|
plot_scores.py
|
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
args = parser.parse_args()
scores_path = os.path.join(args.dir, 'scores.txt')
scores = pd.read_csv(scores_path, names=('t', 'score'), delimiter=' ')
scores.plot(x='t', y='score')
plt.show()
if __name__ == '__main__':
main()
|
Add a script to plot scores
|
Add a script to plot scores
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
Add a script to plot scores
|
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
args = parser.parse_args()
scores_path = os.path.join(args.dir, 'scores.txt')
scores = pd.read_csv(scores_path, names=('t', 'score'), delimiter=' ')
scores.plot(x='t', y='score')
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to plot scores<commit_after>
|
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
args = parser.parse_args()
scores_path = os.path.join(args.dir, 'scores.txt')
scores = pd.read_csv(scores_path, names=('t', 'score'), delimiter=' ')
scores.plot(x='t', y='score')
plt.show()
if __name__ == '__main__':
main()
|
Add a script to plot scoresimport argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
args = parser.parse_args()
scores_path = os.path.join(args.dir, 'scores.txt')
scores = pd.read_csv(scores_path, names=('t', 'score'), delimiter=' ')
scores.plot(x='t', y='score')
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to plot scores<commit_after>import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
args = parser.parse_args()
scores_path = os.path.join(args.dir, 'scores.txt')
scores = pd.read_csv(scores_path, names=('t', 'score'), delimiter=' ')
scores.plot(x='t', y='score')
plt.show()
if __name__ == '__main__':
main()
|
|
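An aside on the record above: the expected input (inferred from the read_csv call, not documented in the commit) is a scores.txt inside the directory given on the command line, holding space-separated "t score" pairs, one evaluation per line. A throwaway file like this is enough to drive the plot; the directory name and values are assumptions:

# make_dummy_scores.py -- illustrative only, Python 3.
import os
import random

os.makedirs("dummy_run", exist_ok=True)
with open(os.path.join("dummy_run", "scores.txt"), "w") as scores_file:
    score = 0.0
    for step in range(0, 100000, 10000):
        score += random.random()              # pretend the agent keeps improving
        scores_file.write("{} {}\n".format(step, score))
# afterwards: python plot_scores.py dummy_run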
1ddd0c97059301614d4043fc3f749f3247f19599
|
utilities/duplicate_cleaner.py
|
utilities/duplicate_cleaner.py
|
"""Removes duplicate entries from dictionaries"""
import sys
import argparse
sys.path.append('../')
import namealizer
def main(dict_path, stat_path):
dictionary = namealizer.import_dictionary(dict_path)
sorted_words = []
updated = {}
running_total = 0
statistics = {
"old": {},
"new": {}
}
print("Removing duplicates")
for key, item in dictionary.items():
statistics["old"][key] = len(item)
updated[key] = sorted(set(item))
statistics["new"][key] = len(updated[key])
running_total += len(item) - len(updated[key])
print(key, len(item), len(updated[key]))
for key, item in updated.items():
for word in item:
sorted_words.append(word)
print("Beginning sort")
sorted_words = sorted(sorted_words)
print("Done sorting")
# write out the dictionary
with open(dict_path, "w") as dict_file:
for word in sorted_words:
dict_file.write(str(word) + '\n')
# write out the statistics
if stat_path is not None:
with open(stat_path, "w") as stat_file:
stat_file.write(str(statistics))
print("Removed a total of: {} words".format(running_total))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("dictionary",
type=str,
help="Dictionary to clean, will be overwritten")
parser.add_argument("statistics",
type=str,
nargs="?",
default=None,
help="Where to put the statistics log")
args = parser.parse_args()
main(args.dictionary, args.statistics)
|
Add script to remove duplicates from dictionaries
|
Add script to remove duplicates from dictionaries
|
Python
|
mit
|
LeonardMH/namealizer
|
Add script to remove duplicates from dictionaries
|
"""Removes duplicate entries from dictionaries"""
import sys
import argparse
sys.path.append('../')
import namealizer
def main(dict_path, stat_path):
dictionary = namealizer.import_dictionary(dict_path)
sorted_words = []
updated = {}
running_total = 0
statistics = {
"old": {},
"new": {}
}
print("Removing duplicates")
for key, item in dictionary.items():
statistics["old"][key] = len(item)
updated[key] = sorted(set(item))
statistics["new"][key] = len(updated[key])
running_total += len(item) - len(updated[key])
print(key, len(item), len(updated[key]))
for key, item in updated.items():
for word in item:
sorted_words.append(word)
print("Beginning sort")
sorted_words = sorted(sorted_words)
print("Done sorting")
# write out the dictionary
with open(dict_path, "w") as dict_file:
for word in sorted_words:
dict_file.write(str(word) + '\n')
# write out the statistics
if stat_path is not None:
with open(stat_path, "w") as stat_file:
stat_file.write(str(statistics))
print("Removed a total of: {} words".format(running_total))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("dictionary",
type=str,
help="Dictionary to clean, will be overwritten")
parser.add_argument("statistics",
type=str,
nargs="?",
default=None,
help="Where to put the statistics log")
args = parser.parse_args()
main(args.dictionary, args.statistics)
|
<commit_before><commit_msg>Add script to remove duplicates from dictionaries<commit_after>
|
"""Removes duplicate entries from dictionaries"""
import sys
import argparse
sys.path.append('../')
import namealizer
def main(dict_path, stat_path):
dictionary = namealizer.import_dictionary(dict_path)
sorted_words = []
updated = {}
running_total = 0
statistics = {
"old": {},
"new": {}
}
print("Removing duplicates")
for key, item in dictionary.items():
statistics["old"][key] = len(item)
updated[key] = sorted(set(item))
statistics["new"][key] = len(updated[key])
running_total += len(item) - len(updated[key])
print(key, len(item), len(updated[key]))
for key, item in updated.items():
for word in item:
sorted_words.append(word)
print("Beginning sort")
sorted_words = sorted(sorted_words)
print("Done sorting")
# write out the dictionary
with open(dict_path, "w") as dict_file:
for word in sorted_words:
dict_file.write(str(word) + '\n')
# write out the statistics
if stat_path is not None:
with open(stat_path, "w") as stat_file:
stat_file.write(str(statistics))
print("Removed a total of: {} words".format(running_total))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("dictionary",
type=str,
help="Dictionary to clean, will be overwritten")
parser.add_argument("statistics",
type=str,
nargs="?",
default=None,
help="Where to put the statistics log")
args = parser.parse_args()
main(args.dictionary, args.statistics)
|
Add script to remove duplicates from dictionaries"""Removes duplicate entries from dictionaries"""
import sys
import argparse
sys.path.append('../')
import namealizer
def main(dict_path, stat_path):
dictionary = namealizer.import_dictionary(dict_path)
sorted_words = []
updated = {}
running_total = 0
statistics = {
"old": {},
"new": {}
}
print("Removing duplicates")
for key, item in dictionary.items():
statistics["old"][key] = len(item)
updated[key] = sorted(set(item))
statistics["new"][key] = len(updated[key])
running_total += len(item) - len(updated[key])
print(key, len(item), len(updated[key]))
for key, item in updated.items():
for word in item:
sorted_words.append(word)
print("Beginning sort")
sorted_words = sorted(sorted_words)
print("Done sorting")
# write out the dictionary
with open(dict_path, "w") as dict_file:
for word in sorted_words:
dict_file.write(str(word) + '\n')
# write out the statistics
if stat_path is not None:
with open(stat_path, "w") as stat_file:
stat_file.write(str(statistics))
print("Removed a total of: {} words".format(running_total))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("dictionary",
type=str,
help="Dictionary to clean, will be overwritten")
parser.add_argument("statistics",
type=str,
nargs="?",
default=None,
help="Where to put the statistics log")
args = parser.parse_args()
main(args.dictionary, args.statistics)
|
<commit_before><commit_msg>Add script to remove duplicates from dictionaries<commit_after>"""Removes duplicate entries from dictionaries"""
import sys
import argparse
sys.path.append('../')
import namealizer
def main(dict_path, stat_path):
dictionary = namealizer.import_dictionary(dict_path)
sorted_words = []
updated = {}
running_total = 0
statistics = {
"old": {},
"new": {}
}
print("Removing duplicates")
for key, item in dictionary.items():
statistics["old"][key] = len(item)
updated[key] = sorted(set(item))
statistics["new"][key] = len(updated[key])
running_total += len(item) - len(updated[key])
print(key, len(item), len(updated[key]))
for key, item in updated.items():
for word in item:
sorted_words.append(word)
print("Beginning sort")
sorted_words = sorted(sorted_words)
print("Done sorting")
# write out the dictionary
with open(dict_path, "w") as dict_file:
for word in sorted_words:
dict_file.write(str(word) + '\n')
# write out the statistics
if stat_path is not None:
with open(stat_path, "w") as stat_file:
stat_file.write(str(statistics))
print("Removed a total of: {} words".format(running_total))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("dictionary",
type=str,
help="Dictionary to clean, will be overwritten")
parser.add_argument("statistics",
type=str,
nargs="?",
default=None,
help="Where to put the statistics log")
args = parser.parse_args()
main(args.dictionary, args.statistics)
|
|
e7da5713a676c48248e51c0f8692d0ef5630df4f
|
png/makepng.py
|
png/makepng.py
|
#!/usr/bin/python
import png
device_size = [0]
chunks = []
width = 1200
height = 800
num_pixels = width * height
pixels = [[] for x in xrange(num_pixels)]
lines = open('output').read().splitlines()
for line in lines:
fields = line.split()
if fields[0] == 'chunk':
chunks.append({
'type': int(fields[4]),
'devid': int(fields[8]),
'offset': int(fields[10]),
'length': int(fields[12]),
'used': int(fields[14]),
})
elif fields[0] == 'dev':
device_size.append(int(fields[6]))
device_offset = []
for i in range(len(device_size)):
device_offset.append(sum(device_size[:i]))
bytes_per_pixel = float(sum(device_size)) / num_pixels
print("bytes per pixel: %s" % bytes_per_pixel)
for chunk in chunks:
first_byte = device_offset[chunk['devid']] + chunk['offset']
last_byte = first_byte + chunk['length'] - 1
used_pct = float(chunk['used']) / float(chunk['length'])
first_pixel = int(first_byte / bytes_per_pixel)
last_pixel = int(last_byte / bytes_per_pixel)
if first_pixel == last_pixel:
pct_of_pixel = chunk['length'] / bytes_per_pixel
pixels[first_pixel].append((pct_of_pixel, used_pct))
else:
pct_of_first_pixel = (bytes_per_pixel - (first_byte % bytes_per_pixel)) / bytes_per_pixel
pixels[first_pixel].append((pct_of_first_pixel, used_pct))
for intermediate_pixel in xrange(first_pixel + 1, last_pixel):
pixels[intermediate_pixel].append((1, used_pct))
pct_of_last_pixel = (last_byte % bytes_per_pixel) / bytes_per_pixel
pixels[last_pixel].append((pct_of_last_pixel, used_pct))
for i in xrange(len(pixels)):
if isinstance(pixels[i], list):
if len(pixels[i]) == 0:
pixels[i] = 0
else:
gradient = 0
for pct, used in pixels[i]:
gradient = gradient + (255 * pct * used)
pixels[i] = int(gradient)
else:
pixels[i] = int(255 * pixels[i])
png_grid = []
for i in range(0, len(pixels), width):
png_grid.append(pixels[i:i+width])
png.from_array(png_grid, 'L').save("heatmap.png")
|
Convert output into png image
|
Convert output into png image
|
Python
|
mit
|
knorrie/btrfs-heatmap
|
Convert output into png image
|
#!/usr/bin/python
import png
device_size = [0]
chunks = []
width = 1200
height = 800
num_pixels = width * height
pixels = [[] for x in xrange(num_pixels)]
lines = open('output').read().splitlines()
for line in lines:
fields = line.split()
if fields[0] == 'chunk':
chunks.append({
'type': int(fields[4]),
'devid': int(fields[8]),
'offset': int(fields[10]),
'length': int(fields[12]),
'used': int(fields[14]),
})
elif fields[0] == 'dev':
device_size.append(int(fields[6]))
device_offset = []
for i in range(len(device_size)):
device_offset.append(sum(device_size[:i]))
bytes_per_pixel = float(sum(device_size)) / num_pixels
print("bytes per pixel: %s" % bytes_per_pixel)
for chunk in chunks:
first_byte = device_offset[chunk['devid']] + chunk['offset']
last_byte = first_byte + chunk['length'] - 1
used_pct = float(chunk['used']) / float(chunk['length'])
first_pixel = int(first_byte / bytes_per_pixel)
last_pixel = int(last_byte / bytes_per_pixel)
if first_pixel == last_pixel:
pct_of_pixel = chunk['length'] / bytes_per_pixel
pixels[first_pixel].append((pct_of_pixel, used_pct))
else:
pct_of_first_pixel = (bytes_per_pixel - (first_byte % bytes_per_pixel)) / bytes_per_pixel
pixels[first_pixel].append((pct_of_first_pixel, used_pct))
for intermediate_pixel in xrange(first_pixel + 1, last_pixel):
pixels[intermediate_pixel].append((1, used_pct))
pct_of_last_pixel = (last_byte % bytes_per_pixel) / bytes_per_pixel
pixels[last_pixel].append((pct_of_last_pixel, used_pct))
for i in xrange(len(pixels)):
if isinstance(pixels[i], list):
if len(pixels[i]) == 0:
pixels[i] = 0
else:
gradient = 0
for pct, used in pixels[i]:
gradient = gradient + (255 * pct * used)
pixels[i] = int(gradient)
else:
pixels[i] = int(255 * pixels[i])
png_grid = []
for i in range(0, len(pixels), width):
png_grid.append(pixels[i:i+width])
png.from_array(png_grid, 'L').save("heatmap.png")
|
<commit_before><commit_msg>Convert output into png image<commit_after>
|
#!/usr/bin/python
import png
device_size = [0]
chunks = []
width = 1200
height = 800
num_pixels = width * height
pixels = [[] for x in xrange(num_pixels)]
lines = open('output').read().splitlines()
for line in lines:
fields = line.split()
if fields[0] == 'chunk':
chunks.append({
'type': int(fields[4]),
'devid': int(fields[8]),
'offset': int(fields[10]),
'length': int(fields[12]),
'used': int(fields[14]),
})
elif fields[0] == 'dev':
device_size.append(int(fields[6]))
device_offset = []
for i in range(len(device_size)):
device_offset.append(sum(device_size[:i]))
bytes_per_pixel = float(sum(device_size)) / num_pixels
print("bytes per pixel: %s" % bytes_per_pixel)
for chunk in chunks:
first_byte = device_offset[chunk['devid']] + chunk['offset']
last_byte = first_byte + chunk['length'] - 1
used_pct = float(chunk['used']) / float(chunk['length'])
first_pixel = int(first_byte / bytes_per_pixel)
last_pixel = int(last_byte / bytes_per_pixel)
if first_pixel == last_pixel:
pct_of_pixel = chunk['length'] / bytes_per_pixel
pixels[first_pixel].append((pct_of_pixel, used_pct))
else:
pct_of_first_pixel = (bytes_per_pixel - (first_byte % bytes_per_pixel)) / bytes_per_pixel
pixels[first_pixel].append((pct_of_first_pixel, used_pct))
for intermediate_pixel in xrange(first_pixel + 1, last_pixel):
pixels[intermediate_pixel].append((1, used_pct))
pct_of_last_pixel = (last_byte % bytes_per_pixel) / bytes_per_pixel
pixels[last_pixel].append((pct_of_last_pixel, used_pct))
for i in xrange(len(pixels)):
if isinstance(pixels[i], list):
if len(pixels[i]) == 0:
pixels[i] = 0
else:
gradient = 0
for pct, used in pixels[i]:
gradient = gradient + (255 * pct * used)
pixels[i] = int(gradient)
else:
pixels[i] = int(255 * pixels[i])
png_grid = []
for i in range(0, len(pixels), width):
png_grid.append(pixels[i:i+width])
png.from_array(png_grid, 'L').save("heatmap.png")
|
Convert output into png image#!/usr/bin/python
import png
device_size = [0]
chunks = []
width = 1200
height = 800
num_pixels = width * height
pixels = [[] for x in xrange(num_pixels)]
lines = open('output').read().splitlines()
for line in lines:
fields = line.split()
if fields[0] == 'chunk':
chunks.append({
'type': int(fields[4]),
'devid': int(fields[8]),
'offset': int(fields[10]),
'length': int(fields[12]),
'used': int(fields[14]),
})
elif fields[0] == 'dev':
device_size.append(int(fields[6]))
device_offset = []
for i in range(len(device_size)):
device_offset.append(sum(device_size[:i]))
bytes_per_pixel = float(sum(device_size)) / num_pixels
print("bytes per pixel: %s" % bytes_per_pixel)
for chunk in chunks:
first_byte = device_offset[chunk['devid']] + chunk['offset']
last_byte = first_byte + chunk['length'] - 1
used_pct = float(chunk['used']) / float(chunk['length'])
first_pixel = int(first_byte / bytes_per_pixel)
last_pixel = int(last_byte / bytes_per_pixel)
if first_pixel == last_pixel:
pct_of_pixel = chunk['length'] / bytes_per_pixel
pixels[first_pixel].append((pct_of_pixel, used_pct))
else:
pct_of_first_pixel = (bytes_per_pixel - (first_byte % bytes_per_pixel)) / bytes_per_pixel
pixels[first_pixel].append((pct_of_first_pixel, used_pct))
for intermediate_pixel in xrange(first_pixel + 1, last_pixel):
pixels[intermediate_pixel].append((1, used_pct))
pct_of_last_pixel = (last_byte % bytes_per_pixel) / bytes_per_pixel
pixels[last_pixel].append((pct_of_last_pixel, used_pct))
for i in xrange(len(pixels)):
if isinstance(pixels[i], list):
if len(pixels[i]) == 0:
pixels[i] = 0
else:
gradient = 0
for pct, used in pixels[i]:
gradient = gradient + (255 * pct * used)
pixels[i] = int(gradient)
else:
pixels[i] = int(255 * pixels[i])
png_grid = []
for i in range(0, len(pixels), width):
png_grid.append(pixels[i:i+width])
png.from_array(png_grid, 'L').save("heatmap.png")
|
<commit_before><commit_msg>Convert output into png image<commit_after>#!/usr/bin/python
import png
device_size = [0]
chunks = []
width = 1200
height = 800
num_pixels = width * height
pixels = [[] for x in xrange(num_pixels)]
lines = open('output').read().splitlines()
for line in lines:
fields = line.split()
if fields[0] == 'chunk':
chunks.append({
'type': int(fields[4]),
'devid': int(fields[8]),
'offset': int(fields[10]),
'length': int(fields[12]),
'used': int(fields[14]),
})
elif fields[0] == 'dev':
device_size.append(int(fields[6]))
device_offset = []
for i in range(len(device_size)):
device_offset.append(sum(device_size[:i]))
bytes_per_pixel = float(sum(device_size)) / num_pixels
print("bytes per pixel: %s" % bytes_per_pixel)
for chunk in chunks:
first_byte = device_offset[chunk['devid']] + chunk['offset']
last_byte = first_byte + chunk['length'] - 1
used_pct = float(chunk['used']) / float(chunk['length'])
first_pixel = int(first_byte / bytes_per_pixel)
last_pixel = int(last_byte / bytes_per_pixel)
if first_pixel == last_pixel:
pct_of_pixel = chunk['length'] / bytes_per_pixel
pixels[first_pixel].append((pct_of_pixel, used_pct))
else:
pct_of_first_pixel = (bytes_per_pixel - (first_byte % bytes_per_pixel)) / bytes_per_pixel
pixels[first_pixel].append((pct_of_first_pixel, used_pct))
for intermediate_pixel in xrange(first_pixel + 1, last_pixel):
pixels[intermediate_pixel].append((1, used_pct))
pct_of_last_pixel = (last_byte % bytes_per_pixel) / bytes_per_pixel
pixels[last_pixel].append((pct_of_last_pixel, used_pct))
for i in xrange(len(pixels)):
if isinstance(pixels[i], list):
if len(pixels[i]) == 0:
pixels[i] = 0
else:
gradient = 0
for pct, used in pixels[i]:
gradient = gradient + (255 * pct * used)
pixels[i] = int(gradient)
else:
pixels[i] = int(255 * pixels[i])
png_grid = []
for i in range(0, len(pixels), width):
png_grid.append(pixels[i:i+width])
png.from_array(png_grid, 'L').save("heatmap.png")
|
|
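A back-of-the-envelope check of the mapping in the record above (the 100 GiB device size is invented for illustration; the 1200x800 canvas comes from the script): each pixel stands for roughly 109 KiB, and a 1 GiB chunk spans about 9,600 pixels, or 8 full rows of the image.

# Arithmetic sketch only; constants mirror makepng.py, the device size is assumed.
width, height = 1200, 800
num_pixels = width * height                   # 960,000 pixels in the heatmap
device_bytes = 100 * 1024 ** 3                # one 100 GiB device, for example
bytes_per_pixel = float(device_bytes) / num_pixels
print(bytes_per_pixel)                        # ~111,848 bytes (~109 KiB) per pixel
chunk_bytes = 1024 ** 3                       # a typical 1 GiB data chunk
print(chunk_bytes / bytes_per_pixel)          # ~9,600 pixels, i.e. 8 full rows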
4536fe1df5081c69ac736d217721557e7a182ba7
|
skyfield/tests/test_io_parsing.py
|
skyfield/tests/test_io_parsing.py
|
"""Tests of how well we parse various file formats."""
from skyfield.functions import BytesIO
from skyfield.iokit import parse_celestrak_tle
sample_celestrak_text = b"""\
ISS (ZARYA) \n\
1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998
2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452
FLOCK 2E-1 \n\
1 41483U 98067JD 18135.38689952 .00096183 14684-4 28212-3 0 9990
2 41483 51.6270 103.3896 0004826 61.7810 298.3684 15.92672255114129
"""
def test_celestrak():
f = BytesIO(sample_celestrak_text)
d = dict(parse_celestrak_tle(f))
assert len(d) == 4
assert d['ISS'] == d['ISS (ZARYA)'] == d['ZARYA']
assert d['FLOCK 2E-1']
|
Add a test for our existing Celestrak parsing
|
Add a test for our existing Celestrak parsing
|
Python
|
mit
|
skyfielders/python-skyfield,skyfielders/python-skyfield
|
Add a test for our existing Celestrak parsing
|
"""Tests of how well we parse various file formats."""
from skyfield.functions import BytesIO
from skyfield.iokit import parse_celestrak_tle
sample_celestrak_text = b"""\
ISS (ZARYA) \n\
1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998
2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452
FLOCK 2E-1 \n\
1 41483U 98067JD 18135.38689952 .00096183 14684-4 28212-3 0 9990
2 41483 51.6270 103.3896 0004826 61.7810 298.3684 15.92672255114129
"""
def test_celestrak():
f = BytesIO(sample_celestrak_text)
d = dict(parse_celestrak_tle(f))
assert len(d) == 4
assert d['ISS'] == d['ISS (ZARYA)'] == d['ZARYA']
assert d['FLOCK 2E-1']
|
<commit_before><commit_msg>Add a test for our existing Celestrak parsing<commit_after>
|
"""Tests of how well we parse various file formats."""
from skyfield.functions import BytesIO
from skyfield.iokit import parse_celestrak_tle
sample_celestrak_text = b"""\
ISS (ZARYA) \n\
1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998
2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452
FLOCK 2E-1 \n\
1 41483U 98067JD 18135.38689952 .00096183 14684-4 28212-3 0 9990
2 41483 51.6270 103.3896 0004826 61.7810 298.3684 15.92672255114129
"""
def test_celestrak():
f = BytesIO(sample_celestrak_text)
d = dict(parse_celestrak_tle(f))
assert len(d) == 4
assert d['ISS'] == d['ISS (ZARYA)'] == d['ZARYA']
assert d['FLOCK 2E-1']
|
Add a test for our existing Celestrak parsing"""Tests of how well we parse various file formats."""
from skyfield.functions import BytesIO
from skyfield.iokit import parse_celestrak_tle
sample_celestrak_text = b"""\
ISS (ZARYA) \n\
1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998
2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452
FLOCK 2E-1 \n\
1 41483U 98067JD 18135.38689952 .00096183 14684-4 28212-3 0 9990
2 41483 51.6270 103.3896 0004826 61.7810 298.3684 15.92672255114129
"""
def test_celestrak():
f = BytesIO(sample_celestrak_text)
d = dict(parse_celestrak_tle(f))
assert len(d) == 4
assert d['ISS'] == d['ISS (ZARYA)'] == d['ZARYA']
assert d['FLOCK 2E-1']
|
<commit_before><commit_msg>Add a test for our existing Celestrak parsing<commit_after>"""Tests of how well we parse various file formats."""
from skyfield.functions import BytesIO
from skyfield.iokit import parse_celestrak_tle
sample_celestrak_text = b"""\
ISS (ZARYA) \n\
1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998
2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452
FLOCK 2E-1 \n\
1 41483U 98067JD 18135.38689952 .00096183 14684-4 28212-3 0 9990
2 41483 51.6270 103.3896 0004826 61.7810 298.3684 15.92672255114129
"""
def test_celestrak():
f = BytesIO(sample_celestrak_text)
d = dict(parse_celestrak_tle(f))
assert len(d) == 4
assert d['ISS'] == d['ISS (ZARYA)'] == d['ZARYA']
assert d['FLOCK 2E-1']
|
|
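Why len(d) == 4 in the record above: the asserts imply that a parenthesized name yields the full name plus both halves as aliases, while a plain name maps only to itself, so two satellites produce 3 + 1 keys. A rough sketch of that aliasing (behaviour inferred from the test alone; the real parser is skyfield.iokit.parse_celestrak_tle):

# Illustrative sketch, not the library implementation.
def name_aliases(name):
    """'ISS (ZARYA)' -> ['ISS (ZARYA)', 'ISS', 'ZARYA']; plain names map to themselves."""
    name = name.strip()
    aliases = [name]
    if name.endswith(")") and "(" in name:
        head, _, tail = name.partition("(")
        aliases.append(head.strip())
        aliases.append(tail.rstrip(")").strip())
    return aliases

print(name_aliases("ISS (ZARYA)"))   # three dictionary keys
print(name_aliases("FLOCK 2E-1"))    # one key, so 3 + 1 == 4 entries overall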
28787be24b1c251200c52ff8d2abc70b356811d8
|
src/Scripts/sum-cachelines.py
|
src/Scripts/sum-cachelines.py
|
import csv
with open("/tmp/int/QueryPipelineStatistics.csv") as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
sum = 0
for row in reader:
sum += int(row[3])
print(sum)
|
Add quick script for cacheline counting.
|
Add quick script for cacheline counting.
|
Python
|
mit
|
BitFunnel/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,danluu/BitFunnel
|
Add quick script for cacheline counting.
|
import csv
with open("/tmp/int/QueryPipelineStatistics.csv") as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
sum = 0
for row in reader:
sum += int(row[3])
print(sum)
|
<commit_before><commit_msg>Add quick script for cacheline counting.<commit_after>
|
import csv
with open("/tmp/int/QueryPipelineStatistics.csv") as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
sum = 0
for row in reader:
sum += int(row[3])
print(sum)
|
Add quick script for cacheline counting.import csv
with open("/tmp/int/QueryPipelineStatistics.csv") as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
sum = 0
for row in reader:
sum += int(row[3])
print(sum)
|
<commit_before><commit_msg>Add quick script for cacheline counting.<commit_after>import csv
with open("/tmp/int/QueryPipelineStatistics.csv") as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
sum = 0
for row in reader:
sum += int(row[3])
print(sum)
|
|
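One detail worth flagging about the record above, as an observation from the header the script itself asserts rather than a change to the recorded commit: row[3] is the 'quadwords' column, while 'cachelines' sits at index 4. A cacheline total would therefore look more like this sketch:

# Illustrative variant only; the column index follows the asserted header.
import csv

with open("/tmp/int/QueryPipelineStatistics.csv") as f:
    reader = csv.reader(f)
    next(reader)                                  # skip the header row
    total = sum(int(row[4]) for row in reader)    # index 4 == 'cachelines'
print(total)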
b03476b85c94bdba0258f555b9e89b6e7c84f7e1
|
foobargoogle1.py
|
foobargoogle1.py
|
import math
def check_prime(number, primes):
limit = math.sqrt(number)
for prime in primes:
if prime > limit:
return True
if number % prime == 0:
return False
return True
def solution(i):
primes = []
number = 2
while len(primes) < (i + 5):
is_prime = check_prime(number, primes)
if is_prime:
primes.append(number)
number += 1
five_digit = "".join([str(x) for x in primes])[i : i + 5]
return five_digit
print(solution(0))
print(solution(3))
print(solution(30))
|
Add Google foobar question 1
|
Add Google foobar question 1
|
Python
|
mit
|
ismailsunni/scripts
|
Add Google foobar question 1
|
import math
def check_prime(number, primes):
limit = math.sqrt(number)
for prime in primes:
if prime > limit:
return True
if number % prime == 0:
return False
return True
def solution(i):
primes = []
number = 2
while len(primes) < (i + 5):
is_prime = check_prime(number, primes)
if is_prime:
primes.append(number)
number += 1
five_digit = "".join([str(x) for x in primes])[i : i + 5]
return five_digit
print(solution(0))
print(solution(3))
print(solution(30))
|
<commit_before><commit_msg>Add Google foobar question 1<commit_after>
|
import math
def check_prime(number, primes):
limit = math.sqrt(number)
for prime in primes:
if prime > limit:
return True
if number % prime == 0:
return False
return True
def solution(i):
primes = []
number = 2
while len(primes) < (i + 5):
is_prime = check_prime(number, primes)
if is_prime:
primes.append(number)
number += 1
five_digit = "".join([str(x) for x in primes])[i : i + 5]
return five_digit
print(solution(0))
print(solution(3))
print(solution(30))
|
Add Google foobar question 1import math
def check_prime(number, primes):
limit = math.sqrt(number)
for prime in primes:
if prime > limit:
return True
if number % prime == 0:
return False
return True
def solution(i):
primes = []
number = 2
while len(primes) < (i + 5):
is_prime = check_prime(number, primes)
if is_prime:
primes.append(number)
number += 1
five_digit = "".join([str(x) for x in primes])[i : i + 5]
return five_digit
print(solution(0))
print(solution(3))
print(solution(30))
|
<commit_before><commit_msg>Add Google foobar question 1<commit_after>import math
def check_prime(number, primes):
limit = math.sqrt(number)
for prime in primes:
if prime > limit:
return True
if number % prime == 0:
return False
return True
def solution(i):
primes = []
number = 2
while len(primes) < (i + 5):
is_prime = check_prime(number, primes)
if is_prime:
primes.append(number)
number += 1
five_digit = "".join([str(x) for x in primes])[i : i + 5]
return five_digit
print(solution(0))
print(solution(3))
print(solution(30))
|
|
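A worked check of the slicing in the record above (the values follow directly from the code, not from any official answer key): the first primes concatenate to "235711131719...", so index 0 gives "23571" and index 3 gives "71113". Generating i + 5 primes is always enough, because every prime contributes at least one digit to the concatenated string.

# Worked example mirroring solution(0) and solution(3).
digits = "".join(str(p) for p in [2, 3, 5, 7, 11, 13, 17, 19])
print(digits)        # "235711131719"
print(digits[0:5])   # "23571"  == solution(0)
print(digits[3:8])   # "71113"  == solution(3)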
03b24e97b1239f28cdaa4311f5d51c7974308bce
|
postgres2redis.py
|
postgres2redis.py
|
#!/usr/bin/env python
import ConfigParser
import json
import psycopg2
import redis
import time
def main():
config = ConfigParser.RawConfigParser()
config.read(('default.cfg', 'local.cfg',))
db_host = config.get('Database', 'host')
db_user = config.get('Database', 'user')
db_pass = config.get('Database', 'password')
conn = psycopg2.connect(host=db_host, user=db_user, password=db_pass)
rconn = redis.Redis(db='urlencode')
rconn.connect()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM urlurlurl')
results = cursor.fetchall()
for result in results:
_id, url, enc, rurl, created, flags = result
created = time.mktime(created.utctimetuple())
# Forward mapping
rconn[enc] = json.dumps({'url' : url, 'rurl' : rurl, 'created' : created, 'flags' : flags})
# Reverse mapping
rconn[url] = enc
finally:
conn.close()
rconn.disconnect()
if __name__ == '__main__':
main()
|
Add a simple script to prefill Redis with the old postgres data
|
Add a simple script to prefill Redis with the old postgres data
|
Python
|
mit
|
rtyler/urlenco.de
|
Add a simple script to prefill Redis with the old postgres data
|
#!/usr/bin/env python
import ConfigParser
import json
import psycopg2
import redis
import time
def main():
config = ConfigParser.RawConfigParser()
config.read(('default.cfg', 'local.cfg',))
db_host = config.get('Database', 'host')
db_user = config.get('Database', 'user')
db_pass = config.get('Database', 'password')
conn = psycopg2.connect(host=db_host, user=db_user, password=db_pass)
rconn = redis.Redis(db='urlencode')
rconn.connect()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM urlurlurl')
results = cursor.fetchall()
for result in results:
_id, url, enc, rurl, created, flags = result
created = time.mktime(created.utctimetuple())
# Forward mapping
rconn[enc] = json.dumps({'url' : url, 'rurl' : rurl, 'created' : created, 'flags' : flags})
# Reverse mapping
rconn[url] = enc
finally:
conn.close()
rconn.disconnect()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a simple script to prefill Redis with the old postgres data<commit_after>
|
#!/usr/bin/env python
import ConfigParser
import json
import psycopg2
import redis
import time
def main():
config = ConfigParser.RawConfigParser()
config.read(('default.cfg', 'local.cfg',))
db_host = config.get('Database', 'host')
db_user = config.get('Database', 'user')
db_pass = config.get('Database', 'password')
conn = psycopg2.connect(host=db_host, user=db_user, password=db_pass)
rconn = redis.Redis(db='urlencode')
rconn.connect()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM urlurlurl')
results = cursor.fetchall()
for result in results:
_id, url, enc, rurl, created, flags = result
created = time.mktime(created.utctimetuple())
# Forward mapping
rconn[enc] = json.dumps({'url' : url, 'rurl' : rurl, 'created' : created, 'flags' : flags})
# Reverse mapping
rconn[url] = enc
finally:
conn.close()
rconn.disconnect()
if __name__ == '__main__':
main()
|
Add a simple script to prefill Redis with the old postgres data#!/usr/bin/env python
import ConfigParser
import json
import psycopg2
import redis
import time
def main():
config = ConfigParser.RawConfigParser()
config.read(('default.cfg', 'local.cfg',))
db_host = config.get('Database', 'host')
db_user = config.get('Database', 'user')
db_pass = config.get('Database', 'password')
conn = psycopg2.connect(host=db_host, user=db_user, password=db_pass)
rconn = redis.Redis(db='urlencode')
rconn.connect()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM urlurlurl')
results = cursor.fetchall()
for result in results:
_id, url, enc, rurl, created, flags = result
created = time.mktime(created.utctimetuple())
# Forward mapping
rconn[enc] = json.dumps({'url' : url, 'rurl' : rurl, 'created' : created, 'flags' : flags})
# Reverse mapping
rconn[url] = enc
finally:
conn.close()
rconn.disconnect()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a simple script to prefill Redis with the old postgres data<commit_after>#!/usr/bin/env python
import ConfigParser
import json
import psycopg2
import redis
import time
def main():
config = ConfigParser.RawConfigParser()
config.read(('default.cfg', 'local.cfg',))
db_host = config.get('Database', 'host')
db_user = config.get('Database', 'user')
db_pass = config.get('Database', 'password')
conn = psycopg2.connect(host=db_host, user=db_user, password=db_pass)
rconn = redis.Redis(db='urlencode')
rconn.connect()
try:
cursor = conn.cursor()
cursor.execute('SELECT * FROM urlurlurl')
results = cursor.fetchall()
for result in results:
_id, url, enc, rurl, created, flags = result
created = time.mktime(created.utctimetuple())
# Forward mapping
rconn[enc] = json.dumps({'url' : url, 'rurl' : rurl, 'created' : created, 'flags' : flags})
# Reverse mapping
rconn[url] = enc
finally:
conn.close()
rconn.disconnect()
if __name__ == '__main__':
main()
|
|
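A hedged side note on the record above: the connect()/disconnect() calls and the string db name reflect a very old redis-py; current releases pool connections implicitly, take an integer db, and write through set(). The same forward and reverse mapping would look roughly like this, with dummy values standing in for a real result row:

# Illustrative only; host, db number, and the sample values are assumptions.
import json
import redis

enc, url, rurl = "abc123", "http://example.com/some/long/path", "http://urlenco.de/abc123"
created, flags = 1234567890.0, 0
rconn = redis.Redis(host="localhost", db=0)
rconn.set(enc, json.dumps({"url": url, "rurl": rurl, "created": created, "flags": flags}))
rconn.set(url, enc)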
2cdd4ac059dd21dcde654dd4d775d07dffb7a53e
|
test/test_cli.py
|
test/test_cli.py
|
import unittest
import subprocess
import tempfile
import fuchsia
from sage.all import SR
def sh(*cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Command %s exited with code %s" % (cmd, p.returncode))
return stdout, stderr
class Test(unittest.TestCase):
def test_help(t):
sh("bin/fuchsia", "-h")
def assertTransformation(t, m1_path, x_name, t_path, m2_path):
M1 = fuchsia.import_matrix_from_file(m1_path)
T = fuchsia.import_matrix_from_file(t_path)
M2 = fuchsia.import_matrix_from_file(m2_path)
t.assertEqual(M2.simplify_rational(),
fuchsia.transform(M1, SR.var(x_name), T).simplify_rational())
def assertIsFuchsian(t, m_path, x_name):
M = fuchsia.import_matrix_from_file(m_path)
x = SR.var(x_name)
pranks = fuchsia.singularities(M, x).values()
t.assertEqual(pranks, [0]*len(pranks))
def test_fuchsify_1(t):
with tempfile.NamedTemporaryFile() as mf:
with tempfile.NamedTemporaryFile() as tf:
sh("bin/fuchsia", "fuchsify", "-m", mf.name, "-t", tf.name,
"examples/git_409.mtx")
t.assertTransformation("examples/git_409.mtx", "x", tf.name, mf.name)
t.assertIsFuchsian(mf.name, "x")
|
Add a simple CLI test
|
Add a simple CLI test
|
Python
|
isc
|
gituliar/fuchsia,gituliar/fuchsia
|
Add a simple CLI test
|
import unittest
import subprocess
import tempfile
import fuchsia
from sage.all import SR
def sh(*cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Command %s exited with code %s" % (cmd, p.returncode))
return stdout, stderr
class Test(unittest.TestCase):
def test_help(t):
sh("bin/fuchsia", "-h")
def assertTransformation(t, m1_path, x_name, t_path, m2_path):
M1 = fuchsia.import_matrix_from_file(m1_path)
T = fuchsia.import_matrix_from_file(t_path)
M2 = fuchsia.import_matrix_from_file(m2_path)
t.assertEqual(M2.simplify_rational(),
fuchsia.transform(M1, SR.var(x_name), T).simplify_rational())
def assertIsFuchsian(t, m_path, x_name):
M = fuchsia.import_matrix_from_file(m_path)
x = SR.var(x_name)
pranks = fuchsia.singularities(M, x).values()
t.assertEqual(pranks, [0]*len(pranks))
def test_fuchsify_1(t):
with tempfile.NamedTemporaryFile() as mf:
with tempfile.NamedTemporaryFile() as tf:
sh("bin/fuchsia", "fuchsify", "-m", mf.name, "-t", tf.name,
"examples/git_409.mtx")
t.assertTransformation("examples/git_409.mtx", "x", tf.name, mf.name)
t.assertIsFuchsian(mf.name, "x")
|
<commit_before><commit_msg>Add a simple CLI test<commit_after>
|
import unittest
import subprocess
import tempfile
import fuchsia
from sage.all import SR
def sh(*cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Command %s exited with code %s" % (cmd, p.returncode))
return stdout, stderr
class Test(unittest.TestCase):
def test_help(t):
sh("bin/fuchsia", "-h")
def assertTransformation(t, m1_path, x_name, t_path, m2_path):
M1 = fuchsia.import_matrix_from_file(m1_path)
T = fuchsia.import_matrix_from_file(t_path)
M2 = fuchsia.import_matrix_from_file(m2_path)
t.assertEqual(M2.simplify_rational(),
fuchsia.transform(M1, SR.var(x_name), T).simplify_rational())
def assertIsFuchsian(t, m_path, x_name):
M = fuchsia.import_matrix_from_file(m_path)
x = SR.var(x_name)
pranks = fuchsia.singularities(M, x).values()
t.assertEqual(pranks, [0]*len(pranks))
def test_fuchsify_1(t):
with tempfile.NamedTemporaryFile() as mf:
with tempfile.NamedTemporaryFile() as tf:
sh("bin/fuchsia", "fuchsify", "-m", mf.name, "-t", tf.name,
"examples/git_409.mtx")
t.assertTransformation("examples/git_409.mtx", "x", tf.name, mf.name)
t.assertIsFuchsian(mf.name, "x")
|
Add a simple CLI testimport unittest
import subprocess
import tempfile
import fuchsia
from sage.all import SR
def sh(*cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Command %s exited with code %s" % (cmd, p.returncode))
return stdout, stderr
class Test(unittest.TestCase):
def test_help(t):
sh("bin/fuchsia", "-h")
def assertTransformation(t, m1_path, x_name, t_path, m2_path):
M1 = fuchsia.import_matrix_from_file(m1_path)
T = fuchsia.import_matrix_from_file(t_path)
M2 = fuchsia.import_matrix_from_file(m2_path)
t.assertEqual(M2.simplify_rational(),
fuchsia.transform(M1, SR.var(x_name), T).simplify_rational())
def assertIsFuchsian(t, m_path, x_name):
M = fuchsia.import_matrix_from_file(m_path)
x = SR.var(x_name)
pranks = fuchsia.singularities(M, x).values()
t.assertEqual(pranks, [0]*len(pranks))
def test_fuchsify_1(t):
with tempfile.NamedTemporaryFile() as mf:
with tempfile.NamedTemporaryFile() as tf:
sh("bin/fuchsia", "fuchsify", "-m", mf.name, "-t", tf.name,
"examples/git_409.mtx")
t.assertTransformation("examples/git_409.mtx", "x", tf.name, mf.name)
t.assertIsFuchsian(mf.name, "x")
|
<commit_before><commit_msg>Add a simple CLI test<commit_after>import unittest
import subprocess
import tempfile
import fuchsia
from sage.all import SR
def sh(*cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Command %s exited with code %s" % (cmd, p.returncode))
return stdout, stderr
class Test(unittest.TestCase):
def test_help(t):
sh("bin/fuchsia", "-h")
def assertTransformation(t, m1_path, x_name, t_path, m2_path):
M1 = fuchsia.import_matrix_from_file(m1_path)
T = fuchsia.import_matrix_from_file(t_path)
M2 = fuchsia.import_matrix_from_file(m2_path)
t.assertEqual(M2.simplify_rational(),
fuchsia.transform(M1, SR.var(x_name), T).simplify_rational())
def assertIsFuchsian(t, m_path, x_name):
M = fuchsia.import_matrix_from_file(m_path)
x = SR.var(x_name)
pranks = fuchsia.singularities(M, x).values()
t.assertEqual(pranks, [0]*len(pranks))
def test_fuchsify_1(t):
with tempfile.NamedTemporaryFile() as mf:
with tempfile.NamedTemporaryFile() as tf:
sh("bin/fuchsia", "fuchsify", "-m", mf.name, "-t", tf.name,
"examples/git_409.mtx")
t.assertTransformation("examples/git_409.mtx", "x", tf.name, mf.name)
t.assertIsFuchsian(mf.name, "x")
|
|
dcdc91d890fc96e76f21e0ee51fccb6b9d0bac52
|
nodeconductor/structure/migrations/0022_init_global_count_quotas.py
|
nodeconductor/structure/migrations/0022_init_global_count_quotas.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from uuid import uuid4
from django.db import migrations
def create_quotas(apps, schema_editor):
Project = apps.get_model('structure', 'Project')
Customer = apps.get_model('structure', 'Customer')
ProjectGroup = apps.get_model('structure', 'ProjectGroup')
Quota = apps.get_model('quotas', 'Quota')
# We can not use model constants in migrations because they can be changed in future
quota_name_map = {
Project: 'nc_global_project_count',
Customer: 'nc_global_customer_count',
ProjectGroup: 'nc_global_project_group_count',
}
for model in [Project, Customer, ProjectGroup]:
name = quota_name_map[model]
usage = model.objects.count()
if not Quota.objects.filter(name=name, object_id__isnull=True).exists():
Quota.objects.create(uuid=uuid4().hex, name=name, usage=usage)
else:
Quota.objects.filter(name=name, object_id__isnull=True).update(usage=usage)
class Migration(migrations.Migration):
dependencies = [
('structure', '0021_balancehistory'),
('quotas', '0002_make_quota_scope_nullable'),
]
operations = [
migrations.RunPython(create_quotas),
]
|
Add global count quotas calculation migration
|
Add global count quotas calculation migration
- nc-860
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Add global count quotas calculation migration
- nc-860
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from uuid import uuid4
from django.db import migrations
def create_quotas(apps, schema_editor):
Project = apps.get_model('structure', 'Project')
Customer = apps.get_model('structure', 'Customer')
ProjectGroup = apps.get_model('structure', 'ProjectGroup')
Quota = apps.get_model('quotas', 'Quota')
# We can not use model constants in migrations because they can be changed in future
quota_name_map = {
Project: 'nc_global_project_count',
Customer: 'nc_global_customer_count',
ProjectGroup: 'nc_global_project_group_count',
}
for model in [Project, Customer, ProjectGroup]:
name = quota_name_map[model]
usage = model.objects.count()
if not Quota.objects.filter(name=name, object_id__isnull=True).exists():
Quota.objects.create(uuid=uuid4().hex, name=name, usage=usage)
else:
Quota.objects.filter(name=name, object_id__isnull=True).update(usage=usage)
class Migration(migrations.Migration):
dependencies = [
('structure', '0021_balancehistory'),
('quotas', '0002_make_quota_scope_nullable'),
]
operations = [
migrations.RunPython(create_quotas),
]
|
<commit_before><commit_msg>Add global count quotas calculation migration
- nc-860<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from uuid import uuid4
from django.db import migrations
def create_quotas(apps, schema_editor):
Project = apps.get_model('structure', 'Project')
Customer = apps.get_model('structure', 'Customer')
ProjectGroup = apps.get_model('structure', 'ProjectGroup')
Quota = apps.get_model('quotas', 'Quota')
# We can not use model constants in migrations because they can be changed in future
quota_name_map = {
Project: 'nc_global_project_count',
Customer: 'nc_global_customer_count',
ProjectGroup: 'nc_global_project_group_count',
}
for model in [Project, Customer, ProjectGroup]:
name = quota_name_map[model]
usage = model.objects.count()
if not Quota.objects.filter(name=name, object_id__isnull=True).exists():
Quota.objects.create(uuid=uuid4().hex, name=name, usage=usage)
else:
Quota.objects.filter(name=name, object_id__isnull=True).update(usage=usage)
class Migration(migrations.Migration):
dependencies = [
('structure', '0021_balancehistory'),
('quotas', '0002_make_quota_scope_nullable'),
]
operations = [
migrations.RunPython(create_quotas),
]
|
Add global count quotas calculation migration
- nc-860# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from uuid import uuid4
from django.db import migrations
def create_quotas(apps, schema_editor):
Project = apps.get_model('structure', 'Project')
Customer = apps.get_model('structure', 'Customer')
ProjectGroup = apps.get_model('structure', 'ProjectGroup')
Quota = apps.get_model('quotas', 'Quota')
# We can not use model constants in migrations because they can be changed in future
quota_name_map = {
Project: 'nc_global_project_count',
Customer: 'nc_global_customer_count',
ProjectGroup: 'nc_global_project_group_count',
}
for model in [Project, Customer, ProjectGroup]:
name = quota_name_map[model]
usage = model.objects.count()
if not Quota.objects.filter(name=name, object_id__isnull=True).exists():
Quota.objects.create(uuid=uuid4().hex, name=name, usage=usage)
else:
Quota.objects.filter(name=name, object_id__isnull=True).update(usage=usage)
class Migration(migrations.Migration):
dependencies = [
('structure', '0021_balancehistory'),
('quotas', '0002_make_quota_scope_nullable'),
]
operations = [
migrations.RunPython(create_quotas),
]
|
<commit_before><commit_msg>Add global count quotas calculation migration
- nc-860<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from uuid import uuid4
from django.db import migrations
def create_quotas(apps, schema_editor):
Project = apps.get_model('structure', 'Project')
Customer = apps.get_model('structure', 'Customer')
ProjectGroup = apps.get_model('structure', 'ProjectGroup')
Quota = apps.get_model('quotas', 'Quota')
# We can not use model constants in migrations because they can be changed in future
quota_name_map = {
Project: 'nc_global_project_count',
Customer: 'nc_global_customer_count',
ProjectGroup: 'nc_global_project_group_count',
}
for model in [Project, Customer, ProjectGroup]:
name = quota_name_map[model]
usage = model.objects.count()
if not Quota.objects.filter(name=name, object_id__isnull=True).exists():
Quota.objects.create(uuid=uuid4().hex, name=name, usage=usage)
else:
Quota.objects.filter(name=name, object_id__isnull=True).update(usage=usage)
class Migration(migrations.Migration):
dependencies = [
('structure', '0021_balancehistory'),
('quotas', '0002_make_quota_scope_nullable'),
]
operations = [
migrations.RunPython(create_quotas),
]
|
|
117809ed43c6c0cab4525ca4207764b9909020af
|
migrations/versions/910_delete_copied_from_brief_id_column.py
|
migrations/versions/910_delete_copied_from_brief_id_column.py
|
"""Remove copied_from_brief_id column from briefs table as it has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made from it. Reason for this is
that the original and the copy were bound by a parent-child database relationship.
Revision ID: 910
Revises: 900
Create Date: 2017-06-01
"""
# revision identifiers, used by Alembic.
revision = '910'
down_revision = '900'
from alembic import op
from sqlalchemy import Column, ForeignKey, INTEGER
def upgrade():
    """Drop column as it has been superseded with 'is_a-copy' boolean column to fix a bug."""
op.drop_column(
'briefs',
'copied_from_brief_id'
)
def downgrade():
"""Reinstates copied_from_brief_id column, but without populating with data."""
op.add_column(
'briefs',
Column('copied_from_brief_id', INTEGER, ForeignKey('briefs.id'))
)
|
Delete 'copied_from_brief_id' column and create a corresponding migration
|
Delete 'copied_from_brief_id' column and create a corresponding migration
This is done because this field has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made
from it. Reason for this is that the original and the copy were bound by
a parent-child database relationship.
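To illustrate why that relationship blocks deletion, here is a hedged, standalone sketch: SQLite is used purely for illustration, and only the table and column names are taken from the migration in this record.

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('PRAGMA foreign_keys = ON')  # SQLite only enforces FKs when asked to
con.execute('CREATE TABLE briefs ('
            'id INTEGER PRIMARY KEY, '
            'copied_from_brief_id INTEGER REFERENCES briefs(id))')
con.execute('INSERT INTO briefs VALUES (1, NULL)')  # the original draft brief
con.execute('INSERT INTO briefs VALUES (2, 1)')     # its copy points back at it
try:
    con.execute('DELETE FROM briefs WHERE id = 1')  # deleting the original...
except sqlite3.IntegrityError as exc:
    print('blocked by the self-referential foreign key:', exc)  # ...fails

Marking the copy with a boolean flag instead removes the row-to-row dependency, which is why the column could be dropped.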
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Delete 'copied_from_brief_id' column and create a corresponding migration
This is done because this field has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made
from it. Reason for this is that the original and the copy were bound by
a parent-child database relationship.
|
"""Remove copied_from_brief_id column from briefs table as it has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made from it. Reason for this is
that the original and the copy were bound by a parent-child database relationship.
Revision ID: 910
Revises: 900
Create Date: 2017-06-01
"""
# revision identifiers, used by Alembic.
revision = '910'
down_revision = '900'
from alembic import op
from sqlalchemy import Column, ForeignKey, INTEGER
def upgrade():
    """Drop column as it has been superseded with 'is_a-copy' boolean column to fix a bug."""
op.drop_column(
'briefs',
'copied_from_brief_id'
)
def downgrade():
"""Reinstates copied_from_brief_id column, but without populating with data."""
op.add_column(
'briefs',
Column('copied_from_brief_id', INTEGER, ForeignKey('briefs.id'))
)
|
<commit_before><commit_msg>Delete 'copied_from_brief_id' column and create a corresponding migration
This is done because this field has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made
from it. Reason for this is that the original and the copy were bound by
a parent-child database relationship.<commit_after>
|
"""Remove copied_from_brief_id column from briefs table as it has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made from it. Reason for this is
that the original and the copy were bound by a parent-child database relationship.
Revision ID: 910
Revises: 900
Create Date: 2017-06-01
"""
# revision identifiers, used by Alembic.
revision = '910'
down_revision = '900'
from alembic import op
from sqlalchemy import Column, ForeignKey, INTEGER
def upgrade():
    """Drop column as it has been superseded with 'is_a-copy' boolean column to fix a bug."""
op.drop_column(
'briefs',
'copied_from_brief_id'
)
def downgrade():
"""Reinstates copied_from_brief_id column, but without populating with data."""
op.add_column(
'briefs',
Column('copied_from_brief_id', INTEGER, ForeignKey('briefs.id'))
)
|
Delete 'copied_from_brief_id' column and create a corresponding migration
This is done because this field has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made
from it. Reason for this is that the original and the copy were bound by
a parent-child database relationship."""Remove copied_from_brief_id column from briefs table as it has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made from it. Reason for this is
that the original and the copy were bound by a parent-child database relationship.
Revision ID: 910
Revises: 900
Create Date: 2017-06-01
"""
# revision identifiers, used by Alembic.
revision = '910'
down_revision = '900'
from alembic import op
from sqlalchemy import Column, ForeignKey, INTEGER
def upgrade():
    """Drop column as it has been superseded with 'is_a-copy' boolean column to fix a bug."""
op.drop_column(
'briefs',
'copied_from_brief_id'
)
def downgrade():
"""Reinstates copied_from_brief_id column, but without populating with data."""
op.add_column(
'briefs',
Column('copied_from_brief_id', INTEGER, ForeignKey('briefs.id'))
)
|
<commit_before><commit_msg>Delete 'copied_from_brief_id' column and create a corresponding migration
This is done because this field has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made
from it. Reason for this is that the original and the copy were bound by
a parent-child database relationship.<commit_after>"""Remove copied_from_brief_id column from briefs table as it has been superseded with 'is_a-copy' boolean column
to fix a bug. The bug made it impossible to delete a draft brief if a copy was made from it. Reason for this is
that the original and the copy were bound by a parent-child database relationship.
Revision ID: 910
Revises: 900
Create Date: 2017-06-01
"""
# revision identifiers, used by Alembic.
revision = '910'
down_revision = '900'
from alembic import op
from sqlalchemy import Column, ForeignKey, INTEGER
def upgrade():
    """Drop column as it has been superseded with 'is_a-copy' boolean column to fix a bug."""
op.drop_column(
'briefs',
'copied_from_brief_id'
)
def downgrade():
"""Reinstates copied_from_brief_id column, but without populating with data."""
op.add_column(
'briefs',
Column('copied_from_brief_id', INTEGER, ForeignKey('briefs.id'))
)
|
|
02b3548b557c4a10de8bd14ce609f924009baaf2
|
core/migrations/0002_auto_20170522_0640.py
|
core/migrations/0002_auto_20170522_0640.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-22 10:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='invoice',
name='date',
),
migrations.RemoveField(
model_name='invoice',
name='hourly_rate',
),
migrations.AddField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='paid',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
Make migrations for invoice changes
|
Make migrations for invoice changes
|
Python
|
bsd-2-clause
|
overshard/timestrap,cdubz/timestrap,muhleder/timestrap,cdubz/timestrap,overshard/timestrap,muhleder/timestrap,overshard/timestrap,muhleder/timestrap,cdubz/timestrap
|
Make migrations for invoice changes
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-22 10:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='invoice',
name='date',
),
migrations.RemoveField(
model_name='invoice',
name='hourly_rate',
),
migrations.AddField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='paid',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Make migrations for invoice changes<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-22 10:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='invoice',
name='date',
),
migrations.RemoveField(
model_name='invoice',
name='hourly_rate',
),
migrations.AddField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='paid',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
Make migrations for invoice changes# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-22 10:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='invoice',
name='date',
),
migrations.RemoveField(
model_name='invoice',
name='hourly_rate',
),
migrations.AddField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='paid',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Make migrations for invoice changes<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-22 10:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='invoice',
name='date',
),
migrations.RemoveField(
model_name='invoice',
name='hourly_rate',
),
migrations.AddField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='paid',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
|
|
b0f0beae34de0a1fb5ddc704cba5ae9346e92341
|
set_offline.py
|
set_offline.py
|
import asyncio
import os
import discord
from discord.ext import commands
import SLA_bot.channelupdater as ChannelUpdater
import SLA_bot.config as cf
curr_dir = os.path.dirname(__file__)
default_config = os.path.join(curr_dir, 'default_config.ini'),
user_config = os.path.join(curr_dir, 'config.ini')
cf.load_configs(default_config, user_config)
prefix = cf.get('General', 'command_prefix')
bot = commands.Bot(command_prefix=prefix)
async def set_offline():
ChannelUpdater.bot = bot
await ChannelUpdater.load_channels()
embed=discord.Embed(title='Bot is offline.', color=0xdc4a4a)
for channel, messages in ChannelUpdater.channel_messages.items():
await ChannelUpdater.write_content(channel, None, embed)
@bot.event
async def on_ready():
print('Logged in as: {}'.format(bot.user.name))
print('------')
await set_offline()
await bot.logout()
bot.run(cf.get('General', 'bot_token'))
|
Add script to set an offline message on channels
|
Add script to set an offline message on channels
|
Python
|
mit
|
EsqWiggles/SLA-bot,EsqWiggles/SLA-bot
|
Add script to set an offline message on channels
|
import asyncio
import os
import discord
from discord.ext import commands
import SLA_bot.channelupdater as ChannelUpdater
import SLA_bot.config as cf
curr_dir = os.path.dirname(__file__)
default_config = os.path.join(curr_dir, 'default_config.ini'),
user_config = os.path.join(curr_dir, 'config.ini')
cf.load_configs(default_config, user_config)
prefix = cf.get('General', 'command_prefix')
bot = commands.Bot(command_prefix=prefix)
async def set_offline():
ChannelUpdater.bot = bot
await ChannelUpdater.load_channels()
embed=discord.Embed(title='Bot is offline.', color=0xdc4a4a)
for channel, messages in ChannelUpdater.channel_messages.items():
await ChannelUpdater.write_content(channel, None, embed)
@bot.event
async def on_ready():
print('Logged in as: {}'.format(bot.user.name))
print('------')
await set_offline()
await bot.logout()
bot.run(cf.get('General', 'bot_token'))
|
<commit_before><commit_msg>Add script to set an offline message on channels<commit_after>
|
import asyncio
import os
import discord
from discord.ext import commands
import SLA_bot.channelupdater as ChannelUpdater
import SLA_bot.config as cf
curr_dir = os.path.dirname(__file__)
default_config = os.path.join(curr_dir, 'default_config.ini'),
user_config = os.path.join(curr_dir, 'config.ini')
cf.load_configs(default_config, user_config)
prefix = cf.get('General', 'command_prefix')
bot = commands.Bot(command_prefix=prefix)
async def set_offline():
ChannelUpdater.bot = bot
await ChannelUpdater.load_channels()
embed=discord.Embed(title='Bot is offline.', color=0xdc4a4a)
for channel, messages in ChannelUpdater.channel_messages.items():
await ChannelUpdater.write_content(channel, None, embed)
@bot.event
async def on_ready():
print('Logged in as: {}'.format(bot.user.name))
print('------')
await set_offline()
await bot.logout()
bot.run(cf.get('General', 'bot_token'))
|
Add script to set an offline message on channelsimport asyncio
import os
import discord
from discord.ext import commands
import SLA_bot.channelupdater as ChannelUpdater
import SLA_bot.config as cf
curr_dir = os.path.dirname(__file__)
default_config = os.path.join(curr_dir, 'default_config.ini'),
user_config = os.path.join(curr_dir, 'config.ini')
cf.load_configs(default_config, user_config)
prefix = cf.get('General', 'command_prefix')
bot = commands.Bot(command_prefix=prefix)
async def set_offline():
ChannelUpdater.bot = bot
await ChannelUpdater.load_channels()
embed=discord.Embed(title='Bot is offline.', color=0xdc4a4a)
for channel, messages in ChannelUpdater.channel_messages.items():
await ChannelUpdater.write_content(channel, None, embed)
@bot.event
async def on_ready():
print('Logged in as: {}'.format(bot.user.name))
print('------')
await set_offline()
await bot.logout()
bot.run(cf.get('General', 'bot_token'))
|
<commit_before><commit_msg>Add script to set an offline message on channels<commit_after>import asyncio
import os
import discord
from discord.ext import commands
import SLA_bot.channelupdater as ChannelUpdater
import SLA_bot.config as cf
curr_dir = os.path.dirname(__file__)
default_config = os.path.join(curr_dir, 'default_config.ini'),
user_config = os.path.join(curr_dir, 'config.ini')
cf.load_configs(default_config, user_config)
prefix = cf.get('General', 'command_prefix')
bot = commands.Bot(command_prefix=prefix)
async def set_offline():
ChannelUpdater.bot = bot
await ChannelUpdater.load_channels()
embed=discord.Embed(title='Bot is offline.', color=0xdc4a4a)
for channel, messages in ChannelUpdater.channel_messages.items():
await ChannelUpdater.write_content(channel, None, embed)
@bot.event
async def on_ready():
print('Logged in as: {}'.format(bot.user.name))
print('------')
await set_offline()
await bot.logout()
bot.run(cf.get('General', 'bot_token'))
|
|
15a05b3639c47014642cf962bc8a4da1c991b30b
|
script/upload-windows-pdb.py
|
script/upload-windows-pdb.py
|
#!/usr/bin/env python
import os
from lib.util import execute, rm_rf, safe_mkdir
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SYMBOLS_DIR = 'dist\\symbols'
PDB_LIST = [
'out\\Release\\atom.exe.pdb',
'vendor\\brightray\\vendor\\download\\libchromiumcontent\\Release\\chromiumcontent.dll.pdb',
]
def main():
os.chdir(SOURCE_ROOT)
rm_rf(SYMBOLS_DIR)
safe_mkdir(SYMBOLS_DIR)
for pdb in PDB_LIST:
run_symstore(pdb, SYMBOLS_DIR, 'AtomShell')
def run_symstore(pdb, dest, product):
execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
if __name__ == '__main__':
import sys
sys.exit(main())
|
Add script to call symstore
|
Add script to call symstore
|
Python
|
mit
|
carsonmcdonald/electron,mirrh/electron,kazupon/electron,trigrass2/electron,Evercoder/electron,jhen0409/electron,icattlecoder/electron,jtburke/electron,seanchas116/electron,vipulroxx/electron,fritx/electron,jlhbaseball15/electron,posix4e/electron,chrisswk/electron,mubassirhayat/electron,posix4e/electron,deed02392/electron,rprichard/electron,destan/electron,rajatsingla28/electron,John-Lin/electron,medixdev/electron,shennushi/electron,kenmozi/electron,nicholasess/electron,deepak1556/atom-shell,John-Lin/electron,stevekinney/electron,neutrous/electron,tincan24/electron,systembugtj/electron,gstack/infinium-shell,carsonmcdonald/electron,lzpfmh/electron,pandoraui/electron,leolujuyi/electron,zhakui/electron,zhakui/electron,SufianHassan/electron,tinydew4/electron,iftekeriba/electron,gstack/infinium-shell,systembugtj/electron,Evercoder/electron,IonicaBizauKitchen/electron,seanchas116/electron,deepak1556/atom-shell,thompsonemerson/electron,sshiting/electron,egoist/electron,pombredanne/electron,joaomoreno/atom-shell,greyhwndz/electron,aecca/electron,Gerhut/electron,gstack/infinium-shell,michaelchiche/electron,cos2004/electron,fffej/electron,Faiz7412/electron,kostia/electron,digideskio/electron,mubassirhayat/electron,ankitaggarwal011/electron,saronwei/electron,JussMee15/electron,fritx/electron,etiktin/electron,BionicClick/electron,tomashanacek/electron,maxogden/atom-shell,baiwyc119/electron,mjaniszew/electron,electron/electron,chriskdon/electron,jcblw/electron,neutrous/electron,dahal/electron,mhkeller/electron,mhkeller/electron,carsonmcdonald/electron,digideskio/electron,wan-qy/electron,GoooIce/electron,synaptek/electron,Neron-X5/electron,mjaniszew/electron,aliib/electron,sky7sea/electron,Zagorakiss/electron,etiktin/electron,mattdesl/electron,baiwyc119/electron,natgolov/electron,kikong/electron,tonyganch/electron,nicholasess/electron,digideskio/electron,aliib/electron,trigrass2/electron,ianscrivener/electron,xfstudio/electron,fomojola/electron,joneit/electron,Faiz7412/electron,trankmichael/electron,wolfflow/electron,cqqccqc/electron,Faiz7412/electron,vHanda/electron,bpasero/electron,joaomoreno/atom-shell,BionicClick/electron,kazupon/electron,setzer777/electron,jiaz/electron,sky7sea/electron,rreimann/electron,mubassirhayat/electron,JussMee15/electron,benweissmann/electron,bbondy/electron,greyhwndz/electron,anko/electron,natgolov/electron,BionicClick/electron,LadyNaggaga/electron,gabrielPeart/electron,fireball-x/atom-shell,wan-qy/electron,mattdesl/electron,astoilkov/electron,yalexx/electron,nicobot/electron,John-Lin/electron,gbn972/electron,saronwei/electron,sircharleswatson/electron,soulteary/electron,DivyaKMenon/electron,mubassirhayat/electron,deed02392/electron,aecca/electron,tincan24/electron,baiwyc119/electron,michaelchiche/electron,smczk/electron,wan-qy/electron,jonatasfreitasv/electron,deepak1556/atom-shell,gabrielPeart/electron,michaelchiche/electron,egoist/electron,stevekinney/electron,mhkeller/electron,gbn972/electron,gabrielPeart/electron,renaesop/electron,sircharleswatson/electron,bbondy/electron,jannishuebl/electron,JesselJohn/electron,nicobot/electron,felixrieseberg/electron,RobertJGabriel/electron,stevemao/electron,kcrt/electron,arturts/electron,aliib/electron,seanchas116/electron,preco21/electron,kenmozi/electron,ianscrivener/electron,smczk/electron,Neron-X5/electron,leftstick/electron,systembugtj/electron,matiasinsaurralde/electron,d-salas/electron,Ivshti/electron,jannishuebl/electron,tomashanacek/electron,medixdev/electron,jtburke/electron,jsutcodes/electron,fffej/electron,Rokt33r/electro
n,evgenyzinoviev/electron,GoooIce/electron,noikiy/electron,bitemyapp/electron,tomashanacek/electron,takashi/electron,wan-qy/electron,evgenyzinoviev/electron,thingsinjars/electron,posix4e/electron,jiaz/electron,tinydew4/electron,bruce/electron,timruffles/electron,vHanda/electron,jacksondc/electron,mrwizard82d1/electron,webmechanicx/electron,rreimann/electron,iftekeriba/electron,xiruibing/electron,jlord/electron,fomojola/electron,mhkeller/electron,bwiggs/electron,bruce/electron,kostia/electron,micalan/electron,leolujuyi/electron,maxogden/atom-shell,natgolov/electron,tylergibson/electron,arusakov/electron,fireball-x/atom-shell,sky7sea/electron,Neron-X5/electron,setzer777/electron,voidbridge/electron,adcentury/electron,electron/electron,jsutcodes/electron,bpasero/electron,sircharleswatson/electron,cos2004/electron,posix4e/electron,zhakui/electron,wolfflow/electron,aaron-goshine/electron,evgenyzinoviev/electron,meowlab/electron,kenmozi/electron,bpasero/electron,noikiy/electron,LadyNaggaga/electron,xiruibing/electron,minggo/electron,yan-foto/electron,pombredanne/electron,voidbridge/electron,jlord/electron,vHanda/electron,christian-bromann/electron,robinvandernoord/electron,neutrous/electron,Jonekee/electron,gabriel/electron,neutrous/electron,nekuz0r/electron,shaundunne/electron,shaundunne/electron,jsutcodes/electron,jlord/electron,leolujuyi/electron,bright-sparks/electron,matiasinsaurralde/electron,rsvip/electron,BionicClick/electron,bitemyapp/electron,destan/electron,thingsinjars/electron,anko/electron,Ivshti/electron,thompsonemerson/electron,oiledCode/electron,lrlna/electron,stevekinney/electron,simongregory/electron,Rokt33r/electron,ankitaggarwal011/electron,pirafrank/electron,electron/electron,felixrieseberg/electron,miniak/electron,adcentury/electron,gabrielPeart/electron,dahal/electron,mrwizard82d1/electron,DivyaKMenon/electron,eriser/electron,jjz/electron,the-ress/electron,RobertJGabriel/electron,d-salas/electron,pirafrank/electron,tylergibson/electron,tincan24/electron,aaron-goshine/electron,hokein/atom-shell,mattotodd/electron,nagyistoce/electron-atom-shell,thompsonemerson/electron,arturts/electron,xiruibing/electron,jannishuebl/electron,matiasinsaurralde/electron,kostia/electron,jacksondc/electron,adcentury/electron,SufianHassan/electron,bwiggs/electron,benweissmann/electron,ervinb/electron,kazupon/electron,kazupon/electron,vipulroxx/electron,shennushi/electron,icattlecoder/electron,roadev/electron,iftekeriba/electron,chriskdon/electron,coderhaoxin/electron,chriskdon/electron,christian-bromann/electron,faizalpribadi/electron,DivyaKMenon/electron,yalexx/electron,benweissmann/electron,shiftkey/electron,Andrey-Pavlov/electron,rajatsingla28/electron,JussMee15/electron,kcrt/electron,cqqccqc/electron,roadev/electron,the-ress/electron,aaron-goshine/electron,bwiggs/electron,brenca/electron,simongregory/electron,preco21/electron,rajatsingla28/electron,zhakui/electron,JesselJohn/electron,smczk/electron,subblue/electron,Evercoder/electron,micalan/electron,LadyNaggaga/electron,JesselJohn/electron,christian-bromann/electron,tinydew4/electron,deed02392/electron,nicobot/electron,leethomas/electron,fomojola/electron,wan-qy/electron,mjaniszew/electron,bitemyapp/electron,eric-seekas/electron,Jacobichou/electron,abhishekgahlot/electron,pombredanne/electron,jaanus/electron,pandoraui/electron,roadev/electron,brave/electron,jlhbaseball15/electron,shockone/electron,nagyistoce/electron-atom-shell,bpasero/electron,farmisen/electron,arusakov/electron,kostia/electron,DivyaKMenon/electron,fritx/electron,mjaniszew/
electron,rsvip/electron,gbn972/electron,icattlecoder/electron,beni55/electron,IonicaBizauKitchen/electron,gerhardberger/electron,meowlab/electron,edulan/electron,SufianHassan/electron,jaanus/electron,IonicaBizauKitchen/electron,yan-foto/electron,jlhbaseball15/electron,kenmozi/electron,rsvip/electron,tincan24/electron,Jonekee/electron,rprichard/electron,brenca/electron,aecca/electron,Evercoder/electron,cos2004/electron,nekuz0r/electron,chrisswk/electron,xfstudio/electron,brave/electron,tonyganch/electron,carsonmcdonald/electron,arusakov/electron,sircharleswatson/electron,fffej/electron,beni55/electron,iftekeriba/electron,bruce/electron,benweissmann/electron,pandoraui/electron,Evercoder/electron,farmisen/electron,brave/muon,cqqccqc/electron,digideskio/electron,Evercoder/electron,bruce/electron,gabrielPeart/electron,soulteary/electron,edulan/electron,leolujuyi/electron,bpasero/electron,nekuz0r/electron,tonyganch/electron,biblerule/UMCTelnetHub,bright-sparks/electron,Jacobichou/electron,eric-seekas/electron,bitemyapp/electron,rhencke/electron,the-ress/electron,tylergibson/electron,evgenyzinoviev/electron,Rokt33r/electron,Ivshti/electron,vaginessa/electron,michaelchiche/electron,micalan/electron,shaundunne/electron,hokein/atom-shell,xfstudio/electron,egoist/electron,thingsinjars/electron,davazp/electron,brenca/electron,maxogden/atom-shell,arturts/electron,abhishekgahlot/electron,gerhardberger/electron,xfstudio/electron,etiktin/electron,egoist/electron,yalexx/electron,Zagorakiss/electron,tonyganch/electron,thomsonreuters/electron,chriskdon/electron,greyhwndz/electron,dongjoon-hyun/electron,sky7sea/electron,chriskdon/electron,oiledCode/electron,baiwyc119/electron,leolujuyi/electron,rprichard/electron,GoooIce/electron,d-salas/electron,jjz/electron,howmuchcomputer/electron,MaxWhere/electron,adamjgray/electron,gbn972/electron,stevemao/electron,RIAEvangelist/electron,aliib/electron,tonyganch/electron,greyhwndz/electron,dahal/electron,shennushi/electron,mhkeller/electron,ervinb/electron,lzpfmh/electron,yan-foto/electron,wolfflow/electron,joaomoreno/atom-shell,timruffles/electron,RobertJGabriel/electron,rhencke/electron,lzpfmh/electron,bwiggs/electron,medixdev/electron,felixrieseberg/electron,egoist/electron,medixdev/electron,mattotodd/electron,farmisen/electron,gamedevsam/electron,jiaz/electron,kikong/electron,pirafrank/electron,renaesop/electron,webmechanicx/electron,nekuz0r/electron,edulan/electron,shennushi/electron,kcrt/electron,howmuchcomputer/electron,Jonekee/electron,gbn972/electron,noikiy/electron,bobwol/electron,eriser/electron,vipulroxx/electron,Faiz7412/electron,SufianHassan/electron,posix4e/electron,IonicaBizauKitchen/electron,Jacobichou/electron,bbondy/electron,simonfork/electron,xfstudio/electron,sshiting/electron,jsutcodes/electron,electron/electron,jhen0409/electron,rreimann/electron,jtburke/electron,fritx/electron,benweissmann/electron,MaxGraey/electron,gabriel/electron,shockone/electron,gamedevsam/electron,tylergibson/electron,maxogden/atom-shell,wolfflow/electron,RIAEvangelist/electron,JesselJohn/electron,xiruibing/electron,synaptek/electron,destan/electron,yalexx/electron,JesselJohn/electron,GoooIce/electron,lzpfmh/electron,anko/electron,simongregory/electron,farmisen/electron,mattotodd/electron,bpasero/electron,tincan24/electron,dkfiresky/electron,soulteary/electron,jannishuebl/electron,mirrh/electron,leethomas/electron,arusakov/electron,thomsonreuters/electron,ianscrivener/electron,vHanda/electron,mattdesl/electron,MaxWhere/electron,twolfson/electron,Jacobichou/electron,mjaniszew/
electron,kokdemo/electron,biblerule/UMCTelnetHub,baiwyc119/electron,etiktin/electron,mhkeller/electron,maxogden/atom-shell,deepak1556/atom-shell,faizalpribadi/electron,natgolov/electron,Floato/electron,gstack/infinium-shell,felixrieseberg/electron,dahal/electron,aaron-goshine/electron,Zagorakiss/electron,bright-sparks/electron,mattotodd/electron,thingsinjars/electron,astoilkov/electron,fabien-d/electron,nicholasess/electron,evgenyzinoviev/electron,cqqccqc/electron,noikiy/electron,ervinb/electron,jjz/electron,gstack/infinium-shell,kokdemo/electron,trigrass2/electron,d-salas/electron,pirafrank/electron,MaxGraey/electron,neutrous/electron,jcblw/electron,nicobot/electron,RIAEvangelist/electron,Gerhut/electron,tylergibson/electron,digideskio/electron,anko/electron,saronwei/electron,stevekinney/electron,miniak/electron,Gerhut/electron,jiaz/electron,sshiting/electron,minggo/electron,pandoraui/electron,hokein/atom-shell,kazupon/electron,miniak/electron,voidbridge/electron,smczk/electron,brave/electron,gabrielPeart/electron,renaesop/electron,fomojola/electron,shaundunne/electron,stevekinney/electron,webmechanicx/electron,preco21/electron,trankmichael/electron,systembugtj/electron,brave/electron,IonicaBizauKitchen/electron,chriskdon/electron,davazp/electron,shockone/electron,jlhbaseball15/electron,dongjoon-hyun/electron,systembugtj/electron,thingsinjars/electron,Ivshti/electron,adcentury/electron,simongregory/electron,wolfflow/electron,electron/electron,abhishekgahlot/electron,trigrass2/electron,biblerule/UMCTelnetHub,kokdemo/electron,subblue/electron,Neron-X5/electron,pombredanne/electron,coderhaoxin/electron,yan-foto/electron,iftekeriba/electron,howmuchcomputer/electron,eric-seekas/electron,voidbridge/electron,smczk/electron,posix4e/electron,fabien-d/electron,icattlecoder/electron,lrlna/electron,miniak/electron,rajatsingla28/electron,gerhardberger/electron,kazupon/electron,setzer777/electron,synaptek/electron,ankitaggarwal011/electron,yalexx/electron,carsonmcdonald/electron,GoooIce/electron,gerhardberger/electron,rsvip/electron,RobertJGabriel/electron,rhencke/electron,leethomas/electron,shiftkey/electron,kcrt/electron,IonicaBizauKitchen/electron,yalexx/electron,rprichard/electron,mattotodd/electron,sshiting/electron,eriser/electron,joneit/electron,etiktin/electron,GoooIce/electron,oiledCode/electron,smczk/electron,aichingm/electron,jhen0409/electron,John-Lin/electron,subblue/electron,jlord/electron,eric-seekas/electron,minggo/electron,shiftkey/electron,darwin/electron,fffej/electron,cqqccqc/electron,noikiy/electron,edulan/electron,dahal/electron,dkfiresky/electron,Jonekee/electron,beni55/electron,aichingm/electron,brave/electron,jtburke/electron,adamjgray/electron,beni55/electron,oiledCode/electron,jaanus/electron,noikiy/electron,leethomas/electron,bright-sparks/electron,tomashanacek/electron,ervinb/electron,abhishekgahlot/electron,seanchas116/electron,Andrey-Pavlov/electron,dongjoon-hyun/electron,astoilkov/electron,gabriel/electron,matiasinsaurralde/electron,setzer777/electron,Gerhut/electron,adamjgray/electron,bobwol/electron,micalan/electron,nekuz0r/electron,gabriel/electron,tonyganch/electron,pombredanne/electron,kokdemo/electron,synaptek/electron,aecca/electron,darwin/electron,astoilkov/electron,christian-bromann/electron,kostia/electron,LadyNaggaga/electron,lrlna/electron,cos2004/electron,coderhaoxin/electron,tylergibson/electron,dkfiresky/electron,beni55/electron,jonatasfreitasv/electron,electron/electron,aichingm/electron,cqqccqc/electron,RIAEvangelist/electron,digideskio/electron,Andrey-Pa
vlov/electron,rreimann/electron,jlord/electron,takashi/electron,zhakui/electron,lrlna/electron,sky7sea/electron,howmuchcomputer/electron,Jacobichou/electron,felixrieseberg/electron,davazp/electron,fireball-x/atom-shell,Floato/electron,MaxWhere/electron,ankitaggarwal011/electron,jacksondc/electron,jsutcodes/electron,bitemyapp/electron,kenmozi/electron,felixrieseberg/electron,vaginessa/electron,sshiting/electron,meowlab/electron,meowlab/electron,twolfson/electron,pirafrank/electron,xfstudio/electron,jacksondc/electron,preco21/electron,tincan24/electron,thomsonreuters/electron,twolfson/electron,thomsonreuters/electron,sshiting/electron,nagyistoce/electron-atom-shell,jhen0409/electron,thomsonreuters/electron,michaelchiche/electron,gerhardberger/electron,wolfflow/electron,adamjgray/electron,astoilkov/electron,oiledCode/electron,arturts/electron,trankmichael/electron,simonfork/electron,shockone/electron,carsonmcdonald/electron,Floato/electron,stevemao/electron,systembugtj/electron,baiwyc119/electron,Gerhut/electron,gabriel/electron,jiaz/electron,fabien-d/electron,xiruibing/electron,subblue/electron,Zagorakiss/electron,soulteary/electron,bobwol/electron,takashi/electron,brave/muon,jjz/electron,meowlab/electron,ianscrivener/electron,ervinb/electron,fireball-x/atom-shell,trankmichael/electron,vHanda/electron,miniak/electron,minggo/electron,robinvandernoord/electron,shockone/electron,fabien-d/electron,rajatsingla28/electron,simonfork/electron,seanchas116/electron,timruffles/electron,arturts/electron,stevemao/electron,yan-foto/electron,icattlecoder/electron,adamjgray/electron,stevekinney/electron,synaptek/electron,kostia/electron,destan/electron,roadev/electron,Rokt33r/electron,aaron-goshine/electron,Floato/electron,Zagorakiss/electron,gamedevsam/electron,ankitaggarwal011/electron,Faiz7412/electron,Jonekee/electron,Gerhut/electron,dkfiresky/electron,nicobot/electron,Neron-X5/electron,timruffles/electron,gerhardberger/electron,fffej/electron,MaxWhere/electron,ankitaggarwal011/electron,synaptek/electron,robinvandernoord/electron,leftstick/electron,tomashanacek/electron,jonatasfreitasv/electron,chrisswk/electron,the-ress/electron,brave/muon,farmisen/electron,evgenyzinoviev/electron,adcentury/electron,joneit/electron,RIAEvangelist/electron,saronwei/electron,pandoraui/electron,benweissmann/electron,bobwol/electron,kikong/electron,mirrh/electron,JesselJohn/electron,mjaniszew/electron,brenca/electron,faizalpribadi/electron,pandoraui/electron,coderhaoxin/electron,etiktin/electron,bpasero/electron,dahal/electron,biblerule/UMCTelnetHub,mubassirhayat/electron,jaanus/electron,leethomas/electron,roadev/electron,trigrass2/electron,leethomas/electron,bwiggs/electron,fffej/electron,renaesop/electron,farmisen/electron,fomojola/electron,subblue/electron,joaomoreno/atom-shell,mrwizard82d1/electron,jlhbaseball15/electron,simonfork/electron,minggo/electron,bitemyapp/electron,simongregory/electron,tomashanacek/electron,setzer777/electron,eric-seekas/electron,brave/electron,kikong/electron,mrwizard82d1/electron,cos2004/electron,coderhaoxin/electron,stevemao/electron,fomojola/electron,nicobot/electron,LadyNaggaga/electron,bobwol/electron,vHanda/electron,meowlab/electron,micalan/electron,mirrh/electron,roadev/electron,nekuz0r/electron,pirafrank/electron,faizalpribadi/electron,gamedevsam/electron,brenca/electron,vipulroxx/electron,mirrh/electron,robinvandernoord/electron,jannishuebl/electron,SufianHassan/electron,takashi/electron,setzer777/electron,ianscrivener/electron,nicholasess/electron,arturts/electron,rhencke/electron,ko
kdemo/electron,edulan/electron,robinvandernoord/electron,mattdesl/electron,joaomoreno/atom-shell,JussMee15/electron,adamjgray/electron,twolfson/electron,davazp/electron,lrlna/electron,MaxGraey/electron,hokein/atom-shell,leftstick/electron,jjz/electron,davazp/electron,lrlna/electron,vipulroxx/electron,brenca/electron,jcblw/electron,bobwol/electron,thingsinjars/electron,kcrt/electron,MaxWhere/electron,destan/electron,shockone/electron,coderhaoxin/electron,biblerule/UMCTelnetHub,astoilkov/electron,oiledCode/electron,edulan/electron,the-ress/electron,Jacobichou/electron,brave/muon,leftstick/electron,kokdemo/electron,deed02392/electron,joaomoreno/atom-shell,darwin/electron,nicholasess/electron,jacksondc/electron,rhencke/electron,howmuchcomputer/electron,DivyaKMenon/electron,simonfork/electron,minggo/electron,rreimann/electron,fireball-x/atom-shell,bruce/electron,medixdev/electron,twolfson/electron,bbondy/electron,faizalpribadi/electron,kcrt/electron,jhen0409/electron,webmechanicx/electron,saronwei/electron,cos2004/electron,RobertJGabriel/electron,gabriel/electron,brave/muon,BionicClick/electron,the-ress/electron,christian-bromann/electron,natgolov/electron,matiasinsaurralde/electron,takashi/electron,voidbridge/electron,tinydew4/electron,bbondy/electron,BionicClick/electron,jacksondc/electron,fritx/electron,RIAEvangelist/electron,abhishekgahlot/electron,robinvandernoord/electron,greyhwndz/electron,shennushi/electron,nicholasess/electron,sircharleswatson/electron,MaxGraey/electron,davazp/electron,thomsonreuters/electron,twolfson/electron,vaginessa/electron,joneit/electron,renaesop/electron,joneit/electron,mrwizard82d1/electron,MaxGraey/electron,bright-sparks/electron,leftstick/electron,rsvip/electron,takashi/electron,arusakov/electron,Rokt33r/electron,leftstick/electron,trigrass2/electron,anko/electron,shaundunne/electron,tinydew4/electron,jonatasfreitasv/electron,michaelchiche/electron,JussMee15/electron,jsutcodes/electron,ianscrivener/electron,natgolov/electron,eriser/electron,aichingm/electron,thompsonemerson/electron,Andrey-Pavlov/electron,trankmichael/electron,matiasinsaurralde/electron,egoist/electron,joneit/electron,Andrey-Pavlov/electron,arusakov/electron,Andrey-Pavlov/electron,timruffles/electron,rajatsingla28/electron,webmechanicx/electron,renaesop/electron,sircharleswatson/electron,destan/electron,eriser/electron,Zagorakiss/electron,jannishuebl/electron,saronwei/electron,jtburke/electron,gamedevsam/electron,Jonekee/electron,simongregory/electron,chrisswk/electron,shiftkey/electron,JussMee15/electron,RobertJGabriel/electron,simonfork/electron,sky7sea/electron,stevemao/electron,miniak/electron,fritx/electron,jcblw/electron,preco21/electron,Floato/electron,ervinb/electron,lzpfmh/electron,fabien-d/electron,thompsonemerson/electron,shennushi/electron,voidbridge/electron,jcblw/electron,subblue/electron,gamedevsam/electron,Neron-X5/electron,brave/muon,vaginessa/electron,soulteary/electron,jaanus/electron,shaundunne/electron,John-Lin/electron,pombredanne/electron,nagyistoce/electron-atom-shell,gbn972/electron,gerhardberger/electron,bright-sparks/electron,mattdesl/electron,kenmozi/electron,dkfiresky/electron,webmechanicx/electron,thompsonemerson/electron,seanchas116/electron,mirrh/electron,jjz/electron,xiruibing/electron,micalan/electron,medixdev/electron,bwiggs/electron,faizalpribadi/electron,mrwizard82d1/electron,deed02392/electron,wan-qy/electron,jcblw/electron,vipulroxx/electron,jhen0409/electron,jiaz/electron,aecca/electron,vaginessa/electron,dkfiresky/electron,rhencke/electron,tinydew4/ele
ctron,dongjoon-hyun/electron,nagyistoce/electron-atom-shell,kikong/electron,aichingm/electron,the-ress/electron,beni55/electron,d-salas/electron,soulteary/electron,icattlecoder/electron,bruce/electron,dongjoon-hyun/electron,Ivshti/electron,abhishekgahlot/electron,darwin/electron,anko/electron,eric-seekas/electron,neutrous/electron,d-salas/electron,howmuchcomputer/electron,mattotodd/electron,jlhbaseball15/electron,jtburke/electron,zhakui/electron,shiftkey/electron,Floato/electron,lzpfmh/electron,Rokt33r/electron,aichingm/electron,darwin/electron,greyhwndz/electron,biblerule/UMCTelnetHub,hokein/atom-shell,mattdesl/electron,iftekeriba/electron,aliib/electron,jonatasfreitasv/electron,SufianHassan/electron,deepak1556/atom-shell,yan-foto/electron,John-Lin/electron,aliib/electron,adcentury/electron,trankmichael/electron,leolujuyi/electron,preco21/electron,electron/electron,deed02392/electron,shiftkey/electron,aaron-goshine/electron,aecca/electron,DivyaKMenon/electron,chrisswk/electron,eriser/electron,rreimann/electron,dongjoon-hyun/electron,vaginessa/electron,jaanus/electron,bbondy/electron,christian-bromann/electron,jonatasfreitasv/electron,LadyNaggaga/electron,MaxWhere/electron
|
Add script to call symstore
|
#!/usr/bin/env python
import os
from lib.util import execute, rm_rf, safe_mkdir
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SYMBOLS_DIR = 'dist\\symbols'
PDB_LIST = [
'out\\Release\\atom.exe.pdb',
'vendor\\brightray\\vendor\\download\\libchromiumcontent\\Release\\chromiumcontent.dll.pdb',
]
def main():
os.chdir(SOURCE_ROOT)
rm_rf(SYMBOLS_DIR)
safe_mkdir(SYMBOLS_DIR)
for pdb in PDB_LIST:
run_symstore(pdb, SYMBOLS_DIR, 'AtomShell')
def run_symstore(pdb, dest, product):
execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
if __name__ == '__main__':
import sys
sys.exit(main())
|
<commit_before><commit_msg>Add script to call symstore<commit_after>
|
#!/usr/bin/env python
import os
from lib.util import execute, rm_rf, safe_mkdir
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SYMBOLS_DIR = 'dist\\symbols'
PDB_LIST = [
'out\\Release\\atom.exe.pdb',
'vendor\\brightray\\vendor\\download\\libchromiumcontent\\Release\\chromiumcontent.dll.pdb',
]
def main():
os.chdir(SOURCE_ROOT)
rm_rf(SYMBOLS_DIR)
safe_mkdir(SYMBOLS_DIR)
for pdb in PDB_LIST:
run_symstore(pdb, SYMBOLS_DIR, 'AtomShell')
def run_symstore(pdb, dest, product):
execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
if __name__ == '__main__':
import sys
sys.exit(main())
|
Add script to call symstore#!/usr/bin/env python
import os
from lib.util import execute, rm_rf, safe_mkdir
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SYMBOLS_DIR = 'dist\\symbols'
PDB_LIST = [
'out\\Release\\atom.exe.pdb',
'vendor\\brightray\\vendor\\download\\libchromiumcontent\\Release\\chromiumcontent.dll.pdb',
]
def main():
os.chdir(SOURCE_ROOT)
rm_rf(SYMBOLS_DIR)
safe_mkdir(SYMBOLS_DIR)
for pdb in PDB_LIST:
run_symstore(pdb, SYMBOLS_DIR, 'AtomShell')
def run_symstore(pdb, dest, product):
execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
if __name__ == '__main__':
import sys
sys.exit(main())
|
<commit_before><commit_msg>Add script to call symstore<commit_after>#!/usr/bin/env python
import os
from lib.util import execute, rm_rf, safe_mkdir
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SYMBOLS_DIR = 'dist\\symbols'
PDB_LIST = [
'out\\Release\\atom.exe.pdb',
'vendor\\brightray\\vendor\\download\\libchromiumcontent\\Release\\chromiumcontent.dll.pdb',
]
def main():
os.chdir(SOURCE_ROOT)
rm_rf(SYMBOLS_DIR)
safe_mkdir(SYMBOLS_DIR)
for pdb in PDB_LIST:
run_symstore(pdb, SYMBOLS_DIR, 'AtomShell')
def run_symstore(pdb, dest, product):
execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
if __name__ == '__main__':
import sys
sys.exit(main())
|
|
44922934f55cb1cb8a64eba4afabb66563d66349
|
tests/basics/for_else.py
|
tests/basics/for_else.py
|
# test for-else statement
# test optimised range with simple else
for i in range(2):
print(i)
else:
print('else')
# test optimised range with break over else
for i in range(2):
print(i)
break
else:
print('else')
# test nested optimised range with continue in the else
for i in range(4):
print(i)
for j in range(4):
pass
else:
continue
break
# test optimised range with non-constant end value
N = 2
for i in range(N):
print(i)
else:
print('else')
# test generic iterator with simple else
for i in [0, 1]:
print(i)
else:
print('else')
# test generic iterator with break over else
for i in [0, 1]:
print(i)
break
else:
print('else')
|
Add tests for for-else statement.
|
tests/basics: Add tests for for-else statement.
|
Python
|
mit
|
selste/micropython,pfalcon/micropython,Timmenem/micropython,MrSurly/micropython,Timmenem/micropython,infinnovation/micropython,HenrikSolver/micropython,bvernoux/micropython,torwag/micropython,pfalcon/micropython,pozetroninc/micropython,micropython/micropython-esp32,SHA2017-badge/micropython-esp32,henriknelson/micropython,MrSurly/micropython,kerneltask/micropython,dmazzella/micropython,chrisdearman/micropython,oopy/micropython,deshipu/micropython,torwag/micropython,alex-robbins/micropython,MrSurly/micropython-esp32,deshipu/micropython,HenrikSolver/micropython,micropython/micropython-esp32,kerneltask/micropython,infinnovation/micropython,trezor/micropython,pozetroninc/micropython,deshipu/micropython,AriZuu/micropython,chrisdearman/micropython,micropython/micropython-esp32,oopy/micropython,AriZuu/micropython,MrSurly/micropython-esp32,micropython/micropython-esp32,PappaPeppar/micropython,blazewicz/micropython,torwag/micropython,pramasoul/micropython,PappaPeppar/micropython,swegener/micropython,AriZuu/micropython,adafruit/micropython,blazewicz/micropython,infinnovation/micropython,pramasoul/micropython,HenrikSolver/micropython,TDAbboud/micropython,SHA2017-badge/micropython-esp32,SHA2017-badge/micropython-esp32,pozetroninc/micropython,torwag/micropython,pfalcon/micropython,selste/micropython,PappaPeppar/micropython,kerneltask/micropython,adafruit/micropython,bvernoux/micropython,deshipu/micropython,tralamazza/micropython,micropython/micropython-esp32,alex-robbins/micropython,torwag/micropython,pfalcon/micropython,infinnovation/micropython,swegener/micropython,chrisdearman/micropython,TDAbboud/micropython,adafruit/circuitpython,Timmenem/micropython,lowRISC/micropython,ryannathans/micropython,pozetroninc/micropython,adafruit/circuitpython,adafruit/circuitpython,tobbad/micropython,tobbad/micropython,alex-robbins/micropython,dmazzella/micropython,henriknelson/micropython,tobbad/micropython,infinnovation/micropython,AriZuu/micropython,blazewicz/micropython,ryannathans/micropython,lowRISC/micropython,pramasoul/micropython,bvernoux/micropython,MrSurly/micropython-esp32,henriknelson/micropython,adafruit/circuitpython,tralamazza/micropython,oopy/micropython,HenrikSolver/micropython,tralamazza/micropython,chrisdearman/micropython,lowRISC/micropython,Timmenem/micropython,pramasoul/micropython,bvernoux/micropython,blazewicz/micropython,selste/micropython,HenrikSolver/micropython,PappaPeppar/micropython,trezor/micropython,lowRISC/micropython,kerneltask/micropython,Timmenem/micropython,ryannathans/micropython,selste/micropython,swegener/micropython,MrSurly/micropython,selste/micropython,adafruit/micropython,blazewicz/micropython,SHA2017-badge/micropython-esp32,kerneltask/micropython,swegener/micropython,bvernoux/micropython,adafruit/circuitpython,swegener/micropython,trezor/micropython,pozetroninc/micropython,MrSurly/micropython,oopy/micropython,pramasoul/micropython,dmazzella/micropython,MrSurly/micropython-esp32,tobbad/micropython,TDAbboud/micropython,TDAbboud/micropython,SHA2017-badge/micropython-esp32,MrSurly/micropython-esp32,alex-robbins/micropython,adafruit/circuitpython,trezor/micropython,alex-robbins/micropython,chrisdearman/micropython,tobbad/micropython,oopy/micropython,adafruit/micropython,trezor/micropython,tralamazza/micropython,MrSurly/micropython,henriknelson/micropython,lowRISC/micropython,AriZuu/micropython,deshipu/micropython,pfalcon/micropython,ryannathans/micropython,PappaPeppar/micropython,TDAbboud/micropython,dmazzella/micropython,ryannathans/micropython,henriknelson/micropython,adafruit
/micropython
|
tests/basics: Add tests for for-else statement.
|
# test for-else statement
# test optimised range with simple else
for i in range(2):
print(i)
else:
print('else')
# test optimised range with break over else
for i in range(2):
print(i)
break
else:
print('else')
# test nested optimised range with continue in the else
for i in range(4):
print(i)
for j in range(4):
pass
else:
continue
break
# test optimised range with non-constant end value
N = 2
for i in range(N):
print(i)
else:
print('else')
# test generic iterator with simple else
for i in [0, 1]:
print(i)
else:
print('else')
# test generic iterator with break over else
for i in [0, 1]:
print(i)
break
else:
print('else')
|
<commit_before><commit_msg>tests/basics: Add tests for for-else statement.<commit_after>
|
# test for-else statement
# test optimised range with simple else
for i in range(2):
print(i)
else:
print('else')
# test optimised range with break over else
for i in range(2):
print(i)
break
else:
print('else')
# test nested optimised range with continue in the else
for i in range(4):
print(i)
for j in range(4):
pass
else:
continue
break
# test optimised range with non-constant end value
N = 2
for i in range(N):
print(i)
else:
print('else')
# test generic iterator with simple else
for i in [0, 1]:
print(i)
else:
print('else')
# test generic iterator with break over else
for i in [0, 1]:
print(i)
break
else:
print('else')
|
tests/basics: Add tests for for-else statement.# test for-else statement
# test optimised range with simple else
for i in range(2):
print(i)
else:
print('else')
# test optimised range with break over else
for i in range(2):
print(i)
break
else:
print('else')
# test nested optimised range with continue in the else
for i in range(4):
print(i)
for j in range(4):
pass
else:
continue
break
# test optimised range with non-constant end value
N = 2
for i in range(N):
print(i)
else:
print('else')
# test generic iterator with simple else
for i in [0, 1]:
print(i)
else:
print('else')
# test generic iterator with break over else
for i in [0, 1]:
print(i)
break
else:
print('else')
|
<commit_before><commit_msg>tests/basics: Add tests for for-else statement.<commit_after># test for-else statement
# test optimised range with simple else
for i in range(2):
print(i)
else:
print('else')
# test optimised range with break over else
for i in range(2):
print(i)
break
else:
print('else')
# test nested optimised range with continue in the else
for i in range(4):
print(i)
for j in range(4):
pass
else:
continue
break
# test optimised range with non-constant end value
N = 2
for i in range(N):
print(i)
else:
print('else')
# test generic iterator with simple else
for i in [0, 1]:
print(i)
else:
print('else')
# test generic iterator with break over else
for i in [0, 1]:
print(i)
break
else:
print('else')
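Aside (not part of the original test file): a quick sketch of the expected behaviour, assuming standard for-else semantics — the else suite runs only when the loop finishes without hitting break:
# for i in range(2): print(i) ... else: print('else') -> prints 0, 1, else
# for i in range(2): print(i); break ... else: ... -> prints 0 only; break skips the else clause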
|
|
dad157bdeb548a101a8ed5bb629539e8bcf4a686
|
corehq/apps/hqadmin/management/commands/stale_cases_in_es.py
|
corehq/apps/hqadmin/management/commands/stale_cases_in_es.py
|
import inspect
from django.core.management.base import BaseCommand
from datetime import datetime
from dimagi.utils.chunked import chunked
from casexml.apps.case.models import CommCareCase
from corehq.apps.es import CaseES
from corehq.elastic import ES_EXPORT_INSTANCE
from corehq.util.dates import iso_string_to_datetime
from corehq.util.couch_helpers import paginate_view
class Command(BaseCommand):
"""
Returns list of couch case_ids that are not updated in ES
$ ./manage.py stale_cases_in_es <DOMAIN> > case_ids.txt
"""
help = inspect.cleandoc(__doc__).split('\n')[0]
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
for case_id in get_server_modified_on_for_domain(domain):
print(case_id)
def get_server_modified_on_for_domain(domain):
start_time = datetime.utcnow()
chunk_size = 1000
chunked_iterator = chunked(paginate_view(
CommCareCase.get_db(),
'cases_by_server_date/by_server_modified_on',
chunk_size=chunk_size,
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=False
), chunk_size)
for chunk in chunked_iterator:
case_ids = [row['id'] for row in chunk]
results = (CaseES(es_instance_alias=ES_EXPORT_INSTANCE)
.domain(domain)
.case_ids(case_ids)
.values_list('_id', 'server_modified_on'))
es_modified_on_by_ids = {_id: modified_on for _id, modified_on in results }
for row in chunk:
case_id, couch_modified_on = row['id'], row['value']
if iso_string_to_datetime(couch_modified_on) > start_time:
# skip cases modified after the script started
continue
es_modified_on = es_modified_on_by_ids.get(case_id)
if not es_modified_on or (es_modified_on != couch_modified_on):
yield (case_id, es_modified_on, couch_modified_on)
|
Add management command to get couch cases not updated in ES
|
Add management command to get couch cases not updated in ES
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add management command to get couch cases not updated in ES
|
import inspect
from django.core.management.base import BaseCommand
from datetime import datetime
from dimagi.utils.chunked import chunked
from casexml.apps.case.models import CommCareCase
from corehq.apps.es import CaseES
from corehq.elastic import ES_EXPORT_INSTANCE
from corehq.util.dates import iso_string_to_datetime
from corehq.util.couch_helpers import paginate_view
class Command(BaseCommand):
"""
Returns list of couch case_ids that are not updated in ES
$ ./manage.py stale_cases_in_es <DOMAIN> > case_ids.txt
"""
help = inspect.cleandoc(__doc__).split('\n')[0]
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
for case_id in get_server_modified_on_for_domain(domain):
print(case_id)
def get_server_modified_on_for_domain(domain):
start_time = datetime.utcnow()
chunk_size = 1000
chunked_iterator = chunked(paginate_view(
CommCareCase.get_db(),
'cases_by_server_date/by_server_modified_on',
chunk_size=chunk_size,
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=False
), chunk_size)
for chunk in chunked_iterator:
case_ids = [row['id'] for row in chunk]
results = (CaseES(es_instance_alias=ES_EXPORT_INSTANCE)
.domain(domain)
.case_ids(case_ids)
.values_list('_id', 'server_modified_on'))
es_modified_on_by_ids = {_id: modified_on for _id, modified_on in results }
for row in chunk:
case_id, couch_modified_on = row['id'], row['value']
if iso_string_to_datetime(couch_modified_on) > start_time:
# skip cases modified after the script started
continue
es_modified_on = es_modified_on_by_ids.get(case_id)
if not es_modified_on or (es_modified_on != couch_modified_on):
yield (case_id, es_modified_on, couch_modified_on)
|
<commit_before><commit_msg>Add management command to get couch cases not updated in ES<commit_after>
|
import inspect
from django.core.management.base import BaseCommand
from datetime import datetime
from dimagi.utils.chunked import chunked
from casexml.apps.case.models import CommCareCase
from corehq.apps.es import CaseES
from corehq.elastic import ES_EXPORT_INSTANCE
from corehq.util.dates import iso_string_to_datetime
from corehq.util.couch_helpers import paginate_view
class Command(BaseCommand):
"""
Returns list of couch case_ids that are not updated in ES
$ ./manage.py stale_cases_in_es <DOMAIN> > case_ids.txt
"""
help = inspect.cleandoc(__doc__).split('\n')[0]
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
for case_id in get_server_modified_on_for_domain(domain):
print(case_id)
def get_server_modified_on_for_domain(domain):
start_time = datetime.utcnow()
chunk_size = 1000
chunked_iterator = chunked(paginate_view(
CommCareCase.get_db(),
'cases_by_server_date/by_server_modified_on',
chunk_size=chunk_size,
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=False
), chunk_size)
for chunk in chunked_iterator:
case_ids = [row['id'] for row in chunk]
results = (CaseES(es_instance_alias=ES_EXPORT_INSTANCE)
.domain(domain)
.case_ids(case_ids)
.values_list('_id', 'server_modified_on'))
es_modified_on_by_ids = {_id: modified_on for _id, modified_on in results }
for row in chunk:
case_id, couch_modified_on = row['id'], row['value']
if iso_string_to_datetime(couch_modified_on) > start_time:
# skip cases modified after the script started
continue
es_modified_on = es_modified_on_by_ids.get(case_id)
if not es_modified_on or (es_modified_on != couch_modified_on):
yield (case_id, es_modified_on, couch_modified_on)
|
Add management command to get couch cases not updated in ES
import inspect
from django.core.management.base import BaseCommand
from datetime import datetime
from dimagi.utils.chunked import chunked
from casexml.apps.case.models import CommCareCase
from corehq.apps.es import CaseES
from corehq.elastic import ES_EXPORT_INSTANCE
from corehq.util.dates import iso_string_to_datetime
from corehq.util.couch_helpers import paginate_view
class Command(BaseCommand):
"""
Returns list of couch case_ids that are not updated in ES
$ ./manage.py stale_cases_in_es <DOMAIN> > case_ids.txt
"""
help = inspect.cleandoc(__doc__).split('\n')[0]
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
for case_id in get_server_modified_on_for_domain(domain):
print(case_id)
def get_server_modified_on_for_domain(domain):
start_time = datetime.utcnow()
chunk_size = 1000
chunked_iterator = chunked(paginate_view(
CommCareCase.get_db(),
'cases_by_server_date/by_server_modified_on',
chunk_size=chunk_size,
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=False
), chunk_size)
for chunk in chunked_iterator:
case_ids = [row['id'] for row in chunk]
results = (CaseES(es_instance_alias=ES_EXPORT_INSTANCE)
.domain(domain)
.case_ids(case_ids)
.values_list('_id', 'server_modified_on'))
es_modified_on_by_ids = {_id: modified_on for _id, modified_on in results }
for row in chunk:
case_id, couch_modified_on = row['id'], row['value']
if iso_string_to_datetime(couch_modified_on) > start_time:
# skip cases modified after the script started
continue
es_modified_on = es_modified_on_by_ids.get(case_id)
if not es_modified_on or (es_modified_on != couch_modified_on):
yield (case_id, es_modified_on, couch_modified_on)
|
<commit_before><commit_msg>Add management command to get couch cases not updated in ES<commit_after>
import inspect
from django.core.management.base import BaseCommand
from datetime import datetime
from dimagi.utils.chunked import chunked
from casexml.apps.case.models import CommCareCase
from corehq.apps.es import CaseES
from corehq.elastic import ES_EXPORT_INSTANCE
from corehq.util.dates import iso_string_to_datetime
from corehq.util.couch_helpers import paginate_view
class Command(BaseCommand):
"""
Returns list of couch case_ids that are not updated in ES
$ ./manage.py stale_cases_in_es <DOMAIN> > case_ids.txt
"""
help = inspect.cleandoc(__doc__).split('\n')[0]
def add_arguments(self, parser):
parser.add_argument('domain')
def handle(self, domain, **options):
for case_id in get_server_modified_on_for_domain(domain):
print(case_id)
def get_server_modified_on_for_domain(domain):
start_time = datetime.utcnow()
chunk_size = 1000
chunked_iterator = chunked(paginate_view(
CommCareCase.get_db(),
'cases_by_server_date/by_server_modified_on',
chunk_size=chunk_size,
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=False
), chunk_size)
for chunk in chunked_iterator:
case_ids = [row['id'] for row in chunk]
results = (CaseES(es_instance_alias=ES_EXPORT_INSTANCE)
.domain(domain)
.case_ids(case_ids)
.values_list('_id', 'server_modified_on'))
es_modified_on_by_ids = {_id: modified_on for _id, modified_on in results }
for row in chunk:
case_id, couch_modified_on = row['id'], row['value']
if iso_string_to_datetime(couch_modified_on) > start_time:
# skip cases modified after the script started
continue
es_modified_on = es_modified_on_by_ids.get(case_id)
if not es_modified_on or (es_modified_on != couch_modified_on):
yield (case_id, es_modified_on, couch_modified_on)
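Aside (a hedged usage sketch, not from the CommCare HQ source): the generator yields 3-tuples, so a caller consuming it directly would unpack them; the module path below is inferred from the file path above, and the domain name is a placeholder:
from corehq.apps.hqadmin.management.commands.stale_cases_in_es import (
    get_server_modified_on_for_domain,
)

# Requires a configured Django/CommCare environment; 'example-domain' is illustrative only.
for case_id, es_modified_on, couch_modified_on in get_server_modified_on_for_domain('example-domain'):
    print(case_id, es_modified_on, couch_modified_on)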
|
|
35baf55e36c55222bc93b1e6d24b34c051dca4b1
|
scripts/delete_all_models.py
|
scripts/delete_all_models.py
|
import importlib
import logging
import os
import shutil
mainapp_database = importlib.import_module('third_party.3dmr.mainapp.database')
mainapp_upload = getattr(mainapp_database, 'upload')
mainapp_models = importlib.import_module('third_party.3dmr.mainapp.models')
mainapp_model = getattr(mainapp_models, 'Model')
mainapp_location = getattr(mainapp_models, 'Location')
mainapp_change = getattr(mainapp_models, 'Change')
MODEL_DIR = getattr(importlib.import_module('third_party.3dmr.mainapp.utils'), 'MODEL_DIR')
logger = logging.getLogger(__name__)
def run(*args):
"""Will **DELETE ALL MODELS** in DB and on storage.
This is dangerous and there is no going back. Use wisely.
"""
logger.info('Deleting all models and data.')
logger.info('Clearing database model entries.')
mainapp_model.objects.all().delete()
mainapp_location.objects.all().delete()
mainapp_change.objects.all().delete()
logger.info('Deleting all model files on disk.')
failed_paths = []
for root, dirs, files in os.walk(MODEL_DIR):
for dir_name in dirs:
target_dir = os.path.join(root, dir_name)
logger.info('Deleting {}'.format(target_dir))
try:
shutil.rmtree(target_dir)
except Exception as e:
failed_paths.append(target_dir)
logger.error('Failed to delete {} for Reason: {}'.format(target_dir, e))
if failed_paths:
logger.error('Failed to delete all models on disk: {}'.format(failed_paths))
else:
logger.info('No failed deletions.')
logger.info('Delete all models job completed.')
|
Add admin script to clear all models on disk and flush mainapp_model, mainapp_latestmodel and mainapp_location tables.
|
Add admin script to clear all models on disk and flush mainapp_model, mainapp_latestmodel and mainapp_location tables.
|
Python
|
apache-2.0
|
kartta-labs/reservoir,kartta-labs/reservoir
|
Add admin script to clear all models on disk and flush mainapp_model, mainapp_latestmodel and mainapp_location tables.
|
import importlib
import logging
import os
import shutil
mainapp_database = importlib.import_module('third_party.3dmr.mainapp.database')
mainapp_upload = getattr(mainapp_database, 'upload')
mainapp_models = importlib.import_module('third_party.3dmr.mainapp.models')
mainapp_model = getattr(mainapp_models, 'Model')
mainapp_location = getattr(mainapp_models, 'Location')
mainapp_change = getattr(mainapp_models, 'Change')
MODEL_DIR = getattr(importlib.import_module('third_party.3dmr.mainapp.utils'), 'MODEL_DIR')
logger = logging.getLogger(__name__)
def run(*args):
"""Will **DELETE ALL MODELS** in DB and on storage.
This is dangerous and there is no going back. Use wisely.
"""
logger.info('Deleting all models and data.')
logger.info('Clearing database model entries.')
mainapp_model.objects.all().delete()
mainapp_location.objects.all().delete()
mainapp_change.objects.all().delete()
logger.info('Deleting all model files on disk.')
failed_paths = []
for root, dirs, files in os.walk(MODEL_DIR):
for dir_name in dirs:
target_dir = os.path.join(root, dir_name)
logger.info('Deleting {}'.format(target_dir))
try:
shutil.rmtree(target_dir)
except Exception as e:
failed_paths.append(target_dir)
logger.error('Failed to delete {} for Reason: {}'.format(target_dir, e))
if failed_paths:
logger.error('Failed to delete all models on disk: {}'.format(failed_paths))
else:
logger.info('No failed deletions.')
logger.info('Delete all models job completed.')
|
<commit_before><commit_msg>Add admin script to clear all models on disk and flush mainapp_model, mainapp_latestmodel and mainapp_location tables.<commit_after>
|
import importlib
import logging
import os
import shutil
mainapp_database = importlib.import_module('third_party.3dmr.mainapp.database')
mainapp_upload = getattr(mainapp_database, 'upload')
mainapp_models = importlib.import_module('third_party.3dmr.mainapp.models')
mainapp_model = getattr(mainapp_models, 'Model')
mainapp_location = getattr(mainapp_models, 'Location')
mainapp_change = getattr(mainapp_models, 'Change')
MODEL_DIR = getattr(importlib.import_module('third_party.3dmr.mainapp.utils'), 'MODEL_DIR')
logger = logging.getLogger(__name__)
def run(*args):
"""Will **DELETE ALL MODELS** in DB and on storage.
This is dangerous and there is no going back. Use wisely.
"""
logger.info('Deleting all models and data.')
logger.info('Clearing database model entries.')
mainapp_model.objects.all().delete()
mainapp_location.objects.all().delete()
mainapp_change.objects.all().delete()
logger.info('Deleting all model files on disk.')
failed_paths = []
for root, dirs, files in os.walk(MODEL_DIR):
for dir_name in dirs:
target_dir = os.path.join(root, dir_name)
logger.info('Deleting {}'.format(target_dir))
try:
shutil.rmtree(target_dir)
except Exception as e:
failed_paths.append(target_dir)
logger.error('Failed to delete {} for Reason: {}'.format(target_dir, e))
if failed_paths:
logger.error('Failed to delete all models on disk: {}'.format(failed_paths))
else:
logger.info('No failed deletions.')
logger.info('Delete all models job completed.')
|
Add admin script to clear all models on disk and flush mainapp_model, mainapp_latestmodel and mainapp_location tables.import importlib
import logging
import os
import shutil
mainapp_database = importlib.import_module('third_party.3dmr.mainapp.database')
mainapp_upload = getattr(mainapp_database, 'upload')
mainapp_models = importlib.import_module('third_party.3dmr.mainapp.models')
mainapp_model = getattr(mainapp_models, 'Model')
mainapp_location = getattr(mainapp_models, 'Location')
mainapp_change = getattr(mainapp_models, 'Change')
MODEL_DIR = getattr(importlib.import_module('third_party.3dmr.mainapp.utils'), 'MODEL_DIR')
logger = logging.getLogger(__name__)
def run(*args):
"""Will **DELETE ALL MODELS** in DB and on storage.
This is dangerous and there is no going back. Use wisely.
"""
logger.info('Deleting all models and data.')
logger.info('Clearing database model entries.')
mainapp_model.objects.all().delete()
mainapp_location.objects.all().delete()
mainapp_change.objects.all().delete()
logger.info('Deleting all model files on disk.')
failed_paths = []
for root, dirs, files in os.walk(MODEL_DIR):
for dir_name in dirs:
target_dir = os.path.join(root, dir_name)
logger.info('Deleting {}'.format(target_dir))
try:
shutil.rmtree(target_dir)
except Exception as e:
failed_paths.append(target_dir)
logger.error('Failed to delete {} for Reason: {}'.format(target_dir, e))
if failed_paths:
logger.error('Failed to delete all models on disk: {}'.format(failed_paths))
else:
logger.info('No failed deletions.')
logger.info('Delete all models job completed.')
|
<commit_before><commit_msg>Add admin script to clear all models on disk and flush mainapp_model, mainapp_latestmodel and mainapp_location tables.<commit_after>import importlib
import logging
import os
import shutil
mainapp_database = importlib.import_module('third_party.3dmr.mainapp.database')
mainapp_upload = getattr(mainapp_database, 'upload')
mainapp_models = importlib.import_module('third_party.3dmr.mainapp.models')
mainapp_model = getattr(mainapp_models, 'Model')
mainapp_location = getattr(mainapp_models, 'Location')
mainapp_change = getattr(mainapp_models, 'Change')
MODEL_DIR = getattr(importlib.import_module('third_party.3dmr.mainapp.utils'), 'MODEL_DIR')
logger = logging.getLogger(__name__)
def run(*args):
"""Will **DELETE ALL MODELS** in DB and on storage.
This is dangerous and there is no going back. Use wisely.
"""
logger.info('Deleting all models and data.')
logger.info('Clearing database model entries.')
mainapp_model.objects.all().delete()
mainapp_location.objects.all().delete()
mainapp_change.objects.all().delete()
logger.info('Deleting all model files on disk.')
failed_paths = []
for root, dirs, files in os.walk(MODEL_DIR):
for dir_name in dirs:
target_dir = os.path.join(root, dir_name)
logger.info('Deleting {}'.format(target_dir))
try:
shutil.rmtree(target_dir)
except Exception as e:
failed_paths.append(target_dir)
logger.error('Failed to delete {} for Reason: {}'.format(target_dir, e))
if failed_paths:
logger.error('Failed to delete all models on disk: {}'.format(failed_paths))
else:
logger.info('No failed deletions.')
logger.info('Delete all models job completed.')
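Aside (an assumption, not confirmed by the source): a module-level run(*args) under a scripts/ directory matches the django-extensions runscript convention, so the job would most likely be launched along these lines:
$ python manage.py runscript delete_all_models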
|
|
0670cc4510b7049b29d716a2485487390f975095
|
polling_stations/apps/data_collection/management/commands/import_stockton.py
|
polling_stations/apps/data_collection/management/commands/import_stockton.py
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000004"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
|
Add import script for Stockton-upon-Tees
|
Add import script for Stockton-upon-Tees
Closes #1484
|
Python
|
bsd-3-clause
|
DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations
|
Add import script for Stockton-upon-Tees
Closes #1484
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000004"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
|
<commit_before><commit_msg>Add import script for Stockton-upon-Tees
Closes #1484<commit_after>
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000004"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
|
Add import script for Stockton-upon-Tees
Closes #1484from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000004"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
|
<commit_before><commit_msg>Add import script for Stockton-upon-Tees
Closes #1484<commit_after>from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000004"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019_Stockton.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
|
|
ffdead34416dc9b2de52242503e4364d257ea619
|
tests/test_fetch_http.py
|
tests/test_fetch_http.py
|
# coding=utf-8
from __future__ import unicode_literals
import simplemediawiki
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
if sys.version_info[0] == 3:
import http.cookiejar as cookielib
elif sys.version_info[0] == 2:
import cookielib
UNICODE_TEST = 'κόσμε'
class FetchHttpTest(unittest.TestCase):
def setUp(self):
self.user_agent = simplemediawiki.build_user_agent(
'python-simplemediawiki test suite', simplemediawiki.__version__,
'https://github.com/ianweller/python-simplemediawiki')
self.mw = simplemediawiki.MediaWiki('https://httpbin.org/',
user_agent=self.user_agent)
self.data = None
def _do_post(self):
self.data = json.loads(self.mw._fetch_http('https://httpbin.org/post',
{'butts': 'lol',
'unicode': UNICODE_TEST}))
def test_get(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/get',
{'butts': 'lol',
'unicode': UNICODE_TEST},
force_get=True))
assert data['args']['format'] == 'json'
assert data['args']['unicode'] == UNICODE_TEST
def test_post(self):
self._do_post()
assert self.data is not None
assert self.data['form']['format'] == 'json'
assert self.data['form']['unicode'] == UNICODE_TEST
def test_user_agent(self):
self._do_post()
assert self.data['headers']['User-Agent'] == self.user_agent
def test_gzip(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/gzip', {},
force_get=True))
assert data['gzipped'] == True
def test_cookies(self):
self.mw._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST},
force_get=True)
data = json.loads(self.mw._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
def test_persistent_cookiejar(self):
cookiejar = cookielib.CookieJar()
mw1 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
mw1._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST}, force_get=True)
mw2 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
data = json.loads(mw2._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
print(data)
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
|
Add first test suite (in progress)
|
Add first test suite (in progress)
|
Python
|
lgpl-2.1
|
lahwaacz/python-simplemediawiki,YSelfTool/python-simplemediawiki,ianweller/python-simplemediawiki
|
Add first test suite (in progress)
|
# coding=utf-8
from __future__ import unicode_literals
import simplemediawiki
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
if sys.version_info[0] == 3:
import http.cookiejar as cookielib
elif sys.version_info[0] == 2:
import cookielib
UNICODE_TEST = 'κόσμε'
class FetchHttpTest(unittest.TestCase):
def setUp(self):
self.user_agent = simplemediawiki.build_user_agent(
'python-simplemediawiki test suite', simplemediawiki.__version__,
'https://github.com/ianweller/python-simplemediawiki')
self.mw = simplemediawiki.MediaWiki('https://httpbin.org/',
user_agent=self.user_agent)
self.data = None
def _do_post(self):
self.data = json.loads(self.mw._fetch_http('https://httpbin.org/post',
{'butts': 'lol',
'unicode': UNICODE_TEST}))
def test_get(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/get',
{'butts': 'lol',
'unicode': UNICODE_TEST},
force_get=True))
assert data['args']['format'] == 'json'
assert data['args']['unicode'] == UNICODE_TEST
def test_post(self):
self._do_post()
assert self.data is not None
assert self.data['form']['format'] == 'json'
assert self.data['form']['unicode'] == UNICODE_TEST
def test_user_agent(self):
self._do_post()
assert self.data['headers']['User-Agent'] == self.user_agent
def test_gzip(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/gzip', {},
force_get=True))
assert data['gzipped'] == True
def test_cookies(self):
self.mw._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST},
force_get=True)
data = json.loads(self.mw._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
def test_persistent_cookiejar(self):
cookiejar = cookielib.CookieJar()
mw1 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
mw1._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST}, force_get=True)
mw2 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
data = json.loads(mw2._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
print(data)
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
|
<commit_before><commit_msg>Add first test suite (in progress)<commit_after>
|
# coding=utf-8
from __future__ import unicode_literals
import simplemediawiki
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
if sys.version_info[0] == 3:
import http.cookiejar as cookielib
elif sys.version_info[0] == 2:
import cookielib
UNICODE_TEST = 'κόσμε'
class FetchHttpTest(unittest.TestCase):
def setUp(self):
self.user_agent = simplemediawiki.build_user_agent(
'python-simplemediawiki test suite', simplemediawiki.__version__,
'https://github.com/ianweller/python-simplemediawiki')
self.mw = simplemediawiki.MediaWiki('https://httpbin.org/',
user_agent=self.user_agent)
self.data = None
def _do_post(self):
self.data = json.loads(self.mw._fetch_http('https://httpbin.org/post',
{'butts': 'lol',
'unicode': UNICODE_TEST}))
def test_get(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/get',
{'butts': 'lol',
'unicode': UNICODE_TEST},
force_get=True))
assert data['args']['format'] == 'json'
assert data['args']['unicode'] == UNICODE_TEST
def test_post(self):
self._do_post()
assert self.data is not None
assert self.data['form']['format'] == 'json'
assert self.data['form']['unicode'] == UNICODE_TEST
def test_user_agent(self):
self._do_post()
assert self.data['headers']['User-Agent'] == self.user_agent
def test_gzip(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/gzip', {},
force_get=True))
assert data['gzipped'] == True
def test_cookies(self):
self.mw._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST},
force_get=True)
data = json.loads(self.mw._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
def test_persistent_cookiejar(self):
cookiejar = cookielib.CookieJar()
mw1 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
mw1._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST}, force_get=True)
mw2 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
data = json.loads(mw2._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
print(data)
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
|
Add first test suite (in progress)# coding=utf-8
from __future__ import unicode_literals
import simplemediawiki
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
if sys.version_info[0] == 3:
import http.cookiejar as cookielib
elif sys.version_info[0] == 2:
import cookielib
UNICODE_TEST = 'κόσμε'
class FetchHttpTest(unittest.TestCase):
def setUp(self):
self.user_agent = simplemediawiki.build_user_agent(
'python-simplemediawiki test suite', simplemediawiki.__version__,
'https://github.com/ianweller/python-simplemediawiki')
self.mw = simplemediawiki.MediaWiki('https://httpbin.org/',
user_agent=self.user_agent)
self.data = None
def _do_post(self):
self.data = json.loads(self.mw._fetch_http('https://httpbin.org/post',
{'butts': 'lol',
'unicode': UNICODE_TEST}))
def test_get(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/get',
{'butts': 'lol',
'unicode': UNICODE_TEST},
force_get=True))
assert data['args']['format'] == 'json'
assert data['args']['unicode'] == UNICODE_TEST
def test_post(self):
self._do_post()
assert self.data is not None
assert self.data['form']['format'] == 'json'
assert self.data['form']['unicode'] == UNICODE_TEST
def test_user_agent(self):
self._do_post()
assert self.data['headers']['User-Agent'] == self.user_agent
def test_gzip(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/gzip', {},
force_get=True))
assert data['gzipped'] == True
def test_cookies(self):
self.mw._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST},
force_get=True)
data = json.loads(self.mw._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
def test_persistent_cookiejar(self):
cookiejar = cookielib.CookieJar()
mw1 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
mw1._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST}, force_get=True)
mw2 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
data = json.loads(mw2._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
print(data)
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
|
<commit_before><commit_msg>Add first test suite (in progress)<commit_after># coding=utf-8
from __future__ import unicode_literals
import simplemediawiki
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
if sys.version_info[0] == 3:
import http.cookiejar as cookielib
elif sys.version_info[0] == 2:
import cookielib
UNICODE_TEST = 'κόσμε'
class FetchHttpTest(unittest.TestCase):
def setUp(self):
self.user_agent = simplemediawiki.build_user_agent(
'python-simplemediawiki test suite', simplemediawiki.__version__,
'https://github.com/ianweller/python-simplemediawiki')
self.mw = simplemediawiki.MediaWiki('https://httpbin.org/',
user_agent=self.user_agent)
self.data = None
def _do_post(self):
self.data = json.loads(self.mw._fetch_http('https://httpbin.org/post',
{'butts': 'lol',
'unicode': UNICODE_TEST}))
def test_get(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/get',
{'butts': 'lol',
'unicode': UNICODE_TEST},
force_get=True))
assert data['args']['format'] == 'json'
assert data['args']['unicode'] == UNICODE_TEST
def test_post(self):
self._do_post()
assert self.data is not None
assert self.data['form']['format'] == 'json'
assert self.data['form']['unicode'] == UNICODE_TEST
def test_user_agent(self):
self._do_post()
assert self.data['headers']['User-Agent'] == self.user_agent
def test_gzip(self):
data = json.loads(self.mw._fetch_http('https://httpbin.org/gzip', {},
force_get=True))
assert data['gzipped'] == True
def test_cookies(self):
self.mw._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST},
force_get=True)
data = json.loads(self.mw._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
def test_persistent_cookiejar(self):
cookiejar = cookielib.CookieJar()
mw1 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
mw1._fetch_http('https://httpbin.org/cookies/set',
{'unicode': UNICODE_TEST}, force_get=True)
mw2 = simplemediawiki.MediaWiki('https://httpbin.org/',
cookiejar=cookiejar,
user_agent=self.user_agent)
data = json.loads(mw2._fetch_http('https://httpbin.org/cookies', {},
force_get=True))
print(data)
assert 'unicode' in data['cookies']
assert data['cookies']['unicode'] == UNICODE_TEST
|
|
1ae25b4c129e156cad3d0ff18f37bdcd121be207
|
tests/contrib/test_mobile.py
|
tests/contrib/test_mobile.py
|
from django.test import TestCase
from django.test.client import RequestFactory
from opps.contrib.mobile import template
from opps.contrib.mobile.middleware import (
MobileDetectionMiddleware, MobileRedirectMiddleware
)
class TestMobileTemplatesDir(TestCase):
def setUp(self):
self.detection_middleware = MobileDetectionMiddleware()
self.factory = RequestFactory()
self.template_loader = template.Loader()
def test_useragent_based_templatedirs(self):
# Override the TEMPLATE_LOADERS and MIDDLEWARE_CLASSES settings
# to use the middlewares in ``opps.contrib.mobile.middleware``
# and the ``opps.contrib.mobile.template.Loader``
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'opps.contrib.mobile.middleware.MobileDetectionMiddleware',
'opps.contrib.mobile.middleware.MobileRedirectMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
TEMPLATE_LOADERS = (
'opps.contrib.mobile.template.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS_MOBILE = ('mobile-templates',)
TEMPLATE_DIRS_WEB = ('web-templates',)
custom_settings = self.settings(
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
TEMPLATE_LOADERS=TEMPLATE_LOADERS,
TEMPLATE_DIRS_MOBILE=TEMPLATE_DIRS_MOBILE,
TEMPLATE_DIRS_WEB=TEMPLATE_DIRS_WEB,
OPPS_CHECK_MOBILE=True,
OPPS_DOMAIN_MOBILE = 'm.testserver'
)
with custom_settings:
mobile_request = self.factory.get('/', HTTP_USER_AGENT='mobi')
desktop_request = self.factory.get('/', HTTP_USER_AGENT='Mozilla/5.0')
self.detection_middleware.process_request(desktop_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_WEB).next()
)
self.detection_middleware.process_request(mobile_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_MOBILE).next()
)
|
Test if mobile users get the right templates directory
|
Test if mobile users get the right templates directory
|
Python
|
mit
|
jeanmask/opps,williamroot/opps,opps/opps,YACOWS/opps,YACOWS/opps,opps/opps,williamroot/opps,williamroot/opps,jeanmask/opps,jeanmask/opps,opps/opps,williamroot/opps,opps/opps,jeanmask/opps,YACOWS/opps,YACOWS/opps
|
Test if mobile users get the right templates directory
|
from django.test import TestCase
from django.test.client import RequestFactory
from opps.contrib.mobile import template
from opps.contrib.mobile.middleware import (
MobileDetectionMiddleware, MobileRedirectMiddleware
)
class TestMobileTemplatesDir(TestCase):
def setUp(self):
self.detection_middleware = MobileDetectionMiddleware()
self.factory = RequestFactory()
self.template_loader = template.Loader()
def test_useragent_based_templatedirs(self):
# Override the TEMPLATE_LOADERS and MIDDLEWARE_CLASSES settings
# to use the middlewares in ``opps.contrib.mobile.middleware``
# and the ``opps.contrib.mobile.template.Loader``
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'opps.contrib.mobile.middleware.MobileDetectionMiddleware',
'opps.contrib.mobile.middleware.MobileRedirectMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
TEMPLATE_LOADERS = (
'opps.contrib.mobile.template.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS_MOBILE = ('mobile-templates',)
TEMPLATE_DIRS_WEB = ('web-templates',)
custom_settings = self.settings(
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
TEMPLATE_LOADERS=TEMPLATE_LOADERS,
TEMPLATE_DIRS_MOBILE=TEMPLATE_DIRS_MOBILE,
TEMPLATE_DIRS_WEB=TEMPLATE_DIRS_WEB,
OPPS_CHECK_MOBILE=True,
OPPS_DOMAIN_MOBILE = 'm.testserver'
)
with custom_settings:
mobile_request = self.factory.get('/', HTTP_USER_AGENT='mobi')
desktop_request = self.factory.get('/', HTTP_USER_AGENT='Mozilla/5.0')
self.detection_middleware.process_request(desktop_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_WEB).next()
)
self.detection_middleware.process_request(mobile_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_MOBILE).next()
)
|
<commit_before><commit_msg>Test if mobile users get the right templates directory<commit_after>
|
from django.test import TestCase
from django.test.client import RequestFactory
from opps.contrib.mobile import template
from opps.contrib.mobile.middleware import (
MobileDetectionMiddleware, MobileRedirectMiddleware
)
class TestMobileTemplatesDir(TestCase):
def setUp(self):
self.detection_middleware = MobileDetectionMiddleware()
self.factory = RequestFactory()
self.template_loader = template.Loader()
def test_useragent_based_templatedirs(self):
# Override the TEMPLATE_LOADERS and MIDDLEWARE_CLASSES settings
# to use the middlewares in ``opps.contrib.mobile.middleware``
# and the ``opps.contrib.mobile.template.Loader``
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'opps.contrib.mobile.middleware.MobileDetectionMiddleware',
'opps.contrib.mobile.middleware.MobileRedirectMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
TEMPLATE_LOADERS = (
'opps.contrib.mobile.template.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS_MOBILE = ('mobile-templates',)
TEMPLATE_DIRS_WEB = ('web-templates',)
custom_settings = self.settings(
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
TEMPLATE_LOADERS=TEMPLATE_LOADERS,
TEMPLATE_DIRS_MOBILE=TEMPLATE_DIRS_MOBILE,
TEMPLATE_DIRS_WEB=TEMPLATE_DIRS_WEB,
OPPS_CHECK_MOBILE=True,
OPPS_DOMAIN_MOBILE = 'm.testserver'
)
with custom_settings:
mobile_request = self.factory.get('/', HTTP_USER_AGENT='mobi')
desktop_request = self.factory.get('/', HTTP_USER_AGENT='Mozilla/5.0')
self.detection_middleware.process_request(desktop_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_WEB).next()
)
self.detection_middleware.process_request(mobile_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_MOBILE).next()
)
|
Test if mobile users get the right templates directoryfrom django.test import TestCase
from django.test.client import RequestFactory
from opps.contrib.mobile import template
from opps.contrib.mobile.middleware import (
MobileDetectionMiddleware, MobileRedirectMiddleware
)
class TestMobileTemplatesDir(TestCase):
def setUp(self):
self.detection_middleware = MobileDetectionMiddleware()
self.factory = RequestFactory()
self.template_loader = template.Loader()
def test_useragent_based_templatedirs(self):
# Override the TEMPLATE_LOADERS and MIDDLEWARE_CLASSES settings
# to use the middlewares in ``opps.contrib.mobile.middleware``
# and the ``opps.contrib.mobile.template.Loader``
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'opps.contrib.mobile.middleware.MobileDetectionMiddleware',
'opps.contrib.mobile.middleware.MobileRedirectMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
TEMPLATE_LOADERS = (
'opps.contrib.mobile.template.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS_MOBILE = ('mobile-templates',)
TEMPLATE_DIRS_WEB = ('web-templates',)
custom_settings = self.settings(
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
TEMPLATE_LOADERS=TEMPLATE_LOADERS,
TEMPLATE_DIRS_MOBILE=TEMPLATE_DIRS_MOBILE,
TEMPLATE_DIRS_WEB=TEMPLATE_DIRS_WEB,
OPPS_CHECK_MOBILE=True,
OPPS_DOMAIN_MOBILE = 'm.testserver'
)
with custom_settings:
mobile_request = self.factory.get('/', HTTP_USER_AGENT='mobi')
desktop_request = self.factory.get('/', HTTP_USER_AGENT='Mozilla/5.0')
self.detection_middleware.process_request(desktop_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_WEB).next()
)
self.detection_middleware.process_request(mobile_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_MOBILE).next()
)
|
<commit_before><commit_msg>Test if mobile users get the right templates directory<commit_after>from django.test import TestCase
from django.test.client import RequestFactory
from opps.contrib.mobile import template
from opps.contrib.mobile.middleware import (
MobileDetectionMiddleware, MobileRedirectMiddleware
)
class TestMobileTemplatesDir(TestCase):
def setUp(self):
self.detection_middleware = MobileDetectionMiddleware()
self.factory = RequestFactory()
self.template_loader = template.Loader()
def test_useragent_based_templatedirs(self):
# Override the TEMPLATE_LOADERS and MIDDLEWARE_CLASSES settings
# to use the middlewares in ``opps.contrib.mobile.middleware``
# and the ``opps.contrib.mobile.template.Loader``
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'opps.contrib.mobile.middleware.MobileDetectionMiddleware',
'opps.contrib.mobile.middleware.MobileRedirectMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
TEMPLATE_LOADERS = (
'opps.contrib.mobile.template.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS_MOBILE = ('mobile-templates',)
TEMPLATE_DIRS_WEB = ('web-templates',)
custom_settings = self.settings(
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
TEMPLATE_LOADERS=TEMPLATE_LOADERS,
TEMPLATE_DIRS_MOBILE=TEMPLATE_DIRS_MOBILE,
TEMPLATE_DIRS_WEB=TEMPLATE_DIRS_WEB,
OPPS_CHECK_MOBILE=True,
OPPS_DOMAIN_MOBILE = 'm.testserver'
)
with custom_settings:
mobile_request = self.factory.get('/', HTTP_USER_AGENT='mobi')
desktop_request = self.factory.get('/', HTTP_USER_AGENT='Mozilla/5.0')
self.detection_middleware.process_request(desktop_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_WEB).next()
)
self.detection_middleware.process_request(mobile_request)
self.assertEqual(
self.template_loader.get_template_sources('index.html').next(),
self.template_loader.get_template_sources('index.html', TEMPLATE_DIRS_MOBILE).next()
)
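Aside: the .next() calls above use the Python 2 generator protocol; on Python 3 the equivalent call (not part of the original test) would go through the built-in:
next(self.template_loader.get_template_sources('index.html'))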
|
|
ac1dac3638b40f4ecdf5f82e52b5f93d4e74f9ef
|
tests/list_files_requests.py
|
tests/list_files_requests.py
|
import os
import requests
from time import time
from typing import Iterator
import dropbox
def t():
"""Return string of elapsed time since start in seconds."""
return '{:.2f}:'.format(time() - start)
def list_files(token: str,
member_id: str) -> Iterator[dropbox.files.Metadata]:
"""Recursively walk the folder tree, yielding files."""
headers = {'Authorization': 'Bearer ' + token,
'Dropbox-API-Select-User': member_id}
list_folder = 'https://api.dropboxapi.com/2/files/list_folder'
list_folder_continue = 'https://api.dropboxapi.com/2/files/list_folder/continue'
post_data = {'path': '', 'recursive': True}
print(f'Requesting {list_folder} with {post_data}')
r = requests.post(list_folder, headers=headers, json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data = {'cursor': response['cursor']}
has_more = response['has_more']
while has_more:
print(f'Requesting {list_folder_continue}')
r = requests.post(list_folder_continue, headers=headers,
json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data['cursor'] = response['cursor']
has_more = response['has_more']
start = time()
token = os.environ['DROPBOX_TEAM_TOKEN']
team = dropbox.DropboxTeam(token)
members_list = team.team_members_list()
# Get the first member
member = members_list.members[0]
print(t(), 'Listing files for', member.profile.name.display_name)
for entry in list_files(token, member.profile.team_member_id):
print('Found ' + entry['path_display'])
|
Test for using HTTP API for recursive folder listing
|
Test for using HTTP API for recursive folder listing
|
Python
|
apache-2.0
|
blokeley/dfb,blokeley/backup_dropbox
|
Test for using HTTP API for recursive folder listing
|
import os
import requests
from time import time
from typing import Iterator
import dropbox
def t():
"""Return string of elapsed time since start in seconds."""
return '{:.2f}:'.format(time() - start)
def list_files(token: str,
member_id: str) -> Iterator[dropbox.files.Metadata]:
"""Recursively walk the folder tree, yielding files."""
headers = {'Authorization': 'Bearer ' + token,
'Dropbox-API-Select-User': member_id}
list_folder = 'https://api.dropboxapi.com/2/files/list_folder'
list_folder_continue = 'https://api.dropboxapi.com/2/files/list_folder/continue'
post_data = {'path': '', 'recursive': True}
print(f'Requesting {list_folder} with {post_data}')
r = requests.post(list_folder, headers=headers, json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data = {'cursor': response['cursor']}
has_more = response['has_more']
while has_more:
print(f'Requesting {list_folder_continue}')
r = requests.post(list_folder_continue, headers=headers,
json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data['cursor'] = response['cursor']
has_more = response['has_more']
start = time()
token = os.environ['DROPBOX_TEAM_TOKEN']
team = dropbox.DropboxTeam(token)
members_list = team.team_members_list()
# Get the first member
member = members_list.members[0]
print(t(), 'Listing files for', member.profile.name.display_name)
for entry in list_files(token, member.profile.team_member_id):
print('Found ' + entry['path_display'])
|
<commit_before><commit_msg>Test for using HTTP API for recursive folder listing<commit_after>
|
import os
import requests
from time import time
from typing import Iterator
import dropbox
def t():
"""Return string of elapsed time since start in seconds."""
return '{:.2f}:'.format(time() - start)
def list_files(token: str,
member_id: str) -> Iterator[dropbox.files.Metadata]:
"""Recursively walk the folder tree, yielding files."""
headers = {'Authorization': 'Bearer ' + token,
'Dropbox-API-Select-User': member_id}
list_folder = 'https://api.dropboxapi.com/2/files/list_folder'
list_folder_continue = 'https://api.dropboxapi.com/2/files/list_folder/continue'
post_data = {'path': '', 'recursive': True}
print(f'Requesting {list_folder} with {post_data}')
r = requests.post(list_folder, headers=headers, json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data = {'cursor': response['cursor']}
has_more = response['has_more']
while has_more:
print(f'Requesting {list_folder_continue}')
r = requests.post(list_folder_continue, headers=headers,
json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data['cursor'] = response['cursor']
has_more = response['has_more']
start = time()
token = os.environ['DROPBOX_TEAM_TOKEN']
team = dropbox.DropboxTeam(token)
members_list = team.team_members_list()
# Get the first member
member = members_list.members[0]
print(t(), 'Listing files for', member.profile.name.display_name)
for entry in list_files(token, member.profile.team_member_id):
print('Found ' + entry['path_display'])
|
Test for using HTTP API for recursive folder listingimport os
import requests
from time import time
from typing import Iterator
import dropbox
def t():
"""Return string of elapsed time since start in seconds."""
return '{:.2f}:'.format(time() - start)
def list_files(token: str,
member_id: str) -> Iterator[dropbox.files.Metadata]:
"""Recursively walk the folder tree, yielding files."""
headers = {'Authorization': 'Bearer ' + token,
'Dropbox-API-Select-User': member_id}
list_folder = 'https://api.dropboxapi.com/2/files/list_folder'
list_folder_continue = 'https://api.dropboxapi.com/2/files/list_folder/continue'
post_data = {'path': '', 'recursive': True}
print(f'Requesting {list_folder} with {post_data}')
r = requests.post(list_folder, headers=headers, json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data = {'cursor': response['cursor']}
has_more = response['has_more']
while has_more:
print(f'Requesting {list_folder_continue}')
r = requests.post(list_folder_continue, headers=headers,
json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data['cursor'] = response['cursor']
has_more = response['has_more']
start = time()
token = os.environ['DROPBOX_TEAM_TOKEN']
team = dropbox.DropboxTeam(token)
members_list = team.team_members_list()
# Get the first member
member = members_list.members[0]
print(t(), 'Listing files for', member.profile.name.display_name)
for entry in list_files(token, member.profile.team_member_id):
print('Found ' + entry['path_display'])
|
<commit_before><commit_msg>Test for using HTTP API for recursive folder listing<commit_after>import os
import requests
from time import time
from typing import Iterator
import dropbox
def t():
"""Return string of elapsed time since start in seconds."""
return '{:.2f}:'.format(time() - start)
def list_files(token: str,
member_id: str) -> Iterator[dropbox.files.Metadata]:
"""Recursively walk the folder tree, yielding files."""
headers = {'Authorization': 'Bearer ' + token,
'Dropbox-API-Select-User': member_id}
list_folder = 'https://api.dropboxapi.com/2/files/list_folder'
list_folder_continue = 'https://api.dropboxapi.com/2/files/list_folder/continue'
post_data = {'path': '', 'recursive': True}
print(f'Requesting {list_folder} with {post_data}')
r = requests.post(list_folder, headers=headers, json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data = {'cursor': response['cursor']}
has_more = response['has_more']
while has_more:
print(f'Requesting {list_folder_continue}')
r = requests.post(list_folder_continue, headers=headers,
json=post_data)
r.raise_for_status()
response = r.json()
for entry in response['entries']:
yield entry
post_data['cursor'] = response['cursor']
has_more = response['has_more']
start = time()
token = os.environ['DROPBOX_TEAM_TOKEN']
team = dropbox.DropboxTeam(token)
members_list = team.team_members_list()
# Get the first member
member = members_list.members[0]
print(t(), 'Listing files for', member.profile.name.display_name)
for entry in list_files(token, member.profile.team_member_id):
print('Found ' + entry['path_display'])
|
|
b4cd1a63148e0fca75781a05ed11541d1e6c87fa
|
django_lightweight_queue/management/commands/queue_worker.py
|
django_lightweight_queue/management/commands/queue_worker.py
|
import sys
import logging
import argparse
from django.core.management.base import BaseCommand, CommandError
from ...worker import Worker
class Command(BaseCommand):
help = "Run an individual queue worker"
def add_arguments(self, parser):
parser.add_argument(
'queue',
help="queue for which this is a worker",
)
parser.add_argument(
'number',
type=int,
help="worker number within this queue",
)
parser.add_argument(
'--prometheus-port',
type=int,
help="port number on which to run Prometheus",
)
parser.add_argument(
'--log-level',
choices=[x.lower() for x in logging._nameToLevel.keys()],
default='warning',
help="log level to set",
)
parser.add_argument(
'--log-file',
type=str,
help="log destination",
)
parser.add_argument(
'--touch-file',
type=argparse.FileType('ab'),
default=None,
help="file to touch after jobs",
)
def handle(
self,
queue,
number,
prometheus_port,
log_level,
log_file,
touch_file,
**options
):
worker = Worker(
queue=queue,
worker_num=number,
prometheus_port=prometheus_port,
log_level=logging._nameToLevel[log_level.upper()],
log_file=log_file,
touch_file=touch_file,
)
worker.run()
|
Add basic individual queue worker command
|
Add basic individual queue worker command
|
Python
|
bsd-3-clause
|
thread/django-lightweight-queue,thread/django-lightweight-queue
|
Add basic individual queue worker command
|
import sys
import logging
import argparse
from django.core.management.base import BaseCommand, CommandError
from ...worker import Worker
class Command(BaseCommand):
help = "Run an individual queue worker"
def add_arguments(self, parser):
parser.add_argument(
'queue',
help="queue for which this is a worker",
)
parser.add_argument(
'number',
type=int,
help="worker number within this queue",
)
parser.add_argument(
'--prometheus-port',
type=int,
help="port number on which to run Prometheus",
)
parser.add_argument(
'--log-level',
choices=[x.lower() for x in logging._nameToLevel.keys()],
default='warning',
help="log level to set",
)
parser.add_argument(
'--log-file',
type=str,
help="log destination",
)
parser.add_argument(
'--touch-file',
type=argparse.FileType('ab'),
default=None,
help="file to touch after jobs",
)
def handle(
self,
queue,
number,
prometheus_port,
log_level,
log_file,
touch_file,
**options
):
worker = Worker(
queue=queue,
worker_num=number,
prometheus_port=prometheus_port,
log_level=logging._nameToLevel[log_level.upper()],
log_file=log_file,
touch_file=touch_file,
)
worker.run()
|
<commit_before><commit_msg>Add basic individual queue worker command<commit_after>
|
import sys
import logging
import argparse
from django.core.management.base import BaseCommand, CommandError
from ...worker import Worker
class Command(BaseCommand):
help = "Run an individual queue worker"
def add_arguments(self, parser):
parser.add_argument(
'queue',
help="queue for which this is a worker",
)
parser.add_argument(
'number',
type=int,
help="worker number within this queue",
)
parser.add_argument(
'--prometheus-port',
type=int,
help="port number on which to run Prometheus",
)
parser.add_argument(
'--log-level',
choices=[x.lower() for x in logging._nameToLevel.keys()],
default='warning',
help="log level to set",
)
parser.add_argument(
'--log-file',
type=str,
help="log destination",
)
parser.add_argument(
'--touch-file',
type=argparse.FileType('ab'),
default=None,
help="file to touch after jobs",
)
def handle(
self,
queue,
number,
prometheus_port,
log_level,
log_file,
touch_file,
**options
):
worker = Worker(
queue=queue,
worker_num=number,
prometheus_port=prometheus_port,
log_level=logging._nameToLevel[log_level.upper()],
log_file=log_file,
touch_file=touch_file,
)
worker.run()
|
Add basic individual queue worker commandimport sys
import logging
import argparse
from django.core.management.base import BaseCommand, CommandError
from ...worker import Worker
class Command(BaseCommand):
help = "Run an individual queue worker"
def add_arguments(self, parser):
parser.add_argument(
'queue',
help="queue for which this is a worker",
)
parser.add_argument(
'number',
type=int,
help="worker number within this queue",
)
parser.add_argument(
'--prometheus-port',
type=int,
help="port number on which to run Prometheus",
)
parser.add_argument(
'--log-level',
choices=[x.lower() for x in logging._nameToLevel.keys()],
default='warning',
help="log level to set",
)
parser.add_argument(
'--log-file',
type=str,
help="log destination",
)
parser.add_argument(
'--touch-file',
type=argparse.FileType('ab'),
default=None,
help="file to touch after jobs",
)
def handle(
self,
queue,
number,
prometheus_port,
log_level,
log_file,
touch_file,
**options
):
worker = Worker(
queue=queue,
worker_num=number,
prometheus_port=prometheus_port,
log_level=logging._nameToLevel[log_level.upper()],
log_file=log_file,
touch_file=touch_file,
)
worker.run()
|
<commit_before><commit_msg>Add basic individual queue worker command<commit_after>import sys
import logging
import argparse
from django.core.management.base import BaseCommand, CommandError
from ...worker import Worker
class Command(BaseCommand):
help = "Run an individual queue worker"
def add_arguments(self, parser):
parser.add_argument(
'queue',
help="queue for which this is a worker",
)
parser.add_argument(
'number',
type=int,
help="worker number within this queue",
)
parser.add_argument(
'--prometheus-port',
type=int,
help="port number on which to run Prometheus",
)
parser.add_argument(
'--log-level',
choices=[x.lower() for x in logging._nameToLevel.keys()],
default='warning',
help="log level to set",
)
parser.add_argument(
'--log-file',
type=str,
help="log destination",
)
parser.add_argument(
'--touch-file',
type=argparse.FileType('ab'),
default=None,
help="file to touch after jobs",
)
def handle(
self,
queue,
number,
prometheus_port,
log_level,
log_file,
touch_file,
**options
):
worker = Worker(
queue=queue,
worker_num=number,
prometheus_port=prometheus_port,
log_level=logging._nameToLevel[log_level.upper()],
log_file=log_file,
touch_file=touch_file,
)
worker.run()
|
|
a86f31954ffbb5708d5a4d0608bc4611f80ff2ff
|
tsparser/tests/parser/imu.py
|
tsparser/tests/parser/imu.py
|
from tsparser.parser import imu
from tsparser.tests.parser import ParserTestCase, DEFAULT_TIMESTAMP
class TestIMU(ParserTestCase):
ex_data = {'timestamp': DEFAULT_TIMESTAMP, 'pressure': 3981106,
'gyro_x': -413, 'gyro_y': -1286, 'gyro_z': -2545,
'accel_x': 14400, 'accel_y': 3328, 'accel_z': 5440,
'magnet_x': 13310, 'magnet_y': -32001, 'magnet_z': 5118}
lines = ['$GYRO,-413,-1286,-2545', '$ACCEL,14400,3328,5440',
'$MAGNET,13310,-32001,5118', '$MBAR,3981106']
def test_imu_parser(self):
"""Test IMUParser with standard output"""
for line in self.lines[:3]:
self.parse_line(line)
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[3])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_different_order(self):
"""Test IMUParser with input in different order"""
self.parse_line(self.lines[1])
self.parse_line(self.lines[3])
self.parse_line(self.lines[2])
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[0])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_invalid_data(self):
"""Test IMUParser with invalid input"""
self.assertRaises(ValueError, self.parse_line, '$GYRO,111')
self.assertRaises(ValueError, self.parse_line, '$GYRO,a,b,c')
self.assertRaises(ValueError, self.parse_line, '$ACCEL,111,-1,222,4')
self.assertRaises(ValueError, self.parse_line, '$MAGNET,82,222')
self.assertRaises(ValueError, self.parse_line, '$MBAR,1,2')
|
Add unit tests to IMUParser
|
Add unit tests to IMUParser
|
Python
|
mit
|
m4tx/techswarm-receiver
|
Add unit tests to IMUParser
|
from tsparser.parser import imu
from tsparser.tests.parser import ParserTestCase, DEFAULT_TIMESTAMP
class TestIMU(ParserTestCase):
ex_data = {'timestamp': DEFAULT_TIMESTAMP, 'pressure': 3981106,
'gyro_x': -413, 'gyro_y': -1286, 'gyro_z': -2545,
'accel_x': 14400, 'accel_y': 3328, 'accel_z': 5440,
'magnet_x': 13310, 'magnet_y': -32001, 'magnet_z': 5118}
lines = ['$GYRO,-413,-1286,-2545', '$ACCEL,14400,3328,5440',
'$MAGNET,13310,-32001,5118', '$MBAR,3981106']
def test_imu_parser(self):
"""Test IMUParser with standard output"""
for line in self.lines[:3]:
self.parse_line(line)
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[3])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_different_order(self):
"""Test IMUParser with input in different order"""
self.parse_line(self.lines[1])
self.parse_line(self.lines[3])
self.parse_line(self.lines[2])
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[0])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_invalid_data(self):
"""Test IMUParser with invalid input"""
self.assertRaises(ValueError, self.parse_line, '$GYRO,111')
self.assertRaises(ValueError, self.parse_line, '$GYRO,a,b,c')
self.assertRaises(ValueError, self.parse_line, '$ACCEL,111,-1,222,4')
self.assertRaises(ValueError, self.parse_line, '$MAGNET,82,222')
self.assertRaises(ValueError, self.parse_line, '$MBAR,1,2')
|
<commit_before><commit_msg>Add unit tests to IMUParser<commit_after>
|
from tsparser.parser import imu
from tsparser.tests.parser import ParserTestCase, DEFAULT_TIMESTAMP
class TestIMU(ParserTestCase):
ex_data = {'timestamp': DEFAULT_TIMESTAMP, 'pressure': 3981106,
'gyro_x': -413, 'gyro_y': -1286, 'gyro_z': -2545,
'accel_x': 14400, 'accel_y': 3328, 'accel_z': 5440,
'magnet_x': 13310, 'magnet_y': -32001, 'magnet_z': 5118}
lines = ['$GYRO,-413,-1286,-2545', '$ACCEL,14400,3328,5440',
'$MAGNET,13310,-32001,5118', '$MBAR,3981106']
def test_imu_parser(self):
"""Test IMUParser with standard output"""
for line in self.lines[:3]:
self.parse_line(line)
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[3])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_different_order(self):
"""Test IMUParser with input in different order"""
self.parse_line(self.lines[1])
self.parse_line(self.lines[3])
self.parse_line(self.lines[2])
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[0])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_invalid_data(self):
"""Test IMUParser with invalid input"""
self.assertRaises(ValueError, self.parse_line, '$GYRO,111')
self.assertRaises(ValueError, self.parse_line, '$GYRO,a,b,c')
self.assertRaises(ValueError, self.parse_line, '$ACCEL,111,-1,222,4')
self.assertRaises(ValueError, self.parse_line, '$MAGNET,82,222')
self.assertRaises(ValueError, self.parse_line, '$MBAR,1,2')
|
Add unit tests to IMUParserfrom tsparser.parser import imu
from tsparser.tests.parser import ParserTestCase, DEFAULT_TIMESTAMP
class TestIMU(ParserTestCase):
ex_data = {'timestamp': DEFAULT_TIMESTAMP, 'pressure': 3981106,
'gyro_x': -413, 'gyro_y': -1286, 'gyro_z': -2545,
'accel_x': 14400, 'accel_y': 3328, 'accel_z': 5440,
'magnet_x': 13310, 'magnet_y': -32001, 'magnet_z': 5118}
lines = ['$GYRO,-413,-1286,-2545', '$ACCEL,14400,3328,5440',
'$MAGNET,13310,-32001,5118', '$MBAR,3981106']
def test_imu_parser(self):
"""Test IMUParser with standard output"""
for line in self.lines[:3]:
self.parse_line(line)
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[3])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_different_order(self):
"""Test IMUParser with input in different order"""
self.parse_line(self.lines[1])
self.parse_line(self.lines[3])
self.parse_line(self.lines[2])
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[0])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_invalid_data(self):
"""Test IMUParser with invalid input"""
self.assertRaises(ValueError, self.parse_line, '$GYRO,111')
self.assertRaises(ValueError, self.parse_line, '$GYRO,a,b,c')
self.assertRaises(ValueError, self.parse_line, '$ACCEL,111,-1,222,4')
self.assertRaises(ValueError, self.parse_line, '$MAGNET,82,222')
self.assertRaises(ValueError, self.parse_line, '$MBAR,1,2')
|
<commit_before><commit_msg>Add unit tests to IMUParser<commit_after>from tsparser.parser import imu
from tsparser.tests.parser import ParserTestCase, DEFAULT_TIMESTAMP
class TestIMU(ParserTestCase):
ex_data = {'timestamp': DEFAULT_TIMESTAMP, 'pressure': 3981106,
'gyro_x': -413, 'gyro_y': -1286, 'gyro_z': -2545,
'accel_x': 14400, 'accel_y': 3328, 'accel_z': 5440,
'magnet_x': 13310, 'magnet_y': -32001, 'magnet_z': 5118}
lines = ['$GYRO,-413,-1286,-2545', '$ACCEL,14400,3328,5440',
'$MAGNET,13310,-32001,5118', '$MBAR,3981106']
def test_imu_parser(self):
"""Test IMUParser with standard output"""
for line in self.lines[:3]:
self.parse_line(line)
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[3])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_different_order(self):
"""Test IMUParser with input in different order"""
self.parse_line(self.lines[1])
self.parse_line(self.lines[3])
self.parse_line(self.lines[2])
self.assertEqual(self.send_data_mock.called, False)
self.parse_line(self.lines[0])
self.send_data_mock.assert_called_with(self.ex_data, imu.IMUParser.url)
def test_imu_parser_invalid_data(self):
"""Test IMUParser with invalid input"""
self.assertRaises(ValueError, self.parse_line, '$GYRO,111')
self.assertRaises(ValueError, self.parse_line, '$GYRO,a,b,c')
self.assertRaises(ValueError, self.parse_line, '$ACCEL,111,-1,222,4')
self.assertRaises(ValueError, self.parse_line, '$MAGNET,82,222')
self.assertRaises(ValueError, self.parse_line, '$MBAR,1,2')
|
|
ec59ed5c360d0d455c4623425271df3fffecbf82
|
test/test_cs.py
|
test/test_cs.py
|
import pytest
from pml import cs
class InvalidControlSystem(cs.ControlSystem):
"""
Extends ControlSystem without implementing required methods.
"""
def __init__(self):
pass
def test_ControlSystem_throws_NotImplementedError():
with pytest.raises(NotImplementedError):
cs.ControlSystem()
def test_InvalidControlSystem_throws_NotImplementedError():
ics = InvalidControlSystem()
with pytest.raises(NotImplementedError):
ics.get('dummy')
with pytest.raises(NotImplementedError):
ics.put('dummy', 1)
def test_NullControlSystem_throws_no_errors():
ncs = cs.NullControlSystem()
ncs.get('dummy')
ncs.put('dummy', 1)
|
Add simple tests for pml/cs.py.
|
Add simple tests for pml/cs.py.
|
Python
|
apache-2.0
|
willrogers/pml,willrogers/pml
|
Add simple tests for pml/cs.py.
|
import pytest
from pml import cs
class InvalidControlSystem(cs.ControlSystem):
"""
Extends ControlSystem without implementing required methods.
"""
def __init__(self):
pass
def test_ControlSystem_throws_NotImplementedError():
with pytest.raises(NotImplementedError):
cs.ControlSystem()
def test_InvalidControlSystem_throws_NotImplementedError():
ics = InvalidControlSystem()
with pytest.raises(NotImplementedError):
ics.get('dummy')
with pytest.raises(NotImplementedError):
ics.put('dummy', 1)
def test_NullControlSystem_throws_no_errors():
ncs = cs.NullControlSystem()
ncs.get('dummy')
ncs.put('dummy', 1)
|
<commit_before><commit_msg>Add simple tests for pml/cs.py.<commit_after>
|
import pytest
from pml import cs
class InvalidControlSystem(cs.ControlSystem):
"""
Extends ControlSystem without implementing required methods.
"""
def __init__(self):
pass
def test_ControlSystem_throws_NotImplementedError():
with pytest.raises(NotImplementedError):
cs.ControlSystem()
def test_InvalidControlSystem_throws_NotImplementedError():
ics = InvalidControlSystem()
with pytest.raises(NotImplementedError):
ics.get('dummy')
with pytest.raises(NotImplementedError):
ics.put('dummy', 1)
def test_NullControlSystem_throws_no_errors():
ncs = cs.NullControlSystem()
ncs.get('dummy')
ncs.put('dummy', 1)
|
Add simple tests for pml/cs.py.import pytest
from pml import cs
class InvalidControlSystem(cs.ControlSystem):
"""
Extends ControlSystem without implementing required methods.
"""
def __init__(self):
pass
def test_ControlSystem_throws_NotImplementedError():
with pytest.raises(NotImplementedError):
cs.ControlSystem()
def test_InvalidControlSystem_throws_NotImplementedError():
ics = InvalidControlSystem()
with pytest.raises(NotImplementedError):
ics.get('dummy')
with pytest.raises(NotImplementedError):
ics.put('dummy', 1)
def test_NullControlSystem_throws_no_errors():
ncs = cs.NullControlSystem()
ncs.get('dummy')
ncs.put('dummy', 1)
|
<commit_before><commit_msg>Add simple tests for pml/cs.py.<commit_after>import pytest
from pml import cs
class InvalidControlSystem(cs.ControlSystem):
"""
Extends ControlSystem without implementing required methods.
"""
def __init__(self):
pass
def test_ControlSystem_throws_NotImplementedError():
with pytest.raises(NotImplementedError):
cs.ControlSystem()
def test_InvalidControlSystem_throws_NotImplementedError():
ics = InvalidControlSystem()
with pytest.raises(NotImplementedError):
ics.get('dummy')
with pytest.raises(NotImplementedError):
ics.put('dummy', 1)
def test_NullControlSystem_throws_no_errors():
ncs = cs.NullControlSystem()
ncs.get('dummy')
ncs.put('dummy', 1)
|
|
37c0719c6a82657d9796d39e7cd217d694de504b
|
bluebottle/payouts/migrations/0017_delete_in_review_accounts.py
|
bluebottle/payouts/migrations/0017_delete_in_review_accounts.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-19 15:21
from __future__ import unicode_literals
from django.db import migrations
def remove_accounts(apps, schema_editor):
PayoutAccount = apps.get_model('payouts', 'PayoutAccount')
ProjectPhase = apps.get_model('bb_projects', 'ProjectPhase')
phases = ProjectPhase.objects.filter(
slug__in=('plan-needs-work', 'plan-submitted', 'plan-new')
)
PayoutAccount.objects.filter(project__status__in=phases).delete()
class Migration(migrations.Migration):
dependencies = [
('payouts', '0016_auto_20181215_2016'),
]
operations = [
migrations.RunPython(remove_accounts)
]
|
Make sure we delete all payout accounts from new projects so that they will need to submit a Stripe account.
|
Make sure we delete all payout accounts from new projects so that they
will need to submit a Stripe account.
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Make sure we delete all payout accounts from new projects so that they
will need to submit a Stripe account.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-19 15:21
from __future__ import unicode_literals
from django.db import migrations
def remove_accounts(apps, schema_editor):
PayoutAccount = apps.get_model('payouts', 'PayoutAccount')
ProjectPhase = apps.get_model('bb_projects', 'ProjectPhase')
phases = ProjectPhase.objects.filter(
slug__in=('plan-needs-work', 'plan-submitted', 'plan-new')
)
PayoutAccount.objects.filter(project__status__in=phases).delete()
class Migration(migrations.Migration):
dependencies = [
('payouts', '0016_auto_20181215_2016'),
]
operations = [
migrations.RunPython(remove_accounts)
]
|
<commit_before><commit_msg>Make sure we delete all payout accounts from new projects so that they
will need to submit a Stripe account.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-19 15:21
from __future__ import unicode_literals
from django.db import migrations
def remove_accounts(apps, schema_editor):
PayoutAccount = apps.get_model('payouts', 'PayoutAccount')
ProjectPhase = apps.get_model('bb_projects', 'ProjectPhase')
phases = ProjectPhase.objects.filter(
slug__in=('plan-needs-work', 'plan-submitted', 'plan-new')
)
PayoutAccount.objects.filter(project__status__in=phases).delete()
class Migration(migrations.Migration):
dependencies = [
('payouts', '0016_auto_20181215_2016'),
]
operations = [
migrations.RunPython(remove_accounts)
]
|
Make sure we delete all payout accounts from new projects so that they
will need to submit a Stripe account.# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-19 15:21
from __future__ import unicode_literals
from django.db import migrations
def remove_accounts(apps, schema_editor):
PayoutAccount = apps.get_model('payouts', 'PayoutAccount')
ProjectPhase = apps.get_model('bb_projects', 'ProjectPhase')
phases = ProjectPhase.objects.filter(
slug__in=('plan-needs-work', 'plan-submitted', 'plan-new')
)
PayoutAccount.objects.filter(project__status__in=phases).delete()
class Migration(migrations.Migration):
dependencies = [
('payouts', '0016_auto_20181215_2016'),
]
operations = [
migrations.RunPython(remove_accounts)
]
|
<commit_before><commit_msg>Make sure we delete all payout accounts from new projects so that they
will need to submit a Stripe account.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-12-19 15:21
from __future__ import unicode_literals
from django.db import migrations
def remove_accounts(apps, schema_editor):
PayoutAccount = apps.get_model('payouts', 'PayoutAccount')
ProjectPhase = apps.get_model('bb_projects', 'ProjectPhase')
phases = ProjectPhase.objects.filter(
slug__in=('plan-needs-work', 'plan-submitted', 'plan-new')
)
PayoutAccount.objects.filter(project__status__in=phases).delete()
class Migration(migrations.Migration):
dependencies = [
('payouts', '0016_auto_20181215_2016'),
]
operations = [
migrations.RunPython(remove_accounts)
]
|
|
bfba1d3c4c7dce98bb718666e0f3c232a0c4479f
|
mq/plugins/cloudtrailFixup.py
|
mq/plugins/cloudtrailFixup.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com
class message(object):
def __init__(self):
'''register our criteria for being passed a message
as a list of lower case strings or values to match with an event's dictionary of keys or values
set the priority if you have a preference for order of plugins to run. 0 goes first, 100 is assumed/default if not sent
'''
        # get cloudtrail data
self.registration = ['cloudtrail']
self.priority = 2
def onMessage(self, message, metadata):
# Convert apiVersion with the format '2016_01_02'
# to '2016-01-02'
# This is a result of apiVersion mapping being a dateOptionalTime
# https://bugzilla.mozilla.org/show_bug.cgi?id=1313780
if 'apiVersion' in message.keys():
message['apiVersion'] = message['apiVersion'].replace('_', '-')
return (message, metadata)
|
Add apiVersion mapping fix for cloudtrail
|
Add apiVersion mapping fix for cloudtrail
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com>
|
Python
|
mpl-2.0
|
ameihm0912/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,mpurzynski/MozDef,mozilla/MozDef,Phrozyn/MozDef,ameihm0912/MozDef,gdestuynder/MozDef,mpurzynski/MozDef,mozilla/MozDef,mozilla/MozDef,Phrozyn/MozDef,Phrozyn/MozDef,gdestuynder/MozDef,jeffbryner/MozDef,ameihm0912/MozDef,jeffbryner/MozDef,Phrozyn/MozDef,ameihm0912/MozDef,gdestuynder/MozDef,gdestuynder/MozDef,mozilla/MozDef,mpurzynski/MozDef,mpurzynski/MozDef
|
Add apiVersion mapping fix for cloudtrail
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com
class message(object):
def __init__(self):
'''register our criteria for being passed a message
as a list of lower case strings or values to match with an event's dictionary of keys or values
set the priority if you have a preference for order of plugins to run. 0 goes first, 100 is assumed/default if not sent
'''
        # get cloudtrail data
self.registration = ['cloudtrail']
self.priority = 2
def onMessage(self, message, metadata):
# Convert apiVersion with the format '2016_01_02'
# to '2016-01-02'
# This is a result of apiVersion mapping being a dateOptionalTime
# https://bugzilla.mozilla.org/show_bug.cgi?id=1313780
if 'apiVersion' in message.keys():
message['apiVersion'] = message['apiVersion'].replace('_', '-')
return (message, metadata)
|
<commit_before><commit_msg>Add apiVersion mapping fix for cloudtrail
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com><commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com
class message(object):
def __init__(self):
'''register our criteria for being passed a message
as a list of lower case strings or values to match with an event's dictionary of keys or values
set the priority if you have a preference for order of plugins to run. 0 goes first, 100 is assumed/default if not sent
'''
        # get cloudtrail data
self.registration = ['cloudtrail']
self.priority = 2
def onMessage(self, message, metadata):
# Convert apiVersion with the format '2016_01_02'
# to '2016-01-02'
# This is a result of apiVersion mapping being a dateOptionalTime
# https://bugzilla.mozilla.org/show_bug.cgi?id=1313780
if 'apiVersion' in message.keys():
message['apiVersion'] = message['apiVersion'].replace('_', '-')
return (message, metadata)
|
Add apiVersion mapping fix for cloudtrail
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com
class message(object):
def __init__(self):
'''register our criteria for being passed a message
as a list of lower case strings or values to match with an event's dictionary of keys or values
set the priority if you have a preference for order of plugins to run. 0 goes first, 100 is assumed/default if not sent
'''
        # get cloudtrail data
self.registration = ['cloudtrail']
self.priority = 2
def onMessage(self, message, metadata):
# Convert apiVersion with the format '2016_01_02'
# to '2016-01-02'
# This is a result of apiVersion mapping being a dateOptionalTime
# https://bugzilla.mozilla.org/show_bug.cgi?id=1313780
if 'apiVersion' in message.keys():
message['apiVersion'] = message['apiVersion'].replace('_', '-')
return (message, metadata)
|
<commit_before><commit_msg>Add apiVersion mapping fix for cloudtrail
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com><commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com
class message(object):
def __init__(self):
'''register our criteria for being passed a message
as a list of lower case strings or values to match with an event's dictionary of keys or values
set the priority if you have a preference for order of plugins to run. 0 goes first, 100 is assumed/default if not sent
'''
# get auditd data
self.registration = ['cloudtrail']
self.priority = 2
def onMessage(self, message, metadata):
# Convert apiVersion with the format '2016_01_02'
# to '2016-01-02'
# This is a result of apiVersion mapping being a dateOptionalTime
# https://bugzilla.mozilla.org/show_bug.cgi?id=1313780
if 'apiVersion' in message.keys():
message['apiVersion'] = message['apiVersion'].replace('_', '-')
return (message, metadata)
|
|
2cb89fd366a014c27d5c5f44e35cc34a0eb967cf
|
hotline/db/db_abstract.py
|
hotline/db/db_abstract.py
|
from abc import ABCMeta, abstractmethod
class AbstractClient(metaclass=ABCMeta):
@abstractmethod
def connect(self):
pass
@abstractmethod
def get(self, **kwargs):
pass
@abstractmethod
def set(self, **kwargs):
pass
@abstractmethod
def update(self, **kwargs):
pass
@abstractmethod
def delete(self, **kwargs):
pass
|
Add Abstract class that will be inherited in different database clients
|
Add Abstract class that will be inherited in different database clients
|
Python
|
mit
|
wearhacks/hackathon_hotline
|
Add Abstract class that will be inherited in different database clients
|
from abc import ABCMeta, abstractmethod
class AbstractClient(metaclass=ABCMeta):
@abstractmethod
def connect(self):
pass
@abstractmethod
def get(self, **kwargs):
pass
@abstractmethod
def set(self, **kwargs):
pass
@abstractmethod
def update(self, **kwargs):
pass
@abstractmethod
def delete(self, **kwargs):
pass
|
<commit_before><commit_msg>Add Abstract class that will be inherited in different database clients<commit_after>
|
from abc import ABCMeta, abstractmethod
class AbstractClient(metaclass=ABCMeta):
@abstractmethod
def connect(self):
pass
@abstractmethod
def get(self, **kwargs):
pass
@abstractmethod
def set(self, **kwargs):
pass
@abstractmethod
def update(self, **kwargs):
pass
@abstractmethod
def delete(self, **kwargs):
pass
|
Add Abstract class that will be inherited in different database clientsfrom abc import ABCMeta, abstractmethod
class AbstractClient(metaclass=ABCMeta):
@abstractmethod
def connect(self):
pass
@abstractmethod
def get(self, **kwargs):
pass
@abstractmethod
def set(self, **kwargs):
pass
@abstractmethod
def update(self, **kwargs):
pass
@abstractmethod
def delete(self, **kwargs):
pass
|
<commit_before><commit_msg>Add Abstract class that will be inherited in different database clients<commit_after>from abc import ABCMeta, abstractmethod
class AbstractClient(metaclass=ABCMeta):
@abstractmethod
def connect(self):
pass
@abstractmethod
def get(self, **kwargs):
pass
@abstractmethod
def set(self, **kwargs):
pass
@abstractmethod
def update(self, **kwargs):
pass
@abstractmethod
def delete(self, **kwargs):
pass
|
|
17a9c1154ef41f7b3276be73d255a95c2e616cb0
|
spec/bottling_specs/factory_specs/BottleSingletonAppLoader_specs.py
|
spec/bottling_specs/factory_specs/BottleSingletonAppLoader_specs.py
|
import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
Add loader for singleton apps
|
Add loader for singleton apps
|
Python
|
mit
|
datamora/datamora,datamora/datamora
|
Add loader for singleton apps
|
import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
<commit_before><commit_msg>Add loader for singleton apps<commit_after>
|
import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
Add loader for singleton appsimport fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
<commit_before><commit_msg>Add loader for singleton apps<commit_after>import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
|
f60aa33b10268394b66e88b2b262b6ed821f05a6
|
CodeFights/pagesNumberingWithInk.py
|
CodeFights/pagesNumberingWithInk.py
|
#!/usr/local/bin/python
# Code Fights Pages Numbering With Ink Problem
import math
def pagesNumberingWithInk(current, numberOfDigits):
num = current
digits = (int(math.log(num, 10)) + 1)
available = numberOfDigits - digits
while available >= digits:
digits = int(math.log(num + 1, 10)) + 1
if digits <= available:
num += 1
available -= digits
else:
return num
return num
def main():
tests = [
[1, 5, 5],
[21, 5, 22],
[8, 4, 10],
[21, 6, 23],
[76, 250, 166],
[80, 1000, 419]
]
for t in tests:
res = pagesNumberingWithInk(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pagesNumberingWithInk({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pagesNumberingWithInk({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights pages numbering with ink problem
|
Solve Code Fights pages numbering with ink problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights pages numbering with ink problem
|
#!/usr/local/bin/python
# Code Fights Pages Numbering With Ink Problem
import math
def pagesNumberingWithInk(current, numberOfDigits):
num = current
digits = (int(math.log(num, 10)) + 1)
available = numberOfDigits - digits
while available >= digits:
digits = int(math.log(num + 1, 10)) + 1
if digits <= available:
num += 1
available -= digits
else:
return num
return num
def main():
tests = [
[1, 5, 5],
[21, 5, 22],
[8, 4, 10],
[21, 6, 23],
[76, 250, 166],
[80, 1000, 419]
]
for t in tests:
res = pagesNumberingWithInk(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pagesNumberingWithInk({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pagesNumberingWithInk({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights pages numbering with ink problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Pages Numbering With Ink Problem
import math
def pagesNumberingWithInk(current, numberOfDigits):
num = current
digits = (int(math.log(num, 10)) + 1)
available = numberOfDigits - digits
while available >= digits:
digits = int(math.log(num + 1, 10)) + 1
if digits <= available:
num += 1
available -= digits
else:
return num
return num
def main():
tests = [
[1, 5, 5],
[21, 5, 22],
[8, 4, 10],
[21, 6, 23],
[76, 250, 166],
[80, 1000, 419]
]
for t in tests:
res = pagesNumberingWithInk(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pagesNumberingWithInk({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pagesNumberingWithInk({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights pages numbering with ink problem#!/usr/local/bin/python
# Code Fights Pages Numbering With Ink Problem
import math
def pagesNumberingWithInk(current, numberOfDigits):
num = current
digits = (int(math.log(num, 10)) + 1)
available = numberOfDigits - digits
while available >= digits:
digits = int(math.log(num + 1, 10)) + 1
if digits <= available:
num += 1
available -= digits
else:
return num
return num
def main():
tests = [
[1, 5, 5],
[21, 5, 22],
[8, 4, 10],
[21, 6, 23],
[76, 250, 166],
[80, 1000, 419]
]
for t in tests:
res = pagesNumberingWithInk(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pagesNumberingWithInk({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pagesNumberingWithInk({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights pages numbering with ink problem<commit_after>#!/usr/local/bin/python
# Code Fights Pages Numbering With Ink Problem
import math
def pagesNumberingWithInk(current, numberOfDigits):
num = current
digits = (int(math.log(num, 10)) + 1)
available = numberOfDigits - digits
while available >= digits:
digits = int(math.log(num + 1, 10)) + 1
if digits <= available:
num += 1
available -= digits
else:
return num
return num
def main():
tests = [
[1, 5, 5],
[21, 5, 22],
[8, 4, 10],
[21, 6, 23],
[76, 250, 166],
[80, 1000, 419]
]
for t in tests:
res = pagesNumberingWithInk(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pagesNumberingWithInk({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pagesNumberingWithInk({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
|
76578869caaa79f7958ace74eeab82c9af9f4207
|
metaci/cumulusci/migrations/0009_remove_org_management_group.py
|
metaci/cumulusci/migrations/0009_remove_org_management_group.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-19 14:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cumulusci', '0008_org_management_group'),
]
operations = [
migrations.RemoveField(
model_name='org',
name='management_group',
),
]
|
Remove Org.management_group since org access is now controlled through the org_login perm on PlanRepository objects via guardian.
|
Remove Org.management_group since org access is now controlled through
the org_login perm on PlanRepository objects via guardian.
|
Python
|
bsd-3-clause
|
SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci,SalesforceFoundation/mrbelvedereci
|
Remove Org.management_group since org access is now controlled through
the org_login perm on PlanRepository objects via guardian.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-19 14:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cumulusci', '0008_org_management_group'),
]
operations = [
migrations.RemoveField(
model_name='org',
name='management_group',
),
]
|
<commit_before><commit_msg>Remove Org.management_group since org access is now controlled through
the org_login perm on PlanRepository objects via guardian.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-19 14:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cumulusci', '0008_org_management_group'),
]
operations = [
migrations.RemoveField(
model_name='org',
name='management_group',
),
]
|
Remove Org.management_group since org access is now controlled through
the org_login perm on PlanRepository objects via guardian.# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-19 14:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cumulusci', '0008_org_management_group'),
]
operations = [
migrations.RemoveField(
model_name='org',
name='management_group',
),
]
|
<commit_before><commit_msg>Remove Org.management_group since org access is now controlled through
the org_login perm on PlanRepository objects via guardian.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-10-19 14:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cumulusci', '0008_org_management_group'),
]
operations = [
migrations.RemoveField(
model_name='org',
name='management_group',
),
]
|
|
39b1ce81adedbff4099e2931d7379bc1281ee4b2
|
scripts/make_ast_classes.py
|
scripts/make_ast_classes.py
|
"""Build src/py_ast.js from the documentation of Python ast module."""
import os
import re
import json
import urllib.request
ast_url = "https://raw.githubusercontent.com/python/cpython/main/Doc/library/ast.rst"
f = urllib.request.urlopen(ast_url)
classes = {}
def add_class(line):
line = line[len(starter):].strip()
ix = line.find('(')
if ix > -1:
name = line[:ix]
args = line[ix:][1:-1]
else:
name, args = line, ''
classes[name] = args
kl = False
starter = '.. class:: '
for line in f:
line = line.decode('utf-8')
if line.startswith(starter):
add_class(line)
kl = True
elif kl and line.startswith(' ' * len(starter)):
add_class(line)
else:
kl = False
keys = sorted(list(classes))
lines = []
for key in keys:
lines.append(f"{key}:'{classes[key]}'".replace(' ', ''))
dest_dir = os.path.join(os.path.dirname(os.getcwd()), "www", "src")
with open(os.path.join(dest_dir, 'py_ast.js'), 'w', encoding='utf-8') as out:
out.write('__BRYTHON__.ast_classes = {\n' + ',\n'.join(lines) + '\n}\n')
|
Add script to generate src/py_ast.js (classes for Python Abstract Syntax Tree)
|
Add script to generate src/py_ast.js (classes for Python Abstract Syntax Tree)
|
Python
|
bsd-3-clause
|
brython-dev/brython,brython-dev/brython,brython-dev/brython
|
Add script to generate src/py_ast.js (classes for Python Abstract Syntax Tree)
|
"""Build src/py_ast.js from the documentation of Python ast module."""
import os
import re
import json
import urllib.request
ast_url = "https://raw.githubusercontent.com/python/cpython/main/Doc/library/ast.rst"
f = urllib.request.urlopen(ast_url)
classes = {}
def add_class(line):
line = line[len(starter):].strip()
ix = line.find('(')
if ix > -1:
name = line[:ix]
args = line[ix:][1:-1]
else:
name, args = line, ''
classes[name] = args
kl = False
starter = '.. class:: '
for line in f:
line = line.decode('utf-8')
if line.startswith(starter):
add_class(line)
kl = True
elif kl and line.startswith(' ' * len(starter)):
add_class(line)
else:
kl = False
keys = sorted(list(classes))
lines = []
for key in keys:
lines.append(f"{key}:'{classes[key]}'".replace(' ', ''))
dest_dir = os.path.join(os.path.dirname(os.getcwd()), "www", "src")
with open(os.path.join(dest_dir, 'py_ast.js'), 'w', encoding='utf-8') as out:
out.write('__BRYTHON__.ast_classes = {\n' + ',\n'.join(lines) + '\n}\n')
|
<commit_before><commit_msg>Add script to generate src/py_ast.js (classes for Python Abstract Syntax Tree)<commit_after>
|
"""Build src/py_ast.js from the documentation of Python ast module."""
import os
import re
import json
import urllib.request
ast_url = "https://raw.githubusercontent.com/python/cpython/main/Doc/library/ast.rst"
f = urllib.request.urlopen(ast_url)
classes = {}
def add_class(line):
line = line[len(starter):].strip()
ix = line.find('(')
if ix > -1:
name = line[:ix]
args = line[ix:][1:-1]
else:
name, args = line, ''
classes[name] = args
kl = False
starter = '.. class:: '
for line in f:
line = line.decode('utf-8')
if line.startswith(starter):
add_class(line)
kl = True
elif kl and line.startswith(' ' * len(starter)):
add_class(line)
else:
kl = False
keys = sorted(list(classes))
lines = []
for key in keys:
lines.append(f"{key}:'{classes[key]}'".replace(' ', ''))
dest_dir = os.path.join(os.path.dirname(os.getcwd()), "www", "src")
with open(os.path.join(dest_dir, 'py_ast.js'), 'w', encoding='utf-8') as out:
out.write('__BRYTHON__.ast_classes = {\n' + ',\n'.join(lines) + '\n}\n')
|
Add script to generate src/py_ast.js (classes for Python Abstract Syntax Tree)"""Build src/py_ast.js from the documentation of Python ast module."""
import os
import re
import json
import urllib.request
ast_url = "https://raw.githubusercontent.com/python/cpython/main/Doc/library/ast.rst"
f = urllib.request.urlopen(ast_url)
classes = {}
def add_class(line):
line = line[len(starter):].strip()
ix = line.find('(')
if ix > -1:
name = line[:ix]
args = line[ix:][1:-1]
else:
name, args = line, ''
classes[name] = args
kl = False
starter = '.. class:: '
for line in f:
line = line.decode('utf-8')
if line.startswith(starter):
add_class(line)
kl = True
elif kl and line.startswith(' ' * len(starter)):
add_class(line)
else:
kl = False
keys = sorted(list(classes))
lines = []
for key in keys:
lines.append(f"{key}:'{classes[key]}'".replace(' ', ''))
dest_dir = os.path.join(os.path.dirname(os.getcwd()), "www", "src")
with open(os.path.join(dest_dir, 'py_ast.js'), 'w', encoding='utf-8') as out:
out.write('__BRYTHON__.ast_classes = {\n' + ',\n'.join(lines) + '\n}\n')
|
<commit_before><commit_msg>Add script to generate src/py_ast.js (classes for Python Abstract Syntax Tree)<commit_after>"""Build src/py_ast.js from the documentation of Python ast module."""
import os
import re
import json
import urllib.request
ast_url = "https://raw.githubusercontent.com/python/cpython/main/Doc/library/ast.rst"
f = urllib.request.urlopen(ast_url)
classes = {}
def add_class(line):
line = line[len(starter):].strip()
ix = line.find('(')
if ix > -1:
name = line[:ix]
args = line[ix:][1:-1]
else:
name, args = line, ''
classes[name] = args
kl = False
starter = '.. class:: '
for line in f:
line = line.decode('utf-8')
if line.startswith(starter):
add_class(line)
kl = True
elif kl and line.startswith(' ' * len(starter)):
add_class(line)
else:
kl = False
keys = sorted(list(classes))
lines = []
for key in keys:
lines.append(f"{key}:'{classes[key]}'".replace(' ', ''))
dest_dir = os.path.join(os.path.dirname(os.getcwd()), "www", "src")
with open(os.path.join(dest_dir, 'py_ast.js'), 'w', encoding='utf-8') as out:
out.write('__BRYTHON__.ast_classes = {\n' + ',\n'.join(lines) + '\n}\n')
|
|
2fbd3cc8d903aff06588d9ed74edece8b0ecc41f
|
python/opencv/opencv_2/image_precessing/changing_colorspaces.py
|
python/opencv/opencv_2/image_precessing/changing_colorspaces.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Changing colorspaces: convert images from one color-space to another
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 1
video_capture = cv.VideoCapture(device_number)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
        # 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# Display the resulting frame (BGR)
        cv.imshow('BGR (original)', img_bgr)
# Display the resulting frame (Gray)
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', img_gray)
# Display the resulting frame (HSV)
img_hsv = cv.cvtColor(img_bgr, cv.COLOR_BGR2HSV)
cv.imshow('HSV', img_hsv)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).
|
Add a snippet (Python OpenCV).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (Python OpenCV).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Changing colorspaces: convert images from one color-space to another
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 1
video_capture = cv.VideoCapture(device_number)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
        # 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# Display the resulting frame (BGR)
        cv.imshow('BGR (original)', img_bgr)
# Display the resulting frame (Gray)
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', img_gray)
# Display the resulting frame (HSV)
img_hsv = cv.cvtColor(img_bgr, cv.COLOR_BGR2HSV)
cv.imshow('HSV', img_hsv)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Changing colorspaces: convert images from one color-space to another
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 1
video_capture = cv.VideoCapture(device_number)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
        # 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# Display the resulting frame (BGR)
        cv.imshow('BGR (original)', img_bgr)
# Display the resulting frame (Gray)
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', img_gray)
# Display the resulting frame (HSV)
img_hsv = cv.cvtColor(img_bgr, cv.COLOR_BGR2HSV)
cv.imshow('HSV', img_hsv)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Changing colorspaces: convert images from one color-space to another
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 1
video_capture = cv.VideoCapture(device_number)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
        # 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# Display the resulting frame (BGR)
        cv.imshow('BGR (original)', img_bgr)
# Display the resulting frame (Gray)
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', img_gray)
# Display the resulting frame (HSV)
img_hsv = cv.cvtColor(img_bgr, cv.COLOR_BGR2HSV)
cv.imshow('HSV', img_hsv)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Changing colorspaces: convert images from one color-space to another
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#converting-colorspaces
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 1
video_capture = cv.VideoCapture(device_number)
print("Press q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
        # 'img_bgr' is a numpy array.
ret, img_bgr = video_capture.read()
# Display the resulting frame (BGR)
        cv.imshow('BGR (original)', img_bgr)
# Display the resulting frame (Gray)
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', img_gray)
# Display the resulting frame (HSV)
img_hsv = cv.cvtColor(img_bgr, cv.COLOR_BGR2HSV)
cv.imshow('HSV', img_hsv)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
|
ff246189bcf5168222ff1231ec1028d2c4dd182c
|
simple_text_editor/kevin.py
|
simple_text_editor/kevin.py
|
#!/usr/bin/python3
# Kevin Boyette
# 1/11/2017
class Stack(object):
def __init__(self):
self.stack = []
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def is_empty(self):
return self.stack == []
def push(self, element):
self.stack.append(element)
def pop(self):
return self.stack.pop()
class TextEditor(object):
def __init__(self):
'''
Class TextEditor
        Members:
Input is a string
Operation Stack is a stack of tuples,
                ( <operation number>, <appended/deleted string> )
'''
self.input = '';
self.operation_stack = Stack()
def append(self, x: str):
self.input += x
self.operation_stack.push( (1, x) )
def delete(self, k: int):
self.operation_stack.push((2, self.input[-k:]))
self.input = self.input[:-k]
def print(self,k: int):
print(self.input[:k])
def undo(self):
if len(self.operation_stack) == 0:
return
# Operation can be either 1 or 2
# 1 == APPEND
# 2 == DELETE
operation, items = self.operation_stack.pop()
if operation == 1:
self.delete(len(items))
else:
self.append(items)
if __name__ == '__main__':
q = int(input())
textEditor = TextEditor()
for each_iteration in range(q):
line = input()
operation = int(line[0])
if operation == 1:
textEditor.append(line.split()[1])
elif operation == 2:
textEditor.delete(int(line.split()[1]))
elif operation == 3:
textEditor.print(int(line.split()[1]))
else:
textEditor.undo()
|
Add Kevin's take on the TextEditor Problem
|
Add Kevin's take on the TextEditor Problem
|
Python
|
mit
|
PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank,PlattsSEC/HackerRank
|
Add Kevin's take on the TextEditor Problem
|
#!/usr/bin/python3
# Kevin Boyette
# 1/11/2017
class Stack(object):
def __init__(self):
self.stack = []
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def is_empty(self):
return self.stack == []
def push(self, element):
self.stack.append(element)
def pop(self):
return self.stack.pop()
class TextEditor(object):
def __init__(self):
'''
Class TextEditor
        Members:
Input is a string
Operation Stack is a stack of tuples,
                ( <operation number>, <appended/deleted string> )
'''
self.input = '';
self.operation_stack = Stack()
def append(self, x: str):
self.input += x
self.operation_stack.push( (1, x) )
def delete(self, k: int):
self.operation_stack.push((2, self.input[-k:]))
self.input = self.input[:-k]
def print(self,k: int):
print(self.input[:k])
def undo(self):
if len(self.operation_stack) == 0:
return
# Operation can be either 1 or 2
# 1 == APPEND
# 2 == DELETE
operation, items = self.operation_stack.pop()
if operation == 1:
self.delete(len(items))
else:
self.append(items)
if __name__ == '__main__':
q = int(input())
textEditor = TextEditor()
for each_iteration in range(q):
line = input()
operation = int(line[0])
if operation == 1:
textEditor.append(line.split()[1])
elif operation == 2:
textEditor.delete(int(line.split()[1]))
elif operation == 3:
textEditor.print(int(line.split()[1]))
else:
textEditor.undo()
|
<commit_before><commit_msg>Add Kevin's take on the TextEditor Problem<commit_after>
|
#!/usr/bin/python3
# Kevin Boyette
# 1/11/2017
class Stack(object):
def __init__(self):
self.stack = []
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def is_empty(self):
return self.stack == []
def push(self, element):
self.stack.append(element)
def pop(self):
return self.stack.pop()
class TextEditor(object):
def __init__(self):
'''
Class TextEditor
        Members:
Input is a string
Operation Stack is a stack of tuples,
                ( <operation number>, <appended/deleted string> )
'''
self.input = '';
self.operation_stack = Stack()
def append(self, x: str):
self.input += x
self.operation_stack.push( (1, x) )
def delete(self, k: int):
self.operation_stack.push((2, self.input[-k:]))
self.input = self.input[:-k]
def print(self,k: int):
print(self.input[:k])
def undo(self):
if len(self.operation_stack) == 0:
return
# Operation can be either 1 or 2
# 1 == APPEND
# 2 == DELETE
operation, items = self.operation_stack.pop()
if operation == 1:
self.delete(len(items))
else:
self.append(items)
if __name__ == '__main__':
q = int(input())
textEditor = TextEditor()
for each_iteration in range(q):
line = input()
operation = int(line[0])
if operation == 1:
textEditor.append(line.split()[1])
elif operation == 2:
textEditor.delete(int(line.split()[1]))
elif operation == 3:
textEditor.print(int(line.split()[1]))
else:
textEditor.undo()
|
Add Kevin's take on the TextEditor Problem#!/usr/bin/python3
# Kevin Boyette
# 1/11/2017
class Stack(object):
def __init__(self):
self.stack = []
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def is_empty(self):
return self.stack == []
def push(self, element):
self.stack.append(element)
def pop(self):
return self.stack.pop()
class TextEditor(object):
def __init__(self):
'''
Class TextEditor
        Members:
Input is a string
Operation Stack is a stack of tuples,
                ( <operation number>, <appended/deleted string> )
'''
self.input = '';
self.operation_stack = Stack()
def append(self, x: str):
self.input += x
self.operation_stack.push( (1, x) )
def delete(self, k: int):
self.operation_stack.push((2, self.input[-k:]))
self.input = self.input[:-k]
def print(self,k: int):
print(self.input[:k])
def undo(self):
if len(self.operation_stack) == 0:
return
# Operation can be either 1 or 2
# 1 == APPEND
# 2 == DELETE
operation, items = self.operation_stack.pop()
if operation == 1:
self.delete(len(items))
else:
self.append(items)
if __name__ == '__main__':
q = int(input())
textEditor = TextEditor()
for each_iteration in range(q):
line = input()
operation = int(line[0])
if operation == 1:
textEditor.append(line.split()[1])
elif operation == 2:
textEditor.delete(int(line.split()[1]))
elif operation == 3:
textEditor.print(int(line.split()[1]))
else:
textEditor.undo()
|
<commit_before><commit_msg>Add Kevin's take on the TextEditor Problem<commit_after>#!/usr/bin/python3
# Kevin Boyette
# 1/11/2017
class Stack(object):
def __init__(self):
self.stack = []
def __str__(self):
return str(self.stack)
def __len__(self):
return len(self.stack)
def is_empty(self):
return self.stack == []
def push(self, element):
self.stack.append(element)
def pop(self):
return self.stack.pop()
class TextEditor(object):
def __init__(self):
'''
Class TextEditor
Members:
Input is a string
Operation Stack is a stack of tuples,
( <operation number>, <appended/deleted string> )
'''
self.input = '';
self.operation_stack = Stack()
def append(self, x: str):
self.input += x
self.operation_stack.push( (1, x) )
def delete(self, k: int):
self.operation_stack.push((2, self.input[-k:]))
self.input = self.input[:-k]
def print(self,k: int):
print(self.input[:k])
def undo(self):
if len(self.operation_stack) == 0:
return
# Operation can be either 1 or 2
# 1 == APPEND
# 2 == DELETE
operation, items = self.operation_stack.pop()
if operation == 1:
self.delete(len(items))
else:
self.append(items)
if __name__ == '__main__':
q = int(input())
textEditor = TextEditor()
for each_iteration in range(q):
line = input()
operation = int(line[0])
if operation == 1:
textEditor.append(line.split()[1])
elif operation == 2:
textEditor.delete(int(line.split()[1]))
elif operation == 3:
textEditor.print(int(line.split()[1]))
else:
textEditor.undo()
|
|
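A minimal usage sketch (not part of the commit above), assuming the TextEditor class from the record is already in scope; it illustrates how each append/delete pushes an inverse operation that undo later replays. The values below are made-up example inputs.
# Illustrative only: requires the TextEditor class defined in the record above.
editor = TextEditor()
editor.append('abc')   # input == 'abc', stack holds (1, 'abc')
editor.delete(1)       # input == 'ab',  stack now also holds (2, 'c')
editor.print(2)        # prints 'ab'
editor.undo()          # pops (2, 'c') and re-appends it, so input == 'abc'
editor.print(3)        # prints 'abc'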
3189a4db76c4ca49d1a51d1df05eb6b7bee0d817
|
scripts/make_routes_json.py
|
scripts/make_routes_json.py
|
#!/usr/bin/env python
import os
import sys
import json
SCRIPTS_DIR = '/usr/src/scripts'
BUS_ROUTES_FILE = os.path.join(SCRIPTS_DIR, 'routes.txt')
result = []
with open(BUS_ROUTES_FILE, 'r') as fp:
for line in fp:
route_num = line.strip()
result.append(route_num)
json.dump(result, sys.stdout)
|
Create helper script to generate bus routes JSON
|
Create helper script to generate bus routes JSON
|
Python
|
mit
|
kdeloach/septa-viz,kdeloach/septa-viz,kdeloach/septa-viz,kdeloach/septa-viz
|
Create helper script to generate bus routes JSON
|
#!/usr/bin/env python
import os
import sys
import json
SCRIPTS_DIR = '/usr/src/scripts'
BUS_ROUTES_FILE = os.path.join(SCRIPTS_DIR, 'routes.txt')
result = []
with open(BUS_ROUTES_FILE, 'r') as fp:
for line in fp:
route_num = line.strip()
result.append(route_num)
json.dump(result, sys.stdout)
|
<commit_before><commit_msg>Create helper script to generate bus routes JSON<commit_after>
|
#!/usr/bin/env python
import os
import sys
import json
SCRIPTS_DIR = '/usr/src/scripts'
BUS_ROUTES_FILE = os.path.join(SCRIPTS_DIR, 'routes.txt')
result = []
with open(BUS_ROUTES_FILE, 'r') as fp:
for line in fp:
route_num = line.strip()
result.append(route_num)
json.dump(result, sys.stdout)
|
Create helper script to generate bus routes JSON#!/usr/bin/env python
import os
import sys
import json
SCRIPTS_DIR = '/usr/src/scripts'
BUS_ROUTES_FILE = os.path.join(SCRIPTS_DIR, 'routes.txt')
result = []
with open(BUS_ROUTES_FILE, 'r') as fp:
for line in fp:
route_num = line.strip()
result.append(route_num)
json.dump(result, sys.stdout)
|
<commit_before><commit_msg>Create helper script to generate bus routes JSON<commit_after>#!/usr/bin/env python
import os
import sys
import json
SCRIPTS_DIR = '/usr/src/scripts'
BUS_ROUTES_FILE = os.path.join(SCRIPTS_DIR, 'routes.txt')
result = []
with open(BUS_ROUTES_FILE, 'r') as fp:
for line in fp:
route_num = line.strip()
result.append(route_num)
json.dump(result, sys.stdout)
|
|
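A self-contained sketch (not from the commit) of what make_routes_json.py does, using an in-memory string in place of the real /usr/src/scripts/routes.txt:
# Illustrative only: fake_routes_txt stands in for the routes.txt file.
import io
import json
import sys
fake_routes_txt = io.StringIO('23\n42\nK\n')
result = [line.strip() for line in fake_routes_txt]
json.dump(result, sys.stdout)  # writes ["23", "42", "K"]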
8351814c8eb645b50f92a498cb25d6af349d9a27
|
scripts/upload_2_crowdai.py
|
scripts/upload_2_crowdai.py
|
#!/usr/bin/env python
try:
import crowdai
except:
raise Exception("Please install the `crowdai` python client by : pip install crowdai")
import argparse
parser = argparse.ArgumentParser(description='Upload saved docker environments to crowdai for grading')
parser.add_argument('--api_key', dest='api_key', action='store', required=True)
parser.add_argument('--docker_container', dest='docker_container', action='store', required=True)
args = parser.parse_args()
challenge = crowdai.Challenge("Learning2RunChallengeNIPS2017", args.api_key)
result = challenge.submit(args.docker_container)
print(result)
|
Add a sample uploader script
|
Add a sample uploader script
|
Python
|
mit
|
vzhuang/osim-rl,stanfordnmbl/osim-rl
|
Add a sample uploader script
|
#!/usr/bin/env python
try:
import crowdai
except:
raise Exception("Please install the `crowdai` python client by : pip install crowdai")
import argparse
parser = argparse.ArgumentParser(description='Upload saved docker environments to crowdai for grading')
parser.add_argument('--api_key', dest='api_key', action='store', required=True)
parser.add_argument('--docker_container', dest='docker_container', action='store', required=True)
args = parser.parse_args()
challenge = crowdai.Challenge("Learning2RunChallengeNIPS2017", args.api_key)
result = challenge.submit(args.docker_container)
print(result)
|
<commit_before><commit_msg>Add a sample uploader script<commit_after>
|
#!/usr/bin/env python
try:
import crowdai
except:
raise Exception("Please install the `crowdai` python client by : pip install crowdai")
import argparse
parser = argparse.ArgumentParser(description='Upload saved docker environments to crowdai for grading')
parser.add_argument('--api_key', dest='api_key', action='store', required=True)
parser.add_argument('--docker_container', dest='docker_container', action='store', required=True)
args = parser.parse_args()
challenge = crowdai.Challenge("Learning2RunChallengeNIPS2017", args.api_key)
result = challenge.submit(docker_container)
print(result)
|
Add a sample uploader script#!/usr/bin/env python
try:
import crowdai
except:
raise Exception("Please install the `crowdai` python client by : pip install crowdai")
import argparse
parser = argparse.ArgumentParser(description='Upload saved docker environments to crowdai for grading')
parser.add_argument('--api_key', dest='api_key', action='store', required=True)
parser.add_argument('--docker_container', dest='docker_container', action='store', required=True)
args = parser.parse_args()
challenge = crowdai.Challenge("Learning2RunChallengeNIPS2017", args.api_key)
result = challenge.submit(args.docker_container)
print(result)
|
<commit_before><commit_msg>Add a sample uploader script<commit_after>#!/usr/bin/env python
try:
import crowdai
except:
raise Exception("Please install the `crowdai` python client by : pip install crowdai")
import argparse
parser = argparse.ArgumentParser(description='Upload saved docker environments to crowdai for grading')
parser.add_argument('--api_key', dest='api_key', action='store', required=True)
parser.add_argument('--docker_container', dest='docker_container', action='store', required=True)
args = parser.parse_args()
challenge = crowdai.Challenge("Learning2RunChallengeNIPS2017", args.api_key)
result = challenge.submit(args.docker_container)
print(result)
|
|
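A hypothetical invocation sketch (not part of the commit); the API key and container name below are placeholders, not real credentials:
# Illustrative only: shows how the script's argparse options would be supplied.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--api_key', dest='api_key', action='store', required=True)
parser.add_argument('--docker_container', dest='docker_container', action='store', required=True)
args = parser.parse_args(['--api_key', 'YOUR_API_KEY',
                          '--docker_container', 'youruser/learning2run:latest'])
print(args.docker_container)  # -> youruser/learning2run:latest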
2d4431d7bc6b7ab362ce2cf084b901be45c51c2f
|
quantum/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
|
quantum/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_netbinding
Revision ID: 1d76643bcec4
Revises: 48b6f43f7471
Create Date: 2013-01-15 07:36:10.024346
"""
# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '48b6f43f7471'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'nvp_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'),
nullable=False),
sa.Column('tz_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('nvp_network_bindings')
|
Add migration for network bindings in NVP plugin
|
Add migration for network bindings in NVP plugin
Bug 1099895
Ensures the table nvp_network_bindings is created when upgrading
database to head, by adding an appropriate alembic migration
Change-Id: I4a794ed0ec6866d657cb2470d5aa67828e81aa75
|
Python
|
apache-2.0
|
blueboxgroup/neutron,vbannai/neutron,swdream/neutron,mattt416/neutron,ykaneko/quantum,yamahata/neutron,apporc/neutron,vivekanand1101/neutron,openstack/neutron,mattt416/neutron,mandeepdhami/neutron,Comcast/neutron,miyakz1192/neutron,dims/neutron,vbannai/neutron,JianyuWang/neutron,Brocade-OpenSource/OpenStack-DNRM-Neutron,SamYaple/neutron,Metaswitch/calico-neutron,tpaszkowski/quantum,bgxavier/neutron,antonioUnina/neutron,JioCloud/neutron,liqin75/vse-vpnaas-plugin,magic0704/neutron,yuewko/neutron,citrix-openstack-build/neutron,JioCloud/neutron,ykaneko/neutron,huntxu/neutron,yamahata/neutron,wolverineav/neutron,jacknjzhou/neutron,vijayendrabvs/hap,CiscoSystems/neutron,MaximNevrov/neutron,sajuptpm/neutron-ipam,rickerc/neutron_audit,rossella/neutron,rickerc/neutron_audit,vveerava/Openstack,takeshineshiro/neutron,klmitch/neutron,mahak/neutron,ykaneko/quantum,mmnelemane/neutron,leeseuljeong/leeseulstack_neutron,cisco-openstack/neutron,liqin75/vse-vpnaas-plugin,kaiweifan/neutron,virtualopensystems/neutron,yamt/neutron,infobloxopen/neutron,gkotton/neutron,sasukeh/neutron,beagles/neutron_hacking,yamahata/tacker,bgxavier/neutron,Stavitsky/neutron,takeshineshiro/neutron,citrix-openstack-build/neutron,Juniper/neutron,Juniper/contrail-dev-neutron,redhat-openstack/neutron,neoareslinux/neutron,wolverineav/neutron,sajuptpm/neutron-ipam,javaos74/neutron,rossella/neutron,armando-migliaccio/neutron,gkotton/neutron,jerryz1982/neutron,armando-migliaccio/neutron,glove747/liberty-neutron,antonioUnina/neutron,watonyweng/neutron,magic0704/neutron,jacknjzhou/neutron,rossella/neutron,vveerava/Openstack,openstack/neutron,CiscoSystems/quantum,sasukeh/neutron,silenci/neutron,openstack/neutron,ykaneko/quantum,beagles/neutron_hacking,kaiweifan/neutron,eayunstack/neutron,blueboxgroup/neutron,cernops/neutron,chitr/neutron,redhat-openstack/neutron,jumpojoy/neutron,javaos74/neutron,igor-toga/local-snat,Metaswitch/calico-neutron,beagles/neutron_hacking,zhhf/charging,yanheven/neutron,vivekanand1101/neutron,waltBB/neutron_read,yamahata/neutron,oeeagle/quantum,alexandrucoman/vbox-neutron-agent,gopal1cloud/neutron,igor-toga/local-snat,mandeepdhami/neutron,paninetworks/neutron,dims/neutron,aristanetworks/neutron,tpaszkowski/quantum,sebrandon1/neutron,alexandrucoman/vbox-neutron-agent,aristanetworks/neutron,asgard-lab/neutron,leeseuljeong/leeseulstack_neutron,leeseulstack/openstack,rdo-management/neutron,CiscoSystems/vespa,zhhf/charging,neoareslinux/neutron,mmnelemane/neutron,oeeagle/quantum,CiscoSystems/neutron,noironetworks/neutron,yuewko/neutron,Juniper/neutron,yamt/neutron,shahbazn/neutron,pnavarro/neutron,Brocade-OpenSource/OpenStack-DNRM-Neutron,SamYaple/neutron,NeCTAR-RC/neutron,yamt/neutron,apporc/neutron,Juniper/contrail-dev-neutron,swdream/neutron,ykaneko/neutron,yamahata/tacker,sebrandon1/neutron,barnsnake351/neutron,vveerava/Openstack,wenhuizhang/neutron,netscaler/neutron,virtualopensystems/neutron,Comcast/neutron,cernops/neutron,cloudbase/neutron-virtualbox,CiscoSystems/neutron,liqin75/vse-vpnaas-plugin,liqin75/vse-vpnaas-plugin,shahbazn/neutron,armando-migliaccio/neutron,suneeth51/neutron,dhanunjaya/neutron,barnsnake351/neutron,silenci/neutron,citrix-openstack-build/neutron,yamahata/tacker,Brocade-OpenSource/OpenStack-DNRM-Neutron,ykaneko/neutron,jerryz1982/neutron,vijayendrabvs/hap,miyakz1192/neutron,dhanunjaya/neutron,asgard-lab/neutron,pnavarro/neutron,kaiweifan/vse-lbaas-plugin-poc,CiscoSystems/vespa,eayunstack/neutron,jumpojoy/neutron,CiscoSystems/quantum,rickerc/neutron_audit,ntt-sic/neutron,NeCTAR-RC/neutron,rdo-m
anagement/neutron,wenhuizhang/neutron,leeseulstack/openstack,armando-migliaccio/neutron,tpaszkowski/quantum,mahak/neutron,vijayendrabvs/ssl-neutron,noironetworks/neutron,projectcalico/calico-neutron,Stavitsky/neutron,vbannai/neutron,kaiweifan/vse-lbaas-plugin-poc,vijayendrabvs/ssl-neutron,netscaler/neutron,infobloxopen/neutron,adelina-t/neutron,glove747/liberty-neutron,rossella/neutron,ntt-sic/neutron,leeseuljeong/leeseulstack_neutron,JianyuWang/neutron,cloudbase/neutron,suneeth51/neutron,gopal1cloud/neutron,kaiweifan/neutron,sajuptpm/neutron-ipam,chitr/neutron,CiscoSystems/quantum,skyddv/neutron,virtualopensystems/neutron,vijayendrabvs/hap,cloudbase/neutron,klmitch/neutron,kaiweifan/vse-lbaas-plugin-poc,adelina-t/neutron,zhhf/charging,ykaneko/quantum,SmartInfrastructures/neutron,vijayendrabvs/ssl-neutron,bigswitch/neutron,tpaszkowski/quantum,CiscoSystems/vespa,blueboxgroup/neutron,SmartInfrastructures/neutron,bigswitch/neutron,kaiweifan/vse-lbaas-plugin-poc,mahak/neutron,ntt-sic/neutron,waltBB/neutron_read,Juniper/contrail-dev-neutron,leeseulstack/openstack,gkotton/neutron,paninetworks/neutron,Comcast/neutron,projectcalico/calico-neutron,eonpatapon/neutron,cisco-openstack/neutron,huntxu/neutron,watonyweng/neutron,netscaler/neutron,skyddv/neutron,Juniper/neutron,eonpatapon/neutron,yamt/neutron,MaximNevrov/neutron,cloudbase/neutron-virtualbox,yanheven/neutron
|
Add migration for network bindings in NVP plugin
Bug 1099895
Ensures the table nvp_network_bindings is created when upgrading
database to head, by adding an appropriate alembic migration
Change-Id: I4a794ed0ec6866d657cb2470d5aa67828e81aa75
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_netbinding
Revision ID: 1d76643bcec4
Revises: 48b6f43f7471
Create Date: 2013-01-15 07:36:10.024346
"""
# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '48b6f43f7471'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'nvp_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'),
nullable=False),
sa.Column('tz_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('nvp_network_bindings')
|
<commit_before><commit_msg>Add migration for network bindings in NVP plugin
Bug 1099895
Ensures the table nvp_network_bindings is created when upgrading
database to head, by adding an appropriate alembic migration
Change-Id: I4a794ed0ec6866d657cb2470d5aa67828e81aa75<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_netbinding
Revision ID: 1d76643bcec4
Revises: 48b6f43f7471
Create Date: 2013-01-15 07:36:10.024346
"""
# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '48b6f43f7471'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'nvp_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'),
nullable=False),
sa.Column('tz_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('nvp_network_bindings')
|
Add migration for network bindings in NVP plugin
Bug 1099895
Ensures the table nvp_network_bindings is created when upgrading
database to head, by adding an appropriate alembic migration
Change-Id: I4a794ed0ec6866d657cb2470d5aa67828e81aa75# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_netbinding
Revision ID: 1d76643bcec4
Revises: 48b6f43f7471
Create Date: 2013-01-15 07:36:10.024346
"""
# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '48b6f43f7471'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'nvp_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'),
nullable=False),
sa.Column('tz_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('nvp_network_bindings')
|
<commit_before><commit_msg>Add migration for network bindings in NVP plugin
Bug 1099895
Ensures the table nvp_network_bindings is created when upgrading
database to head, by adding an appropriate alembic migration
Change-Id: I4a794ed0ec6866d657cb2470d5aa67828e81aa75<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nvp_netbinding
Revision ID: 1d76643bcec4
Revises: 48b6f43f7471
Create Date: 2013-01-15 07:36:10.024346
"""
# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '48b6f43f7471'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'nvp_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'),
nullable=False),
sa.Column('tz_uuid', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('nvp_network_bindings')
|
|
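A standalone SQLAlchemy sketch (not the Quantum/alembic code path) of the table this migration creates, run against an in-memory SQLite engine; the columns mirror the upgrade() above:
# Illustrative only: uses plain SQLAlchemy metadata instead of alembic op.* calls.
import sqlalchemy as sa
metadata = sa.MetaData()
networks = sa.Table('networks', metadata,
                    sa.Column('id', sa.String(36), primary_key=True))
nvp_network_bindings = sa.Table(
    'nvp_network_bindings', metadata,
    sa.Column('network_id', sa.String(36),
              sa.ForeignKey('networks.id', ondelete='CASCADE'),
              primary_key=True),
    sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'), nullable=False),
    sa.Column('tz_uuid', sa.String(36), nullable=True),
    sa.Column('vlan_id', sa.Integer, nullable=True))
engine = sa.create_engine('sqlite://')
metadata.create_all(engine)  # builds both tables in the throwaway database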
0ff0e770babe4c1e1d07f5b8f0722774d5bcb2b0
|
benchexec/tools/kissat.py
|
benchexec/tools/kissat.py
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Kissat SAT Solver.
URL: http://fmv.jku.at/kissat/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("kissat", subdir="build")
def name(self):
return "Kissat"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + list(task.input_files_or_identifier)
def determine_result(self, run):
"""
@return: status of Kissat after executing a run
"""
status = None
for line in run.output:
if "s SATISFIABLE" in line:
status = "SAT"
elif "s UNSATISFIABLE" in line:
status = "UNSAT"
if (not status or status == result.RESULT_UNKNOWN) and run.was_timeout:
status = "TIMEOUT"
if not status:
status = result.RESULT_ERROR
return status
|
Add a tool-info module for Kissat SAT solver
|
Add a tool-info module for Kissat SAT solver
|
Python
|
apache-2.0
|
sosy-lab/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec
|
Add a tool-info module for Kissat SAT solver
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Kissat SAT Solver.
URL: http://fmv.jku.at/kissat/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("kissat", subdir="build")
def name(self):
return "Kissat"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + list(task.input_files_or_identifier)
def determine_result(self, run):
"""
@return: status of Kissat after executing a run
"""
status = None
for line in run.output:
if "s SATISFIABLE" in line:
status = "SAT"
elif "s UNSATISFIABLE" in line:
status = "UNSAT"
if (not status or status == result.RESULT_UNKNOWN) and run.was_timeout:
status = "TIMEOUT"
if not status:
status = result.RESULT_ERROR
return status
|
<commit_before><commit_msg>Add a tool-info module for Kissat SAT solver<commit_after>
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Kissat SAT Solver.
URL: http://fmv.jku.at/kissat/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("kissat", subdir="build")
def name(self):
return "Kissat"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + list(task.input_files_or_identifier)
def determine_result(self, run):
"""
@return: status of Kissat after executing a run
"""
status = None
for line in run.output:
if "s SATISFIABLE" in line:
status = "SAT"
elif "s UNSATISFIABLE" in line:
status = "UNSAT"
if (not status or status == result.RESULT_UNKNOWN) and run.was_timeout:
status = "TIMEOUT"
if not status:
status = result.RESULT_ERROR
return status
|
Add a tool-info module for Kissat SAT solver# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Kissat SAT Solver.
URL: http://fmv.jku.at/kissat/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("kissat", subdir="build")
def name(self):
return "Kissat"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + list(task.input_files_or_identifier)
def determine_result(self, run):
"""
@return: status of Kissat after executing a run
"""
status = None
for line in run.output:
if "s SATISFIABLE" in line:
status = "SAT"
elif "s UNSATISFIABLE" in line:
status = "UNSAT"
if (not status or status == result.RESULT_UNKNOWN) and run.was_timeout:
status = "TIMEOUT"
if not status:
status = result.RESULT_ERROR
return status
|
<commit_before><commit_msg>Add a tool-info module for Kissat SAT solver<commit_after># This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Kissat SAT Solver.
URL: http://fmv.jku.at/kissat/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("kissat", subdir="build")
def name(self):
return "Kissat"
def version(self, executable):
return self._version_from_tool(executable)
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + list(task.input_files_or_identifier)
def determine_result(self, run):
"""
@return: status of Kissat after executing a run
"""
status = None
for line in run.output:
if "s SATISFIABLE" in line:
status = "SAT"
elif "s UNSATISFIABLE" in line:
status = "UNSAT"
if (not status or status == result.RESULT_UNKNOWN) and run.was_timeout:
status = "TIMEOUT"
if not status:
status = result.RESULT_ERROR
return status
|
|
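A small check of determine_result's output parsing (not part of the commit); real run objects come from BenchExec, so a minimal stand-in with only the attributes the method reads is assumed here:
# Illustrative only: _FakeRun mimics the run fields used by determine_result.
class _FakeRun:
    def __init__(self, output, was_timeout=False):
        self.output = output
        self.was_timeout = was_timeout
tool = Tool()  # assumes the Tool class defined above is in scope
print(tool.determine_result(_FakeRun(['c parsing input', 's SATISFIABLE'])))  # -> SAT
print(tool.determine_result(_FakeRun(['s UNSATISFIABLE'])))                   # -> UNSAT
print(tool.determine_result(_FakeRun([], was_timeout=True)))                  # -> TIMEOUT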
0c065daa5eef7868cf1825c247cf9628907b86a0
|
tests/test_vector2_isclose.py
|
tests/test_vector2_isclose.py
|
from ppb_vector import Vector2
from utils import vectors
from hypothesis import assume, given, note, example
from hypothesis.strategies import floats
@given(x=vectors(), abs_tol=floats(min_value=0), rel_tol=floats(min_value=0))
def test_isclose_to_self(x, abs_tol, rel_tol):
assert x.isclose(x, abs_tol=abs_tol, rel_tol=rel_tol)
|
Add a test for Vector2.isclose
|
Add a test for Vector2.isclose
|
Python
|
artistic-2.0
|
ppb/ppb-vector,ppb/ppb-vector
|
Add a test for Vector2.isclose
|
from ppb_vector import Vector2
from utils import vectors
from hypothesis import assume, given, note, example
from hypothesis.strategies import floats
@given(x=vectors(), abs_tol=floats(min_value=0), rel_tol=floats(min_value=0))
def test_isclose_to_self(x, abs_tol, rel_tol):
assert x.isclose(x, abs_tol=abs_tol, rel_tol=rel_tol)
|
<commit_before><commit_msg>Add a test for Vector2.isclose<commit_after>
|
from ppb_vector import Vector2
from utils import vectors
from hypothesis import assume, given, note, example
from hypothesis.strategies import floats
@given(x=vectors(), abs_tol=floats(min_value=0), rel_tol=floats(min_value=0))
def test_isclose_to_self(x, abs_tol, rel_tol):
assert x.isclose(x, abs_tol=abs_tol, rel_tol=rel_tol)
|
Add a test for Vector2.isclosefrom ppb_vector import Vector2
from utils import vectors
from hypothesis import assume, given, note, example
from hypothesis.strategies import floats
@given(x=vectors(), abs_tol=floats(min_value=0), rel_tol=floats(min_value=0))
def test_isclose_to_self(x, abs_tol, rel_tol):
assert x.isclose(x, abs_tol=abs_tol, rel_tol=rel_tol)
|
<commit_before><commit_msg>Add a test for Vector2.isclose<commit_after>from ppb_vector import Vector2
from utils import vectors
from hypothesis import assume, given, note, example
from hypothesis.strategies import floats
@given(x=vectors(), abs_tol=floats(min_value=0), rel_tol=floats(min_value=0))
def test_isclose_to_self(x, abs_tol, rel_tol):
assert x.isclose(x, abs_tol=abs_tol, rel_tol=rel_tol)
|
|
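A concrete instance of the property the hypothesis test asserts (not part of the commit); Vector2(3, 4) is just an example value:
# Illustrative only: a vector should always compare close to itself, even with zero tolerances.
from ppb_vector import Vector2
v = Vector2(3, 4)
assert v.isclose(v)
assert v.isclose(v, abs_tol=0, rel_tol=0)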
c7ef4887d06b47d64dad9fc989e9eadb1b9d16ef
|
tests/test_with_hypothesis.py
|
tests/test_with_hypothesis.py
|
from hypothesis import given
from aead import AEAD
@given(bytes, bytes)
def test_round_trip_encrypt_decrypt(plaintext, associated_data):
cryptor = AEAD(AEAD.generate_key())
ct = cryptor.encrypt(plaintext, associated_data)
assert plaintext == cryptor.decrypt(ct, associated_data)
|
Add roundtrip encrypt-decrypt test using hypothesis.
|
Add roundtrip encrypt-decrypt test using hypothesis.
|
Python
|
apache-2.0
|
Ayrx/python-aead,Ayrx/python-aead
|
Add roundtrip encrypt-decrypt test using hypothesis.
|
from hypothesis import given
from aead import AEAD
@given(bytes, bytes)
def test_round_trip_encrypt_decrypt(plaintext, associated_data):
cryptor = AEAD(AEAD.generate_key())
ct = cryptor.encrypt(plaintext, associated_data)
assert plaintext == cryptor.decrypt(ct, associated_data)
|
<commit_before><commit_msg>Add roundtrip encrypt-decrypt test using hypothesis.<commit_after>
|
from hypothesis import given
from aead import AEAD
@given(bytes, bytes)
def test_round_trip_encrypt_decrypt(plaintext, associated_data):
cryptor = AEAD(AEAD.generate_key())
ct = cryptor.encrypt(plaintext, associated_data)
assert plaintext == cryptor.decrypt(ct, associated_data)
|
Add roundtrip encrypt-decrypt test using hypothesis.from hypothesis import given
from aead import AEAD
@given(bytes, bytes)
def test_round_trip_encrypt_decrypt(plaintext, associated_data):
cryptor = AEAD(AEAD.generate_key())
ct = cryptor.encrypt(plaintext, associated_data)
assert plaintext == cryptor.decrypt(ct, associated_data)
|
<commit_before><commit_msg>Add roundtrip encrypt-decrypt test using hypothesis.<commit_after>from hypothesis import given
from aead import AEAD
@given(bytes, bytes)
def test_round_trip_encrypt_decrypt(plaintext, associated_data):
cryptor = AEAD(AEAD.generate_key())
ct = cryptor.encrypt(plaintext, associated_data)
assert plaintext == cryptor.decrypt(ct, associated_data)
|
|
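A single concrete round trip matching the property test above (not part of the commit); the plaintext and associated data are arbitrary example bytes:
# Illustrative only: encrypt then decrypt with the same key and associated data.
from aead import AEAD
cryptor = AEAD(AEAD.generate_key())
ct = cryptor.encrypt(b'attack at dawn', b'metadata')
assert cryptor.decrypt(ct, b'metadata') == b'attack at dawn'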
7997b8ad33736cdc436325cda3ed65db3223f75c
|
get_email_body_using_imap.py
|
get_email_body_using_imap.py
|
import email
from imaplib import IMAP4_SSL
import quopri
# Called recursively.
def get_email_content(message):
if not message.is_multipart():
return message.get_payload()
parts = [get_email_content(payload) for payload in message.get_payload()]
return ''.join(parts)
server = 'imap.example.com'
username = 'matt'
password = 'Password1'
mail = IMAP4_SSL(server)
mail.login(username, password)
mail.select('INBOX')
ok, raw_uid_list = mail.uid('search', None, 'ALL')
if ok != 'OK':
raise Exception('Bad')
uid_list = raw_uid_list[0].split()
uid = uid_list[0]
ok, data = mail.uid('fetch', uid, '(RFC822)')
if ok != 'OK':
raise Exception('Bad')
raw_email_bytes = data[0][1]
email_message = email.message_from_bytes(raw_email_bytes)
raw_content = get_email_content(email_message)
content = quopri.decodestring(raw_content, False)
for line in content.splitlines():
print(line)
|
Add get email body using IMAP example
|
Add get email body using IMAP example
|
Python
|
mit
|
MattMS/Python_3_examples
|
Add get email body using IMAP example
|
import email
from imaplib import IMAP4_SSL
import quopri
# Called recursively.
def get_email_content(message):
if not message.is_multipart():
return message.get_payload()
parts = [get_email_content(payload) for payload in message.get_payload()]
return ''.join(parts)
server = 'imap.example.com'
username = 'matt'
password = 'Password1'
mail = IMAP4_SSL(server)
mail.login(username, password)
mail.select('INBOX')
ok, raw_uid_list = mail.uid('search', None, 'ALL')
if ok != 'OK':
raise Exception('Bad')
uid_list = raw_uid_list[0].split()
uid = uid_list[0]
ok, data = mail.uid('fetch', uid, '(RFC822)')
if ok != 'OK':
raise Exception('Bad')
raw_email_bytes = data[0][1]
email_message = email.message_from_bytes(raw_email_bytes)
raw_content = get_email_content(email_message)
content = quopri.decodestring(raw_content, False)
for line in content.splitlines():
print(line)
|
<commit_before><commit_msg>Add get email body using IMAP example<commit_after>
|
import email
from imaplib import IMAP4_SSL
import quopri
# Called recursively.
def get_email_content(message):
if not message.is_multipart():
return message.get_payload()
parts = [get_email_content(payload) for payload in message.get_payload()]
return ''.join(parts)
server = 'imap.example.com'
username = 'matt'
password = 'Password1'
mail = IMAP4_SSL(server)
mail.login(username, password)
mail.select('INBOX')
ok, raw_uid_list = mail.uid('search', None, 'ALL')
if ok != 'OK':
raise Exception('Bad')
uid_list = raw_uid_list[0].split()
uid = uid_list[0]
ok, data = mail.uid('fetch', uid, '(RFC822)')
if ok != 'OK':
raise Exception('Bad')
raw_email_bytes = data[0][1]
email_message = email.message_from_bytes(raw_email_bytes)
raw_content = get_email_content(email_message)
content = quopri.decodestring(raw_content, False)
for line in content.splitlines():
print(line)
|
Add get email body using IMAP exampleimport email
from imaplib import IMAP4_SSL
import quopri
# Called recursively.
def get_email_content(message):
if not message.is_multipart():
return message.get_payload()
parts = [get_email_content(payload) for payload in message.get_payload()]
return ''.join(parts)
server = 'imap.example.com'
username = 'matt'
password = 'Password1'
mail = IMAP4_SSL(server)
mail.login(username, password)
mail.select('INBOX')
ok, raw_uid_list = mail.uid('search', None, 'ALL')
if ok != 'OK':
raise Exception('Bad')
uid_list = raw_uid_list[0].split()
uid = uid_list[0]
ok, data = mail.uid('fetch', uid, '(RFC822)')
if ok != 'OK':
raise Exception('Bad')
raw_email_bytes = data[0][1]
email_message = email.message_from_bytes(raw_email_bytes)
raw_content = get_email_content(email_message)
content = quopri.decodestring(raw_content, False)
for line in content.splitlines():
print(line)
|
<commit_before><commit_msg>Add get email body using IMAP example<commit_after>import email
from imaplib import IMAP4_SSL
import quopri
# Called recursively.
def get_email_content(message):
if not message.is_multipart():
return message.get_payload()
parts = [get_email_content(payload) for payload in message.get_payload()]
return ''.join(parts)
server = 'imap.example.com'
username = 'matt'
password = 'Password1'
mail = IMAP4_SSL(server)
mail.login(username, password)
mail.select('INBOX')
ok, raw_uid_list = mail.uid('search', None, 'ALL')
if ok != 'OK':
raise Exception('Bad')
uid_list = raw_uid_list[0].split()
uid = uid_list[0]
ok, data = mail.uid('fetch', uid, '(RFC822)')
if ok != 'OK':
raise Exception('Bad')
raw_email_bytes = data[0][1]
email_message = email.message_from_bytes(raw_email_bytes)
raw_content = get_email_content(email_message)
content = quopri.decodestring(raw_content, False)
for line in content.splitlines():
print(line)
|
|
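A standalone sketch of the recursive get_email_content walk (not from the commit), using a hand-built multipart message so no IMAP server or credentials are needed:
# Illustrative only: mirrors the helper above on an in-memory message.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def get_email_content(message):
    if not message.is_multipart():
        return message.get_payload()
    return ''.join(get_email_content(part) for part in message.get_payload())
msg = MIMEMultipart()
msg.attach(MIMEText('part one\n'))
msg.attach(MIMEText('part two\n'))
print(get_email_content(msg))  # -> part one / part two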
535d8330427db8ddcb5bef832b8c7ae6ea3f6583
|
stack/331.py
|
stack/331.py
|
class Solution:
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
arr_pre_order = preorder.split(',')
stack = []
for node in arr_pre_order:
stack.append(node)
while len(stack) > 1 and stack[-1] == '#' and stack[-2] == '#':
stack.pop()
stack.pop()
if len(stack) < 1:
return False
stack[-1] = '#'
if len(stack) == 1 and stack[0] == '#':
return True
return False
|
Verify Preorder Serialization of a Binary Tree
|
Verify Preorder Serialization of a Binary Tree
|
Python
|
apache-2.0
|
MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode
|
Verify Preorder Serialization of a Binary Tree
|
class Solution:
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
arr_pre_order = preorder.split(',')
stack = []
for node in arr_pre_order:
stack.append(node)
while len(stack) > 1 and stack[-1] == '#' and stack[-2] == '#':
stack.pop()
stack.pop()
if len(stack) < 1:
return False
stack[-1] = '#'
if len(stack) == 1 and stack[0] == '#':
return True
return False
|
<commit_before><commit_msg>Verify Preorder Serialization of a Binary Tree<commit_after>
|
class Solution:
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
arr_pre_order = preorder.split(',')
stack = []
for node in arr_pre_order:
stack.append(node)
while len(stack) > 1 and stack[-1] == '#' and stack[-2] == '#':
stack.pop()
stack.pop()
if len(stack) < 1:
return False
stack[-1] = '#'
if len(stack) == 1 and stack[0] == '#':
return True
return False
|
Verify Preorder Serialization of a Binary Treeclass Solution:
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
arr_pre_order = preorder.split(',')
stack = []
for node in arr_pre_order:
stack.append(node)
while len(stack) > 1 and stack[-1] == '#' and stack[-2] == '#':
stack.pop()
stack.pop()
if len(stack) < 1:
return False
stack[-1] = '#'
if len(stack) == 1 and stack[0] == '#':
return True
return False
|
<commit_before><commit_msg>Verify Preorder Serialization of a Binary Tree<commit_after>class Solution:
def isValidSerialization(self, preorder):
"""
:type preorder: str
:rtype: bool
"""
arr_pre_order = preorder.split(',')
stack = []
for node in arr_pre_order:
stack.append(node)
while len(stack) > 1 and stack[-1] == '#' and stack[-2] == '#':
stack.pop()
stack.pop()
if len(stack) < 1:
return False
stack[-1] = '#'
if len(stack) == 1 and stack[0] == '#':
return True
return False
|
|
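A few illustrative calls (not part of the commit), assuming the Solution class above is in scope; the first string is the classic valid preorder example, the other two are invalid:
# Illustrative only: '#' marks a null child in the serialized preorder.
s = Solution()
print(s.isValidSerialization('9,3,4,#,#,1,#,#,2,#,6,#,#'))  # -> True
print(s.isValidSerialization('1,#'))                         # -> False
print(s.isValidSerialization('9,#,#,1'))                     # -> False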
825d2c053e7fa744f1d9c07748da358cba8d0d3b
|
tests/query_test/test_kudu.py
|
tests/query_test/test_kudu.py
|
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.common.impala_test_suite import ImpalaTestSuite
class TestKuduOperations(ImpalaTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestKuduOperations, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
def setup_method(self, method):
self.cleanup_db("kududb_test")
self.client.execute("create database kududb_test")
def teardown_method(self, method):
self.cleanup_db("kududb_test")
def test_sample(self, vector):
pass
|
Add boilerplate code for Kudu end-to-end test
|
Add boilerplate code for Kudu end-to-end test
Change-Id: I568719afe5c172ac7e4ac98f9fe030f9710f26f1
Reviewed-on: http://gerrit.sjc.cloudera.com:8080/7038
Reviewed-by: David Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>
Tested-by: jenkins
|
Python
|
apache-2.0
|
ibmsoe/ImpalaPPC,ibmsoe/ImpalaPPC,ibmsoe/ImpalaPPC,ibmsoe/ImpalaPPC,ibmsoe/ImpalaPPC,ibmsoe/ImpalaPPC,ibmsoe/ImpalaPPC
|
Add boilerplate code for Kudu end-to-end test
Change-Id: I568719afe5c172ac7e4ac98f9fe030f9710f26f1
Reviewed-on: http://gerrit.sjc.cloudera.com:8080/7038
Reviewed-by: David Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>
Tested-by: jenkins
|
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.common.impala_test_suite import ImpalaTestSuite
class TestKuduOperations(ImpalaTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestKuduOperations, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
def setup_method(self, method):
self.cleanup_db("kududb_test")
self.client.execute("create database kududb_test")
def teardown_method(self, method):
self.cleanup_db("kududb_test")
def test_sample(self, vector):
pass
|
<commit_before><commit_msg>Add boilerplate code for Kudu end-to-end test
Change-Id: I568719afe5c172ac7e4ac98f9fe030f9710f26f1
Reviewed-on: http://gerrit.sjc.cloudera.com:8080/7038
Reviewed-by: David Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>
Tested-by: jenkins<commit_after>
|
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.common.impala_test_suite import ImpalaTestSuite
class TestKuduOperations(ImpalaTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestKuduOperations, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
def setup_method(self, method):
self.cleanup_db("kududb_test")
self.client.execute("create database kududb_test")
def teardown_method(self, method):
self.cleanup_db("kududb_test")
def test_sample(self, vector):
pass
|
Add boilerplate code for Kudu end-to-end test
Change-Id: I568719afe5c172ac7e4ac98f9fe030f9710f26f1
Reviewed-on: http://gerrit.sjc.cloudera.com:8080/7038
Reviewed-by: David Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>
Tested-by: jenkins# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.common.impala_test_suite import ImpalaTestSuite
class TestKuduOperations(ImpalaTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestKuduOperations, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
def setup_method(self, method):
self.cleanup_db("kududb_test")
self.client.execute("create database kududb_test")
def teardown_method(self, method):
self.cleanup_db("kududb_test")
def test_sample(self, vector):
pass
|
<commit_before><commit_msg>Add boilerplate code for Kudu end-to-end test
Change-Id: I568719afe5c172ac7e4ac98f9fe030f9710f26f1
Reviewed-on: http://gerrit.sjc.cloudera.com:8080/7038
Reviewed-by: David Alves <33ea948168c114d220e0372a903be6ee60f6396e@cloudera.com>
Tested-by: jenkins<commit_after># Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.common.impala_test_suite import ImpalaTestSuite
class TestKuduOperations(ImpalaTestSuite):
"""
This suite tests the different modification operations when using a kudu table.
"""
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestKuduOperations, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
def setup_method(self, method):
self.cleanup_db("kududb_test")
self.client.execute("create database kududb_test")
def teardown_method(self, method):
self.cleanup_db("kududb_test")
def test_sample(self, vector):
pass
|
|
5eee235af2bc145af5a9d476054da12f8cb095e2
|
svir/test/unit/test_dumb.py
|
svir/test/unit/test_dumb.py
|
# -*- coding: utf-8 -*-
#/***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2013 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# Copyright (c) 2013-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
# from svir.test.utilities import get_qgis_app
# QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class DumbTest(unittest.TestCase):
"""Test nothing."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_dumb(self):
pass
|
Add a dumb test, to investigate segfault [skip CI]
|
Add a dumb test, to investigate segfault [skip CI]
|
Python
|
agpl-3.0
|
gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis
|
Add a dumb test, to investigate segfault [skip CI]
|
# -*- coding: utf-8 -*-
#/***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2013 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# Copyright (c) 2013-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
# from svir.test.utilities import get_qgis_app
# QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class DumbTest(unittest.TestCase):
"""Test nothing."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_dumb(self):
pass
|
<commit_before><commit_msg>Add a dumb test, to investigate segfault [skip CI]<commit_after>
|
# -*- coding: utf-8 -*-
#/***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2013 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# Copyright (c) 2013-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
# from svir.test.utilities import get_qgis_app
# QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class DumbTest(unittest.TestCase):
"""Test nothing."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_dumb(self):
pass
|
Add a dumb test, to investigate segfault [skip CI]# -*- coding: utf-8 -*-
#/***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2013 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# Copyright (c) 2013-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
# from svir.test.utilities import get_qgis_app
# QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class DumbTest(unittest.TestCase):
"""Test nothing."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_dumb(self):
pass
|
<commit_before><commit_msg>Add a dumb test, to investigate segfault [skip CI]<commit_after># -*- coding: utf-8 -*-
#/***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2013 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# Copyright (c) 2013-2014, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import unittest
# from svir.test.utilities import get_qgis_app
# QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class DumbTest(unittest.TestCase):
"""Test nothing."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_dumb(self):
pass
|
|
1e17a5297d2b02035088a1c6218e3b5d1796848f
|
datasets/management/commands/load_freesound_false_examples.py
|
datasets/management/commands/load_freesound_false_examples.py
|
from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Taxonomy, Dataset, Sound, TaxonomyNode
class Command(BaseCommand):
help = 'Load false examples from json taxonomy file. Use it as python manage.py load_freesound_false_examples ' \
'DATASET_ID PATH/TO/TAXONOMY_FILE.json'
def add_arguments(self, parser):
parser.add_argument('dataset_id', type=int)
parser.add_argument('taxonomy_file', type=str)
def handle(self, *args, **options):
file_location = options['taxonomy_file']
dataset_id = options['dataset_id']
ds = Dataset.objects.get(id=dataset_id)
taxonomy = ds.taxonomy
data = json.load(open(file_location))
for d in data:
node = taxonomy.get_element_at_id(d['id'])
for ex_id in d['FS_false_examples']:
sound = Sound.objects.get(freesound_id=ex_id)
node.freesound_false_examples.add(sound)
node.save()
|
Add command load freesound false examples
|
Add command load freesound false examples
|
Python
|
agpl-3.0
|
MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets
|
Add command load freesound false examples
|
from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Taxonomy, Dataset, Sound, TaxonomyNode
class Command(BaseCommand):
help = 'Load false examples from json taxonomy file. Use it as python manage.py load_freesound_false_examples ' \
           'DATASET_ID PATH/TO/TAXONOMY_FILE.json'
def add_arguments(self, parser):
parser.add_argument('dataset_id', type=int)
parser.add_argument('taxonomy_file', type=str)
def handle(self, *args, **options):
file_location = options['taxonomy_file']
dataset_id = options['dataset_id']
ds = Dataset.objects.get(id=dataset_id)
taxonomy = ds.taxonomy
data = json.load(open(file_location))
for d in data:
node = taxonomy.get_element_at_id(d['id'])
for ex_id in d['FS_false_examples']:
sound = Sound.objects.get(freesound_id=ex_id)
node.freesound_false_examples.add(sound)
node.save()
|
<commit_before><commit_msg>Add command load freesound false examples<commit_after>
|
from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Taxonomy, Dataset, Sound, TaxonomyNode
class Command(BaseCommand):
help = 'Load false examples from json taxonomy file. Use it as python manage.py load_freesound_false_examples ' \
           'DATASET_ID PATH/TO/TAXONOMY_FILE.json'
def add_arguments(self, parser):
parser.add_argument('dataset_id', type=int)
parser.add_argument('taxonomy_file', type=str)
def handle(self, *args, **options):
file_location = options['taxonomy_file']
dataset_id = options['dataset_id']
ds = Dataset.objects.get(id=dataset_id)
taxonomy = ds.taxonomy
data = json.load(open(file_location))
for d in data:
node = taxonomy.get_element_at_id(d['id'])
for ex_id in d['FS_false_examples']:
sound = Sound.objects.get(freesound_id=ex_id)
node.freesound_false_examples.add(sound)
node.save()
|
Add command load freesound false examplesfrom django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Taxonomy, Dataset, Sound, TaxonomyNode
class Command(BaseCommand):
help = 'Load false examples from json taxonomy file. Use it as python manage.py load_freesound_false_examples ' \
           'DATASET_ID PATH/TO/TAXONOMY_FILE.json'
def add_arguments(self, parser):
parser.add_argument('dataset_id', type=int)
parser.add_argument('taxonomy_file', type=str)
def handle(self, *args, **options):
file_location = options['taxonomy_file']
dataset_id = options['dataset_id']
ds = Dataset.objects.get(id=dataset_id)
taxonomy = ds.taxonomy
data = json.load(open(file_location))
for d in data:
node = taxonomy.get_element_at_id(d['id'])
for ex_id in d['FS_false_examples']:
sound = Sound.objects.get(freesound_id=ex_id)
node.freesound_false_examples.add(sound)
node.save()
|
<commit_before><commit_msg>Add command load freesound false examples<commit_after>from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Taxonomy, Dataset, Sound, TaxonomyNode
class Command(BaseCommand):
help = 'Load false examples from json taxonomy file. Use it as python manage.py load_freesound_false_examples ' \
           'DATASET_ID PATH/TO/TAXONOMY_FILE.json'
def add_arguments(self, parser):
parser.add_argument('dataset_id', type=int)
parser.add_argument('taxonomy_file', type=str)
def handle(self, *args, **options):
file_location = options['taxonomy_file']
dataset_id = options['dataset_id']
ds = Dataset.objects.get(id=dataset_id)
taxonomy = ds.taxonomy
data = json.load(open(file_location))
for d in data:
node = taxonomy.get_element_at_id(d['id'])
for ex_id in d['FS_false_examples']:
sound = Sound.objects.get(freesound_id=ex_id)
node.freesound_false_examples.add(sound)
node.save()
|
|
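A minimal usage sketch for the management command in the record above (the dataset id and file path are placeholders, not values from the repository): besides the documented "python manage.py load_freesound_false_examples DATASET_ID TAXONOMY_FILE.json" invocation, the command can also be driven from Python through Django's call_command helper.

# Hedged usage sketch; '1' and '/path/to/taxonomy.json' are placeholder values.
from django.core.management import call_command

call_command('load_freesound_false_examples', '1', '/path/to/taxonomy.json')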
bcada138526fc8ff4d07297802074d45b417d075
|
dplace_app/migrations/0077_increase_max_length_for_Society_id.py
|
dplace_app/migrations/0077_increase_max_length_for_Society_id.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dplace_app', '0076_society_original_name'),
]
operations = [
migrations.AlterField(
model_name='society',
name='ext_id',
field=models.CharField(max_length=20, unique=True, verbose_name='External ID'),
),
]
|
Increase length of society ID
|
Increase length of society ID
|
Python
|
mit
|
stefelisabeth/dplace,NESCent/dplace,stefelisabeth/dplace,NESCent/dplace,D-PLACE/dplace,D-PLACE/dplace,stefelisabeth/dplace,stefelisabeth/dplace,shh-dlce/dplace,NESCent/dplace,NESCent/dplace,D-PLACE/dplace,D-PLACE/dplace,shh-dlce/dplace,shh-dlce/dplace,shh-dlce/dplace
|
Increase length of society ID
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dplace_app', '0076_society_original_name'),
]
operations = [
migrations.AlterField(
model_name='society',
name='ext_id',
field=models.CharField(max_length=20, unique=True, verbose_name='External ID'),
),
]
|
<commit_before><commit_msg>Increase length of society ID<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dplace_app', '0076_society_original_name'),
]
operations = [
migrations.AlterField(
model_name='society',
name='ext_id',
field=models.CharField(max_length=20, unique=True, verbose_name='External ID'),
),
]
|
Increase length of society ID# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dplace_app', '0076_society_original_name'),
]
operations = [
migrations.AlterField(
model_name='society',
name='ext_id',
field=models.CharField(max_length=20, unique=True, verbose_name='External ID'),
),
]
|
<commit_before><commit_msg>Increase length of society ID<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 20:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dplace_app', '0076_society_original_name'),
]
operations = [
migrations.AlterField(
model_name='society',
name='ext_id',
field=models.CharField(max_length=20, unique=True, verbose_name='External ID'),
),
]
|
|
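A model-side sketch of what the migration above reflects (the Society model definition below is assumed, not code from the commit): widening ext_id on the model is the change that would make Django emit the recorded AlterField operation.

# Hypothetical model counterpart of the AlterField above; the field name,
# verbose_name, max_length and unique flag are taken from the migration,
# the rest of the model is an assumption.
from django.db import models

class Society(models.Model):
    ext_id = models.CharField('External ID', max_length=20, unique=True)

Running python manage.py makemigrations dplace_app against such a model change produces an AlterField equivalent to the one in this record.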
d3dab028d3d91c5144489e3826753a1a1579a0e6
|
tests/testvdf.py
|
tests/testvdf.py
|
import unittest
from steam import vdf
class SyntaxTestCase(unittest.TestCase):
UNQUOTED_VDF = """
node
{
key value
}
"""
QUOTED_VDF = """
"node"
{
"key" "value"
}
"""
MACRO_UNQUOTED_VDF = """
node
{
key value [$MACRO]
}
"""
MACRO_QUOTED_VDF = """
"node"
{
"key" "value" [$MACRO]
}
"""
MIXED_VDF = """
node
{
"key" value
key2 "value"
"key3" "value" [$MACRO]
// Comment
"subnode" [$MACRO]
{
key value
}
}
"""
EXPECTED_DICT = {
u"node": {
u"key": u"value"
}
}
EXPECTED_MIXED_DICT = {
u"node": {
u"key": u"value",
u"key2": u"value",
u"key3": u"value",
u"subnode": {
u"key": u"value"
}
}
}
class DeserializeTestCase(SyntaxTestCase):
def test_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.UNQUOTED_VDF))
def test_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.QUOTED_VDF))
def test_macro_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_UNQUOTED_VDF))
def test_macro_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_QUOTED_VDF))
def test_mixed(self):
self.assertEqual(self.EXPECTED_MIXED_DICT, vdf.loads(self.MIXED_VDF))
|
Add initial vdf test fixture
|
Add initial vdf test fixture
|
Python
|
isc
|
miedzinski/steamodd,Lagg/steamodd
|
Add initial vdf test fixture
|
import unittest
from steam import vdf
class SyntaxTestCase(unittest.TestCase):
UNQUOTED_VDF = """
node
{
key value
}
"""
QUOTED_VDF = """
"node"
{
"key" "value"
}
"""
MACRO_UNQUOTED_VDF = """
node
{
key value [$MACRO]
}
"""
MACRO_QUOTED_VDF = """
"node"
{
"key" "value" [$MACRO]
}
"""
MIXED_VDF = """
node
{
"key" value
key2 "value"
"key3" "value" [$MACRO]
// Comment
"subnode" [$MACRO]
{
key value
}
}
"""
EXPECTED_DICT = {
u"node": {
u"key": u"value"
}
}
EXPECTED_MIXED_DICT = {
u"node": {
u"key": u"value",
u"key2": u"value",
u"key3": u"value",
u"subnode": {
u"key": u"value"
}
}
}
class DeserializeTestCase(SyntaxTestCase):
def test_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.UNQUOTED_VDF))
def test_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.QUOTED_VDF))
def test_macro_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_UNQUOTED_VDF))
def test_macro_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_QUOTED_VDF))
def test_mixed(self):
self.assertEqual(self.EXPECTED_MIXED_DICT, vdf.loads(self.MIXED_VDF))
|
<commit_before><commit_msg>Add initial vdf test fixture<commit_after>
|
import unittest
from steam import vdf
class SyntaxTestCase(unittest.TestCase):
UNQUOTED_VDF = """
node
{
key value
}
"""
QUOTED_VDF = """
"node"
{
"key" "value"
}
"""
MACRO_UNQUOTED_VDF = """
node
{
key value [$MACRO]
}
"""
MACRO_QUOTED_VDF = """
"node"
{
"key" "value" [$MACRO]
}
"""
MIXED_VDF = """
node
{
"key" value
key2 "value"
"key3" "value" [$MACRO]
// Comment
"subnode" [$MACRO]
{
key value
}
}
"""
EXPECTED_DICT = {
u"node": {
u"key": u"value"
}
}
EXPECTED_MIXED_DICT = {
u"node": {
u"key": u"value",
u"key2": u"value",
u"key3": u"value",
u"subnode": {
u"key": u"value"
}
}
}
class DeserializeTestCase(SyntaxTestCase):
def test_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.UNQUOTED_VDF))
def test_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.QUOTED_VDF))
def test_macro_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_UNQUOTED_VDF))
def test_macro_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_QUOTED_VDF))
def test_mixed(self):
self.assertEqual(self.EXPECTED_MIXED_DICT, vdf.loads(self.MIXED_VDF))
|
Add initial vdf test fixtureimport unittest
from steam import vdf
class SyntaxTestCase(unittest.TestCase):
UNQUOTED_VDF = """
node
{
key value
}
"""
QUOTED_VDF = """
"node"
{
"key" "value"
}
"""
MACRO_UNQUOTED_VDF = """
node
{
key value [$MACRO]
}
"""
MACRO_QUOTED_VDF = """
"node"
{
"key" "value" [$MACRO]
}
"""
MIXED_VDF = """
node
{
"key" value
key2 "value"
"key3" "value" [$MACRO]
// Comment
"subnode" [$MACRO]
{
key value
}
}
"""
EXPECTED_DICT = {
u"node": {
u"key": u"value"
}
}
EXPECTED_MIXED_DICT = {
u"node": {
u"key": u"value",
u"key2": u"value",
u"key3": u"value",
u"subnode": {
u"key": u"value"
}
}
}
class DeserializeTestCase(SyntaxTestCase):
def test_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.UNQUOTED_VDF))
def test_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.QUOTED_VDF))
def test_macro_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_UNQUOTED_VDF))
def test_macro_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_QUOTED_VDF))
def test_mixed(self):
self.assertEqual(self.EXPECTED_MIXED_DICT, vdf.loads(self.MIXED_VDF))
|
<commit_before><commit_msg>Add initial vdf test fixture<commit_after>import unittest
from steam import vdf
class SyntaxTestCase(unittest.TestCase):
UNQUOTED_VDF = """
node
{
key value
}
"""
QUOTED_VDF = """
"node"
{
"key" "value"
}
"""
MACRO_UNQUOTED_VDF = """
node
{
key value [$MACRO]
}
"""
MACRO_QUOTED_VDF = """
"node"
{
"key" "value" [$MACRO]
}
"""
MIXED_VDF = """
node
{
"key" value
key2 "value"
"key3" "value" [$MACRO]
// Comment
"subnode" [$MACRO]
{
key value
}
}
"""
EXPECTED_DICT = {
u"node": {
u"key": u"value"
}
}
EXPECTED_MIXED_DICT = {
u"node": {
u"key": u"value",
u"key2": u"value",
u"key3": u"value",
u"subnode": {
u"key": u"value"
}
}
}
class DeserializeTestCase(SyntaxTestCase):
def test_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.UNQUOTED_VDF))
def test_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.QUOTED_VDF))
def test_macro_unquoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_UNQUOTED_VDF))
def test_macro_quoted(self):
self.assertEqual(self.EXPECTED_DICT, vdf.loads(self.MACRO_QUOTED_VDF))
def test_mixed(self):
self.assertEqual(self.EXPECTED_MIXED_DICT, vdf.loads(self.MIXED_VDF))
|
|
fdf820d731a31e39861fe0f9b3dbbf2da225116c
|
tests/test_pipeline_wgbs.py
|
tests/test_pipeline_wgbs.py
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.wgbs
@pytest.mark.pipeline
def test_wgbs_pipeline():
"""
Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_wgbs.py \
--taxon_id 10090 \
--genome /<dataset_dir>/Mouse.GRCm38.fasta \
--assembly GRCm38 \
--fastq1 /<dataset_dir>/expt_1.fastq \
--fastq2 /<dataset_dir>/expt_2.fastq \
--aligner bowtie2 \
--aligner_path ${HOME}/lib/bowtie2-2.3.2 \
--bss_path ${HOME}/lib/BSseeker2
"""
home = os.path.expanduser('~')
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genomefa_file = resource_path + "bsSeeker.Mouse.GRCm38.fasta"
fastq1_file = resource_path + "bsSeeker.Mouse.GRCm38_1.fastq"
fastq2_file = resource_path + "bsSeeker.Mouse.GRCm38_2.fastq"
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
genomefa_file,
fastq1_file,
fastq2_file
],
{
'assembly' : 'GRCh38',
'aligner' : 'bowtie2',
'aligner_path' : home + '/lib/bowtie2-2.3.2',
'bss_path' : home + '/lib/BSseeker2'
},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("WGBS RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test the pipeline code for the WGBS pipeline
|
Test the pipeline code for the WGBS pipeline
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq
|
Test the pipeline code for the WGBS pipeline
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.wgbs
@pytest.mark.pipeline
def test_wgbs_pipeline():
"""
Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_wgbs.py \
--taxon_id 10090 \
--genome /<dataset_dir>/Mouse.GRCm38.fasta \
--assembly GRCm38 \
--fastq1 /<dataset_dir>/expt_1.fastq \
--fastq2 /<dataset_dir>/expt_2.fastq \
--aligner bowtie2 \
--aligner_path ${HOME}/lib/bowtie2-2.3.2 \
--bss_path ${HOME}/lib/BSseeker2
"""
home = os.path.expanduser('~')
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genomefa_file = resource_path + "bsSeeker.Mouse.GRCm38.fasta"
fastq1_file = resource_path + "bsSeeker.Mouse.GRCm38_1.fastq"
fastq2_file = resource_path + "bsSeeker.Mouse.GRCm38_2.fastq"
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
genomefa_file,
fastq1_file,
fastq2_file
],
{
'assembly' : 'GRCh38',
'aligner' : 'bowtie2',
'aligner_path' : home + '/lib/bowtie2-2.3.2',
'bss_path' : home + '/lib/BSseeker2'
},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("WGBS RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test the pipeline code for the WGBS pipeline<commit_after>
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.wgbs
@pytest.mark.pipeline
def test_wgbs_pipeline():
"""
Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_wgbs.py \
--taxon_id 10090 \
--genome /<dataset_dir>/Mouse.GRCm38.fasta \
--assembly GRCm38 \
--fastq1 /<dataset_dir>/expt_1.fastq \
--fastq2 /<dataset_dir>/expt_2.fastq \
--aligner bowtie2 \
--aligner_path ${HOME}/lib/bowtie2-2.3.2 \
--bss_path ${HOME}/lib/BSseeker2
"""
home = os.path.expanduser('~')
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genomefa_file = resource_path + "bsSeeker.Mouse.GRCm38.fasta"
fastq1_file = resource_path + "bsSeeker.Mouse.GRCm38_1.fastq"
fastq2_file = resource_path + "bsSeeker.Mouse.GRCm38_2.fastq"
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
genomefa_file,
fastq1_file,
fastq2_file
],
{
'assembly' : 'GRCh38',
'aligner' : 'bowtie2',
'aligner_path' : home + '/lib/bowtie2-2.3.2',
'bss_path' : home + '/lib/BSseeker2'
},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("WGBS RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test the pipeline code for the WGBS pipeline"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.wgbs
@pytest.mark.pipeline
def test_wgbs_pipeline():
"""
Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_wgbs.py \
--taxon_id 10090 \
--genome /<dataset_dir>/Mouse.GRCm38.fasta \
--assembly GRCm38 \
--fastq1 /<dataset_dir>/expt_1.fastq \
--fastq2 /<dataset_dir>/expt_2.fastq \
--aligner bowtie2 \
--aligner_path ${HOME}/lib/bowtie2-2.3.2 \
--bss_path ${HOME}/lib/BSseeker2
"""
home = os.path.expanduser('~')
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genomefa_file = resource_path + "bsSeeker.Mouse.GRCm38.fasta"
fastq1_file = resource_path + "bsSeeker.Mouse.GRCm38_1.fastq"
fastq2_file = resource_path + "bsSeeker.Mouse.GRCm38_2.fastq"
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
genomefa_file,
fastq1_file,
fastq2_file
],
{
'assembly' : 'GRCh38',
'aligner' : 'bowtie2',
'aligner_path' : home + '/lib/bowtie2-2.3.2',
'bss_path' : home + '/lib/BSseeker2'
},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("WGBS RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test the pipeline code for the WGBS pipeline<commit_after>"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.wgbs
@pytest.mark.pipeline
def test_wgbs_pipeline():
"""
Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_wgbs.py \
--taxon_id 10090 \
--genome /<dataset_dir>/Mouse.GRCm38.fasta \
--assembly GRCm38 \
--fastq1 /<dataset_dir>/expt_1.fastq \
--fastq2 /<dataset_dir>/expt_2.fastq \
--aligner bowtie2 \
--aligner_path ${HOME}/lib/bowtie2-2.3.2 \
--bss_path ${HOME}/lib/BSseeker2
"""
home = os.path.expanduser('~')
resource_path = os.path.join(os.path.dirname(__file__), "data/")
genomefa_file = resource_path + "bsSeeker.Mouse.GRCm38.fasta"
fastq1_file = resource_path + "bsSeeker.Mouse.GRCm38_1.fastq"
fastq2_file = resource_path + "bsSeeker.Mouse.GRCm38_2.fastq"
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
genomefa_file,
fastq1_file,
fastq2_file
],
{
'assembly' : 'GRCh38',
'aligner' : 'bowtie2',
'aligner_path' : home + '/lib/bowtie2-2.3.2',
'bss_path' : home + '/lib/BSseeker2'
},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("WGBS RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
|
11695a26b7ca7f364d4464331965189ebd6357bc
|
backend/django/core/base_viewset.py
|
backend/django/core/base_viewset.py
|
from rest_framework import status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
class BaseViewSet(viewsets.ModelViewSet):
true = True
false = False
none = None
authentication_classes = AllowAny,
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not self.none:
serializer = self.get_serializer(page, many=self.true)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=self.true)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
data = request.data
serializer = self.get_serializer(data=data)
try:
serializer.is_valid(raise_exception=self.true)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
try:
instance_to_create = serializer.create(validated_data=data)
except Exception as exception:
message = exception.message
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
serialized_instance = self.get_serializer(instance_to_create).data
return Response(
data=serialized_instance, status=status.HTTP_201_CREATED)
def update(self, request, *args, **kwargs):
instance_to_update = self.get_object()
data = request.data
try:
updated_object = self.get_serializer().update(
instance=instance_to_update, validated_data=data)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
except ValueError:
return Response(
data={'message': 'Value error'},
status=status.HTTP_400_BAD_REQUEST)
except Exception as exception:
return Response(
data={'message': exception.message},
status=status.HTTP_403_FORBIDDEN)
serialized_instance = self.get_serializer(updated_object).data
return Response(
data=serialized_instance, status=status.HTTP_202_ACCEPTED)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_200_OK)
|
Create default BaseViewSet for all the common actions
|
Create default BaseViewSet for all the common actions
|
Python
|
mit
|
slavpetroff/sweetshop,slavpetroff/sweetshop
|
Create default BaseViewSet for all the common actions
|
from rest_framework import status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
class BaseViewSet(viewsets.ModelViewSet):
true = True
false = False
none = None
authentication_classes = AllowAny,
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not self.none:
serializer = self.get_serializer(page, many=self.true)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=self.true)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
data = request.data
serializer = self.get_serializer(data=data)
try:
serializer.is_valid(raise_exception=self.true)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
try:
instance_to_create = serializer.create(validated_data=data)
except Exception as exception:
message = exception.message
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
serialized_instance = self.get_serializer(instance_to_create).data
return Response(
data=serialized_instance, status=status.HTTP_201_CREATED)
def update(self, request, *args, **kwargs):
instance_to_update = self.get_object()
data = request.data
try:
updated_object = self.get_serializer().update(
instance=instance_to_update, validated_data=data)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
except ValueError:
return Response(
data={'message': 'Value error'},
status=status.HTTP_400_BAD_REQUEST)
except Exception as exception:
return Response(
data={'message': exception.message},
status=status.HTTP_403_FORBIDDEN)
serialized_instance = self.get_serializer(updated_object).data
return Response(
data=serialized_instance, status=status.HTTP_202_ACCEPTED)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_200_OK)
|
<commit_before><commit_msg>Create default BaseViewSet for all the common actions<commit_after>
|
from rest_framework import status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
class BaseViewSet(viewsets.ModelViewSet):
true = True
false = False
none = None
authentication_classes = AllowAny,
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not self.none:
serializer = self.get_serializer(page, many=self.true)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=self.true)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
data = request.data
serializer = self.get_serializer(data=data)
try:
serializer.is_valid(raise_exception=self.true)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
try:
instance_to_create = serializer.create(validated_data=data)
except Exception as exception:
message = exception.message
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
serialized_instance = self.get_serializer(instance_to_create).data
return Response(
data=serialized_instance, status=status.HTTP_201_CREATED)
def update(self, request, *args, **kwargs):
instance_to_update = self.get_object()
data = request.data
try:
updated_object = self.get_serializer().update(
instance=instance_to_update, validated_data=data)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
except ValueError:
return Response(
data={'message': 'Value error'},
status=status.HTTP_400_BAD_REQUEST)
except Exception as exception:
return Response(
data={'message': exception.message},
status=status.HTTP_403_FORBIDDEN)
serialized_instance = self.get_serializer(updated_object).data
return Response(
data=serialized_instance, status=status.HTTP_202_ACCEPTED)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_200_OK)
|
Create default BaseViewSet for all the common actionsfrom rest_framework import status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
class BaseViewSet(viewsets.ModelViewSet):
true = True
false = False
none = None
authentication_classes = AllowAny,
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not self.none:
serializer = self.get_serializer(page, many=self.true)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=self.true)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
data = request.data
serializer = self.get_serializer(data=data)
try:
serializer.is_valid(raise_exception=self.true)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
try:
instance_to_create = serializer.create(validated_data=data)
except Exception as exception:
message = exception.message
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
serialized_instance = self.get_serializer(instance_to_create).data
return Response(
data=serialized_instance, status=status.HTTP_201_CREATED)
def update(self, request, *args, **kwargs):
instance_to_update = self.get_object()
data = request.data
try:
updated_object = self.get_serializer().update(
instance=instance_to_update, validated_data=data)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
except ValueError:
return Response(
data={'message': 'Value error'},
status=status.HTTP_400_BAD_REQUEST)
except Exception as exception:
return Response(
data={'message': exception.message},
status=status.HTTP_403_FORBIDDEN)
serialized_instance = self.get_serializer(updated_object).data
return Response(
data=serialized_instance, status=status.HTTP_202_ACCEPTED)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_200_OK)
|
<commit_before><commit_msg>Create default BaseViewSet for all the common actions<commit_after>from rest_framework import status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
class BaseViewSet(viewsets.ModelViewSet):
true = True
false = False
none = None
authentication_classes = AllowAny,
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not self.none:
serializer = self.get_serializer(page, many=self.true)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=self.true)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def create(self, request, *args, **kwargs):
data = request.data
serializer = self.get_serializer(data=data)
try:
serializer.is_valid(raise_exception=self.true)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
try:
instance_to_create = serializer.create(validated_data=data)
except Exception as exception:
message = exception.message
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
serialized_instance = self.get_serializer(instance_to_create).data
return Response(
data=serialized_instance, status=status.HTTP_201_CREATED)
def update(self, request, *args, **kwargs):
instance_to_update = self.get_object()
data = request.data
try:
updated_object = self.get_serializer().update(
instance=instance_to_update, validated_data=data)
except ValidationError as exception:
invalid_fields = list(exception.detail.keys())
message = {'invalid_input_fields': invalid_fields}
return Response(data=message, status=status.HTTP_400_BAD_REQUEST)
except ValueError:
return Response(
data={'message': 'Value error'},
status=status.HTTP_400_BAD_REQUEST)
except Exception as exception:
return Response(
data={'message': exception.message},
status=status.HTTP_403_FORBIDDEN)
serialized_instance = self.get_serializer(updated_object).data
return Response(
data=serialized_instance, status=status.HTTP_202_ACCEPTED)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_200_OK)
|
|
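A usage sketch for the BaseViewSet above (CakeViewSet, Cake and CakeSerializer are invented names, not code from the repository): a concrete viewset only needs to declare its queryset and serializer and inherits the list/retrieve/create/update/destroy handlers.

# Hypothetical subclass of the BaseViewSet defined in the record above.
# The import paths, model and serializer names are assumptions.
from core.base_viewset import BaseViewSet
from catalog.models import Cake
from catalog.serializers import CakeSerializer

class CakeViewSet(BaseViewSet):
    queryset = Cake.objects.all()
    serializer_class = CakeSerializer

Note that the record assigns rest_framework's AllowAny to authentication_classes; AllowAny is a permission class, so a subclass that wants an open endpoint would normally set permission_classes = (AllowAny,) instead.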
af2c7febc2feaad2d46b4b7ea818e39eac89ec66
|
app/api/tests/test_weekday.py
|
app/api/tests/test_weekday.py
|
from django.test import Client, TestCase
class WeekdayApiTest(TestCase):
"""Tests for Weekday API."""
def setUp(self):
self.client = Client()
self.endpoint = '/api'
def create_weekday(self, name):
query = '''
mutation{
createWeekday(input: {name: "%s"}){
user{
id,
originalId,
name
}
}
}
''' % (name)
return self.client.post(self.endpoint, {'query': query}).json()
def test_creation_of_weekday_object(self):
response = self.create_weekday('tuesday')
expected = {
'createWeekday': {
'weekday': {
'id': response['data']['createWeekday']['weekday']['id'],
'originalId': response['data']['createWeekday']['weekday']['originalId'],
'username': 'tuesday'
}
}
}
self.assertEqual(expected, response['data'])
|
Set up test for weekday api
|
Set up test for weekday api
|
Python
|
mit
|
teamtaverna/core
|
Set up test for weekday api
|
from django.test import Client, TestCase
class WeekdayApiTest(TestCase):
"""Tests for Weekday API."""
def setUp(self):
self.client = Client()
self.endpoint = '/api'
def create_weekday(self, name):
query = '''
mutation{
createWeekday(input: {name: "%s"}){
user{
id,
originalId,
name
}
}
}
''' % (name)
return self.client.post(self.endpoint, {'query': query}).json()
def test_creation_of_weekday_object(self):
response = self.create_weekday('tuesday')
expected = {
'createWeekday': {
'weekday': {
'id': response['data']['createWeekday']['weekday']['id'],
'originalId': response['data']['createWeekday']['weekday']['originalId'],
'username': 'tuesday'
}
}
}
self.assertEqual(expected, response['data'])
|
<commit_before><commit_msg>Set up test for weekday api<commit_after>
|
from django.test import Client, TestCase
class WeekdayApiTest(TestCase):
"""Tests for Weekday API."""
def setUp(self):
self.client = Client()
self.endpoint = '/api'
def create_weekday(self, name):
query = '''
mutation{
createWeekday(input: {name: "%s"}){
user{
id,
originalId,
name
}
}
}
''' % (name)
return self.client.post(self.endpoint, {'query': query}).json()
def test_creation_of_weekday_object(self):
response = self.create_weekday('tuesday')
expected = {
'createWeekday': {
'weekday': {
'id': response['data']['createWeekday']['weekday']['id'],
'originalId': response['data']['createWeekday']['weekday']['originalId'],
'username': 'tuesday'
}
}
}
self.assertEqual(expected, response['data'])
|
Set up test for weekday apifrom django.test import Client, TestCase
class WeekdayApiTest(TestCase):
"""Tests for Weekday API."""
def setUp(self):
self.client = Client()
self.endpoint = '/api'
def create_weekday(self, name):
query = '''
mutation{
createWeekday(input: {name: "%s"}){
user{
id,
originalId,
name
}
}
}
''' % (name)
return self.client.post(self.endpoint, {'query': query}).json()
def test_creation_of_weekday_object(self):
response = self.create_weekday('tuesday')
expected = {
'createWeekday': {
'weekday': {
'id': response['data']['createWeekday']['weekday']['id'],
'originalId': response['data']['createWeekday']['weekday']['originalId'],
'username': 'tuesday'
}
}
}
self.assertEqual(expected, response['data'])
|
<commit_before><commit_msg>Set up test for weekday api<commit_after>from django.test import Client, TestCase
class WeekdayApiTest(TestCase):
"""Tests for Weekday API."""
def setUp(self):
self.client = Client()
self.endpoint = '/api'
def create_weekday(self, name):
query = '''
mutation{
createWeekday(input: {name: "%s"}){
user{
id,
originalId,
name
}
}
}
''' % (name)
return self.client.post(self.endpoint, {'query': query}).json()
def test_creation_of_weekday_object(self):
response = self.create_weekday('tuesday')
expected = {
'createWeekday': {
'weekday': {
'id': response['data']['createWeekday']['weekday']['id'],
'originalId': response['data']['createWeekday']['weekday']['originalId'],
'username': 'tuesday'
}
}
}
self.assertEqual(expected, response['data'])
|
|
8a21a8741d152a4040f42b57b4d21d483a6367fb
|
adhocracy/migration/versions/037_proposal_variants_fix_pickle.py
|
adhocracy/migration/versions/037_proposal_variants_fix_pickle.py
|
'''
Fix an error in the previous migration where we pickled the versions
into a string that was pickled again by sqlalchemy.
'''
from datetime import datetime
from pickle import loads
from sqlalchemy import (MetaData, Column, ForeignKey, DateTime, Integer,
PickleType, Table)
metadata = MetaData()
def are_elements_equal(x, y):
return x == y
selection_table = Table(
'selection', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime),
Column('page_id', Integer, ForeignKey('page.id',
name='selection_page', use_alter=True), nullable=True),
Column('proposal_id', Integer, ForeignKey('proposal.id',
name='selection_proposal', use_alter=True), nullable=True),
Column('variants', PickleType(comparator=are_elements_equal),
nullable=True)
)
def upgrade(migrate_engine):
metadata.bind = migrate_engine
selections = migrate_engine.execute(selection_table.select())
fixed = 0
for (id, _, _, _, _, variants) in selections:
try:
# see if we can unpickle from the variants value
variants = loads(variants)
except TypeError:
continue
if not isinstance(variants, list):
raise ValueError(
("Already fixed: %s. Error in selection %s. 'variants' is "
'double pickled, but not a list. Value: %s, type: %s') %
(id, str(variants), type(variants)))
fixed += 1
migrate_engine.execute(
selection_table.update().values(variants=variants).where(
selection_table.c.id == id))
def downgrade(migrate_engine):
raise NotImplementedError()
|
Add migration for double pickled values
|
Selection.variants: Add migration for double pickled values
|
Python
|
agpl-3.0
|
DanielNeugebauer/adhocracy,liqd/adhocracy,liqd/adhocracy,alkadis/vcv,alkadis/vcv,SysTheron/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,phihag/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy,phihag/adhocracy,alkadis/vcv,phihag/adhocracy,SysTheron/adhocracy,SysTheron/adhocracy,liqd/adhocracy,DanielNeugebauer/adhocracy,alkadis/vcv,phihag/adhocracy,liqd/adhocracy,DanielNeugebauer/adhocracy
|
Selection.variants: Add migration for double pickled values
|
'''
Fix an error in the previous migration where we pickled the versions
into a string that was pickled again by sqlalchemy.
'''
from datetime import datetime
from pickle import loads
from sqlalchemy import (MetaData, Column, ForeignKey, DateTime, Integer,
PickleType, Table)
metadata = MetaData()
def are_elements_equal(x, y):
return x == y
selection_table = Table(
'selection', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime),
Column('page_id', Integer, ForeignKey('page.id',
name='selection_page', use_alter=True), nullable=True),
Column('proposal_id', Integer, ForeignKey('proposal.id',
name='selection_proposal', use_alter=True), nullable=True),
Column('variants', PickleType(comparator=are_elements_equal),
nullable=True)
)
def upgrade(migrate_engine):
metadata.bind = migrate_engine
selections = migrate_engine.execute(selection_table.select())
fixed = 0
for (id, _, _, _, _, variants) in selections:
try:
# see if we can unpickle from the variants value
variants = loads(variants)
except TypeError:
continue
if not isinstance(variants, list):
raise ValueError(
("Already fixed: %s. Error in selection %s. 'variants' is "
'double pickled, but not a list. Value: %s, type: %s') %
(id, str(variants), type(variants)))
fixed += 1
migrate_engine.execute(
selection_table.update().values(variants=variants).where(
selection_table.c.id == id))
def downgrade(migrate_engine):
raise NotImplementedError()
|
<commit_before><commit_msg>Selection.variants: Add migration for double pickled values<commit_after>
|
'''
Fix an error in the previous migration where we pickled the versions
into a string that was pickled again by sqlalchemy.
'''
from datetime import datetime
from pickle import loads
from sqlalchemy import (MetaData, Column, ForeignKey, DateTime, Integer,
PickleType, Table)
metadata = MetaData()
def are_elements_equal(x, y):
return x == y
selection_table = Table(
'selection', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime),
Column('page_id', Integer, ForeignKey('page.id',
name='selection_page', use_alter=True), nullable=True),
Column('proposal_id', Integer, ForeignKey('proposal.id',
name='selection_proposal', use_alter=True), nullable=True),
Column('variants', PickleType(comparator=are_elements_equal),
nullable=True)
)
def upgrade(migrate_engine):
metadata.bind = migrate_engine
selections = migrate_engine.execute(selection_table.select())
fixed = 0
for (id, _, _, _, _, variants) in selections:
try:
# see if we can unpickle from the variants value
variants = loads(variants)
except TypeError:
continue
if not isinstance(variants, list):
raise ValueError(
("Already fixed: %s. Error in selection %s. 'variants' is "
'double pickled, but not a list. Value: %s, type: %s') %
(id, str(variants), type(variants)))
fixed += 1
migrate_engine.execute(
selection_table.update().values(variants=variants).where(
selection_table.c.id == id))
def downgrade(migrate_engine):
raise NotImplementedError()
|
Selection.variants: Add migration for double pickled values'''
Fix an error in the previous migration where we pickled the versions
into a string that was pickled again by sqlalchemy.
'''
from datetime import datetime
from pickle import loads
from sqlalchemy import (MetaData, Column, ForeignKey, DateTime, Integer,
PickleType, Table)
metadata = MetaData()
def are_elements_equal(x, y):
return x == y
selection_table = Table(
'selection', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime),
Column('page_id', Integer, ForeignKey('page.id',
name='selection_page', use_alter=True), nullable=True),
Column('proposal_id', Integer, ForeignKey('proposal.id',
name='selection_proposal', use_alter=True), nullable=True),
Column('variants', PickleType(comparator=are_elements_equal),
nullable=True)
)
def upgrade(migrate_engine):
metadata.bind = migrate_engine
selections = migrate_engine.execute(selection_table.select())
fixed = 0
for (id, _, _, _, _, variants) in selections:
try:
# see if we can unpickle from the variants value
variants = loads(variants)
except TypeError:
continue
if not isinstance(variants, list):
raise ValueError(
("Already fixed: %s. Error in selection %s. 'variants' is "
'double pickled, but not a list. Value: %s, type: %s') %
(id, str(variants), type(variants)))
fixed += 1
migrate_engine.execute(
selection_table.update().values(variants=variants).where(
selection_table.c.id == id))
def downgrade(migrate_engine):
raise NotImplementedError()
|
<commit_before><commit_msg>Selection.variants: Add migration for double pickled values<commit_after>'''
Fix an error in the previous migration where we pickled the versions
into a string that was pickled again by sqlalchemy.
'''
from datetime import datetime
from pickle import loads
from sqlalchemy import (MetaData, Column, ForeignKey, DateTime, Integer,
PickleType, Table)
metadata = MetaData()
def are_elements_equal(x, y):
return x == y
selection_table = Table(
'selection', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime),
Column('page_id', Integer, ForeignKey('page.id',
name='selection_page', use_alter=True), nullable=True),
Column('proposal_id', Integer, ForeignKey('proposal.id',
name='selection_proposal', use_alter=True), nullable=True),
Column('variants', PickleType(comparator=are_elements_equal),
nullable=True)
)
def upgrade(migrate_engine):
metadata.bind = migrate_engine
selections = migrate_engine.execute(selection_table.select())
fixed = 0
for (id, _, _, _, _, variants) in selections:
try:
# see if we can unpickle from the variants value
variants = loads(variants)
except TypeError:
continue
if not isinstance(variants, list):
raise ValueError(
("Already fixed: %s. Error in selection %s. 'variants' is "
'double pickled, but not a list. Value: %s, type: %s') %
(id, str(variants), type(variants)))
fixed += 1
migrate_engine.execute(
selection_table.update().values(variants=variants).where(
selection_table.c.id == id))
def downgrade(migrate_engine):
raise NotImplementedError()
|
|
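A standalone illustration of the double-pickling problem the migration above repairs (sketch only, not code from the repository; the variants value is invented): per the docstring, the previous migration stored an already-pickled string in a column that SQLAlchemy pickles again, so one round of unpickling still yields a pickled string, which is why upgrade() calls loads() on the column value before writing it back.

# Standalone sketch of the failure mode and the fix.
from pickle import dumps, loads

variants = ['HEAD', 'alternative']
stored = dumps(dumps(variants))   # what effectively ended up in the column
once = loads(stored)              # first unpickle: still a pickled string
print(isinstance(once, bytes))    # True on Python 3 (str on Python 2)
print(loads(once))                # ['HEAD', 'alternative'] -- the repaired value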
56c9d8dc45c27b8d86f17be4a88dd1c574874460
|
nova/tests/virt_unittest.py
|
nova/tests/virt_unittest.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.
|
Python
|
apache-2.0
|
n0ano/ganttclient
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add new libvirt_type option "uml" for user-mode-linux.. This switches the libvirt URI to uml:///system and uses a different template for the libvirt xml.<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
|
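The test above checks that FLAGS.libvirt_type selects both the connection URI and the domain XML flavour, and that an explicit FLAGS.libvirt_uri overrides the per-type default. A minimal sketch of the dispatch such a test implies; the mapping and the template names are illustrative only, not nova's actual implementation:

    # Illustrative mapping only; nova's LibvirtConnection derives these differently.
    LIBVIRT_URIS = {
        'uml':  ('uml:///system',  'libvirt_uml.xml.template'),
        'qemu': ('qemu:///system', 'libvirt_qemu.xml.template'),
        'kvm':  ('qemu:///system', 'libvirt_qemu.xml.template'),
    }

    def get_uri_and_template(libvirt_type, override_uri=None):
        uri, template = LIBVIRT_URIS[libvirt_type]
        # An explicitly configured URI wins over the per-type default,
        # mirroring the FLAGS.libvirt_uri override exercised at the end of the test.
        return (override_uri or uri), template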
e3d0bcb91f59616eb0aa8cc56f72315c362493cf
|
utils/webhistory/epiphany-history-to-ttl.py
|
utils/webhistory/epiphany-history-to-ttl.py
|
import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
|
Add util to generate real webhistory
|
Add util to generate real webhistory
Added program that reads epiphany web browsing history and print it
in turtle format.
|
Python
|
lgpl-2.1
|
hoheinzollern/tracker,hoheinzollern/tracker,outofbits/tracker,outofbits/tracker,outofbits/tracker,hoheinzollern/tracker,outofbits/tracker,hoheinzollern/tracker,outofbits/tracker,hoheinzollern/tracker,outofbits/tracker,hoheinzollern/tracker,hoheinzollern/tracker,outofbits/tracker
|
Add util to generate real webhistory
Added program that reads epiphany web browsing history and print it
in turtle format.
|
import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
|
<commit_before><commit_msg>Add util to generate real webhistory
Added program that reads epiphany web browsing history and print it
in turtle format.<commit_after>
|
import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
|
Add util to generate real webhistory
Added program that reads epiphany web browsing history and print it
in turtle format.
import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
|
<commit_before><commit_msg>Add util to generate real webhistory
Added program that reads epiphany web browsing history and print it
in turtle format.<commit_after>import xml.dom.minidom
from xml.dom.minidom import Node
import time
import sys, os
PROPERTIES = {2: ("nie:title", str),
3: ("nfo:uri", str),
4: ("nie:usageCounter", int),
6: ("nie:lastRefreshed", time.struct_time)}
# Use time.struct_time as type for dates, even when the format is not that!
def get_text (node):
text = ""
for subnode in node.childNodes:
if subnode.nodeType == Node.TEXT_NODE:
text += subnode.data
return text.encode ('utf8').replace ('"', '') # Use a safer method!
def process_file (filename):
doc = xml.dom.minidom.parse(filename)
for node in doc.getElementsByTagName ("node"):
print "<uri:uuid:epiphany-webhistory-%s> a nfo:WebHistory" % (node.getAttribute ("id")),
for prop in node.getElementsByTagName ("property"):
prop_id = int(prop.getAttribute ("id"))
if (PROPERTIES.has_key (prop_id)):
prop_name, prop_type = PROPERTIES [prop_id]
if (prop_type == str):
print ';\n\t%s "%s"' % (prop_name, get_text (prop)),
elif (prop_type == int):
print ';\n\t%s %s' % (prop_name, get_text (prop)),
elif (prop_type == time.struct_time):
print ';\n\t%s "%s"' % (prop_name, time.strftime ("%Y%m%dT%H:%m:%S",time.localtime (int(get_text (prop))))),
print ".\n"
def print_headers ():
print "@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#>."
print "@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> ."
if __name__ == "__main__":
epi_history = os.path.join (os.getenv ("HOME"), ".gnome2", "epiphany", "ephy-history.xml")
print >> sys.stderr, "Scanning", epi_history
print_headers ()
if (os.path.exists (epi_history)):
process_file (epi_history)
|
|
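The last-refreshed timestamps in the script above are formatted with "%Y%m%dT%H:%m:%S", where %m is the month directive; minutes are %M in strftime, so the minutes position actually repeats the month. Assuming an ISO-8601-style stamp was the intent, the corrected call (reusing get_text from the script) would be:

    import time
    # %M is minutes; %m, used in the script, is the month directive.
    time.strftime("%Y%m%dT%H:%M:%S", time.localtime(int(get_text(prop))))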
52ab2b852a9e4df8783818743396c565c8355547
|
jacquard/storage/commands.py
|
jacquard/storage/commands.py
|
import pprint
from jacquard.commands import BaseCommand
class StorageDump(BaseCommand):
help = "dump all objects in storage"
def add_arguments(self, parser):
pass
def handle(self, config, options):
with config['storage'].transaction() as store:
for key, value in store.items():
print(key)
print('=' * len(key))
pprint.pprint(value)
print()
|
Add command to dump contents of storage
|
Add command to dump contents of storage
|
Python
|
mit
|
prophile/jacquard,prophile/jacquard
|
Add command to dump contents of storage
|
import pprint
from jacquard.commands import BaseCommand
class StorageDump(BaseCommand):
help = "dump all objects in storage"
def add_arguments(self, parser):
pass
def handle(self, config, options):
with config['storage'].transaction() as store:
for key, value in store.items():
print(key)
print('=' * len(key))
pprint.pprint(value)
print()
|
<commit_before><commit_msg>Add command to dump contents of storage<commit_after>
|
import pprint
from jacquard.commands import BaseCommand
class StorageDump(BaseCommand):
help = "dump all objects in storage"
def add_arguments(self, parser):
pass
def handle(self, config, options):
with config['storage'].transaction() as store:
for key, value in store.items():
print(key)
print('=' * len(key))
pprint.pprint(value)
print()
|
Add command to dump contents of storage
import pprint
from jacquard.commands import BaseCommand
class StorageDump(BaseCommand):
help = "dump all objects in storage"
def add_arguments(self, parser):
pass
def handle(self, config, options):
with config['storage'].transaction() as store:
for key, value in store.items():
print(key)
print('=' * len(key))
pprint.pprint(value)
print()
|
<commit_before><commit_msg>Add command to dump contents of storage<commit_after>import pprint
from jacquard.commands import BaseCommand
class StorageDump(BaseCommand):
help = "dump all objects in storage"
def add_arguments(self, parser):
pass
def handle(self, config, options):
with config['storage'].transaction() as store:
for key, value in store.items():
print(key)
print('=' * len(key))
pprint.pprint(value)
print()
|
|
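StorageDump above prints entries in whatever order the backend's items() yields them; when dumps are meant to be diffed against each other, sorting the keys makes the output deterministic. A sketch of that variant, assuming only the items() API the command already uses:

    def handle(self, config, options):
        with config['storage'].transaction() as store:
            for key, value in sorted(store.items(), key=lambda kv: kv[0]):
                print(key)
                print('=' * len(key))
                pprint.pprint(value)
                print()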
f18626a190ac967db2a30b9929bc055a93a370e6
|
appium-demo/tt.py
|
appium-demo/tt.py
|
import os
from appium import webdriver
# capabilities for built-in email app
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['appPackage'] = 'com.android.email'
desired_caps['appActivity'] = 'com.android.email.activity.Welcome'
# email locator index
email_prefix = 'com.android.email:id/'
to_field_locator = email_prefix + 'to'
subject_field_locator = email_prefix + 'subject'
body_field_locator = email_prefix + 'body'
send_button_locator = email_prefix + 'send'
compose_button_locator = email_prefix + 'compose_button'
conversation_list_view_locator = email_prefix + 'conversation_list_view'
def test_send_mail():
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
compose = driver.find_element_by_id(compose_button_locator)
if compose.is_displayed():
compose.click()
to = driver.find_element_by_id(to_field_locator)
subject = driver.find_element_by_id(subject_field_locator)
body = driver.find_element_by_id(body_field_locator)
send = driver.find_element_by_id(send_button_locator)
to.send_keys('zapionator@gmail.com')
subject.send_keys('Verify sending email')
body.send_keys('Hello, this is a test from the testing script')
send.click()
# Verification
conversation_list = driver.find_elements(conversation_list_view_locator)
els = conversation_list.find_elements_by_id('android.widget.FrameLayout')
loc_1 = els[0].location
loc_2 = els[3].location
els.swipe(loc_1['x'], loc_1['y'], loc_2['x'], loc_2['y'], 800)
os.sleep(10)
conversation_list.find_elements_by_id('android.widget.FrameLayout')[0].click()
driver.quit()
|
Add demo app for appium
|
Add demo app for appium
|
Python
|
mpl-2.0
|
zapion/working-scripts,zapion/working-scripts,zapion/working-scripts,zapion/working-scripts
|
Add demo app for appium
|
import os
from appium import webdriver
# capabilities for built-in email app
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['appPackage'] = 'com.android.email'
desired_caps['appActivity'] = 'com.android.email.activity.Welcome'
# email locator index
email_prefix = 'com.android.email:id/'
to_field_locator = email_prefix + 'to'
subject_field_locator = email_prefix + 'subject'
body_field_locator = email_prefix + 'body'
send_button_locator = email_prefix + 'send'
compose_button_locator = email_prefix + 'compose_button'
conversation_list_view_locator = email_prefix + 'conversation_list_view'
def test_send_mail():
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
compose = driver.find_element_by_id(compose_button_locator)
if compose.is_displayed():
compose.click()
to = driver.find_element_by_id(to_field_locator)
subject = driver.find_element_by_id(subject_field_locator)
body = driver.find_element_by_id(body_field_locator)
send = driver.find_element_by_id(send_button_locator)
to.send_keys('zapionator@gmail.com')
subject.send_keys('Verify sending email')
body.send_keys('Hello, this is a test from the testing script')
send.click()
# Verification
conversation_list = driver.find_elements(conversation_list_view_locator)
els = conversation_list.find_elements_by_id('android.widget.FrameLayout')
loc_1 = els[0].location
loc_2 = els[3].location
els.swipe(loc_1['x'], loc_1['y'], loc_2['x'], loc_2['y'], 800)
os.sleep(10)
conversation_list.find_elements_by_id('android.widget.FrameLayout')[0].click()
driver.quit()
|
<commit_before><commit_msg>Add demo app for appium<commit_after>
|
import os
from appium import webdriver
# capabilities for built-in email app
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['appPackage'] = 'com.android.email'
desired_caps['appActivity'] = 'com.android.email.activity.Welcome'
# email locator index
email_prefix = 'com.android.email:id/'
to_field_locator = email_prefix + 'to'
subject_field_locator = email_prefix + 'subject'
body_field_locator = email_prefix + 'body'
send_button_locator = email_prefix + 'send'
compose_button_locator = email_prefix + 'compose_button'
conversation_list_view_locator = email_prefix + 'conversation_list_view'
def test_send_mail():
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
compose = driver.find_element_by_id(compose_button_locator)
if compose.is_displayed():
compose.click()
to = driver.find_element_by_id(to_field_locator)
subject = driver.find_element_by_id(subject_field_locator)
body = driver.find_element_by_id(body_field_locator)
send = driver.find_element_by_id(send_button_locator)
to.send_keys('zapionator@gmail.com')
subject.send_keys('Verify sending email')
body.send_keys('Hello, this is a test from the testing script')
send.click()
# Verification
conversation_list = driver.find_elements(conversation_list_view_locator)
els = conversation_list.find_elements_by_id('android.widget.FrameLayout')
loc_1 = els[0].location
loc_2 = els[3].location
els.swipe(loc_1['x'], loc_1['y'], loc_2['x'], loc_2['y'], 800)
os.sleep(10)
conversation_list.find_elements_by_id('android.widget.FrameLayout')[0].click()
driver.quit()
|
Add demo app for appium
import os
from appium import webdriver
# capabilities for built-in email app
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['appPackage'] = 'com.android.email'
desired_caps['appActivity'] = 'com.android.email.activity.Welcome'
# email locator index
email_prefix = 'com.android.email:id/'
to_field_locator = email_prefix + 'to'
subject_field_locator = email_prefix + 'subject'
body_field_locator = email_prefix + 'body'
send_button_locator = email_prefix + 'send'
compose_button_locator = email_prefix + 'compose_button'
conversation_list_view_locator = email_prefix + 'conversation_list_view'
def test_send_mail():
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
compose = driver.find_element_by_id(compose_button_locator)
if compose.is_displayed():
compose.click()
to = driver.find_element_by_id(to_field_locator)
subject = driver.find_element_by_id(subject_field_locator)
body = driver.find_element_by_id(body_field_locator)
send = driver.find_element_by_id(send_button_locator)
to.send_keys('zapionator@gmail.com')
subject.send_keys('Verify sending email')
body.send_keys('Hello, this is a test from the testing script')
send.click()
# Verification
conversation_list = driver.find_elements(conversation_list_view_locator)
els = conversation_list.find_elements_by_id('android.widget.FrameLayout')
loc_1 = els[0].location
loc_2 = els[3].location
els.swipe(loc_1['x'], loc_1['y'], loc_2['x'], loc_2['y'], 800)
os.sleep(10)
conversation_list.find_elements_by_id('android.widget.FrameLayout')[0].click()
driver.quit()
|
<commit_before><commit_msg>Add demo app for appium<commit_after>import os
from appium import webdriver
# capabilities for built-in email app
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['appPackage'] = 'com.android.email'
desired_caps['appActivity'] = 'com.android.email.activity.Welcome'
# email locator index
email_prefix = 'com.android.email:id/'
to_field_locator = email_prefix + 'to'
subject_field_locator = email_prefix + 'subject'
body_field_locator = email_prefix + 'body'
send_button_locator = email_prefix + 'send'
compose_button_locator = email_prefix + 'compose_button'
conversation_list_view_locator = email_prefix + 'conversation_list_view'
def test_send_mail():
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
compose = driver.find_element_by_id(compose_button_locator)
if compose.is_displayed():
compose.click()
to = driver.find_element_by_id(to_field_locator)
subject = driver.find_element_by_id(subject_field_locator)
body = driver.find_element_by_id(body_field_locator)
send = driver.find_element_by_id(send_button_locator)
to.send_keys('zapionator@gmail.com')
subject.send_keys('Verify sending email')
body.send_keys('Hello, this is a test from the testing script')
send.click()
# Verification
conversation_list = driver.find_elements(conversation_list_view_locator)
els = conversation_list.find_elements_by_id('android.widget.FrameLayout')
loc_1 = els[0].location
loc_2 = els[3].location
els.swipe(loc_1['x'], loc_1['y'], loc_2['x'], loc_2['y'], 800)
os.sleep(10)
conversation_list.find_elements_by_id('android.widget.FrameLayout')[0].click()
driver.quit()
|
|
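The verification block at the end of the appium script above would not run as written: os has no sleep (that is time.sleep), driver.find_elements needs a locator strategy as well as a value, 'android.widget.FrameLayout' is a class name rather than a resource id, and swipe is a driver-level gesture in the Appium Python client, not a method on a list of elements. A corrected sketch of that block, keeping the same locators and assuming the older find_element_by_* helpers the script already relies on:

    import time

    conversation_list = driver.find_element_by_id(conversation_list_view_locator)
    els = conversation_list.find_elements_by_class_name('android.widget.FrameLayout')
    loc_1 = els[0].location
    loc_2 = els[3].location
    # swipe(start_x, start_y, end_x, end_y, duration_ms) is provided by the driver
    driver.swipe(loc_1['x'], loc_1['y'], loc_2['x'], loc_2['y'], 800)
    time.sleep(10)
    conversation_list.find_elements_by_class_name('android.widget.FrameLayout')[0].click()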
89deb13c54957c10ab94c384fb58ca569fdb5cd2
|
python/dataset_converter.py
|
python/dataset_converter.py
|
# converts article and user name to article id and user id
import csv
with open('cbdata-feedback.csv', 'rb') as f:
reader = csv.reader(f)
next(reader, None) # skip header
out_file = open('cbdata-feedback-anon.csv', 'wb')
writer = csv.writer(out_file, delimiter=',')
user_id = 0
item_id = 0
user_dict = {}
item_dict = {}
# write header
writer.writerow(['# user_id','url_id', 'rating', 'timeOnePage','timeMovingMouse','timeSpentVerticalScrolling'])
for row in reader:
# check if user has an id
if not row[0] in user_dict:
user_id = user_id + 1
user_dict[row[0]] = user_id
# check if item has an id
if not row[1] in item_dict:
item_id = item_id + 1
item_dict[row[1]] = item_id
# replace username and item with ids instead
row[0] = user_dict[row[0]]
row[1] = item_dict[row[1]]
# check for empty strings
for i in range(2,len(row)):
if row[i] == '':
row[i] = 0
writer.writerow(row)
f.close()
out_file.close()
|
Convert claypool dataset to anonymous claypool dataset
|
Convert claypool dataset to anonymous claypool dataset
|
Python
|
mit
|
ntnu-smartmedia/goldfish,monsendag/goldfish,monsendag/goldfish,monsendag/goldfish,ntnu-smartmedia/goldfish,ntnu-smartmedia/goldfish
|
Convert claypool dataset to anonymous claypool dataset
|
# converts article and user name to article id and user id
import csv
with open('cbdata-feedback.csv', 'rb') as f:
reader = csv.reader(f)
next(reader, None) # skip header
out_file = open('cbdata-feedback-anon.csv', 'wb')
writer = csv.writer(out_file, delimiter=',')
user_id = 0
item_id = 0
user_dict = {}
item_dict = {}
# write header
writer.writerow(['# user_id','url_id', 'rating', 'timeOnePage','timeMovingMouse','timeSpentVerticalScrolling'])
for row in reader:
# check if user has an id
if not row[0] in user_dict:
user_id = user_id + 1
user_dict[row[0]] = user_id
# check if item has an id
if not row[1] in item_dict:
item_id = item_id + 1
item_dict[row[1]] = item_id
# replace username and item with ids instead
row[0] = user_dict[row[0]]
row[1] = item_dict[row[1]]
# check for empty strings
for i in range(2,len(row)):
if row[i] == '':
row[i] = 0
writer.writerow(row)
f.close()
out_file.close()
|
<commit_before><commit_msg>Convert claypool dataset to anonymous claypool dataset<commit_after>
|
# converts article and user name to article id and user id
import csv
with open('cbdata-feedback.csv', 'rb') as f:
reader = csv.reader(f)
next(reader, None) # skip header
out_file = open('cbdata-feedback-anon.csv', 'wb')
writer = csv.writer(out_file, delimiter=',')
user_id = 0
item_id = 0
user_dict = {}
item_dict = {}
# write header
writer.writerow(['# user_id','url_id', 'rating', 'timeOnePage','timeMovingMouse','timeSpentVerticalScrolling'])
for row in reader:
# check if user has an id
if not row[0] in user_dict:
user_id = user_id + 1
user_dict[row[0]] = user_id
# check if item has an id
if not row[1] in item_dict:
item_id = item_id + 1
item_dict[row[1]] = item_id
# replace username and item with ids instead
row[0] = user_dict[row[0]]
row[1] = item_dict[row[1]]
# check for empty strings
for i in range(2,len(row)):
if row[i] == '':
row[i] = 0
writer.writerow(row)
f.close()
out_file.close()
|
Convert claypool dataset to anonymous claypool dataset
# converts article and user name to article id and user id
import csv
with open('cbdata-feedback.csv', 'rb') as f:
reader = csv.reader(f)
next(reader, None) # skip header
out_file = open('cbdata-feedback-anon.csv', 'wb')
writer = csv.writer(out_file, delimiter=',')
user_id = 0
item_id = 0
user_dict = {}
item_dict = {}
# write header
writer.writerow(['# user_id','url_id', 'rating', 'timeOnePage','timeMovingMouse','timeSpentVerticalScrolling'])
for row in reader:
# check if user has an id
if not row[0] in user_dict:
user_id = user_id + 1
user_dict[row[0]] = user_id
# check if item has an id
if not row[1] in item_dict:
item_id = item_id + 1
item_dict[row[1]] = item_id
# replace username and item with ids instead
row[0] = user_dict[row[0]]
row[1] = item_dict[row[1]]
# check for empty strings
for i in range(2,len(row)):
if row[i] == '':
row[i] = 0
writer.writerow(row)
f.close()
out_file.close()
|
<commit_before><commit_msg>Convert claypool dataset to anonymous claypool dataset<commit_after># converts article and user name to article id and user id
import csv
with open('cbdata-feedback.csv', 'rb') as f:
reader = csv.reader(f)
next(reader, None) # skip header
out_file = open('cbdata-feedback-anon.csv', 'wb')
writer = csv.writer(out_file, delimiter=',')
user_id = 0
item_id = 0
user_dict = {}
item_dict = {}
# write header
writer.writerow(['# user_id','url_id', 'rating', 'timeOnePage','timeMovingMouse','timeSpentVerticalScrolling'])
for row in reader:
# check if user has an id
if not row[0] in user_dict:
user_id = user_id + 1
user_dict[row[0]] = user_id
# check if item has an id
if not row[1] in item_dict:
item_id = item_id + 1
item_dict[row[1]] = item_id
# replace username and item with ids instead
row[0] = user_dict[row[0]]
row[1] = item_dict[row[1]]
# check for empty strings
for i in range(2,len(row)):
if row[i] == '':
row[i] = 0
writer.writerow(row)
f.close()
out_file.close()
|
|
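The converter above opens its CSV files in binary mode ('rb'/'wb'), which is the Python 2 idiom for the csv module. Under Python 3 the files are opened in text mode with newline='', and the explicit close() calls become unnecessary when both files live in one with-block. A sketch of the equivalent setup, with the processing loop unchanged:

    import csv

    with open('cbdata-feedback.csv', newline='') as f, \
         open('cbdata-feedback-anon.csv', 'w', newline='') as out_file:
        reader = csv.reader(f)
        next(reader, None)  # skip header
        writer = csv.writer(out_file, delimiter=',')
        # ... same id-assignment and empty-field handling as in the script above ...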
e095f5f3dd155877d2280862905ba5bb0c01d928
|
bundle_to_yaml.py
|
bundle_to_yaml.py
|
#!/usr/bin/env python
import os
import sys
import unitypack
import yaml
from argparse import ArgumentParser
def handle_asset(asset):
for id, obj in asset.objects.items():
d = obj.read()
print(yaml.dump(d))
def asset_representer(dumper, data):
return dumper.represent_scalar("!asset", data.name)
yaml.add_representer(unitypack.Asset, asset_representer)
def objectpointer_representer(dumper, data):
return dumper.represent_mapping("!pptr", {
"file": data.file_id, "path": data.path_id
})
yaml.add_representer(unitypack.ObjectPointer, objectpointer_representer)
def textasset_representer(dumper, data):
obj = data._obj.copy()
obj["m_Script"] = "<stripped>"
return dumper.represent_mapping("!TextAsset", obj)
def texture2d_representer(dumper, data):
obj = data._obj.copy()
obj["image data"] = "<stripped>"
return dumper.represent_mapping("!Texture2D", obj)
def movietexture_representer(dumper, data):
obj = data._obj.copy()
obj["m_MovieData"] = "<stripped>"
return dumper.represent_mapping("!MovieTexture", obj)
def main():
p = ArgumentParser()
p.add_argument("files", nargs="+")
p.add_argument("-s", "--strip", action="store_true", help="Strip extractable data")
args = p.parse_args(sys.argv[1:])
if args.strip:
yaml.add_representer(unitypack.engine.movie.MovieTexture, movietexture_representer)
yaml.add_representer(unitypack.engine.text.TextAsset, textasset_representer)
yaml.add_representer(unitypack.engine.texture.Texture2D, texture2d_representer)
for file in args.files:
if file.endswith(".assets"):
with open(file, "rb") as f:
asset = unitypack.Asset.from_file(f)
handle_asset(asset)
continue
with open(file, "rb") as f:
bundle = unitypack.load(f)
for asset in bundle.assets:
handle_asset(asset)
if __name__ == "__main__":
main()
|
Add a yaml dumper for asset bundles
|
Add a yaml dumper for asset bundles
|
Python
|
mit
|
andburn/python-unitypack
|
Add a yaml dumper for asset bundles
|
#!/usr/bin/env python
import os
import sys
import unitypack
import yaml
from argparse import ArgumentParser
def handle_asset(asset):
for id, obj in asset.objects.items():
d = obj.read()
print(yaml.dump(d))
def asset_representer(dumper, data):
return dumper.represent_scalar("!asset", data.name)
yaml.add_representer(unitypack.Asset, asset_representer)
def objectpointer_representer(dumper, data):
return dumper.represent_mapping("!pptr", {
"file": data.file_id, "path": data.path_id
})
yaml.add_representer(unitypack.ObjectPointer, objectpointer_representer)
def textasset_representer(dumper, data):
obj = data._obj.copy()
obj["m_Script"] = "<stripped>"
return dumper.represent_mapping("!TextAsset", obj)
def texture2d_representer(dumper, data):
obj = data._obj.copy()
obj["image data"] = "<stripped>"
return dumper.represent_mapping("!Texture2D", obj)
def movietexture_representer(dumper, data):
obj = data._obj.copy()
obj["m_MovieData"] = "<stripped>"
return dumper.represent_mapping("!MovieTexture", obj)
def main():
p = ArgumentParser()
p.add_argument("files", nargs="+")
p.add_argument("-s", "--strip", action="store_true", help="Strip extractable data")
args = p.parse_args(sys.argv[1:])
if args.strip:
yaml.add_representer(unitypack.engine.movie.MovieTexture, movietexture_representer)
yaml.add_representer(unitypack.engine.text.TextAsset, textasset_representer)
yaml.add_representer(unitypack.engine.texture.Texture2D, texture2d_representer)
for file in args.files:
if file.endswith(".assets"):
with open(file, "rb") as f:
asset = unitypack.Asset.from_file(f)
handle_asset(asset)
continue
with open(file, "rb") as f:
bundle = unitypack.load(f)
for asset in bundle.assets:
handle_asset(asset)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a yaml dumper for asset bundles<commit_after>
|
#!/usr/bin/env python
import os
import sys
import unitypack
import yaml
from argparse import ArgumentParser
def handle_asset(asset):
for id, obj in asset.objects.items():
d = obj.read()
print(yaml.dump(d))
def asset_representer(dumper, data):
return dumper.represent_scalar("!asset", data.name)
yaml.add_representer(unitypack.Asset, asset_representer)
def objectpointer_representer(dumper, data):
return dumper.represent_mapping("!pptr", {
"file": data.file_id, "path": data.path_id
})
yaml.add_representer(unitypack.ObjectPointer, objectpointer_representer)
def textasset_representer(dumper, data):
obj = data._obj.copy()
obj["m_Script"] = "<stripped>"
return dumper.represent_mapping("!TextAsset", obj)
def texture2d_representer(dumper, data):
obj = data._obj.copy()
obj["image data"] = "<stripped>"
return dumper.represent_mapping("!Texture2D", obj)
def movietexture_representer(dumper, data):
obj = data._obj.copy()
obj["m_MovieData"] = "<stripped>"
return dumper.represent_mapping("!MovieTexture", obj)
def main():
p = ArgumentParser()
p.add_argument("files", nargs="+")
p.add_argument("-s", "--strip", action="store_true", help="Strip extractable data")
args = p.parse_args(sys.argv[1:])
if args.strip:
yaml.add_representer(unitypack.engine.movie.MovieTexture, movietexture_representer)
yaml.add_representer(unitypack.engine.text.TextAsset, textasset_representer)
yaml.add_representer(unitypack.engine.texture.Texture2D, texture2d_representer)
for file in args.files:
if file.endswith(".assets"):
with open(file, "rb") as f:
asset = unitypack.Asset.from_file(f)
handle_asset(asset)
continue
with open(file, "rb") as f:
bundle = unitypack.load(f)
for asset in bundle.assets:
handle_asset(asset)
if __name__ == "__main__":
main()
|
Add a yaml dumper for asset bundles
#!/usr/bin/env python
import os
import sys
import unitypack
import yaml
from argparse import ArgumentParser
def handle_asset(asset):
for id, obj in asset.objects.items():
d = obj.read()
print(yaml.dump(d))
def asset_representer(dumper, data):
return dumper.represent_scalar("!asset", data.name)
yaml.add_representer(unitypack.Asset, asset_representer)
def objectpointer_representer(dumper, data):
return dumper.represent_mapping("!pptr", {
"file": data.file_id, "path": data.path_id
})
yaml.add_representer(unitypack.ObjectPointer, objectpointer_representer)
def textasset_representer(dumper, data):
obj = data._obj.copy()
obj["m_Script"] = "<stripped>"
return dumper.represent_mapping("!TextAsset", obj)
def texture2d_representer(dumper, data):
obj = data._obj.copy()
obj["image data"] = "<stripped>"
return dumper.represent_mapping("!Texture2D", obj)
def movietexture_representer(dumper, data):
obj = data._obj.copy()
obj["m_MovieData"] = "<stripped>"
return dumper.represent_mapping("!MovieTexture", obj)
def main():
p = ArgumentParser()
p.add_argument("files", nargs="+")
p.add_argument("-s", "--strip", action="store_true", help="Strip extractable data")
args = p.parse_args(sys.argv[1:])
if args.strip:
yaml.add_representer(unitypack.engine.movie.MovieTexture, movietexture_representer)
yaml.add_representer(unitypack.engine.text.TextAsset, textasset_representer)
yaml.add_representer(unitypack.engine.texture.Texture2D, texture2d_representer)
for file in args.files:
if file.endswith(".assets"):
with open(file, "rb") as f:
asset = unitypack.Asset.from_file(f)
handle_asset(asset)
continue
with open(file, "rb") as f:
bundle = unitypack.load(f)
for asset in bundle.assets:
handle_asset(asset)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a yaml dumper for asset bundles<commit_after>#!/usr/bin/env python
import os
import sys
import unitypack
import yaml
from argparse import ArgumentParser
def handle_asset(asset):
for id, obj in asset.objects.items():
d = obj.read()
print(yaml.dump(d))
def asset_representer(dumper, data):
return dumper.represent_scalar("!asset", data.name)
yaml.add_representer(unitypack.Asset, asset_representer)
def objectpointer_representer(dumper, data):
return dumper.represent_mapping("!pptr", {
"file": data.file_id, "path": data.path_id
})
yaml.add_representer(unitypack.ObjectPointer, objectpointer_representer)
def textasset_representer(dumper, data):
obj = data._obj.copy()
obj["m_Script"] = "<stripped>"
return dumper.represent_mapping("!TextAsset", obj)
def texture2d_representer(dumper, data):
obj = data._obj.copy()
obj["image data"] = "<stripped>"
return dumper.represent_mapping("!Texture2D", obj)
def movietexture_representer(dumper, data):
obj = data._obj.copy()
obj["m_MovieData"] = "<stripped>"
return dumper.represent_mapping("!MovieTexture", obj)
def main():
p = ArgumentParser()
p.add_argument("files", nargs="+")
p.add_argument("-s", "--strip", action="store_true", help="Strip extractable data")
args = p.parse_args(sys.argv[1:])
if args.strip:
yaml.add_representer(unitypack.engine.movie.MovieTexture, movietexture_representer)
yaml.add_representer(unitypack.engine.text.TextAsset, textasset_representer)
yaml.add_representer(unitypack.engine.texture.Texture2D, texture2d_representer)
for file in args.files:
if file.endswith(".assets"):
with open(file, "rb") as f:
asset = unitypack.Asset.from_file(f)
handle_asset(asset)
continue
with open(file, "rb") as f:
bundle = unitypack.load(f)
for asset in bundle.assets:
handle_asset(asset)
if __name__ == "__main__":
main()
|
|
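The dumper above keeps its output readable by registering PyYAML representers that overwrite bulky fields (script text, image data, movie data) with a "<stripped>" placeholder before the mapping is emitted. The same pattern extends to any further Unity type; a generic sketch, where SomeAssetClass and its heavy field are placeholders rather than real unitypack names:

    import yaml

    def stripped_representer(tag, heavy_field):
        def representer(dumper, data):
            obj = data._obj.copy()          # copy so the loaded object is left untouched
            obj[heavy_field] = "<stripped>"
            return dumper.represent_mapping(tag, obj)
        return representer

    # yaml.add_representer(SomeAssetClass, stripped_representer("!SomeAsset", "m_Data"))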
55fc4153ece6e47dc799fd447356e54434475c77
|
scripts/officediff/pptx-dump.py
|
scripts/officediff/pptx-dump.py
|
import sys
from pptx import Presentation
for slide in Presentation(sys.argv[1]).slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
print(run.text)
|
Add Python script for Excel file diffs
|
Add Python script for Excel file diffs
|
Python
|
mit
|
Stratus3D/dotfiles,Stratus3D/dotfiles,Stratus3D/dotfiles
|
Add Python script for Excel file diffs
|
import sys
from pptx import Presentation
for slide in Presentation(sys.argv[1]).slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
print(run.text)
|
<commit_before><commit_msg>Add Python script for Excel file diffs<commit_after>
|
import sys
from pptx import Presentation
for slide in Presentation(sys.argv[1]).slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
print(run.text)
|
Add Python script for Excel file diffs
import sys
from pptx import Presentation
for slide in Presentation(sys.argv[1]).slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
print(run.text)
|
<commit_before><commit_msg>Add Python script for Excel file diffs<commit_after>import sys
from pptx import Presentation
for slide in Presentation(sys.argv[1]).slides:
for shape in slide.shapes:
if not shape.has_text_frame:
continue
for paragraph in shape.text_frame.paragraphs:
for run in paragraph.runs:
print(run.text)
|
|
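Despite the commit message, the script above dumps text runs from PowerPoint files via python-pptx, presumably for use as a git textconv filter so Office files diff as plain text. The analogous dump for Excel workbooks would go through openpyxl; a sketch, not part of this commit:

    import sys
    from openpyxl import load_workbook

    wb = load_workbook(sys.argv[1], read_only=True, data_only=True)
    for ws in wb.worksheets:
        for row in ws.iter_rows(values_only=True):
            for cell in row:
                if cell is not None:
                    print(cell)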
6425c20b536e8952b062ccb8b470ea615ebc0fa2
|
conman/routes/migrations/0002_simplify_route_slug_help_text.py
|
conman/routes/migrations/0002_simplify_route_slug_help_text.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='slug',
field=models.SlugField(max_length=255, help_text='The url fragment at this point in the Route hierarchy.', default=''),
),
]
|
Add missing migration to routes app
|
Add missing migration to routes app
|
Python
|
bsd-2-clause
|
meshy/django-conman,Ian-Foote/django-conman,meshy/django-conman
|
Add missing migration to routes app
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='slug',
field=models.SlugField(max_length=255, help_text='The url fragment at this point in the Route hierarchy.', default=''),
),
]
|
<commit_before><commit_msg>Add missing migration to routes app<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='slug',
field=models.SlugField(max_length=255, help_text='The url fragment at this point in the Route hierarchy.', default=''),
),
]
|
Add missing migration to routes app
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='slug',
field=models.SlugField(max_length=255, help_text='The url fragment at this point in the Route hierarchy.', default=''),
),
]
|
<commit_before><commit_msg>Add missing migration to routes app<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='slug',
field=models.SlugField(max_length=255, help_text='The url fragment at this point in the Route hierarchy.', default=''),
),
]
|
|
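The AlterField above only adjusts metadata (the help_text and the empty-string default) on Route.slug. The model declaration it tracks would look roughly like this, with every other field elided:

    from django.db import models

    class Route(models.Model):
        slug = models.SlugField(
            max_length=255,
            default='',
            help_text='The url fragment at this point in the Route hierarchy.',
        )
        # remaining fields omitted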
678672d72accb773a3080394d4fd6936459c9f11
|
bio/subs_matrix.py
|
bio/subs_matrix.py
|
from __future__ import division
def load_subs_matrix(matrix_name):
import Bio.SubsMat.MatrixInfo as matrix_info
# Try to get the requested substitution matrix from biopython, and
# complain if no such matrix exists.
try:
half_matrix = getattr(matrix_info, matrix_name)
except AttributeError:
raise ValueError("No such substitution matrix '{}'.".format(matrix_name))
# For convenience, make sure the matrix is square (i.e. symmetrical).
full_matrix = {}
for i, j in half_matrix:
full_matrix[i, j] = half_matrix[i, j]
full_matrix[j, i] = half_matrix[i, j]
return full_matrix
def score_gap_free_alignment(seq_1, seq_2, subs_matrix):
if len(seq_1) != len(seq_2):
raise ValueError("sequence lengths don't match")
if '-' in seq_1 or '-' in seq_2:
raise ValueError("sequences with gaps are not supported.")
score = 0
max_score = 0
for aa_1, aa_2 in zip(seq_1, seq_2):
score += subs_matrix[aa_1, aa_2]
max_score += max(subs_matrix[aa_1, aa_1], subs_matrix[aa_2, aa_2])
# Convert the substitution matrix score into a normalized distance.
return 1 - max(score, 0) / max_score
|
Add a module for working with PAM, BLOSUM, etc.
|
Add a module for working with PAM, BLOSUM, etc.
|
Python
|
mit
|
Kortemme-Lab/klab,Kortemme-Lab/klab,Kortemme-Lab/klab,Kortemme-Lab/klab
|
Add a module for working with PAM, BLOSUM, etc.
|
from __future__ import division
def load_subs_matrix(matrix_name):
import Bio.SubsMat.MatrixInfo as matrix_info
# Try to get the requested substitution matrix from biopython, and
# complain if no such matrix exists.
try:
half_matrix = getattr(matrix_info, matrix_name)
except AttributeError:
raise ValueError("No such substitution matrix '{}'.".format(matrix_name))
# For convenience, make sure the matrix is square (i.e. symmetrical).
full_matrix = {}
for i, j in half_matrix:
full_matrix[i, j] = half_matrix[i, j]
full_matrix[j, i] = half_matrix[i, j]
return full_matrix
def score_gap_free_alignment(seq_1, seq_2, subs_matrix):
if len(seq_1) != len(seq_2):
raise ValueError("sequence lengths don't match")
if '-' in seq_1 or '-' in seq_2:
raise ValueError("sequences with gaps are not supported.")
score = 0
max_score = 0
for aa_1, aa_2 in zip(seq_1, seq_2):
score += subs_matrix[aa_1, aa_2]
max_score += max(subs_matrix[aa_1, aa_1], subs_matrix[aa_2, aa_2])
# Convert the substitution matrix score into a normalized distance.
return 1 - max(score, 0) / max_score
|
<commit_before><commit_msg>Add a module for working with PAM, BLOSUM, etc.<commit_after>
|
from __future__ import division
def load_subs_matrix(matrix_name):
import Bio.SubsMat.MatrixInfo as matrix_info
# Try to get the requested substitution matrix from biopython, and
# complain if no such matrix exists.
try:
half_matrix = getattr(matrix_info, matrix_name)
except AttributeError:
raise ValueError("No such substitution matrix '{}'.".format(matrix_name))
# For convenience, make sure the matrix is square (i.e. symmetrical).
full_matrix = {}
for i, j in half_matrix:
full_matrix[i, j] = half_matrix[i, j]
full_matrix[j, i] = half_matrix[i, j]
return full_matrix
def score_gap_free_alignment(seq_1, seq_2, subs_matrix):
if len(seq_1) != len(seq_2):
raise ValueError("sequence lengths don't match")
if '-' in seq_1 or '-' in seq_2:
raise ValueError("sequences with gaps are not supported.")
score = 0
max_score = 0
for aa_1, aa_2 in zip(seq_1, seq_2):
score += subs_matrix[aa_1, aa_2]
max_score += max(subs_matrix[aa_1, aa_1], subs_matrix[aa_2, aa_2])
# Convert the substitution matrix score into a normalized distance.
return 1 - max(score, 0) / max_score
|
Add a module for working with PAM, BLOSUM, etc.
from __future__ import division
def load_subs_matrix(matrix_name):
import Bio.SubsMat.MatrixInfo as matrix_info
# Try to get the requested substitution matrix from biopython, and
# complain if no such matrix exists.
try:
half_matrix = getattr(matrix_info, matrix_name)
except AttributeError:
raise ValueError("No such substitution matrix '{}'.".format(matrix_name))
# For convenience, make sure the matrix is square (i.e. symmetrical).
full_matrix = {}
for i, j in half_matrix:
full_matrix[i, j] = half_matrix[i, j]
full_matrix[j, i] = half_matrix[i, j]
return full_matrix
def score_gap_free_alignment(seq_1, seq_2, subs_matrix):
if len(seq_1) != len(seq_2):
raise ValueError("sequence lengths don't match")
if '-' in seq_1 or '-' in seq_2:
raise ValueError("sequences with gaps are not supported.")
score = 0
max_score = 0
for aa_1, aa_2 in zip(seq_1, seq_2):
score += subs_matrix[aa_1, aa_2]
max_score += max(subs_matrix[aa_1, aa_1], subs_matrix[aa_2, aa_2])
# Convert the substitution matrix score into a normalized distance.
return 1 - max(score, 0) / max_score
|
<commit_before><commit_msg>Add a module for working with PAM, BLOSUM, etc.<commit_after>from __future__ import division
def load_subs_matrix(matrix_name):
import Bio.SubsMat.MatrixInfo as matrix_info
# Try to get the requested substitution matrix from biopython, and
# complain if no such matrix exists.
try:
half_matrix = getattr(matrix_info, matrix_name)
except AttributeError:
raise ValueError("No such substitution matrix '{}'.".format(matrix_name))
# For convenience, make sure the matrix is square (i.e. symmetrical).
full_matrix = {}
for i, j in half_matrix:
full_matrix[i, j] = half_matrix[i, j]
full_matrix[j, i] = half_matrix[i, j]
return full_matrix
def score_gap_free_alignment(seq_1, seq_2, subs_matrix):
if len(seq_1) != len(seq_2):
raise ValueError("sequence lengths don't match")
if '-' in seq_1 or '-' in seq_2:
raise ValueError("sequences with gaps are not supported.")
score = 0
max_score = 0
for aa_1, aa_2 in zip(seq_1, seq_2):
score += subs_matrix[aa_1, aa_2]
max_score += max(subs_matrix[aa_1, aa_1], subs_matrix[aa_2, aa_2])
# Convert the substitution matrix score into a normalized distance.
return 1 - max(score, 0) / max_score
|
|
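score_gap_free_alignment above turns a substitution-matrix score into a distance in [0, 1] by normalizing against the best attainable self-substitution score at each position, and load_subs_matrix symmetrizes the matrix so lookup order never matters. A short usage sketch with Biopython's bundled blosum62 matrix:

    subs = load_subs_matrix('blosum62')
    d = score_gap_free_alignment('MKTAYIAK', 'MKTAHIAK', subs)
    # d is 0.0 for sequences that score as well as a perfect self-match,
    # and grows toward 1.0 as the aligned residues become less similar.
    print(d)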
764e0b742351c07dda5657fb2dc46f45dce4a3ef
|
migrations/versions/86b41c3dbd00_add_indexes_on_driver_for_licence_and_.py
|
migrations/versions/86b41c3dbd00_add_indexes_on_driver_for_licence_and_.py
|
"""Add indexes on driver for licence and departement
Revision ID: 86b41c3dbd00
Revises: ccd5b0142a76
Create Date: 2019-10-21 15:55:35.965422
"""
# revision identifiers, used by Alembic.
revision = '86b41c3dbd00'
down_revision = 'ccd5b0142a76'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False)
op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False)
def downgrade():
op.drop_index('driver_professional_licence_idx', table_name='driver')
op.drop_index('driver_departement_id_idx', table_name='driver')
|
Add migration to create index on driver for departement and licence
|
Add migration to create index on driver for departement and licence
|
Python
|
agpl-3.0
|
openmaraude/APITaxi,openmaraude/APITaxi
|
Add migration to create index on driver for departement and licence
|
"""Add indexes on driver for licence and departement
Revision ID: 86b41c3dbd00
Revises: ccd5b0142a76
Create Date: 2019-10-21 15:55:35.965422
"""
# revision identifiers, used by Alembic.
revision = '86b41c3dbd00'
down_revision = 'ccd5b0142a76'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False)
op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False)
def downgrade():
op.drop_index('driver_professional_licence_idx', table_name='driver')
op.drop_index('driver_departement_id_idx', table_name='driver')
|
<commit_before><commit_msg>Add migration to create index on driver for departement and licence<commit_after>
|
"""Add indexes on driver for licence and departement
Revision ID: 86b41c3dbd00
Revises: ccd5b0142a76
Create Date: 2019-10-21 15:55:35.965422
"""
# revision identifiers, used by Alembic.
revision = '86b41c3dbd00'
down_revision = 'ccd5b0142a76'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False)
op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False)
def downgrade():
op.drop_index('driver_professional_licence_idx', table_name='driver')
op.drop_index('driver_departement_id_idx', table_name='driver')
|
Add migration to create index on driver for departement and licence
"""Add indexes on driver for licence and departement
Revision ID: 86b41c3dbd00
Revises: ccd5b0142a76
Create Date: 2019-10-21 15:55:35.965422
"""
# revision identifiers, used by Alembic.
revision = '86b41c3dbd00'
down_revision = 'ccd5b0142a76'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False)
op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False)
def downgrade():
op.drop_index('driver_professional_licence_idx', table_name='driver')
op.drop_index('driver_departement_id_idx', table_name='driver')
|
<commit_before><commit_msg>Add migration to create index on driver for departement and licence<commit_after>"""Add indexes on driver for licence and departement
Revision ID: 86b41c3dbd00
Revises: ccd5b0142a76
Create Date: 2019-10-21 15:55:35.965422
"""
# revision identifiers, used by Alembic.
revision = '86b41c3dbd00'
down_revision = 'ccd5b0142a76'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False)
op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False)
def downgrade():
op.drop_index('driver_professional_licence_idx', table_name='driver')
op.drop_index('driver_departement_id_idx', table_name='driver')
|
|
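The Alembic migration above names its indexes explicitly; on the model side that usually corresponds to Index entries in __table_args__. A rough sketch of that declaration; the column types and the rest of the Driver model are placeholders, not taken from APITaxi:

    from sqlalchemy import Column, Index, Integer, String

    class Driver(Base):                      # Base: the project's declarative base (assumed)
        __tablename__ = 'driver'
        id = Column(Integer, primary_key=True)
        departement_id = Column(Integer)
        professional_licence = Column(String)
        __table_args__ = (
            Index('driver_departement_id_idx', 'departement_id'),
            Index('driver_professional_licence_idx', 'professional_licence'),
        )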
4a382bcf4ab0022c7cf24d3e1ef187dd9a5d388b
|
test/test_sclopf_scigrid.py
|
test/test_sclopf_scigrid.py
|
from __future__ import print_function, division
from __future__ import absolute_import
import pypsa
import numpy as np
def test_sclopf():
csv_folder_name = "../examples/scigrid-de/scigrid-with-load-gen-trafos/"
network = pypsa.Network(csv_folder_name=csv_folder_name)
#test results were generated with GLPK and other solvers may differ
solver_name = "glpk"
#There are some infeasibilities without line extensions
for line_name in ["316","527","602"]:
network.lines.loc[line_name,"s_nom"] = 1200
network.now = network.snapshots[0]
#choose the contingencies
branch_outages = network.lines.index[:3]
print("Performing security-constrained linear OPF:")
network.sclopf(branch_outages=branch_outages)
#For the PF, set the P to the optimised P
network.generators_t.p_set.loc[network.now] = network.generators_t.p.loc[network.now]
network.storage_units_t.p_set.loc[network.now] = network.storage_units_t.p.loc[network.now]
#Check no lines are overloaded with the linear contingency analysis
p0_test = network.lpf_contingency(branch_outages=branch_outages)
#check loading as per unit of s_nom in each contingency
max_loading = abs(p0_test.divide(network.passive_branches().s_nom,axis=0)).describe().loc["max"]
np.testing.assert_array_almost_equal(max_loading,np.ones((len(max_loading))))
if __name__ == "__main__":
test_sclopf()
|
Include unit test of Security-Constrained LOPF
|
Include unit test of Security-Constrained LOPF
|
Python
|
mit
|
PyPSA/PyPSA
|
Include unit test of Security-Constrained LOPF
|
from __future__ import print_function, division
from __future__ import absolute_import
import pypsa
import numpy as np
def test_sclopf():
csv_folder_name = "../examples/scigrid-de/scigrid-with-load-gen-trafos/"
network = pypsa.Network(csv_folder_name=csv_folder_name)
#test results were generated with GLPK and other solvers may differ
solver_name = "glpk"
#There are some infeasibilities without line extensions
for line_name in ["316","527","602"]:
network.lines.loc[line_name,"s_nom"] = 1200
network.now = network.snapshots[0]
#choose the contingencies
branch_outages = network.lines.index[:3]
print("Performing security-constrained linear OPF:")
network.sclopf(branch_outages=branch_outages)
#For the PF, set the P to the optimised P
network.generators_t.p_set.loc[network.now] = network.generators_t.p.loc[network.now]
network.storage_units_t.p_set.loc[network.now] = network.storage_units_t.p.loc[network.now]
#Check no lines are overloaded with the linear contingency analysis
p0_test = network.lpf_contingency(branch_outages=branch_outages)
#check loading as per unit of s_nom in each contingency
max_loading = abs(p0_test.divide(network.passive_branches().s_nom,axis=0)).describe().loc["max"]
np.testing.assert_array_almost_equal(max_loading,np.ones((len(max_loading))))
if __name__ == "__main__":
test_sclopf()
|
<commit_before><commit_msg>Include unit test of Security-Constrained LOPF<commit_after>
|
from __future__ import print_function, division
from __future__ import absolute_import
import pypsa
import numpy as np
def test_sclopf():
csv_folder_name = "../examples/scigrid-de/scigrid-with-load-gen-trafos/"
network = pypsa.Network(csv_folder_name=csv_folder_name)
#test results were generated with GLPK and other solvers may differ
solver_name = "glpk"
#There are some infeasibilities without line extensions
for line_name in ["316","527","602"]:
network.lines.loc[line_name,"s_nom"] = 1200
network.now = network.snapshots[0]
#choose the contingencies
branch_outages = network.lines.index[:3]
print("Performing security-constrained linear OPF:")
network.sclopf(branch_outages=branch_outages)
#For the PF, set the P to the optimised P
network.generators_t.p_set.loc[network.now] = network.generators_t.p.loc[network.now]
network.storage_units_t.p_set.loc[network.now] = network.storage_units_t.p.loc[network.now]
#Check no lines are overloaded with the linear contingency analysis
p0_test = network.lpf_contingency(branch_outages=branch_outages)
#check loading as per unit of s_nom in each contingency
max_loading = abs(p0_test.divide(network.passive_branches().s_nom,axis=0)).describe().loc["max"]
np.testing.assert_array_almost_equal(max_loading,np.ones((len(max_loading))))
if __name__ == "__main__":
test_sclopf()
|
Include unit test of Security-Constrained LOPFfrom __future__ import print_function, division
from __future__ import absolute_import
import pypsa
import numpy as np
def test_sclopf():
csv_folder_name = "../examples/scigrid-de/scigrid-with-load-gen-trafos/"
network = pypsa.Network(csv_folder_name=csv_folder_name)
#test results were generated with GLPK and other solvers may differ
solver_name = "glpk"
#There are some infeasibilities without line extensions
for line_name in ["316","527","602"]:
network.lines.loc[line_name,"s_nom"] = 1200
network.now = network.snapshots[0]
#choose the contingencies
branch_outages = network.lines.index[:3]
print("Performing security-constrained linear OPF:")
network.sclopf(branch_outages=branch_outages)
#For the PF, set the P to the optimised P
network.generators_t.p_set.loc[network.now] = network.generators_t.p.loc[network.now]
network.storage_units_t.p_set.loc[network.now] = network.storage_units_t.p.loc[network.now]
#Check no lines are overloaded with the linear contingency analysis
p0_test = network.lpf_contingency(branch_outages=branch_outages)
#check loading as per unit of s_nom in each contingency
max_loading = abs(p0_test.divide(network.passive_branches().s_nom,axis=0)).describe().loc["max"]
np.testing.assert_array_almost_equal(max_loading,np.ones((len(max_loading))))
if __name__ == "__main__":
test_sclopf()
|
<commit_before><commit_msg>Include unit test of Security-Constrained LOPF<commit_after>from __future__ import print_function, division
from __future__ import absolute_import
import pypsa
import numpy as np
def test_sclopf():
csv_folder_name = "../examples/scigrid-de/scigrid-with-load-gen-trafos/"
network = pypsa.Network(csv_folder_name=csv_folder_name)
#test results were generated with GLPK and other solvers may differ
solver_name = "glpk"
#There are some infeasibilities without line extensions
for line_name in ["316","527","602"]:
network.lines.loc[line_name,"s_nom"] = 1200
network.now = network.snapshots[0]
#choose the contingencies
branch_outages = network.lines.index[:3]
print("Performing security-constrained linear OPF:")
network.sclopf(branch_outages=branch_outages)
#For the PF, set the P to the optimised P
network.generators_t.p_set.loc[network.now] = network.generators_t.p.loc[network.now]
network.storage_units_t.p_set.loc[network.now] = network.storage_units_t.p.loc[network.now]
#Check no lines are overloaded with the linear contingency analysis
p0_test = network.lpf_contingency(branch_outages=branch_outages)
#check loading as per unit of s_nom in each contingency
max_loading = abs(p0_test.divide(network.passive_branches().s_nom,axis=0)).describe().loc["max"]
np.testing.assert_array_almost_equal(max_loading,np.ones((len(max_loading))))
if __name__ == "__main__":
test_sclopf()
|
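A small illustration of what the final assertion in the test above checks: each column of the lpf_contingency result (the base case plus one column per outage) is divided by the branch ratings, and the per-column maximum loading is expected to come out at essentially 1.0. The sketch below only reproduces that pandas computation on made-up numbers; the branch and outage names are invented and this is not SciGRID data.
import numpy as np
import pandas as pd

s_nom = pd.Series([100.0, 200.0], index=["line_a", "line_b"])
# rows = branches, columns = base case plus one column per simulated outage
p0_test = pd.DataFrame({"base": [80.0, 150.0], "outage_line_c": [100.0, 120.0]},
                       index=["line_a", "line_b"])
max_loading = abs(p0_test.divide(s_nom, axis=0)).describe().loc["max"]
print(max_loading)  # base 0.80, outage_line_c 1.00; the real test expects every column to reach 1.0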
|
f0d9944ccb7838bf438f6e4ff36ddd69941830f6
|
scripts/kmeans_generator.py
|
scripts/kmeans_generator.py
|
import random
D = 5
K = 5
N = 10
means = [[j * 10 for i in range(D)] for j in range(-2, 3)]
for i in range(N):
mean = random.choice(means)
point = [random.gauss(c, 2.5) for c in mean]
print(','.join("{:0.8f}".format(i) for i in point))
|
Add simple script to generate K-Means points.
|
Add simple script to generate K-Means points.
The data generated by this script is _very_ nice.
|
Python
|
apache-2.0
|
yliu120/K3,DaMSL/K3,DaMSL/K3
|
Add simple script to generate K-Means points.
The data generated by this script is _very_ nice.
|
import random
D = 5
K = 5
N = 10
means = [[j * 10 for i in range(D)] for j in range(-2, 3)]
for i in range(N):
mean = random.choice(means)
point = [random.gauss(c, 2.5) for c in mean]
print(','.join("{:0.8f}".format(i) for i in point))
|
<commit_before><commit_msg>Add simple script to generate K-Means points.
The data generated by this script is _very_ nice.<commit_after>
|
import random
D = 5
K = 5
N = 10
means = [[j * 10 for i in range(D)] for j in range(-2, 3)]
for i in range(N):
mean = random.choice(means)
point = [random.gauss(c, 2.5) for c in mean]
print(','.join("{:0.8f}".format(i) for i in point))
|
Add simple script to generate K-Means points.
The data generated by this script is _very_ nice.import random
D = 5
K = 5
N = 10
means = [[j * 10 for i in range(D)] for j in range(-2, 3)]
for i in range(N):
mean = random.choice(means)
point = [random.gauss(c, 2.5) for c in mean]
print(','.join("{:0.8f}".format(i) for i in point))
|
<commit_before><commit_msg>Add simple script to generate K-Means points.
The data generated by this script is _very_ nice.<commit_after>import random
D = 5
K = 5
N = 10
means = [[j * 10 for i in range(D)] for j in range(-2, 3)]
for i in range(N):
mean = random.choice(means)
point = [random.gauss(c, 2.5) for c in mean]
print(','.join("{:0.8f}".format(i) for i in point))
|
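The generator above writes one comma-separated D-dimensional point per line, each drawn from one of the Gaussian cluster centres. A minimal sketch of reading such output back into K-Means-ready points (the file name points.csv is only a placeholder; the script itself prints to stdout, so the output would normally be redirected into a file first):
with open("points.csv") as f:
    points = [[float(x) for x in line.split(",")] for line in f if line.strip()]
print(len(points), "points of dimension", len(points[0]))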
|
38fff5dec5a39fab6ab67c92854c9e2843cb49fc
|
syslog-logger.py
|
syslog-logger.py
|
import logging
import logging.handlers
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def hello():
log.debug('this is debug')
log.critical('this is critical')
if __name__ == '__main__':
hello()
|
Add script logging to syslog example
|
Add script logging to syslog example
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add script logging to syslog example
|
import logging
import logging.handlers
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def hello():
log.debug('this is debug')
log.critical('this is critical')
if __name__ == '__main__':
hello()
|
<commit_before><commit_msg>Add script logging to syslog example<commit_after>
|
import logging
import logging.handlers
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def hello():
log.debug('this is debug')
log.critical('this is critical')
if __name__ == '__main__':
hello()
|
Add script logging to syslog exampleimport logging
import logging.handlers
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def hello():
log.debug('this is debug')
log.critical('this is critical')
if __name__ == '__main__':
hello()
|
<commit_before><commit_msg>Add script logging to syslog example<commit_after>import logging
import logging.handlers
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('%(module)s.%(funcName)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
def hello():
log.debug('this is debug')
log.critical('this is critical')
if __name__ == '__main__':
hello()
|
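One way to sanity-check the handler wiring above during development is to mirror the same records to stderr. The lines below are a sketch meant to be appended to the script (they reuse its log object and formatter pattern) and are not part of the original commit.
console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(module)s.%(funcName)s: %(message)s'))
log.addHandler(console)  # hello() now also prints its debug/critical messages to stderr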
|
2e7866be517bb66907b4bba85f0a45c5310c0ddc
|
mini-project.py
|
mini-project.py
|
# Load the machine
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
# Other stuff
from cothread.catools import caget, caput, ca_nothing
# Load the machine
ap.machines.load('SRI21')
# First task
BPMS = ap.getElements('BPM')
print('There are {} BPM elements in the machine.'.format(len(BPMS)))
# Second task
print('A list of all the PV names for all BPMS')
for BPM in range(len(BPMS)):
print(BPMS[BPM].pv())
print caget(BPMS[BPM].pv())
# Third task
QUADS = ap.getElements('QUAD')
print('String values for the setpoint currents')
for QUAD in range(len(QUADS)):
print ('Readback value = {}'.format
(caget(QUADS[QUAD].pv(handle='readback'))))
print ('Setpoint value = {}'.format
(caget(QUADS[QUAD].pv(handle='setpoint'))))
|
Print values of both setpoint and readback currents
|
Print values of both setpoint and readback currents
|
Python
|
apache-2.0
|
razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects,razvanvasile/Work-Mini-Projects
|
Print values of both setpoint and readback currents
|
# Load the machine
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
# Other stuff
from cothread.catools import caget, caput, ca_nothing
# Load the machine
ap.machines.load('SRI21')
# First task
BPMS = ap.getElements('BPM')
print('There are {} BPM elements in the machine.'.format(len(BPMS)))
# Second task
print('A list of all the PV names for all BPMS')
for BPM in range(len(BPMS)):
print(BPMS[BPM].pv())
print caget(BPMS[BPM].pv())
# Third task
QUADS = ap.getElements('QUAD')
print('String values for the setpoint currents')
for QUAD in range(len(QUADS)):
print ('Readback value = {}'.format
(caget(QUADS[QUAD].pv(handle='readback'))))
print ('Setpoint value = {}'.format
(caget(QUADS[QUAD].pv(handle='setpoint'))))
|
<commit_before><commit_msg>Print values of both setpoint and readback currents<commit_after>
|
# Load the machine
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
# Other stuff
from cothread.catools import caget, caput, ca_nothing
# Load the machine
ap.machines.load('SRI21')
# First task
BPMS = ap.getElements('BPM')
print('There are {} BPM elements in the machine.'.format(len(BPMS)))
# Second task
print('A list of all the PV names for all BPMS')
for BPM in range(len(BPMS)):
print(BPMS[BPM].pv())
print caget(BPMS[BPM].pv())
# Third task
QUADS = ap.getElements('QUAD')
print('String values for the setpoint currents')
for QUAD in range(len(QUADS)):
print ('Readback value = {}'.format
(caget(QUADS[QUAD].pv(handle='readback'))))
print ('Setpoint value = {}'.format
(caget(QUADS[QUAD].pv(handle='setpoint'))))
|
Print values of both setpoint and readback currents# Load the machine
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
# Other stuff
from cothread.catools import caget, caput, ca_nothing
# Load the machine
ap.machines.load('SRI21')
# First task
BPMS = ap.getElements('BPM')
print('There are {} BPM elements in the machine.'.format(len(BPMS)))
# Second task
print('A list of all the PV names for all BPMS')
for BPM in range(len(BPMS)):
print(BPMS[BPM].pv())
print caget(BPMS[BPM].pv())
# Third task
QUADS = ap.getElements('QUAD')
print('String values for the setpoint currents')
for QUAD in range(len(QUADS)):
print ('Readback value = {}'.format
(caget(QUADS[QUAD].pv(handle='readback'))))
print ('Setpoint value = {}'.format
(caget(QUADS[QUAD].pv(handle='setpoint'))))
|
<commit_before><commit_msg>Print values of both setpoint and readback currents<commit_after># Load the machine
import pkg_resources
pkg_resources.require('aphla')
import aphla as ap
# Other stuff
from cothread.catools import caget, caput, ca_nothing
# Load the machine
ap.machines.load('SRI21')
# First task
BPMS = ap.getElements('BPM')
print('There are {} BPM elements in the machine.'.format(len(BPMS)))
# Second task
print('A list of all the PV names for all BPMS')
for BPM in range(len(BPMS)):
print(BPMS[BPM].pv())
print caget(BPMS[BPM].pv())
# Third task
QUADS = ap.getElements('QUAD')
print('String values for the setpoint currents')
for QUAD in range(len(QUADS)):
print ('Readback value = {}'.format
(caget(QUADS[QUAD].pv(handle='readback'))))
print ('Setpoint value = {}'.format
(caget(QUADS[QUAD].pv(handle='setpoint'))))
|
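A possible refinement of the per-PV loop above, sketched only: cothread's caget also accepts a list of PV names, so the BPM readings could be fetched in one batched channel-access call. Whether element.pv() returns a single name or a list depends on aphla, hence the defensive handling below.
pv_names = []
for BPM in BPMS:
    pvs = BPM.pv()
    pv_names.extend(pvs if isinstance(pvs, list) else [pvs])  # assumes .pv() gives name(s)
values = caget(pv_names)  # one batched read instead of one caget per BPM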
|
e7f0198684faf5c38d78a6ced7f0ff765f1ec17e
|
language/visualize.py
|
language/visualize.py
|
#!/usr/bin/env python
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import subprocess, sys
def process(graph, output_filename):
proc = subprocess.Popen(['dot', '-Kdot', '-Tpdf', '-o', output_filename], stdin = subprocess.PIPE)
proc.communicate(graph)
if proc.returncode != 0:
assert False
def driver(prefix, suffix):
run = []
acc = False
num = 1
for line in sys.stdin.read().split('\n'):
if line == 'digraph {':
acc = True
if acc:
run.append(line)
if acc and line == '}':
acc = False
process('\n'.join(run), '%s%s%s' % (prefix, num, suffix))
run = []
num += 1
if __name__ == '__main__':
driver('test_', '.pdf')
|
Add visualization script for RDIR flow graphs.
|
regent: Add visualization script for RDIR flow graphs.
|
Python
|
apache-2.0
|
StanfordLegion/legion,chuckatkins/legion,sdalton1/legion,StanfordLegion/legion,sdalton1/legion,StanfordLegion/legion,sdalton1/legion,StanfordLegion/legion,chuckatkins/legion,sdalton1/legion,StanfordLegion/legion,sdalton1/legion,chuckatkins/legion,chuckatkins/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,chuckatkins/legion,chuckatkins/legion,chuckatkins/legion,sdalton1/legion
|
regent: Add visualization script for RDIR flow graphs.
|
#!/usr/bin/env python
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import subprocess, sys
def process(graph, output_filename):
proc = subprocess.Popen(['dot', '-Kdot', '-Tpdf', '-o', output_filename], stdin = subprocess.PIPE)
proc.communicate(graph)
if proc.returncode != 0:
assert False
def driver(prefix, suffix):
run = []
acc = False
num = 1
for line in sys.stdin.read().split('\n'):
if line == 'digraph {':
acc = True
if acc:
run.append(line)
if acc and line == '}':
acc = False
process('\n'.join(run), '%s%s%s' % (prefix, num, suffix))
run = []
num += 1
if __name__ == '__main__':
driver('test_', '.pdf')
|
<commit_before><commit_msg>regent: Add visualization script for RDIR flow graphs.<commit_after>
|
#!/usr/bin/env python
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import subprocess, sys
def process(graph, output_filename):
proc = subprocess.Popen(['dot', '-Kdot', '-Tpdf', '-o', output_filename], stdin = subprocess.PIPE)
proc.communicate(graph)
if proc.returncode != 0:
assert False
def driver(prefix, suffix):
run = []
acc = False
num = 1
for line in sys.stdin.read().split('\n'):
if line == 'digraph {':
acc = True
if acc:
run.append(line)
if acc and line == '}':
acc = False
process('\n'.join(run), '%s%s%s' % (prefix, num, suffix))
run = []
num += 1
if __name__ == '__main__':
driver('test_', '.pdf')
|
regent: Add visualization script for RDIR flow graphs.#!/usr/bin/env python
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import subprocess, sys
def process(graph, output_filename):
proc = subprocess.Popen(['dot', '-Kdot', '-Tpdf', '-o', output_filename], stdin = subprocess.PIPE)
proc.communicate(graph)
if proc.returncode != 0:
assert False
def driver(prefix, suffix):
run = []
acc = False
num = 1
for line in sys.stdin.read().split('\n'):
if line == 'digraph {':
acc = True
if acc:
run.append(line)
if acc and line == '}':
acc = False
process('\n'.join(run), '%s%s%s' % (prefix, num, suffix))
run = []
num += 1
if __name__ == '__main__':
driver('test_', '.pdf')
|
<commit_before><commit_msg>regent: Add visualization script for RDIR flow graphs.<commit_after>#!/usr/bin/env python
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import subprocess, sys
def process(graph, output_filename):
proc = subprocess.Popen(['dot', '-Kdot', '-Tpdf', '-o', output_filename], stdin = subprocess.PIPE)
proc.communicate(graph)
if proc.returncode != 0:
assert False
def driver(prefix, suffix):
run = []
acc = False
num = 1
for line in sys.stdin.read().split('\n'):
if line == 'digraph {':
acc = True
if acc:
run.append(line)
if acc and line == '}':
acc = False
process('\n'.join(run), '%s%s%s' % (prefix, num, suffix))
run = []
num += 1
if __name__ == '__main__':
driver('test_', '.pdf')
|
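Usage sketch for the script above (the dump file name is a placeholder): it reads concatenated DOT graphs from stdin and shells out to Graphviz, so dot must be on PATH.
#   python visualize.py < rdir_dump.txt
# writes test_1.pdf, test_2.pdf, ... with one PDF per "digraph { ... }" block found on stdin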
|
a72d75648c32e1c221c42a6024d92a92fe9e82ec
|
orchestra/migrations/0027_create_time_entries_for_snapshots.py
|
orchestra/migrations/0027_create_time_entries_for_snapshots.py
|
# -*- coding: utf-8 -*-
# Manually written
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import dateutil
def create_time_entries(apps, schema_editor):
TaskAssignment = apps.get_model('orchestra', 'TaskAssignment')
TimeEntry = apps.get_model('orchestra', 'TimeEntry')
for assignment in TaskAssignment.objects.all():
# Check if keys exist before processing, to be compatible with previous
# versions of snapshots.
if 'snapshots' not in assignment.snapshots:
continue
for snapshot in assignment.snapshots['snapshots']:
if 'datetime' not in snapshot or 'work_time_seconds' not in snapshot:
continue
date = dateutil.parser.parse(snapshot['datetime']).date()
time_worked = datetime.timedelta(seconds=snapshot['work_time_seconds'])
TimeEntry.objects.get_or_create(assignment=assignment, date=date,
time_worked=time_worked)
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0026_timeentry'),
]
operations = [
migrations.RunPython(create_time_entries), # manually-reviewed
]
|
Migrate task assignment snapshots to TimeEntry
|
Migrate task assignment snapshots to TimeEntry
|
Python
|
apache-2.0
|
unlimitedlabs/orchestra,b12io/orchestra,b12io/orchestra,b12io/orchestra,unlimitedlabs/orchestra,b12io/orchestra,b12io/orchestra,unlimitedlabs/orchestra
|
Migrate task assignment snapshots to TimeEntry
|
# -*- coding: utf-8 -*-
# Manually written
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import dateutil
def create_time_entries(apps, schema_editor):
TaskAssignment = apps.get_model('orchestra', 'TaskAssignment')
TimeEntry = apps.get_model('orchestra', 'TimeEntry')
for assignment in TaskAssignment.objects.all():
# Check if keys exist before processing, to be compatible with previous
# versions of snapshots.
if 'snapshots' not in assignment.snapshots:
continue
for snapshot in assignment.snapshots['snapshots']:
if 'datetime' not in snapshot or 'work_time_seconds' not in snapshot:
continue
date = dateutil.parser.parse(snapshot['datetime']).date()
time_worked = datetime.timedelta(seconds=snapshot['work_time_seconds'])
TimeEntry.objects.get_or_create(assignment=assignment, date=date,
time_worked=time_worked)
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0026_timeentry'),
]
operations = [
migrations.RunPython(create_time_entries), # manually-reviewed
]
|
<commit_before><commit_msg>Migrate task assignment snapshots to TimeEntry<commit_after>
|
# -*- coding: utf-8 -*-
# Manually written
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import dateutil
def create_time_entries(apps, schema_editor):
TaskAssignment = apps.get_model('orchestra', 'TaskAssignment')
TimeEntry = apps.get_model('orchestra', 'TimeEntry')
for assignment in TaskAssignment.objects.all():
# Check if keys exist before processing, to be compatible with previous
# versions of snapshots.
if 'snapshots' not in assignment.snapshots:
continue
for snapshot in assignment.snapshots['snapshots']:
if 'datetime' not in snapshot or 'work_time_seconds' not in snapshot:
continue
date = dateutil.parser.parse(snapshot['datetime']).date()
time_worked = datetime.timedelta(seconds=snapshot['work_time_seconds'])
TimeEntry.objects.get_or_create(assignment=assignment, date=date,
time_worked=time_worked)
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0026_timeentry'),
]
operations = [
migrations.RunPython(create_time_entries), # manually-reviewed
]
|
Migrate task assignment snapshots to TimeEntry# -*- coding: utf-8 -*-
# Manually written
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import dateutil
def create_time_entries(apps, schema_editor):
TaskAssignment = apps.get_model('orchestra', 'TaskAssignment')
TimeEntry = apps.get_model('orchestra', 'TimeEntry')
for assignment in TaskAssignment.objects.all():
# Check if keys exist before processing, to be compatible with previous
# versions of snapshots.
if 'snapshots' not in assignment.snapshots:
continue
for snapshot in assignment.snapshots['snapshots']:
if 'datetime' not in snapshot or 'work_time_seconds' not in snapshot:
continue
date = dateutil.parser.parse(snapshot['datetime']).date()
time_worked = datetime.timedelta(seconds=snapshot['work_time_seconds'])
TimeEntry.objects.get_or_create(assignment=assignment, date=date,
time_worked=time_worked)
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0026_timeentry'),
]
operations = [
migrations.RunPython(create_time_entries), # manually-reviewed
]
|
<commit_before><commit_msg>Migrate task assignment snapshots to TimeEntry<commit_after># -*- coding: utf-8 -*-
# Manually written
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
import dateutil
def create_time_entries(apps, schema_editor):
TaskAssignment = apps.get_model('orchestra', 'TaskAssignment')
TimeEntry = apps.get_model('orchestra', 'TimeEntry')
for assignment in TaskAssignment.objects.all():
# Check if keys exist before processing, to be compatible with previous
# versions of snapshots.
if 'snapshots' not in assignment.snapshots:
continue
for snapshot in assignment.snapshots['snapshots']:
if 'datetime' not in snapshot or 'work_time_seconds' not in snapshot:
continue
date = dateutil.parser.parse(snapshot['datetime']).date()
time_worked = datetime.timedelta(seconds=snapshot['work_time_seconds'])
TimeEntry.objects.get_or_create(assignment=assignment, date=date,
time_worked=time_worked)
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0026_timeentry'),
]
operations = [
migrations.RunPython(create_time_entries), # manually-reviewed
]
|
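For reference, the snapshot layout the migration above expects on each TaskAssignment can be read off the code; an illustrative example (values invented) is shown below. Because it uses get_or_create, re-running the migration does not duplicate entries for the same (assignment, date, time_worked).
example_snapshots = {
    'snapshots': [
        {'datetime': '2016-01-15T10:30:00+00:00', 'work_time_seconds': 3600},
        {'datetime': '2016-01-16T09:05:00+00:00', 'work_time_seconds': 1800},
    ]
}
# -> two TimeEntry rows: (2016-01-15, 1:00:00) and (2016-01-16, 0:30:00)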
|
2f8e0d6c70160f646f374ef2431d819c3af6e5f3
|
orcalib/autoscaling.py
|
orcalib/autoscaling.py
|
import boto3
from aws_config import AwsConfig
from aws_config import OrcaConfig
class AwsAppAutoscaling(object):
'''
The class provides a simpler abstraction to the AWS boto3
Autoscaling client interface
'''
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None):
"""
Create an Autoscaling service client to one or more environments
by name.
"""
service = 'application-autoscaling'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
def list_scaling_policies(self, service, profile_names=None, regions=None):
scaling_policies = list()
for profile in self.clients.keys():
if profile_names is not None and profile not in profile_names:
continue
for region in self.regions:
if regions is not None and region not in regions:
continue
client = self.clients[profile][region]
policies = client.describe_scaling_policies(
ServiceNamespace=service)
for policy in policies['ScalingPolicies']:
policy['region'] = region
policy['profile_name'] = profile
scaling_policies.append(policy)
return scaling_policies
|
Add `list_scaling_policies` for App Autoscaling service
|
Add `list_scaling_policies` for App Autoscaling service
Note: current AWS orca user has no permissions to list scaling policies.
|
Python
|
apache-2.0
|
bdastur/orca,bdastur/orca
|
Add `list_scaling_policies` for App Autoscaling service
Note: current AWS orca user has no permissions to list scaling policies.
|
import boto3
from aws_config import AwsConfig
from aws_config import OrcaConfig
class AwsAppAutoscaling(object):
'''
The class provides a simpler abstraction to the AWS boto3
Autoscaling client interface
'''
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None):
"""
Create an Autoscaling service client to one or more environments
by name.
"""
service = 'application-autoscaling'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
def list_scaling_policies(self, service, profile_names=None, regions=None):
scaling_policies = list()
for profile in self.clients.keys():
if profile_names is not None and profile not in profile_names:
continue
for region in self.regions:
if regions is not None and region not in regions:
continue
client = self.clients[profile][region]
policies = client.describe_scaling_policies(
ServiceNamespace=service)
for policy in policies['ScalingPolicies']:
policy['region'] = region
policy['profile_name'] = profile
scaling_policies.append(policy)
return scaling_policies
|
<commit_before><commit_msg>Add `list_scaling_policies` for App Autoscaling service
Note: current AWS orca user has no permissions to list scaling policies.<commit_after>
|
import boto3
from aws_config import AwsConfig
from aws_config import OrcaConfig
class AwsAppAutoscaling(object):
'''
The class provides a simpler abstraction to the AWS boto3
Autoscaling client interface
'''
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None):
"""
Create an Autoscaling service client to one or more environments
by name.
"""
service = 'application-autoscaling'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
def list_scaling_policies(self, service, profile_names=None, regions=None):
scaling_policies = list()
for profile in self.clients.keys():
if profile_names is not None and profile not in profile_names:
continue
for region in self.regions:
if regions is not None and region not in regions:
continue
client = self.clients[profile][region]
policies = client.describe_scaling_policies(
ServiceNamespace=service)
for policy in policies['ScalingPolicies']:
policy['region'] = region
policy['profile_name'] = profile
scaling_policies.append(policy)
return scaling_policies
|
Add `list_scaling_policies` for App Autoscaling service
Note: current AWS orca user has no permissions to list scaling policies.import boto3
from aws_config import AwsConfig
from aws_config import OrcaConfig
class AwsAppAutoscaling(object):
'''
The class provides a simpler abstraction to the AWS boto3
Autoscaling client interface
'''
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None):
"""
Create an Autoscaling service client to one or more environments
by name.
"""
service = 'application-autoscaling'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
def list_scaling_policies(self, service, profile_names=None, regions=None):
scaling_policies = list()
for profile in self.clients.keys():
if profile_names is not None and profile not in profile_names:
continue
for region in self.regions:
if regions is not None and region not in regions:
continue
client = self.clients[profile][region]
policies = client.describe_scaling_policies(
ServiceNamespace=service)
for policy in policies['ScalingPolicies']:
policy['region'] = region
policy['profile_name'] = profile
scaling_policies.append(policy)
return scaling_policies
|
<commit_before><commit_msg>Add `list_scaling_policies` for App Autoscaling service
Note: current AWS orca user has no permissions to list scaling policies.<commit_after>import boto3
from aws_config import AwsConfig
from aws_config import OrcaConfig
class AwsAppAutoscaling(object):
'''
The class provides a simpler abstraction to the AWS boto3
Autoscaling client interface
'''
def __init__(self,
profile_names=None,
access_key_id=None,
secret_access_key=None):
"""
Create an Autoscaling service client to one or more environments
by name.
"""
service = 'application-autoscaling'
orca_config = OrcaConfig()
self.regions = orca_config.get_regions()
self.clients = {}
if profile_names is not None:
for profile_name in profile_names:
session = boto3.Session(profile_name=profile_name)
self.clients[profile_name] = session.client(service)
elif access_key_id is not None and secret_access_key is not None:
self.clients['default'] = boto3.client(
service,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
else:
awsconfig = AwsConfig()
profiles = awsconfig.get_profiles()
for profile in profiles:
session = boto3.Session(profile_name=profile)
self.clients[profile] = {}
for region in self.regions:
self.clients[profile][region] = \
session.client(service, region_name=region)
def list_scaling_policies(self, service, profile_names=None, regions=None):
scaling_policies = list()
for profile in self.clients.keys():
if profile_names is not None and profile not in profile_names:
continue
for region in self.regions:
if regions is not None and region not in regions:
continue
client = self.clients[profile][region]
policies = client.describe_scaling_policies(
ServiceNamespace=service)
for policy in policies['ScalingPolicies']:
policy['region'] = region
policy['profile_name'] = profile
scaling_policies.append(policy)
return scaling_policies
|
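A usage sketch for the new method above. 'ecs' is one valid ServiceNamespace for Application Autoscaling; profile and region names come from the Orca/AWS config, and the call only succeeds if the credentials are allowed to DescribeScalingPolicies (which, per the commit note, the current orca user is not).
client = AwsAppAutoscaling()
for policy in client.list_scaling_policies('ecs'):
    print(policy['profile_name'], policy['region'], policy.get('PolicyName'))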
|
eac082bff13b660a15dfcd00d73f1a0e89c292dd
|
polling_stations/apps/data_collection/management/commands/import_neath-pt.py
|
polling_stations/apps/data_collection/management/commands/import_neath-pt.py
|
"""
Import Neath Port Talbot
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import (
BaseAddressCsvImporter,
import_polling_station_shapefiles
)
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Neath Port Talbot
"""
council_id = 'W06000012'
addresses_name = 'polling_properties.csv'
stations_name = 'polling_stations.shp'
srid = 27700
elections = [
'pcc.2016-05-05',
'naw.c.2016-05-05',
'naw.r.2016-05-05',
'ref.2016-06-23'
]
# polling stations provided as shape files, not csv
def import_polling_stations(self):
import_polling_station_shapefiles(self)
def station_record_to_dict(self, record):
# format address
address_parts = []
address_parts.append(record[2])
for i in range(4,8):
if record[i].strip():
address_parts.append(record[i].strip())
address = "\n".join(address_parts)
return {
'internal_council_id': record[1],
'postcode' : record[8],
'address' : address
}
def address_record_to_dict(self, record):
# format address
if record.substreetn:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.substreetn)
address1 = address1.strip()
address2 = record.streetname
else:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.streetname)
address1 = address1.strip()
address2 = ""
address = "\n".join([
address1,
address2,
record.locality,
record.town
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return {
'address' : address,
'postcode' : record.housepostc,
'polling_station_id': record.pollingdis
}
|
Add import script for Neath Port Talbot
|
Add import script for Neath Port Talbot
There are a small number of duplicate rows in the address file
so this intentionally imports a slightly smaller number of
ResidentialAddress records than the number of rows in the csv.
Apart from that, all good.
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations
|
Add import script for Neath Port Talbot
There are a small number of duplicate rows in the address file
so this intentionally imports a slightly smaller number of
ResidentialAddress records than the number of rows in the csv.
Apart from that, all good.
|
"""
Import Neath Port Talbot
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import (
BaseAddressCsvImporter,
import_polling_station_shapefiles
)
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Neath Port Talbot
"""
council_id = 'W06000012'
addresses_name = 'polling_properties.csv'
stations_name = 'polling_stations.shp'
srid = 27700
elections = [
'pcc.2016-05-05',
'naw.c.2016-05-05',
'naw.r.2016-05-05',
'ref.2016-06-23'
]
# polling stations provided as shape files, not csv
def import_polling_stations(self):
import_polling_station_shapefiles(self)
def station_record_to_dict(self, record):
# format address
address_parts = []
address_parts.append(record[2])
for i in range(4,8):
if record[i].strip():
address_parts.append(record[i].strip())
address = "\n".join(address_parts)
return {
'internal_council_id': record[1],
'postcode' : record[8],
'address' : address
}
def address_record_to_dict(self, record):
# format address
if record.substreetn:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.substreetn)
address1 = address1.strip()
address2 = record.streetname
else:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.streetname)
address1 = address1.strip()
address2 = ""
address = "\n".join([
address1,
address2,
record.locality,
record.town
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return {
'address' : address,
'postcode' : record.housepostc,
'polling_station_id': record.pollingdis
}
|
<commit_before><commit_msg>Add import script for Neath Port Talbot
There are a small number of duplicate rows in the address file
so this intentionally imports a slightly smaller number of
ResidentialAddress records than the number of rows in the csv.
Apart from that, all good.<commit_after>
|
"""
Import Neath Port Talbot
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import (
BaseAddressCsvImporter,
import_polling_station_shapefiles
)
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Neath Port Talbot
"""
council_id = 'W06000012'
addresses_name = 'polling_properties.csv'
stations_name = 'polling_stations.shp'
srid = 27700
elections = [
'pcc.2016-05-05',
'naw.c.2016-05-05',
'naw.r.2016-05-05',
'ref.2016-06-23'
]
# polling stations provided as shape files, not csv
def import_polling_stations(self):
import_polling_station_shapefiles(self)
def station_record_to_dict(self, record):
# format address
address_parts = []
address_parts.append(record[2])
for i in range(4,8):
if record[i].strip():
address_parts.append(record[i].strip())
address = "\n".join(address_parts)
return {
'internal_council_id': record[1],
'postcode' : record[8],
'address' : address
}
def address_record_to_dict(self, record):
# format address
if record.substreetn:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.substreetn)
address1 = address1.strip()
address2 = record.streetname
else:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.streetname)
address1 = address1.strip()
address2 = ""
address = "\n".join([
address1,
address2,
record.locality,
record.town
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return {
'address' : address,
'postcode' : record.housepostc,
'polling_station_id': record.pollingdis
}
|
Add import script for Neath Port Talbot
There are a small number of duplicate rows in the address file
so this intentionally imports a slightly smaller number of
ResidentialAddress records than the number of rows in the csv.
Apart from that, all good."""
Import Neath Port Talbot
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import (
BaseAddressCsvImporter,
import_polling_station_shapefiles
)
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Neath Port Talbot
"""
council_id = 'W06000012'
addresses_name = 'polling_properties.csv'
stations_name = 'polling_stations.shp'
srid = 27700
elections = [
'pcc.2016-05-05',
'naw.c.2016-05-05',
'naw.r.2016-05-05',
'ref.2016-06-23'
]
# polling stations provided as shape files, not csv
def import_polling_stations(self):
import_polling_station_shapefiles(self)
def station_record_to_dict(self, record):
# format address
address_parts = []
address_parts.append(record[2])
for i in range(4,8):
if record[i].strip():
address_parts.append(record[i].strip())
address = "\n".join(address_parts)
return {
'internal_council_id': record[1],
'postcode' : record[8],
'address' : address
}
def address_record_to_dict(self, record):
# format address
if record.substreetn:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.substreetn)
address1 = address1.strip()
address2 = record.streetname
else:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.streetname)
address1 = address1.strip()
address2 = ""
address = "\n".join([
address1,
address2,
record.locality,
record.town
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return {
'address' : address,
'postcode' : record.housepostc,
'polling_station_id': record.pollingdis
}
|
<commit_before><commit_msg>Add import script for Neath Port Talbot
There are a small number of duplicate rows in the address file
so this intentionally imports a slightly smaller number of
ResidentialAddress records than the number of rows in the csv.
Apart from that, all good.<commit_after>"""
Import Neath Port Talbot
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import (
BaseAddressCsvImporter,
import_polling_station_shapefiles
)
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Neath Port Talbot
"""
council_id = 'W06000012'
addresses_name = 'polling_properties.csv'
stations_name = 'polling_stations.shp'
srid = 27700
elections = [
'pcc.2016-05-05',
'naw.c.2016-05-05',
'naw.r.2016-05-05',
'ref.2016-06-23'
]
# polling stations provided as shape files, not csv
def import_polling_stations(self):
import_polling_station_shapefiles(self)
def station_record_to_dict(self, record):
# format address
address_parts = []
address_parts.append(record[2])
for i in range(4,8):
if record[i].strip():
address_parts.append(record[i].strip())
address = "\n".join(address_parts)
return {
'internal_council_id': record[1],
'postcode' : record[8],
'address' : address
}
def address_record_to_dict(self, record):
# format address
if record.substreetn:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.substreetn)
address1 = address1.strip()
address2 = record.streetname
else:
address1 = "%s %s %s" % (record.housename, record.housenumbe, record.streetname)
address1 = address1.strip()
address2 = ""
address = "\n".join([
address1,
address2,
record.locality,
record.town
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
return {
'address' : address,
'postcode' : record.housepostc,
'polling_station_id': record.pollingdis
}
|
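A small worked example of the blank-line collapse in address_record_to_dict above (field values invented): joining an empty address2 leaves a double newline, which the while loop squeezes out.
address = "\n".join(["12 High Street", "", "Skewen", "Neath"])
while "\n\n" in address:
    address = address.replace("\n\n", "\n")
print(repr(address))  # '12 High Street\nSkewen\nNeath'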
|
eac05b2e29b667fe80ba925b723d8133970725ac
|
molo/profiles/migrations/0002_userprofile_auth_service_uuid.py
|
molo/profiles/migrations/0002_userprofile_auth_service_uuid.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-17 16:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_squashed_0021_remove_uuid_null'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='auth_service_uuid',
field=models.UUIDField(null=True, unique=True),
),
]
|
Add migration for UserProfile.auth_service_uuid field
|
Add migration for UserProfile.auth_service_uuid field
|
Python
|
bsd-2-clause
|
praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo
|
Add migration for UserProfile.auth_service_uuid field
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-17 16:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_squashed_0021_remove_uuid_null'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='auth_service_uuid',
field=models.UUIDField(null=True, unique=True),
),
]
|
<commit_before><commit_msg>Add migration for UserProfile.auth_service_uuid field<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-17 16:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_squashed_0021_remove_uuid_null'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='auth_service_uuid',
field=models.UUIDField(null=True, unique=True),
),
]
|
Add migration for UserProfile.auth_service_uuid field# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-17 16:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_squashed_0021_remove_uuid_null'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='auth_service_uuid',
field=models.UUIDField(null=True, unique=True),
),
]
|
<commit_before><commit_msg>Add migration for UserProfile.auth_service_uuid field<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-17 16:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_squashed_0021_remove_uuid_null'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='auth_service_uuid',
field=models.UUIDField(null=True, unique=True),
),
]
|
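The migration above corresponds to a model field roughly like the sketch below. Since the column is nullable, existing UserProfile rows simply keep NULL after applying it (for example with python manage.py migrate profiles), and on PostgreSQL and most other backends a unique constraint permits multiple NULLs, so a populated profiles table is not a problem.
from django.db import models

class UserProfile(models.Model):
    # as implied by the AddField operation above; other fields omitted
    auth_service_uuid = models.UUIDField(null=True, unique=True)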
|
46eb5dc81ff0c26b2c9ff785a8b9aadea07b6aaa
|
py/next-greater-element-i.py
|
py/next-greater-element-i.py
|
from collections import defaultdict
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
next_greater = defaultdict(lambda: -1)
stack = []
for n in nums:
while stack and stack[-1] < n:
next_greater[stack[-1]] = n
stack.pop()
stack.append(n)
return map(next_greater.__getitem__, findNums)
|
Add py solution for 496. Next Greater Element I
|
Add py solution for 496. Next Greater Element I
496. Next Greater Element I: https://leetcode.com/problems/next-greater-element-i/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 496. Next Greater Element I
496. Next Greater Element I: https://leetcode.com/problems/next-greater-element-i/
|
from collections import defaultdict
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
next_greater = defaultdict(lambda: -1)
stack = []
for n in nums:
while stack and stack[-1] < n:
next_greater[stack[-1]] = n
stack.pop()
stack.append(n)
return map(next_greater.__getitem__, findNums)
|
<commit_before><commit_msg>Add py solution for 496. Next Greater Element I
496. Next Greater Element I: https://leetcode.com/problems/next-greater-element-i/<commit_after>
|
from collections import defaultdict
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
next_greater = defaultdict(lambda: -1)
stack = []
for n in nums:
while stack and stack[-1] < n:
next_greater[stack[-1]] = n
stack.pop()
stack.append(n)
return map(next_greater.__getitem__, findNums)
|
Add py solution for 496. Next Greater Element I
496. Next Greater Element I: https://leetcode.com/problems/next-greater-element-i/from collections import defaultdict
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
next_greater = defaultdict(lambda: -1)
stack = []
for n in nums:
while stack and stack[-1] < n:
next_greater[stack[-1]] = n
stack.pop()
stack.append(n)
return map(next_greater.__getitem__, findNums)
|
<commit_before><commit_msg>Add py solution for 496. Next Greater Element I
496. Next Greater Element I: https://leetcode.com/problems/next-greater-element-i/<commit_after>from collections import defaultdict
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
next_greater = defaultdict(lambda: -1)
stack = []
for n in nums:
while stack and stack[-1] < n:
next_greater[stack[-1]] = n
stack.pop()
stack.append(n)
return map(next_greater.__getitem__, findNums)
|
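A worked example for the solution above, using the classic inputs for this problem (under Python 2, where map returns a list): the stack stays decreasing, and each popped value records the element that popped it as its next greater element.
# nums = [1, 3, 4, 2]: 1 is popped by 3, 3 is popped by 4, 4 and 2 are never popped
print(Solution().nextGreaterElement([4, 1, 2], [1, 3, 4, 2]))  # [-1, 3, -1]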
|
c02d3e1b17549f1047fc374dafd9b9613c1e35fd
|
src/util/deleteLongLines.py
|
src/util/deleteLongLines.py
|
import os, sys
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
if len(sys.argv) != 3:
errorExit("Usage: {} MAXLEN FILE\n".format(os.path.basename(sys.argv[0])))
maxlen = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
deletedLines = 0
with (open(fileName, 'rt')) as FILE:
for line in FILE:
items = line.split()
if len(items) <= maxlen:
sys.stdout.write(line)
else:
deletedLines += 1
sys.stderr.write("{}: {} lines deleted.\n".format(sys.argv[0], deletedLines))
return 0
if __name__ == "__main__":
main()
|
Add a Python script to delete long lines. We will use it for the Netflix data.
|
Add a Python script to delete long lines. We will use it for the Netflix data.
|
Python
|
apache-2.0
|
jdebrabant/parallel_arules,jdebrabant/parallel_arules,jdebrabant/parallel_arules,jdebrabant/parallel_arules
|
Add a Python script to delete long lines. We will use it for the Netflix data.
|
import os, sys
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
if len(sys.argv) != 3:
errorExit("Usage: {} MAXLEN FILE\n".format(os.path.basename(sys.argv[0])))
maxlen = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
deletedLines = 0
with (open(fileName, 'rt')) as FILE:
for line in FILE:
items = line.split()
if len(items) <= maxlen:
sys.stdout.write(line)
else:
deletedLines += 1
sys.stderr.write("{}: {} lines deleted.\n".format(sys.argv[0], deletedLines))
return 0
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a Python script to delete long lines. We will use it for the Netflix data.<commit_after>
|
import os, sys
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
if len(sys.argv) != 3:
errorExit("Usage: {} MAXLEN FILE\n".format(os.path.basename(sys.argv[0])))
maxlen = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
deletedLines = 0
with (open(fileName, 'rt')) as FILE:
for line in FILE:
items = line.split()
if len(items) <= maxlen:
sys.stdout.write(line)
else:
deletedLines += 1
sys.stderr.write("{}: {} lines deleted.\n".format(sys.argv[0], deletedLines))
return 0
if __name__ == "__main__":
main()
|
Add a Python script to delete long lines. We will use it for the Netflix data.import os, sys
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
if len(sys.argv) != 3:
errorExit("Usage: {} MAXLEN FILE\n".format(os.path.basename(sys.argv[0])))
maxlen = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
deletedLines = 0
with (open(fileName, 'rt')) as FILE:
for line in FILE:
items = line.split()
if len(items) <= maxlen:
sys.stdout.write(line)
else:
deletedLines += 1
sys.stderr.write("{}: {} lines deleted.\n".format(sys.argv[0], deletedLines))
return 0
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a Python script to delete long lines. We will use it for the Netflix data.<commit_after>import os, sys
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
if len(sys.argv) != 3:
errorExit("Usage: {} MAXLEN FILE\n".format(os.path.basename(sys.argv[0])))
maxlen = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
deletedLines = 0
with (open(fileName, 'rt')) as FILE:
for line in FILE:
items = line.split()
if len(items) <= maxlen:
sys.stdout.write(line)
else:
deletedLines += 1
sys.stderr.write("{}: {} lines deleted.\n".format(sys.argv[0], deletedLines))
return 0
if __name__ == "__main__":
main()
|
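Usage sketch for the script above (file names are placeholders): each line is whitespace-split and kept only if it has at most MAXLEN items; survivors go to stdout and the deletion count to stderr.
#   python deleteLongLines.py 100 transactions.dat > filtered.dat
# stderr then reports something like "deleteLongLines.py: 42 lines deleted."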
|
3275fe6bc958e2001ecbbb064d785199f9165814
|
dataactcore/migrations/versions/d998c46bacd9_merge_job_err_with_add_fsrs.py
|
dataactcore/migrations/versions/d998c46bacd9_merge_job_err_with_add_fsrs.py
|
"""merge job_err with add_fsrs
Revision ID: d998c46bacd9
Revises: 361fbffcf08b, caa6895e7450
Create Date: 2016-08-26 19:09:39.554574
"""
# revision identifiers, used by Alembic.
revision = 'd998c46bacd9'
down_revision = ('361fbffcf08b', 'caa6895e7450')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
pass
def downgrade_data_broker():
pass
|
Add merge migration to resolve alembic conflict
|
Add merge migration to resolve alembic conflict
|
Python
|
cc0-1.0
|
fedspendingtransparency/data-act-broker-backend,chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend,chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend,fedspendingtransparency/data-act-broker-backend
|
Add merge migration to resolve alembic conflict
|
"""merge job_err with add_fsrs
Revision ID: d998c46bacd9
Revises: 361fbffcf08b, caa6895e7450
Create Date: 2016-08-26 19:09:39.554574
"""
# revision identifiers, used by Alembic.
revision = 'd998c46bacd9'
down_revision = ('361fbffcf08b', 'caa6895e7450')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
pass
def downgrade_data_broker():
pass
|
<commit_before><commit_msg>Add merge migration to resolve alembic conflict<commit_after>
|
"""merge job_err with add_fsrs
Revision ID: d998c46bacd9
Revises: 361fbffcf08b, caa6895e7450
Create Date: 2016-08-26 19:09:39.554574
"""
# revision identifiers, used by Alembic.
revision = 'd998c46bacd9'
down_revision = ('361fbffcf08b', 'caa6895e7450')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
pass
def downgrade_data_broker():
pass
|
Add merge migration to resolve alembic conflict"""merge job_err with add_fsrs
Revision ID: d998c46bacd9
Revises: 361fbffcf08b, caa6895e7450
Create Date: 2016-08-26 19:09:39.554574
"""
# revision identifiers, used by Alembic.
revision = 'd998c46bacd9'
down_revision = ('361fbffcf08b', 'caa6895e7450')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
pass
def downgrade_data_broker():
pass
|
<commit_before><commit_msg>Add merge migration to resolve alembic conflict<commit_after>"""merge job_err with add_fsrs
Revision ID: d998c46bacd9
Revises: 361fbffcf08b, caa6895e7450
Create Date: 2016-08-26 19:09:39.554574
"""
# revision identifiers, used by Alembic.
revision = 'd998c46bacd9'
down_revision = ('361fbffcf08b', 'caa6895e7450')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
pass
def downgrade_data_broker():
pass
|
|
aea5a46a90fa01d429305e9abecb124fb2b22ae0
|
src/iterations/exercise4.py
|
src/iterations/exercise4.py
|
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'
#
def print_letters_with_for( word ):
for w in word:
print w
print '\n'
def print_letters_with_while( word ):
length = len( word )
i = 0
while i < length:
print word[i]
i += 1
print '\n'
def main( ):
word = raw_input('>>')
while True:
if word != 'done!':
print_letters_with_for( word )
print_letters_with_while( word )
word = raw_input('>>')
else:
break
quit(0)
main( )
|
Print every single letter of a word with 'for' iteration and with 'while' iteration
|
Print every single letter of a word with 'for' iteration and with 'while' iteration
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'
|
Python
|
mit
|
let42/python-course
|
Print every single letter of a word with 'for' iteration and with 'while' iteration
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'
|
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'
#
def print_letters_with_for( word ):
for w in word:
print w
print '\n'
def print_letters_with_while( word ):
length = len( word )
i = 0
while i < length:
print word[i]
i += 1
print '\n'
def main( ):
word = raw_input('>>')
while True:
if word != 'done!':
print_letters_with_for( word )
print_letters_with_while( word )
word = raw_input('>>')
else:
break
quit(0)
main( )
|
<commit_before><commit_msg>Print every single letter of a word with 'for' iteration and with 'while' iteration
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'<commit_after>
|
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'
#
def print_letters_with_for( word ):
for w in word:
print w
print '\n'
def print_letters_with_while( word ):
length = len( word )
i = 0
while i < length:
print word[i]
i += 1
print '\n'
def main( ):
word = raw_input('>>')
while True:
if word != 'done!':
print_letters_with_for( word )
print_letters_with_while( word )
word = raw_input('>>')
else:
break
quit(0)
main( )
|
Print every single letter of a word with 'for' iteration and with 'while' iteration
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'
#
def print_letters_with_for( word ):
for w in word:
print w
print '\n'
def print_letters_with_while( word ):
length = len( word )
i = 0
while i < length:
print word[i]
i += 1
print '\n'
def main( ):
word = raw_input('>>')
while True:
if word != 'done!':
print_letters_with_for( word )
print_letters_with_while( word )
word = raw_input('>>')
else:
break
quit(0)
main( )
|
<commit_before><commit_msg>Print every single letter of a word with 'for' iteration and with 'while' iteration
# Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'<commit_after># Print every single letter of a word with 'for' iteration and with 'while' iteration
# Also create a method for all single iteration required. Finally with main method
# require a word to be printed, until isn't typed 'done!'
#
def print_letters_with_for( word ):
for w in word:
print w
print '\n'
def print_letters_with_while( word ):
length = len( word )
i = 0
while i < length:
print word[i]
i += 1
print '\n'
def main( ):
word = raw_input('>>')
while True:
if word != 'done!':
print_letters_with_for( word )
print_letters_with_while( word )
word = raw_input('>>')
else:
break
quit(0)
main( )
|
|
110de31dd7e2d3727cc4b9c8bd606f3367e5eb13
|
tests/subprocessdata/sigchild_ignore.py
|
tests/subprocessdata/sigchild_ignore.py
|
import os
import signal, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
log = open('/var/tmp/subprocess', 'w')
log.write(os.path.join(os.path.dirname(__file__), '..', '..'))
log.close()
from kitchen.pycompat27.subprocess import _subprocess as subprocess
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
|
Add the new subprocess test script
|
Add the new subprocess test script
|
Python
|
lgpl-2.1
|
fedora-infra/kitchen,fedora-infra/kitchen
|
Add the new subprocess test script
|
import os
import signal, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
log = open('/var/tmp/subprocess', 'w')
log.write(os.path.join(os.path.dirname(__file__), '..', '..'))
log.close()
from kitchen.pycompat27.subprocess import _subprocess as subprocess
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
|
<commit_before><commit_msg>Add the new subprocess test script<commit_after>
|
import os
import signal, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
log = open('/var/tmp/subprocess', 'w')
log.write(os.path.join(os.path.dirname(__file__), '..', '..'))
log.close()
from kitchen.pycompat27.subprocess import _subprocess as subprocess
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
|
Add the new subprocess test scriptimport os
import signal, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
log = open('/var/tmp/subprocess', 'w')
log.write(os.path.join(os.path.dirname(__file__), '..', '..'))
log.close()
from kitchen.pycompat27.subprocess import _subprocess as subprocess
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
|
<commit_before><commit_msg>Add the new subprocess test script<commit_after>import os
import signal, sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
log = open('/var/tmp/subprocess', 'w')
log.write(os.path.join(os.path.dirname(__file__), '..', '..'))
log.close()
from kitchen.pycompat27.subprocess import _subprocess as subprocess
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
|
|
3f758781c9c42b5c3bd14f70f70caa555d497d88
|
rsplayer_logs.py
|
rsplayer_logs.py
|
"""Helper to process RSPlayer logs for Programme Return.
Put RSPlayer logs into some folder, change to it, open a Python prompt and paste
in this code. Be sure that the logs contain only the data you want (ie trim the
start and end to get rid of data outside of the reporting period).
XXX To do:
"""
import csv
import glob
import re
import sets
headings = ["Date", "Time", "Artist", "Title"]
pat = re.compile('(?P<D>[0-9]{2})/(?P<M>[0-9]{2})/(?P<Y>[0-9]{4}) (?P<h>[0-9]{2}):(?P<m>[0-9]{2}):(?P<s>[0-9]{2}) SONG "(?P<a>[^-"]+) - (?P<t>[^"]+)"')
def make_row(m):
if m is None:
return None
d = m.groupdict()
return [
"%(Y)s-%(M)s-%(D)s" % d,
"%(h)s:%(m)s:%(s)s" % d,
d['a'],
d['t'],
]
def fix(r):
if re.match('[0-9]+$', r[2]):
if ' _ ' in r[3]:
a, t = r[3].split(' _ ', 1)
else:
no_title.add((r[2], r[3]))
a, t = r[3], '?'
r[2] = a
r[3] = t
return r
no_title = sets.Set()
all_data = []
for fname in glob.glob('log*.txt'):
f = open(fname, 'r')
for line in f:
line = line.strip()
if not line:
continue
match = pat.match(line)
if not match:
continue
all_data.append(fix(make_row(match)))
f.close()
print 'Processed', fname
all_data.sort()
d1 = all_data[0][0].replace('-', '')
d2 = all_data[-1][0].replace('-', '')
g = open('log-%s-%s.csv' % (d1, d2), 'wb')
w = csv.writer(g, dialect='excel')
w.writerow(headings)
w.writerows(all_data)
g.close()
no_title = list(no_title)
no_title.sort()
if no_title:
print 'Tracks with no title (%d)' % (len(no_title),)
for tpl in no_title:
print '%s - %s' % tpl
else:
print 'No tracks found with no title.'
|
Convert RSPlayer 2 logs into CSV for programme return.
|
Convert RSPlayer 2 logs into CSV for programme return.
|
Python
|
mit
|
radio-st-austell-bay/helpers
|
Convert RSPlayer 2 logs into CSV for programme return.
|
"""Helper to process RSPlayer logs for Programme Return.
Put RSPlayer logs into some folder, change to it, open a Python prompt and paste
in this code. Be sure that the logs contain only the data you want (ie trim the
start and end to get rid of data outside of the reporting period).
XXX To do:
"""
import csv
import glob
import re
import sets
headings = ["Date", "Time", "Artist", "Title"]
pat = re.compile('(?P<D>[0-9]{2})/(?P<M>[0-9]{2})/(?P<Y>[0-9]{4}) (?P<h>[0-9]{2}):(?P<m>[0-9]{2}):(?P<s>[0-9]{2}) SONG "(?P<a>[^-"]+) - (?P<t>[^"]+)"')
def make_row(m):
if m is None:
return None
d = m.groupdict()
return [
"%(Y)s-%(M)s-%(D)s" % d,
"%(h)s:%(m)s:%(s)s" % d,
d['a'],
d['t'],
]
def fix(r):
if re.match('[0-9]+$', r[2]):
if ' _ ' in r[3]:
a, t = r[3].split(' _ ', 1)
else:
no_title.add((r[2], r[3]))
a, t = r[3], '?'
r[2] = a
r[3] = t
return r
no_title = sets.Set()
all_data = []
for fname in glob.glob('log*.txt'):
f = open(fname, 'r')
for line in f:
line = line.strip()
if not line:
continue
match = pat.match(line)
if not match:
continue
all_data.append(fix(make_row(match)))
f.close()
print 'Processed', fname
all_data.sort()
d1 = all_data[0][0].replace('-', '')
d2 = all_data[-1][0].replace('-', '')
g = open('log-%s-%s.csv' % (d1, d2), 'wb')
w = csv.writer(g, dialect='excel')
w.writerow(headings)
w.writerows(all_data)
g.close()
no_title = list(no_title)
no_title.sort()
if no_title:
print 'Tracks with no title (%d)' % (len(no_title),)
for tpl in no_title:
print '%s - %s' % tpl
else:
print 'No tracks found with no title.'
|
<commit_before><commit_msg>Convert RSPlayer 2 logs into CSV for programme return.<commit_after>
|
"""Helper to process RSPlayer logs for Programme Return.
Put RSPlayer logs into some folder, change to it, open a Python prompt and paste
in this code. Be sure that the logs contain only the data you want (ie trim the
start and end to get rid of data outside of the reporting period).
XXX To do:
"""
import csv
import glob
import re
import sets
headings = ["Date", "Time", "Artist", "Title"]
pat = re.compile('(?P<D>[0-9]{2})/(?P<M>[0-9]{2})/(?P<Y>[0-9]{4}) (?P<h>[0-9]{2}):(?P<m>[0-9]{2}):(?P<s>[0-9]{2}) SONG "(?P<a>[^-"]+) - (?P<t>[^"]+)"')
def make_row(m):
if m is None:
return None
d = m.groupdict()
return [
"%(Y)s-%(M)s-%(D)s" % d,
"%(h)s:%(m)s:%(s)s" % d,
d['a'],
d['t'],
]
def fix(r):
if re.match('[0-9]+$', r[2]):
if ' _ ' in r[3]:
a, t = r[3].split(' _ ', 1)
else:
no_title.add((r[2], r[3]))
a, t = r[3], '?'
r[2] = a
r[3] = t
return r
no_title = sets.Set()
all_data = []
for fname in glob.glob('log*.txt'):
f = open(fname, 'r')
for line in f:
line = line.strip()
if not line:
continue
match = pat.match(line)
if not match:
continue
all_data.append(fix(make_row(match)))
f.close()
print 'Processed', fname
all_data.sort()
d1 = all_data[0][0].replace('-', '')
d2 = all_data[-1][0].replace('-', '')
g = open('log-%s-%s.csv' % (d1, d2), 'wb')
w = csv.writer(g, dialect='excel')
w.writerow(headings)
w.writerows(all_data)
g.close()
no_title = list(no_title)
no_title.sort()
if no_title:
print 'Tracks with no title (%d)' % (len(no_title),)
for tpl in no_title:
print '%s - %s' % tpl
else:
print 'No tracks found with no title.'
|
Convert RSPlayer 2 logs into CSV for programme return."""Helper to process RSPlayer logs for Programme Return.
Put RSPlayer logs into some folder, change to it, open a Python prompt and paste
in this code. Be sure that the logs contain only the data you want (ie trim the
start and end to get rid of data outside of the reporting period).
XXX To do:
"""
import csv
import glob
import re
import sets
headings = ["Date", "Time", "Artist", "Title"]
pat = re.compile('(?P<D>[0-9]{2})/(?P<M>[0-9]{2})/(?P<Y>[0-9]{4}) (?P<h>[0-9]{2}):(?P<m>[0-9]{2}):(?P<s>[0-9]{2}) SONG "(?P<a>[^-"]+) - (?P<t>[^"]+)"')
def make_row(m):
if m is None:
return None
d = m.groupdict()
return [
"%(Y)s-%(M)s-%(D)s" % d,
"%(h)s:%(m)s:%(s)s" % d,
d['a'],
d['t'],
]
def fix(r):
if re.match('[0-9]+$', r[2]):
if ' _ ' in r[3]:
a, t = r[3].split(' _ ', 1)
else:
no_title.add((r[2], r[3]))
a, t = r[3], '?'
r[2] = a
r[3] = t
return r
no_title = sets.Set()
all_data = []
for fname in glob.glob('log*.txt'):
f = open(fname, 'r')
for line in f:
line = line.strip()
if not line:
continue
match = pat.match(line)
if not match:
continue
all_data.append(fix(make_row(match)))
f.close()
print 'Processed', fname
all_data.sort()
d1 = all_data[0][0].replace('-', '')
d2 = all_data[-1][0].replace('-', '')
g = open('log-%s-%s.csv' % (d1, d2), 'wb')
w = csv.writer(g, dialect='excel')
w.writerow(headings)
w.writerows(all_data)
g.close()
no_title = list(no_title)
no_title.sort()
if no_title:
print 'Tracks with no title (%d)' % (len(no_title),)
for tpl in no_title:
print '%s - %s' % tpl
else:
print 'No tracks found with no title.'
|
<commit_before><commit_msg>Convert RSPlayer 2 logs into CSV for programme return.<commit_after>"""Helper to process RSPlayer logs for Programme Return.
Put RSPlayer logs into some folder, change to it, open a Python prompt and paste
in this code. Be sure that the logs contain only the data you want (ie trim the
start and end to get rid of data outside of the reporting period).
XXX To do:
"""
import csv
import glob
import re
import sets
headings = ["Date", "Time", "Artist", "Title"]
pat = re.compile('(?P<D>[0-9]{2})/(?P<M>[0-9]{2})/(?P<Y>[0-9]{4}) (?P<h>[0-9]{2}):(?P<m>[0-9]{2}):(?P<s>[0-9]{2}) SONG "(?P<a>[^-"]+) - (?P<t>[^"]+)"')
def make_row(m):
if m is None:
return None
d = m.groupdict()
return [
"%(Y)s-%(M)s-%(D)s" % d,
"%(h)s:%(m)s:%(s)s" % d,
d['a'],
d['t'],
]
def fix(r):
if re.match('[0-9]+$', r[2]):
if ' _ ' in r[3]:
a, t = r[3].split(' _ ', 1)
else:
no_title.add((r[2], r[3]))
a, t = r[3], '?'
r[2] = a
r[3] = t
return r
no_title = sets.Set()
all_data = []
for fname in glob.glob('log*.txt'):
f = open(fname, 'r')
for line in f:
line = line.strip()
if not line:
continue
match = pat.match(line)
if not match:
continue
all_data.append(fix(make_row(match)))
f.close()
print 'Processed', fname
all_data.sort()
d1 = all_data[0][0].replace('-', '')
d2 = all_data[-1][0].replace('-', '')
g = open('log-%s-%s.csv' % (d1, d2), 'wb')
w = csv.writer(g, dialect='excel')
w.writerow(headings)
w.writerows(all_data)
g.close()
no_title = list(no_title)
no_title.sort()
if no_title:
print 'Tracks with no title (%d)' % (len(no_title),)
for tpl in no_title:
print '%s - %s' % tpl
else:
print 'No tracks found with no title.'
|
|
7cd6515e7a06997bbdb24908accfd503b95824be
|
src/pythonic/test_primes.py
|
src/pythonic/test_primes.py
|
import pytest
import itertools
from main import Primes, Sieve
def test_sieve_limit():
limit = 10000
with Sieve(limit) as s:
assert s.upper_bound() >= limit
def test_upper_bound_exception():
limit = 10
with Sieve(limit) as s:
with pytest.raises(IndexError):
s.is_prime(101)
def test_zero_is_not_in_prime_list():
with Primes() as p:
n = 20
assert 0 not in list(itertools.islice(p, n))
def test_number_primes_asked_is_given():
with Primes() as p:
n = 20
assert len(list(itertools.islice(p, n))) == n
|
Add basic Python tests for Primes and Sieve
|
Add basic Python tests for Primes and Sieve
* Make sure an exception is thrown if upper_bounds is exceeded
* 0 is not in primes list
* Number of primes asked for is given
* Sieve upper bounds >= limit
|
Python
|
cc0-1.0
|
Michael-F-Bryan/rust-ffi-guide,Michael-F-Bryan/rust-ffi-guide,Michael-F-Bryan/rust-ffi-guide
|
Add basic Python tests for Primes and Sieve
* Make sure an exception is thrown if upper_bounds is exceeded
* 0 is not in primes list
* Number of primes asked for is given
* Sieve upper bounds >= limit
|
import pytest
import itertools
from main import Primes, Sieve
def test_sieve_limit():
limit = 10000
with Sieve(limit) as s:
assert s.upper_bound() >= limit
def test_upper_bound_exception():
limit = 10
with Sieve(limit) as s:
with pytest.raises(IndexError):
s.is_prime(101)
def test_zero_is_not_in_prime_list():
with Primes() as p:
n = 20
assert 0 not in list(itertools.islice(p, n))
def test_number_primes_asked_is_given():
with Primes() as p:
n = 20
assert len(list(itertools.islice(p, n))) == n
|
<commit_before><commit_msg>Add basic Python tests for Primes and Sieve
* Make sure an exception is thrown if upper_bounds is exceeded
* 0 is not in primes list
* Number of prives asked for is given
* Sieve upper bounds >= limit<commit_after>
|
import pytest
import itertools
from main import Primes, Sieve
def test_sieve_limit():
limit = 10000
with Sieve(limit) as s:
assert s.upper_bound() >= limit
def test_upper_bound_exception():
limit = 10
with Sieve(limit) as s:
with pytest.raises(IndexError):
s.is_prime(101)
def test_zero_is_not_in_prime_list():
with Primes() as p:
n = 20
assert 0 not in list(itertools.islice(p, n))
def test_number_primes_asked_is_given():
with Primes() as p:
n = 20
assert len(list(itertools.islice(p, n))) == n
|
Add basic Python tests for Primes and Sieve
* Make sure an exception is thrown if upper_bounds is exceeded
* 0 is not in primes list
* Number of prives asked for is given
* Sieve upper bounds >= limitimport pytest
import itertools
from main import Primes, Sieve
def test_sieve_limit():
limit = 10000
with Sieve(limit) as s:
assert s.upper_bound() >= limit
def test_upper_bound_exception():
limit = 10
with Sieve(limit) as s:
with pytest.raises(IndexError):
s.is_prime(101)
def test_zero_is_not_in_prime_list():
with Primes() as p:
n = 20
assert 0 not in list(itertools.islice(p, n))
def test_number_primes_asked_is_given():
with Primes() as p:
n = 20
assert len(list(itertools.islice(p, n))) == n
|
<commit_before><commit_msg>Add basic Python tests for Primes and Sieve
* Make sure an exception is thrown if upper_bounds is exceeded
* 0 is not in primes list
* Number of prives asked for is given
* Sieve upper bounds >= limit<commit_after>import pytest
import itertools
from main import Primes, Sieve
def test_sieve_limit():
limit = 10000
with Sieve(limit) as s:
assert s.upper_bound() >= limit
def test_upper_bound_exception():
limit = 10
with Sieve(limit) as s:
with pytest.raises(IndexError):
s.is_prime(101)
def test_zero_is_not_in_prime_list():
with Primes() as p:
n = 20
assert 0 not in list(itertools.islice(p, n))
def test_number_primes_asked_is_given():
with Primes() as p:
n = 20
assert len(list(itertools.islice(p, n))) == n
|
|
740e1c1171a3c9f50a0f69a6154acf840f52652f
|
Trie.py
|
Trie.py
|
#! /usr/bin/env python
# vim: set encoding=utf-8
from ctypes import *
libtrie = cdll.LoadLibrary("./libtrie.so")
libtrie.trie_lookup.restype = c_char_p
class TrieException(Exception):
pass
class Trie(object):
def __init__(self, filename):
self.ptr = libtrie.trie_load(filename)
if self.ptr == 0:
raise TrieException("Failed to load library")
def lookup(self, key):
res = libtrie.trie_lookup(self.ptr, c_char_p(key))
if res:
return [s.decode('utf8') for s in str(res).split('\n')]
else:
return []
t = Trie('prijmeni.trie')
for s in t.lookup('Sedlář'):
print s
print '%s' % t.lookup('blah')
|
Add a python binding via ctypes
|
Add a python binding via ctypes
|
Python
|
bsd-3-clause
|
lubomir/libtrie,lubomir/libtrie,lubomir/libtrie
|
Add a python binding via ctypes
|
#! /usr/bin/env python
# vim: set encoding=utf-8
from ctypes import *
libtrie = cdll.LoadLibrary("./libtrie.so")
libtrie.trie_lookup.restype = c_char_p
class TrieException(Exception):
pass
class Trie(object):
def __init__(self, filename):
self.ptr = libtrie.trie_load(filename)
if self.ptr == 0:
raise TrieException("Failed to load library")
def lookup(self, key):
res = libtrie.trie_lookup(self.ptr, c_char_p(key))
if res:
return [s.decode('utf8') for s in str(res).split('\n')]
else:
return []
t = Trie('prijmeni.trie')
for s in t.lookup('Sedlář'):
print s
print '%s' % t.lookup('blah')
|
<commit_before><commit_msg>Add a python binding via ctypes<commit_after>
|
#! /usr/bin/env python
# vim: set encoding=utf-8
from ctypes import *
libtrie = cdll.LoadLibrary("./libtrie.so")
libtrie.trie_lookup.restype = c_char_p
class TrieException(Exception):
pass
class Trie(object):
def __init__(self, filename):
self.ptr = libtrie.trie_load(filename)
if self.ptr == 0:
raise TrieException("Failed to load library")
def lookup(self, key):
res = libtrie.trie_lookup(self.ptr, c_char_p(key))
if res:
return [s.decode('utf8') for s in str(res).split('\n')]
else:
return []
t = Trie('prijmeni.trie')
for s in t.lookup('Sedlář'):
print s
print '%s' % t.lookup('blah')
|
Add a python binding via ctypes#! /usr/bin/env python
# vim: set encoding=utf-8
from ctypes import *
libtrie = cdll.LoadLibrary("./libtrie.so")
libtrie.trie_lookup.restype = c_char_p
class TrieException(Exception):
pass
class Trie(object):
def __init__(self, filename):
self.ptr = libtrie.trie_load(filename)
if self.ptr == 0:
raise TrieException("Failed to load library")
def lookup(self, key):
res = libtrie.trie_lookup(self.ptr, c_char_p(key))
if res:
return [s.decode('utf8') for s in str(res).split('\n')]
else:
return []
t = Trie('prijmeni.trie')
for s in t.lookup('Sedlář'):
print s
print '%s' % t.lookup('blah')
|
<commit_before><commit_msg>Add a python binding via ctypes<commit_after>#! /usr/bin/env python
# vim: set encoding=utf-8
from ctypes import *
libtrie = cdll.LoadLibrary("./libtrie.so")
libtrie.trie_lookup.restype = c_char_p
class TrieException(Exception):
pass
class Trie(object):
def __init__(self, filename):
self.ptr = libtrie.trie_load(filename)
if self.ptr == 0:
raise TrieException("Failed to load library")
def lookup(self, key):
res = libtrie.trie_lookup(self.ptr, c_char_p(key))
if res:
return [s.decode('utf8') for s in str(res).split('\n')]
else:
return []
t = Trie('prijmeni.trie')
for s in t.lookup('Sedlář'):
print s
print '%s' % t.lookup('blah')
|
|
61e542ab3fab4ef15cff8e1d5189652f8e10b5cf
|
scriptOffsets.py
|
scriptOffsets.py
|
from msc import *
from sys import argv
with open(argv[1], 'rb') as f:
mscFile = MscFile()
mscFile.readFromFile(f)
if len(argv) > 2:
nums = [int(i,0) for i in argv[2:]]
for num in nums:
for i,script in enumerate(mscFile):
if script.bounds[0] == num:
print('Offset %i = script_%i' % (num,i))
else:
for i,script in enumerate(mscFile):
print('Offset %X = script_%i' % (script.bounds[0],i))
|
Add dev tool script to print off script names from offsets
|
Add dev tool script to print off script names from offsets
|
Python
|
mit
|
jam1garner/pymsc
|
Add dev tool script to print off script names from offsets
|
from msc import *
from sys import argv
with open(argv[1], 'rb') as f:
mscFile = MscFile()
mscFile.readFromFile(f)
if len(argv) > 2:
nums = [int(i,0) for i in argv[2:]]
for num in nums:
for i,script in enumerate(mscFile):
if script.bounds[0] == num:
print('Offset %i = script_%i' % (num,i))
else:
for i,script in enumerate(mscFile):
print('Offset %X = script_%i' % (script.bounds[0],i))
|
<commit_before><commit_msg>Add dev tool script to print off script names from offsets<commit_after>
|
from msc import *
from sys import argv
with open(argv[1], 'rb') as f:
mscFile = MscFile()
mscFile.readFromFile(f)
if len(argv) > 2:
nums = [int(i,0) for i in argv[2:]]
for num in nums:
for i,script in enumerate(mscFile):
if script.bounds[0] == num:
print('Offset %i = script_%i' % (num,i))
else:
for i,script in enumerate(mscFile):
print('Offset %X = script_%i' % (script.bounds[0],i))
|
Add dev tool script to print off script names from offsetsfrom msc import *
from sys import argv
with open(argv[1], 'rb') as f:
mscFile = MscFile()
mscFile.readFromFile(f)
if len(argv) > 2:
nums = [int(i,0) for i in argv[2:]]
for num in nums:
for i,script in enumerate(mscFile):
if script.bounds[0] == num:
print('Offset %i = script_%i' % (num,i))
else:
for i,script in enumerate(mscFile):
print('Offset %X = script_%i' % (script.bounds[0],i))
|
<commit_before><commit_msg>Add dev tool script to print off script names from offsets<commit_after>from msc import *
from sys import argv
with open(argv[1], 'rb') as f:
mscFile = MscFile()
mscFile.readFromFile(f)
if len(argv) > 2:
nums = [int(i,0) for i in argv[2:]]
for num in nums:
for i,script in enumerate(mscFile):
if script.bounds[0] == num:
print('Offset %i = script_%i' % (num,i))
else:
for i,script in enumerate(mscFile):
print('Offset %X = script_%i' % (script.bounds[0],i))
|
|
ef32dccfe3df84b3619cf200463a6fa7d08e1bae
|
anydo/lib/tests/test_error.py
|
anydo/lib/tests/test_error.py
|
# -*- coding: utf-8 -*-
import unittest
from anydo import error
from anydo.lib import error as lib_error
class ErrorTests(unittest.TestCase):
def test_error_msg(self):
self.assertEqual(error.AnyDoAPIError('dummy', 'test').__str__(),
'(dummy): test')
def test_lib_error_msg(self):
self.assertEqual(lib_error.AnyDoAPIBinderError('test').__str__(),
'test')
|
Add unittest for anydo.error, anydo.lib.error.
|
Add unittest for anydo.error, anydo.lib.error.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net>
|
Python
|
mit
|
gvkalra/python-anydo,gvkalra/python-anydo
|
Add unittest for anydo.error, anydo.lib.error.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net>
|
# -*- coding: utf-8 -*-
import unittest
from anydo import error
from anydo.lib import error as lib_error
class ErrorTests(unittest.TestCase):
def test_error_msg(self):
self.assertEqual(error.AnyDoAPIError('dummy', 'test').__str__(),
'(dummy): test')
def test_lib_error_msg(self):
self.assertEqual(lib_error.AnyDoAPIBinderError('test').__str__(),
'test')
|
<commit_before><commit_msg>Add unittest for anydo.error, anydo.lib.error.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net><commit_after>
|
# -*- coding: utf-8 -*-
import unittest
from anydo import error
from anydo.lib import error as lib_error
class ErrorTests(unittest.TestCase):
def test_error_msg(self):
self.assertEqual(error.AnyDoAPIError('dummy', 'test').__str__(),
'(dummy): test')
def test_lib_error_msg(self):
self.assertEqual(lib_error.AnyDoAPIBinderError('test').__str__(),
'test')
|
Add unittest for anydo.error, anydo.lib.error.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net># -*- coding: utf-8 -*-
import unittest
from anydo import error
from anydo.lib import error as lib_error
class ErrorTests(unittest.TestCase):
def test_error_msg(self):
self.assertEqual(error.AnyDoAPIError('dummy', 'test').__str__(),
'(dummy): test')
def test_lib_error_msg(self):
self.assertEqual(lib_error.AnyDoAPIBinderError('test').__str__(),
'test')
|
<commit_before><commit_msg>Add unittest for anydo.error, anydo.lib.error.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net><commit_after># -*- coding: utf-8 -*-
import unittest
from anydo import error
from anydo.lib import error as lib_error
class ErrorTests(unittest.TestCase):
def test_error_msg(self):
self.assertEqual(error.AnyDoAPIError('dummy', 'test').__str__(),
'(dummy): test')
def test_lib_error_msg(self):
self.assertEqual(lib_error.AnyDoAPIBinderError('test').__str__(),
'test')
|
|
a406e198127d22944340a0c364112684556177f2
|
scripts/feature_selection.py
|
scripts/feature_selection.py
|
import pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import KFold
from sklearn.feature_selection import SelectFromModel
from utils.metrics import ndcg_scorer
path = '../data/processed/'
train_users = pd.read_csv(path + 'ohe_count_processed_train_users.csv')
y_train = train_users['country_destination']
train_users.drop('country_destination', axis=1, inplace=True)
train_users.drop('id', axis=1, inplace=True)
train_users = train_users.fillna(-1)
x_train = train_users.values
label_encoder = LabelEncoder()
encoded_y_train = label_encoder.fit_transform(y_train)
clf = XGBClassifier(n_estimators=1, nthread=-1, seed=42)
kf = KFold(len(x_train), n_folds=5, random_state=42)
score = cross_val_score(clf, x_train, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
class CustomXGB(XGBClassifier):
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
booster = self.booster()
scores = booster.get_fscore()
all_scores = pd.Series(np.zeros(x_train.shape[1]))
scores = pd.Series(scores)
scores.index = scores.index.map(lambda x: x[1:]).astype(int)
final_scores = all_scores + scores
importances = final_scores.fillna(0).values
return importances
custom = CustomXGB(n_estimators=1, seed=42, nthread=-1)
model = SelectFromModel(custom)
X_new = model.fit_transform(x_train, encoded_y_train)
score = cross_val_score(clf, X_new, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
|
Add structure to feature selection script
|
Add structure to feature selection script
|
Python
|
mit
|
davidgasquez/kaggle-airbnb
|
Add structure to feature selection script
|
import pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import KFold
from sklearn.feature_selection import SelectFromModel
from utils.metrics import ndcg_scorer
path = '../data/processed/'
train_users = pd.read_csv(path + 'ohe_count_processed_train_users.csv')
y_train = train_users['country_destination']
train_users.drop('country_destination', axis=1, inplace=True)
train_users.drop('id', axis=1, inplace=True)
train_users = train_users.fillna(-1)
x_train = train_users.values
label_encoder = LabelEncoder()
encoded_y_train = label_encoder.fit_transform(y_train)
clf = XGBClassifier(n_estimators=1, nthread=-1, seed=42)
kf = KFold(len(x_train), n_folds=5, random_state=42)
score = cross_val_score(clf, x_train, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
class CustomXGB(XGBClassifier):
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
booster = self.booster()
scores = booster.get_fscore()
all_scores = pd.Series(np.zeros(x_train.shape[1]))
scores = pd.Series(scores)
scores.index = scores.index.map(lambda x: x[1:]).astype(int)
final_scores = all_scores + scores
importances = final_scores.fillna(0).values
return importances
custom = CustomXGB(n_estimators=1, seed=42, nthread=-1)
model = SelectFromModel(custom)
X_new = model.fit_transform(x_train, encoded_y_train)
score = cross_val_score(clf, X_new, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
|
<commit_before><commit_msg>Add structure to feature selection script<commit_after>
|
import pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import KFold
from sklearn.feature_selection import SelectFromModel
from utils.metrics import ndcg_scorer
path = '../data/processed/'
train_users = pd.read_csv(path + 'ohe_count_processed_train_users.csv')
y_train = train_users['country_destination']
train_users.drop('country_destination', axis=1, inplace=True)
train_users.drop('id', axis=1, inplace=True)
train_users = train_users.fillna(-1)
x_train = train_users.values
label_encoder = LabelEncoder()
encoded_y_train = label_encoder.fit_transform(y_train)
clf = XGBClassifier(n_estimators=1, nthread=-1, seed=42)
kf = KFold(len(x_train), n_folds=5, random_state=42)
score = cross_val_score(clf, x_train, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
class CustomXGB(XGBClassifier):
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
booster = self.booster()
scores = booster.get_fscore()
all_scores = pd.Series(np.zeros(x_train.shape[1]))
scores = pd.Series(scores)
scores.index = scores.index.map(lambda x: x[1:]).astype(int)
final_scores = all_scores + scores
importances = final_scores.fillna(0).values
return importances
custom = CustomXGB(n_estimators=1, seed=42, nthread=-1)
model = SelectFromModel(custom)
X_new = model.fit_transform(x_train, encoded_y_train)
score = cross_val_score(clf, X_new, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
|
Add structure to feature selection scriptimport pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import KFold
from sklearn.feature_selection import SelectFromModel
from utils.metrics import ndcg_scorer
path = '../data/processed/'
train_users = pd.read_csv(path + 'ohe_count_processed_train_users.csv')
y_train = train_users['country_destination']
train_users.drop('country_destination', axis=1, inplace=True)
train_users.drop('id', axis=1, inplace=True)
train_users = train_users.fillna(-1)
x_train = train_users.values
label_encoder = LabelEncoder()
encoded_y_train = label_encoder.fit_transform(y_train)
clf = XGBClassifier(n_estimators=1, nthread=-1, seed=42)
kf = KFold(len(x_train), n_folds=5, random_state=42)
score = cross_val_score(clf, x_train, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
class CustomXGB(XGBClassifier):
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
booster = self.booster()
scores = booster.get_fscore()
all_scores = pd.Series(np.zeros(x_train.shape[1]))
scores = pd.Series(scores)
scores.index = scores.index.map(lambda x: x[1:]).astype(int)
final_scores = all_scores + scores
importances = final_scores.fillna(0).values
return importances
custom = CustomXGB(n_estimators=1, seed=42, nthread=-1)
model = SelectFromModel(custom)
X_new = model.fit_transform(x_train, encoded_y_train)
score = cross_val_score(clf, X_new, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
|
<commit_before><commit_msg>Add structure to feature selection script<commit_after>import pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import KFold
from sklearn.feature_selection import SelectFromModel
from utils.metrics import ndcg_scorer
path = '../data/processed/'
train_users = pd.read_csv(path + 'ohe_count_processed_train_users.csv')
y_train = train_users['country_destination']
train_users.drop('country_destination', axis=1, inplace=True)
train_users.drop('id', axis=1, inplace=True)
train_users = train_users.fillna(-1)
x_train = train_users.values
label_encoder = LabelEncoder()
encoded_y_train = label_encoder.fit_transform(y_train)
clf = XGBClassifier(n_estimators=1, nthread=-1, seed=42)
kf = KFold(len(x_train), n_folds=5, random_state=42)
score = cross_val_score(clf, x_train, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
class CustomXGB(XGBClassifier):
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
booster = self.booster()
scores = booster.get_fscore()
all_scores = pd.Series(np.zeros(x_train.shape[1]))
scores = pd.Series(scores)
scores.index = scores.index.map(lambda x: x[1:]).astype(int)
final_scores = all_scores + scores
importances = final_scores.fillna(0).values
return importances
custom = CustomXGB(n_estimators=1, seed=42, nthread=-1)
model = SelectFromModel(custom)
X_new = model.fit_transform(x_train, encoded_y_train)
score = cross_val_score(clf, X_new, encoded_y_train,
cv=kf, scoring=ndcg_scorer)
print 'Score:', score.mean()
|
|
b5378ee0b1562401e5ee7274faa991ad59047d3a
|
whats_fresh/whats_fresh_api/migrations/0003_auto_20141120_2308.py
|
whats_fresh/whats_fresh_api/migrations/0003_auto_20141120_2308.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0002_auto_20141120_2246'),
]
operations = [
migrations.AlterField(
model_name='image',
name='name',
field=models.TextField(default=b''),
),
migrations.AlterField(
model_name='video',
name='name',
field=models.TextField(default=b''),
),
]
|
Add default values to name fields
|
Add default values to name fields
refs #17433
|
Python
|
apache-2.0
|
osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api
|
Add default values to name fields
refs #17433
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0002_auto_20141120_2246'),
]
operations = [
migrations.AlterField(
model_name='image',
name='name',
field=models.TextField(default=b''),
),
migrations.AlterField(
model_name='video',
name='name',
field=models.TextField(default=b''),
),
]
|
<commit_before><commit_msg>Add default values to name fields
refs #17433<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0002_auto_20141120_2246'),
]
operations = [
migrations.AlterField(
model_name='image',
name='name',
field=models.TextField(default=b''),
),
migrations.AlterField(
model_name='video',
name='name',
field=models.TextField(default=b''),
),
]
|
Add default values to name fields
refs #17433# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0002_auto_20141120_2246'),
]
operations = [
migrations.AlterField(
model_name='image',
name='name',
field=models.TextField(default=b''),
),
migrations.AlterField(
model_name='video',
name='name',
field=models.TextField(default=b''),
),
]
|
<commit_before><commit_msg>Add default values to name fields
refs #17433<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0002_auto_20141120_2246'),
]
operations = [
migrations.AlterField(
model_name='image',
name='name',
field=models.TextField(default=b''),
),
migrations.AlterField(
model_name='video',
name='name',
field=models.TextField(default=b''),
),
]
|
|
8332a9150e621306e94f3ac994f048451325e3db
|
scripts/scraper_converter.py
|
scripts/scraper_converter.py
|
'''
Usage:
python scraper_converter.py scraped.db formatted.db
Processes the cards scraped using the gatherer downloader and adds sane attributes fields for querying
(int pow/toughness, cmc) and saves the output to a new sqlite database.
Card attributes are saved according to finder.models.Card
'''
import sqlsoup
from finder import (
controllers,
models,
util
)
sides = [u'left', u'right']
# raw field name => models.Card attribute name
# note that these are only the fields we care about (printedname, printedrules etc are omitted)
field_conversion = {
'id': 'multiverse_id',
'name': 'name',
'cost': 'cost',
'color': 'color',
'type': 'type',
'set': 'set',
'rarity': 'rarity',
'power': 'power',
'toughness': 'toughness',
'rules': 'oracle_rules',
'flavor': 'flavor_text',
'watermark': 'watermark',
'cardnum': 'number',
'artist': 'artist',
'rulings': 'rulings'
}
def convert(indb, outdb, scale=10):
'''Convert each entry in indb using various parsers and save to outdb'''
src = sqlsoup.SQLSoup('sqlite:///{}'.format(indb))
dst = None
raise NotImplementedError('dst is None!')
rows = src.MTGCardInfo.all()
for row in rows:
convert_row(row, dst, scale=scale)
def convert_row(row, dst, scale=10):
'''Convert a src row into one or more dst cards'''
name = util.sanitize(row.name)
attrs = {dkey: getattr(row, skey) for skey, dkey in field_conversion.iteritems()}
# Split card, process both halves
if u'//' in name:
for side in sides:
dst.add(to_card(attrs, scale=scale, split=side))
else:
dst.add(to_card(attrs, scale=scale))
def to_card(attrs, scale=10, split=''):
'''attrs is a dictionary whose keys are finder.model.Card attributes.'''
card = models.Card(**attrs)
controllers.process_card(card, scale=scale, split=split)
return card
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('input', help='sqlite .db database to load from (should use gatherer downloader save format)')
parser.add_argument('output', help='filename to save well-formed card sqlite .db database to')
args = parser.parse_args()
convert(args.input, args.output, scale=10)
|
Add shell of "GathererDownloader" converter
|
Add shell of "GathererDownloader" converter
This script takes the raw values from GathererDownloader and adds
rich comparison values such as integer power, toughness, as well as
new fields such as tilde rules (for finding self-referential cards) or
ascii name for the whole series of Aether <Whatever>.
Currently this only sets up reading, and does not properly open a file
for writing.
|
Python
|
mit
|
numberoverzero/finder
|
Add shell of "GathererDownloader" converter
This script takes the raw values from GathererDownloader and adds
rich comparison values such as integer power, toughness, as well as
new fields such as tilde rules (for finding self-referential cards) or
ascii name for the whole series of Aether <Whatever>.
Currently this only sets up reading, and does not properly open a file
for writing.
|
'''
Usage:
python scraper_converter.py scraped.db formatted.db
Processes the cards scraped using the gatherer downloader and adds sane attributes fields for querying
(int pow/toughness, cmc) and saves the output to a new sqlite database.
Card attributes are saved according to finder.models.Card
'''
import sqlsoup
from finder import (
controllers,
models,
util
)
sides = [u'left', u'right']
# raw field name => models.Card attribute name
# note that these are only the fields we care about (printedname, printedrules etc are omitted)
field_conversion = {
'id': 'multiverse_id',
'name': 'name',
'cost': 'cost',
'color': 'color',
'type': 'type',
'set': 'set',
'rarity': 'rarity',
'power': 'power',
'toughness': 'toughness',
'rules': 'oracle_rules',
'flavor': 'flavor_text',
'watermark': 'watermark',
'cardnum': 'number',
'artist': 'artist',
'rulings': 'rulings'
}
def convert(indb, outdb, scale=10):
'''Convert each entry in indb using various parsers and save to outdb'''
src = sqlsoup.SQLSoup('sqlite:///{}'.format(indb))
dst = None
raise NotImplementedError('dst is None!')
rows = src.MTGCardInfo.all()
for row in rows:
convert_row(row, dst, scale=scale)
def convert_row(row, dst, scale=10):
'''Convert a src row into one or more dst cards'''
name = util.sanitize(row.name)
attrs = {dkey: getattr(row, skey) for skey, dkey in field_conversion.iteritems()}
# Split card, process both halves
if u'//' in name:
for side in sides:
dst.add(to_card(attrs, scale=scale, split=side))
else:
dst.add(to_card(attrs, scale=scale))
def to_card(attrs, scale=10, split=''):
'''attrs is a dictionary whose keys are finder.model.Card attributes.'''
card = models.Card(**attrs)
controllers.process_card(card, scale=scale, split=split)
return card
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('input', help='sqlite .db database to load from (should use gatherer downloader save format)')
parser.add_argument('output', help='filename to save well-formed card sqlite .db database to')
args = parser.parse_args()
convert(args.input, args.output, scale=10)
|
<commit_before><commit_msg>Add shell of "GathererDownloader" converter
This script takes the raw values from GathererDownloader and adds
rich comparison values such as integer power, toughness, as well as
new fields such as tilde rules (for finding self-referential cards) or
ascii name for the whole series of Aether <Whatever>.
Currently this only sets up reading, and does not properly open a file
for writing.<commit_after>
|
'''
Usage:
python scraper_converter.py scraped.db formatted.db
Processes the cards scraped using the gatherer downloader and adds sane attributes fields for querying
(int pow/toughness, cmc) and saves the output to a new sqlite database.
Card attributes are saved according to finder.models.Card
'''
import sqlsoup
from finder import (
controllers,
models,
util
)
sides = [u'left', u'right']
# raw field name => models.Card attribute name
# note that these are only the fields we care about (printedname, printedrules etc are omitted)
field_conversion = {
'id': 'multiverse_id',
'name': 'name',
'cost': 'cost',
'color': 'color',
'type': 'type',
'set': 'set',
'rarity': 'rarity',
'power': 'power',
'toughness': 'toughness',
'rules': 'oracle_rules',
'flavor': 'flavor_text',
'watermark': 'watermark',
'cardnum': 'number',
'artist': 'artist',
'rulings': 'rulings'
}
def convert(indb, outdb, scale=10):
'''Convert each entry in indb using various parsers and save to outdb'''
src = sqlsoup.SQLSoup('sqlite:///{}'.format(indb))
dst = None
raise NotImplementedError('dst is None!')
rows = src.MTGCardInfo.all()
for row in rows:
convert_row(row, dst, scale=scale)
def convert_row(row, dst, scale=10):
'''Convert a src row into one or more dst cards'''
name = util.sanitize(row.name)
attrs = {dkey: getattr(row, skey) for skey, dkey in field_conversion.iteritems()}
# Split card, process both halves
if u'//' in name:
for side in sides:
dst.add(to_card(attrs, scale=scale, split=side))
else:
dst.add(to_card(attrs, scale=scale))
def to_card(attrs, scale=10, split=''):
'''attrs is a dictionary whose keys are finder.model.Card attributes.'''
card = models.Card(**attrs)
controllers.process_card(card, scale=scale, split=split)
return card
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('input', help='sqlite .db database to load from (should use gatherer downloader save format)')
parser.add_argument('output', help='filename to save well-formed card sqlite .db database to')
args = parser.parse_args()
convert(args.input, args.output, scale=10)
|
Add shell of "GathererDownloader" converter
This script takes the raw values from GathererDownloader and adds
rich comparison values such as integer power, toughness, as well as
new fields such as tilde rules (for finding self-referential cards) or
ascii name for the whole series of Aether <Whatever>.
Currently this only sets up reading, and does not properly open a file
for writing.'''
Usage:
python scraper_converter.py scraped.db formatted.db
Processes the cards scraped using the gatherer downloader and adds sane attributes fields for querying
(int pow/toughness, cmc) and saves the output to a new sqlite database.
Card attributes are saved according to finder.models.Card
'''
import sqlsoup
from finder import (
controllers,
models,
util
)
sides = [u'left', u'right']
# raw field name => models.Card attribute name
# note that these are only the fields we care about (printedname, printedrules etc are omitted)
field_conversion = {
'id': 'multiverse_id',
'name': 'name',
'cost': 'cost',
'color': 'color',
'type': 'type',
'set': 'set',
'rarity': 'rarity',
'power': 'power',
'toughness': 'toughness',
'rules': 'oracle_rules',
'flavor': 'flavor_text',
'watermark': 'watermark',
'cardnum': 'number',
'artist': 'artist',
'rulings': 'rulings'
}
def convert(indb, outdb, scale=10):
'''Convert each entry in indb using various parsers and save to outdb'''
src = sqlsoup.SQLSoup('sqlite:///{}'.format(indb))
dst = None
raise NotImplementedError('dst is None!')
rows = src.MTGCardInfo.all()
for row in rows:
convert_row(row, dst, scale=scale)
def convert_row(row, dst, scale=10):
'''Convert a src row into one or more dst cards'''
name = util.sanitize(row.name)
attrs = {dkey: getattr(row, skey) for skey, dkey in field_conversion.iteritems()}
# Split card, process both halves
if u'//' in name:
for side in sides:
dst.add(to_card(attrs, scale=scale, split=side))
else:
dst.add(to_card(attrs, scale=scale))
def to_card(attrs, scale=10, split=''):
'''attrs is a dictionary whose keys are finder.model.Card attributes.'''
card = models.Card(**attrs)
controllers.process_card(card, scale=scale, split=split)
return card
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('input', help='sqlite .db database to load from (should use gatherer downloader save format)')
parser.add_argument('output', help='filename to save well-formed card sqlite .db database to')
args = parser.parse_args()
convert(args.input, args.output, scale=10)
|
<commit_before><commit_msg>Add shell of "GathererDownloader" converter
This script takes the raw values from GathererDownloader and adds
rich comparison values such as integer power, toughness, as well as
new fields such as tilde rules (for finding self-referential cards) or
ascii name for the whole series of Aether <Whatever>.
Currently this only sets up reading, and does not properly open a file
for writing.<commit_after>'''
Usage:
python scraper_converter.py scraped.db formatted.db
Processes the cards scraped using the gatherer downloader and adds sane attributes fields for querying
(int pow/toughness, cmc) and saves the output to a new sqlite database.
Card attributes are saved according to finder.models.Card
'''
import sqlsoup
from finder import (
controllers,
models,
util
)
sides = [u'left', u'right']
# raw field name => models.Card attribute name
# note that these are only the fields we care about (printedname, printedrules etc are omitted)
field_conversion = {
'id': 'multiverse_id',
'name': 'name',
'cost': 'cost',
'color': 'color',
'type': 'type',
'set': 'set',
'rarity': 'rarity',
'power': 'power',
'toughness': 'toughness',
'rules': 'oracle_rules',
'flavor': 'flavor_text',
'watermark': 'watermark',
'cardnum': 'number',
'artist': 'artist',
'rulings': 'rulings'
}
def convert(indb, outdb, scale=10):
'''Convert each entry in indb using various parsers and save to outdb'''
src = sqlsoup.SQLSoup('sqlite:///{}'.format(indb))
dst = None
raise NotImplementedError('dst is None!')
rows = src.MTGCardInfo.all()
for row in rows:
convert_row(row, dst, scale=scale)
def convert_row(row, dst, scale=10):
'''Convert a src row into one or more dst cards'''
name = util.sanitize(row.name)
attrs = {dkey: getattr(row, skey) for skey, dkey in field_conversion.iteritems()}
# Split card, process both halves
if u'//' in name:
for side in sides:
dst.add(to_card(attrs, scale=scale, split=side))
else:
dst.add(to_card(attrs, scale=scale))
def to_card(attrs, scale=10, split=''):
'''attrs is a dictionary whose keys are finder.model.Card attributes.'''
card = models.Card(**attrs)
controllers.process_card(card, scale=scale, split=split)
return card
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('input', help='sqlite .db database to load from (should use gatherer downloader save format)')
parser.add_argument('output', help='filename to save well-formed card sqlite .db database to')
args = parser.parse_args()
convert(args.input, args.output, scale=10)
|
|
df32bf731285be48a7f713657ef1b281229c3226
|
get_module_api.py
|
get_module_api.py
|
#!/usr/bin/python3
import click
from pdc_client import PDCClient
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def get_modulemd(module_name, stream):
"""
Check if module and stream are built successfully on PDC server
"""
pdc_server = "https://pdc.fedoraproject.org/rest_api/v1/unreleasedvariants"
#Using develop=True to not authenticate to the server
pdc_session = PDCClient(pdc_server, ssl_verify=True, develop=True)
pdc_query = dict(
variant_id = module_name,
variant_version = stream,
fields="modulemd",
ordering="variant_release",
#active=True returns only succesful builds
active = True,
)
try:
mod_info = pdc_session(**pdc_query)
except Exception as ex:
raise IOError("Could not query PDC server for %s (stream: %s) - %s" % (
module_name, stream, ex))
if not mod_info or "results" not in mod_info.keys() or not mod_info["results"]:
raise IOError("%s (stream: %s) is not available on PDC" % (
module_name, stream))
return mod_info["results"][-1]["modulemd"]
@click.command()
@click.option('--module', default='base-runtime',
help='The module to get the API from')
@click.option('--ref', default='f26',
help='The ref of the module to retrieve')
def main(module, ref):
modulemd = yaml.load(get_modulemd(module, ref), Loader=Loader)
for rpm in sorted(modulemd['data']['api']['rpms']):
print(rpm)
if __name__ == "__main__":
main()
|
Add script to retrieve the RPM API of modules
|
Add script to retrieve the RPM API of modules
|
Python
|
mit
|
sgallagher/baseruntime-package-lists,sgallagher/baseruntime-package-lists,fedora-modularity/baseruntime-package-lists,sgallagher/baseruntime-package-lists,fedora-modularity/baseruntime-package-lists,fedora-modularity/baseruntime-package-lists
|
Add script to retrieve the RPM API of modules
|
#!/usr/bin/python3
import click
from pdc_client import PDCClient
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def get_modulemd(module_name, stream):
"""
Check if module and stream are built successfully on PDC server
"""
pdc_server = "https://pdc.fedoraproject.org/rest_api/v1/unreleasedvariants"
#Using develop=True to not authenticate to the server
pdc_session = PDCClient(pdc_server, ssl_verify=True, develop=True)
pdc_query = dict(
variant_id = module_name,
variant_version = stream,
fields="modulemd",
ordering="variant_release",
#active=True returns only succesful builds
active = True,
)
try:
mod_info = pdc_session(**pdc_query)
except Exception as ex:
raise IOError("Could not query PDC server for %s (stream: %s) - %s" % (
module_name, stream, ex))
if not mod_info or "results" not in mod_info.keys() or not mod_info["results"]:
raise IOError("%s (stream: %s) is not available on PDC" % (
module_name, stream))
return mod_info["results"][-1]["modulemd"]
@click.command()
@click.option('--module', default='base-runtime',
help='The module to get the API from')
@click.option('--ref', default='f26',
help='The ref of the module to retrieve')
def main(module, ref):
modulemd = yaml.load(get_modulemd(module, ref), Loader=Loader)
for rpm in sorted(modulemd['data']['api']['rpms']):
print(rpm)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to retrieve the RPM API of modules<commit_after>
|
#!/usr/bin/python3
import click
from pdc_client import PDCClient
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def get_modulemd(module_name, stream):
"""
Check if module and stream are built successfully on PDC server
"""
pdc_server = "https://pdc.fedoraproject.org/rest_api/v1/unreleasedvariants"
#Using develop=True to not authenticate to the server
pdc_session = PDCClient(pdc_server, ssl_verify=True, develop=True)
pdc_query = dict(
variant_id = module_name,
variant_version = stream,
fields="modulemd",
ordering="variant_release",
#active=True returns only succesful builds
active = True,
)
try:
mod_info = pdc_session(**pdc_query)
except Exception as ex:
raise IOError("Could not query PDC server for %s (stream: %s) - %s" % (
module_name, stream, ex))
if not mod_info or "results" not in mod_info.keys() or not mod_info["results"]:
raise IOError("%s (stream: %s) is not available on PDC" % (
module_name, stream))
return mod_info["results"][-1]["modulemd"]
@click.command()
@click.option('--module', default='base-runtime',
help='The module to get the API from')
@click.option('--ref', default='f26',
help='The ref of the module to retrieve')
def main(module, ref):
modulemd = yaml.load(get_modulemd(module, ref), Loader=Loader)
for rpm in sorted(modulemd['data']['api']['rpms']):
print(rpm)
if __name__ == "__main__":
main()
|
Add script to retrieve the RPM API of modules#!/usr/bin/python3
import click
from pdc_client import PDCClient
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def get_modulemd(module_name, stream):
"""
Check if module and stream are built successfully on PDC server
"""
pdc_server = "https://pdc.fedoraproject.org/rest_api/v1/unreleasedvariants"
#Using develop=True to not authenticate to the server
pdc_session = PDCClient(pdc_server, ssl_verify=True, develop=True)
pdc_query = dict(
variant_id = module_name,
variant_version = stream,
fields="modulemd",
ordering="variant_release",
#active=True returns only succesful builds
active = True,
)
try:
mod_info = pdc_session(**pdc_query)
except Exception as ex:
raise IOError("Could not query PDC server for %s (stream: %s) - %s" % (
module_name, stream, ex))
if not mod_info or "results" not in mod_info.keys() or not mod_info["results"]:
raise IOError("%s (stream: %s) is not available on PDC" % (
module_name, stream))
return mod_info["results"][-1]["modulemd"]
@click.command()
@click.option('--module', default='base-runtime',
help='The module to get the API from')
@click.option('--ref', default='f26',
help='The ref of the module to retrieve')
def main(module, ref):
modulemd = yaml.load(get_modulemd(module, ref), Loader=Loader)
for rpm in sorted(modulemd['data']['api']['rpms']):
print(rpm)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to retrieve the RPM API of modules<commit_after>#!/usr/bin/python3
import click
from pdc_client import PDCClient
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def get_modulemd(module_name, stream):
"""
Check if module and stream are built successfully on PDC server
"""
pdc_server = "https://pdc.fedoraproject.org/rest_api/v1/unreleasedvariants"
#Using develop=True to not authenticate to the server
pdc_session = PDCClient(pdc_server, ssl_verify=True, develop=True)
pdc_query = dict(
variant_id = module_name,
variant_version = stream,
fields="modulemd",
ordering="variant_release",
#active=True returns only succesful builds
active = True,
)
try:
mod_info = pdc_session(**pdc_query)
except Exception as ex:
raise IOError("Could not query PDC server for %s (stream: %s) - %s" % (
module_name, stream, ex))
if not mod_info or "results" not in mod_info.keys() or not mod_info["results"]:
raise IOError("%s (stream: %s) is not available on PDC" % (
module_name, stream))
return mod_info["results"][-1]["modulemd"]
@click.command()
@click.option('--module', default='base-runtime',
help='The module to get the API from')
@click.option('--ref', default='f26',
help='The ref of the module to retrieve')
def main(module, ref):
modulemd = yaml.load(get_modulemd(module, ref), Loader=Loader)
for rpm in sorted(modulemd['data']['api']['rpms']):
print(rpm)
if __name__ == "__main__":
main()
|
|
8840ac409cac7c187f2fd7941f8186397beb61fb
|
src/arlobot_apps/arlobot_navigation/scripts/laser_filter.py
|
src/arlobot_apps/arlobot_navigation/scripts/laser_filter.py
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
import math
def callback(data):
#Option 1) Conform data to specified input/output ranges
#data.ranges = [data.range_max if range_val>data.range_max else (data.range_min if range_val<data.range_min else range_val) for range_val in data.ranges]
data.ranges = [data.range_max if range_val>data.range_max else (0.0 if range_val<data.range_min else range_val) for range_val in data.ranges]
#Option 2) Conform input/output ranges to data
data.range_max = max(data.range_max,max(data.ranges))
data.range_min = min(data.range_min,min(data.ranges))
pub.publish(data)
# Intializes everything
def start():
rospy.init_node('laser_filter')
scan_topic = rospy.get_param('~scan_topic', 'scan')
global pub
pub = rospy.Publisher(scan_topic+'_filtered', LaserScan, queue_size=10)
rospy.Subscriber(scan_topic, LaserScan, callback)
rospy.spin()
if __name__ == '__main__':
start()
|
Add laser filter for adjusting laser scanner data on the fly.
|
Add laser filter for adjusting laser scanner data on the fly.
|
Python
|
mit
|
remarvel/ArloBot,chrisl8/ArloBot,remarvel/ArloBot,DTU-R3/ArloBot,DTU-R3/ArloBot,chrisl8/ArloBot,chrisl8/ArloBot,chrisl8/ArloBot,DTU-R3/ArloBot,chrisl8/ArloBot,remarvel/ArloBot,DTU-R3/ArloBot,remarvel/ArloBot,chrisl8/ArloBot,remarvel/ArloBot,remarvel/ArloBot,DTU-R3/ArloBot,DTU-R3/ArloBot
|
Add laser filter for adjusting laser scanner data on the fly.
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
import math
def callback(data):
#Option 1) Conform data to specified input/output ranges
#data.ranges = [data.range_max if range_val>data.range_max else (data.range_min if range_val<data.range_min else range_val) for range_val in data.ranges]
data.ranges = [data.range_max if range_val>data.range_max else (0.0 if range_val<data.range_min else range_val) for range_val in data.ranges]
#Option 2) Conform input/output ranges to data
data.range_max = max(data.range_max,max(data.ranges))
data.range_min = min(data.range_min,min(data.ranges))
pub.publish(data)
# Intializes everything
def start():
rospy.init_node('laser_filter')
scan_topic = rospy.get_param('~scan_topic', 'scan')
global pub
pub = rospy.Publisher(scan_topic+'_filtered', LaserScan, queue_size=10)
rospy.Subscriber(scan_topic, LaserScan, callback)
rospy.spin()
if __name__ == '__main__':
start()
|
<commit_before><commit_msg>Add laser filter for adjusting laser scanner data on the fly.<commit_after>
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
import math
def callback(data):
#Option 1) Conform data to specified input/output ranges
#data.ranges = [data.range_max if range_val>data.range_max else (data.range_min if range_val<data.range_min else range_val) for range_val in data.ranges]
data.ranges = [data.range_max if range_val>data.range_max else (0.0 if range_val<data.range_min else range_val) for range_val in data.ranges]
#Option 2) Conform input/output ranges to data
data.range_max = max(data.range_max,max(data.ranges))
data.range_min = min(data.range_min,min(data.ranges))
pub.publish(data)
# Intializes everything
def start():
rospy.init_node('laser_filter')
scan_topic = rospy.get_param('~scan_topic', 'scan')
global pub
pub = rospy.Publisher(scan_topic+'_filtered', LaserScan, queue_size=10)
rospy.Subscriber(scan_topic, LaserScan, callback)
rospy.spin()
if __name__ == '__main__':
start()
|
Add laser filter for adjusting laser scanner data on the fly.#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
import math
def callback(data):
#Option 1) Conform data to specified input/output ranges
#data.ranges = [data.range_max if range_val>data.range_max else (data.range_min if range_val<data.range_min else range_val) for range_val in data.ranges]
data.ranges = [data.range_max if range_val>data.range_max else (0.0 if range_val<data.range_min else range_val) for range_val in data.ranges]
#Option 2) Conform input/output ranges to data
data.range_max = max(data.range_max,max(data.ranges))
data.range_min = min(data.range_min,min(data.ranges))
pub.publish(data)
# Intializes everything
def start():
rospy.init_node('laser_filter')
scan_topic = rospy.get_param('~scan_topic', 'scan')
global pub
pub = rospy.Publisher(scan_topic+'_filtered', LaserScan, queue_size=10)
rospy.Subscriber(scan_topic, LaserScan, callback)
rospy.spin()
if __name__ == '__main__':
start()
|
<commit_before><commit_msg>Add laser filter for adjusting laser scanner data on the fly.<commit_after>#!/usr/bin/env python
import rospy
from sensor_msgs.msg import LaserScan
import math
def callback(data):
#Option 1) Conform data to specified input/output ranges
#data.ranges = [data.range_max if range_val>data.range_max else (data.range_min if range_val<data.range_min else range_val) for range_val in data.ranges]
data.ranges = [data.range_max if range_val>data.range_max else (0.0 if range_val<data.range_min else range_val) for range_val in data.ranges]
#Option 2) Conform input/output ranges to data
data.range_max = max(data.range_max,max(data.ranges))
data.range_min = min(data.range_min,min(data.ranges))
pub.publish(data)
# Intializes everything
def start():
rospy.init_node('laser_filter')
scan_topic = rospy.get_param('~scan_topic', 'scan')
global pub
pub = rospy.Publisher(scan_topic+'_filtered', LaserScan, queue_size=10)
rospy.Subscriber(scan_topic, LaserScan, callback)
rospy.spin()
if __name__ == '__main__':
start()
|
|
2adb05bb518bbb18036c8c6ccc353e2381a79d86
|
indra/tests/test_rest_api.py
|
indra/tests/test_rest_api.py
|
import requests
def test_rest_api_responsive():
stmt_str = '{"statements": [{"sbo": "http://identifiers.org/sbo/SBO:0000526", "type": "Complex", "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f", "members": [{"db_refs": {"TEXT": "MEK", "BE": "MEK"}, "name": "MEK"}, {"db_refs": {"TEXT": "ERK", "NCIT": "C26360", "BE": "ERK"}, "name": "ERK"}], "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]}]}'
url = 'http://ec2-34-226-201-156.compute-1.amazonaws.com:8080/' + \
'assemblers/cyjs'
res = requests.post(url, stmt_str)
assert res.status_code == 200
|
Add smoke test for REST API on Travis
|
Add smoke test for REST API on Travis
|
Python
|
bsd-2-clause
|
johnbachman/belpy,bgyori/indra,sorgerlab/belpy,pvtodorov/indra,johnbachman/indra,pvtodorov/indra,johnbachman/belpy,pvtodorov/indra,sorgerlab/indra,bgyori/indra,johnbachman/belpy,sorgerlab/indra,johnbachman/indra,johnbachman/indra,sorgerlab/indra,pvtodorov/indra,bgyori/indra,sorgerlab/belpy,sorgerlab/belpy
|
Add smoke test for REST API on Travis
|
import requests
def test_rest_api_responsive():
stmt_str = '{"statements": [{"sbo": "http://identifiers.org/sbo/SBO:0000526", "type": "Complex", "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f", "members": [{"db_refs": {"TEXT": "MEK", "BE": "MEK"}, "name": "MEK"}, {"db_refs": {"TEXT": "ERK", "NCIT": "C26360", "BE": "ERK"}, "name": "ERK"}], "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]}]}'
url = 'http://ec2-34-226-201-156.compute-1.amazonaws.com:8080/' + \
'assemblers/cyjs'
res = requests.post(url, stmt_str)
assert res.status_code == 200
|
<commit_before><commit_msg>Add smoke test for REST API on Travis<commit_after>
|
import requests
def test_rest_api_responsive():
stmt_str = '{"statements": [{"sbo": "http://identifiers.org/sbo/SBO:0000526", "type": "Complex", "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f", "members": [{"db_refs": {"TEXT": "MEK", "BE": "MEK"}, "name": "MEK"}, {"db_refs": {"TEXT": "ERK", "NCIT": "C26360", "BE": "ERK"}, "name": "ERK"}], "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]}]}'
url = 'http://ec2-34-226-201-156.compute-1.amazonaws.com:8080/' + \
'assemblers/cyjs'
res = requests.post(url, stmt_str)
assert res.status_code == 200
|
Add smoke test for REST API on Travisimport requests
def test_rest_api_responsive():
stmt_str = '{"statements": [{"sbo": "http://identifiers.org/sbo/SBO:0000526", "type": "Complex", "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f", "members": [{"db_refs": {"TEXT": "MEK", "BE": "MEK"}, "name": "MEK"}, {"db_refs": {"TEXT": "ERK", "NCIT": "C26360", "BE": "ERK"}, "name": "ERK"}], "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]}]}'
url = 'http://ec2-34-226-201-156.compute-1.amazonaws.com:8080/' + \
'assemblers/cyjs'
res = requests.post(url, stmt_str)
assert res.status_code == 200
|
<commit_before><commit_msg>Add smoke test for REST API on Travis<commit_after>import requests
def test_rest_api_responsive():
stmt_str = '{"statements": [{"sbo": "http://identifiers.org/sbo/SBO:0000526", "type": "Complex", "id": "acc6d47c-f622-41a4-8ae9-d7b0f3d24a2f", "members": [{"db_refs": {"TEXT": "MEK", "BE": "MEK"}, "name": "MEK"}, {"db_refs": {"TEXT": "ERK", "NCIT": "C26360", "BE": "ERK"}, "name": "ERK"}], "evidence": [{"text": "MEK binds ERK", "source_api": "trips"}]}]}'
url = 'http://ec2-34-226-201-156.compute-1.amazonaws.com:8080/' + \
'assemblers/cyjs'
res = requests.post(url, stmt_str)
assert res.status_code == 200
|
|
0c86a376e75d5ed2363bcd986558bb5f0841c8ec
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Polyphony.py
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Polyphony.py
|
import Axon
class Polyphoniser(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
polyphony = 8
def __init__(self, **argd):
super(Polyphoniser, self).__init__(**argd)
self.voices = []
for i in range(self.polyphony):
self.addOutbox("voice%i" % i)
self.voices.append(None)
def main(self, **argd):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
noteNumber = arguments[0]
if None in self.voices:
index = self.voices.index(None)
self.voices[index] = noteNumber
self.send((address, arguments), "voice%i" % index)
else:
# Verbose - we ignore the note if the level of polyphony
# isn't high enough
pass
elif address == "Off":
noteNumber, frequency = arguments
if noteNumber in self.voices:
index = self.voices.index(noteNumber)
self.voices[index] = None
self.send((address, arguments), "voice%i" % index)
if not self.anyReady():
self.pause()
yield 1
|
Add polyphoniser component for routeing note on and off messages around a number of voices
|
Add polyphoniser component for routeing note on and off messages around a number of voices
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Add polyphoniser component for routeing note on and off messages around a number of voices
|
import Axon
class Polyphoniser(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
polyphony = 8
def __init__(self, **argd):
super(Polyphoniser, self).__init__(**argd)
self.voices = []
for i in range(self.polyphony):
self.addOutbox("voice%i" % i)
self.voices.append(None)
def main(self, **argd):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
noteNumber = arguments[0]
if None in self.voices:
index = self.voices.index(None)
self.voices[index] = noteNumber
self.send((address, arguments), "voice%i" % index)
else:
# Verbose - we ignore the note if the level of polyphony
# isn't high enough
pass
elif address == "Off":
noteNumber, frequency = arguments
if noteNumber in self.voices:
index = self.voices.index(noteNumber)
self.voices[index] = None
self.send((address, arguments), "voice%i" % index)
if not self.anyReady():
self.pause()
yield 1
|
<commit_before><commit_msg>Add polyphoniser component for routeing note on and off messages around a number of voices<commit_after>
|
import Axon
class Polyphoniser(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
polyphony = 8
def __init__(self, **argd):
super(Polyphoniser, self).__init__(**argd)
self.voices = []
for i in range(self.polyphony):
self.addOutbox("voice%i" % i)
self.voices.append(None)
def main(self, **argd):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
noteNumber = arguments[0]
if None in self.voices:
index = self.voices.index(None)
self.voices[index] = noteNumber
self.send((address, arguments), "voice%i" % index)
else:
# Verbose - we ignore the note if the level of polyphony
# isn't high enough
pass
elif address == "Off":
noteNumber, frequency = arguments
if noteNumber in self.voices:
index = self.voices.index(noteNumber)
self.voices[index] = None
self.send((address, arguments), "voice%i" % index)
if not self.anyReady():
self.pause()
yield 1
|
Add polyphoniser component for routeing note on and off messages around a number of voicesimport Axon
class Polyphoniser(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
polyphony = 8
def __init__(self, **argd):
super(Polyphoniser, self).__init__(**argd)
self.voices = []
for i in range(self.polyphony):
self.addOutbox("voice%i" % i)
self.voices.append(None)
def main(self, **argd):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
noteNumber = arguments[0]
if None in self.voices:
index = self.voices.index(None)
self.voices[index] = noteNumber
self.send((address, arguments), "voice%i" % index)
else:
# Verbose - we ignore the note if the level of polyphony
# isn't high enough
pass
elif address == "Off":
noteNumber, frequency = arguments
if noteNumber in self.voices:
index = self.voices.index(noteNumber)
self.voices[index] = None
self.send((address, arguments), "voice%i" % index)
if not self.anyReady():
self.pause()
yield 1
|
<commit_before><commit_msg>Add polyphoniser component for routeing note on and off messages around a number of voices<commit_after>import Axon
class Polyphoniser(Axon.AdaptiveCommsComponent.AdaptiveCommsComponent):
polyphony = 8
def __init__(self, **argd):
super(Polyphoniser, self).__init__(**argd)
self.voices = []
for i in range(self.polyphony):
self.addOutbox("voice%i" % i)
self.voices.append(None)
def main(self, **argd):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
noteNumber = arguments[0]
if None in self.voices:
index = self.voices.index(None)
self.voices[index] = noteNumber
self.send((address, arguments), "voice%i" % index)
else:
# Verbose - we ignore the note if the level of polyphony
# isn't high enough
pass
elif address == "Off":
noteNumber, frequency = arguments
if noteNumber in self.voices:
index = self.voices.index(noteNumber)
self.voices[index] = None
self.send((address, arguments), "voice%i" % index)
if not self.anyReady():
self.pause()
yield 1
|
|
8bd1efce568b603159a5a083cc9f9ce3a550d2b8
|
eva/util/kutil.py
|
eva/util/kutil.py
|
import keras.backend as K
def get_input(rows, cols, channels):
return (channels, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, channels)
|
Add keras util with get input func
|
Add keras util with get input func
|
Python
|
apache-2.0
|
israelg99/eva
|
Add keras util with get input func
|
import keras.backend as K
def get_input(rows, cols, channels):
return (channels, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, channels)
|
<commit_before><commit_msg>Add keras util with get input func<commit_after>
|
import keras.backend as K
def get_input(rows, cols, channels):
return (channels, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, channels)
|
Add keras util with get input funcimport keras.backend as K
def get_input(rows, cols, channels):
return (channels, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, channels)
|
<commit_before><commit_msg>Add keras util with get input func<commit_after>import keras.backend as K
def get_input(rows, cols, channels):
return (channels, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, channels)
|
|
f85d45a781eef0ab6d7362dad45da94be4bbf8df
|
zerver/management/commands/turn_off_digests.py
|
zerver/management/commands/turn_off_digests.py
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_enable_digest_emails
from zerver.models import Realm, UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Turn off digests for a domain or specified set of email addresses."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='Turn off digests for all users in this domain.'),
make_option('-u', '--users',
dest='users',
type='str',
help='Turn off digests for this comma-separated list of email addresses.'),
)
def handle(self, **options):
if options["domain"] is None and options["users"] is None:
self.print_help("python manage.py", "turn_off_digests")
exit(1)
if options["domain"]:
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
else:
emails = set([email.strip() for email in options["users"].split(",")])
user_profiles = []
for email in emails:
user_profiles.append(get_user_profile_by_email(email))
print "Turned off digest emails for:"
for user_profile in user_profiles:
already_disabled_prefix = ""
if user_profile.enable_digest_emails:
do_change_enable_digest_emails(user_profile, False)
else:
already_disabled_prefix = "(already off) "
print "%s%s <%s>" % (already_disabled_prefix, user_profile.full_name,
user_profile.email)
|
Add a management command to bulk turn off digests.
|
Add a management command to bulk turn off digests.
(imported from commit 0ffb565ecc9be219807ae9a45abb7b0e3e940204)
|
Python
|
apache-2.0
|
qq1012803704/zulip,eastlhu/zulip,wweiradio/zulip,Gabriel0402/zulip,LeeRisk/zulip,JPJPJPOPOP/zulip,Jianchun1/zulip,Qgap/zulip,JanzTam/zulip,punchagan/zulip,Gabriel0402/zulip,gigawhitlocks/zulip,zorojean/zulip,suxinde2009/zulip,zacps/zulip,fw1121/zulip,JPJPJPOPOP/zulip,jeffcao/zulip,jimmy54/zulip,PhilSk/zulip,jainayush975/zulip,shubhamdhama/zulip,natanovia/zulip,rht/zulip,luyifan/zulip,MariaFaBella85/zulip,showell/zulip,ashwinirudrappa/zulip,jphilipsen05/zulip,JanzTam/zulip,xuxiao/zulip,JPJPJPOPOP/zulip,RobotCaleb/zulip,bowlofstew/zulip,RobotCaleb/zulip,babbage/zulip,ikasumiwt/zulip,tommyip/zulip,Galexrt/zulip,akuseru/zulip,xuanhan863/zulip,thomasboyt/zulip,bssrdf/zulip,tbutter/zulip,rht/zulip,technicalpickles/zulip,deer-hope/zulip,pradiptad/zulip,wdaher/zulip,niftynei/zulip,bluesea/zulip,calvinleenyc/zulip,hackerkid/zulip,christi3k/zulip,reyha/zulip,AZtheAsian/zulip,armooo/zulip,bluesea/zulip,xuanhan863/zulip,mdavid/zulip,punchagan/zulip,SmartPeople/zulip,gigawhitlocks/zulip,Cheppers/zulip,kaiyuanheshang/zulip,vaidap/zulip,udxxabp/zulip,adnanh/zulip,vikas-parashar/zulip,swinghu/zulip,esander91/zulip,blaze225/zulip,Vallher/zulip,wavelets/zulip,isht3/zulip,peiwei/zulip,adnanh/zulip,proliming/zulip,karamcnair/zulip,RobotCaleb/zulip,hustlzp/zulip,alliejones/zulip,zhaoweigg/zulip,joyhchen/zulip,mohsenSy/zulip,ApsOps/zulip,peguin40/zulip,gkotian/zulip,littledogboy/zulip,dwrpayne/zulip,wdaher/zulip,vakila/zulip,johnnygaddarr/zulip,vikas-parashar/zulip,mohsenSy/zulip,sonali0901/zulip,ikasumiwt/zulip,hayderimran7/zulip,glovebx/zulip,developerfm/zulip,reyha/zulip,joshisa/zulip,xuxiao/zulip,guiquanz/zulip,Drooids/zulip,dnmfarrell/zulip,avastu/zulip,arpitpanwar/zulip,ipernet/zulip,ryansnowboarder/zulip,bluesea/zulip,zhaoweigg/zulip,synicalsyntax/zulip,zulip/zulip,wavelets/zulip,esander91/zulip,dattatreya303/zulip,zwily/zulip,peguin40/zulip,pradiptad/zulip,johnny9/zulip,EasonYi/zulip,qq1012803704/zulip,aps-sids/zulip,technicalpickles/zulip,seapasulli/zulip,yocome/zulip,KJin99/zulip,KingxBanana/zulip,dxq-git/zulip,ahmadassaf/zulip,ApsOps/zulip,Drooids/zulip,calvinleenyc/zulip,udxxabp/zulip,verma-varsha/zulip,ufosky-server/zulip,mansilladev/zulip,amyliu345/zulip,amyliu345/zulip,xuxiao/zulip,AZtheAsian/zulip,timabbott/zulip,Gabriel0402/zulip,huangkebo/zulip,Juanvulcano/zulip,Gabriel0402/zulip,MariaFaBella85/zulip,PaulPetring/zulip,moria/zulip,zulip/zulip,he15his/zulip,synicalsyntax/zulip,hj3938/zulip,zulip/zulip,itnihao/zulip,tommyip/zulip,rht/zulip,reyha/zulip,eeshangarg/zulip,pradiptad/zulip,ApsOps/zulip,jerryge/zulip,cosmicAsymmetry/zulip,Galexrt/zulip,zorojean/zulip,dwrpayne/zulip,amyliu345/zulip,christi3k/zulip,ApsOps/zulip,huangkebo/zulip,hackerkid/zulip,MayB/zulip,yuvipanda/zulip,Batterfii/zulip,LAndreas/zulip,dhcrzf/zulip,bssrdf/zulip,deer-hope/zulip,gigawhitlocks/zulip,sup95/zulip,tiansiyuan/zulip,Gabriel0402/zulip,joyhchen/zulip,RobotCaleb/zulip,rishig/zulip,kaiyuanheshang/zulip,xuanhan863/zulip,esander91/zulip,calvinleenyc/zulip,JanzTam/zulip,aps-sids/zulip,paxapy/zulip,aliceriot/zulip,sharmaeklavya2/zulip,isht3/zulip,dhcrzf/zulip,SmartPeople/zulip,vaidap/zulip,jainayush975/zulip,eastlhu/zulip,Jianchun1/zulip,Diptanshu8/zulip,sonali0901/zulip,udxxabp/zulip,rishig/zulip,amallia/zulip,dnmfarrell/zulip,samatdav/zulip,MayB/zulip,tiansiyuan/zulip,ahmadassaf/zulip,wdaher/zulip,zwily/zulip,themass/zulip,punchagan/zulip,cosmicAsymmetry/zulip,fw1121/zulip,PhilSk/zulip,eeshangarg/zulip,praveenaki/zulip,zorojean/zulip,bluesea/zulip,bitemyapp/zulip,schatt/zulip,nicholasbs/zulip,suxinde2009/zulip,codeKonami/z
ulip,willingc/zulip,zacps/zulip,vabs22/zulip,umkay/zulip,dnmfarrell/zulip,amanharitsh123/zulip,he15his/zulip,rht/zulip,johnnygaddarr/zulip,nicholasbs/zulip,EasonYi/zulip,rishig/zulip,krtkmj/zulip,eeshangarg/zulip,jackrzhang/zulip,rht/zulip,bitemyapp/zulip,wangdeshui/zulip,zorojean/zulip,dwrpayne/zulip,kaiyuanheshang/zulip,LeeRisk/zulip,littledogboy/zulip,Suninus/zulip,arpith/zulip,sup95/zulip,johnny9/zulip,LeeRisk/zulip,hengqujushi/zulip,j831/zulip,jainayush975/zulip,zofuthan/zulip,willingc/zulip,lfranchi/zulip,bastianh/zulip,bluesea/zulip,christi3k/zulip,aakash-cr7/zulip,dwrpayne/zulip,zulip/zulip,ryansnowboarder/zulip,showell/zulip,yuvipanda/zulip,hayderimran7/zulip,qq1012803704/zulip,jackrzhang/zulip,mdavid/zulip,rishig/zulip,technicalpickles/zulip,cosmicAsymmetry/zulip,kaiyuanheshang/zulip,shaunstanislaus/zulip,sonali0901/zulip,seapasulli/zulip,ApsOps/zulip,babbage/zulip,saitodisse/zulip,udxxabp/zulip,armooo/zulip,littledogboy/zulip,vakila/zulip,johnny9/zulip,jerryge/zulip,PhilSk/zulip,Juanvulcano/zulip,he15his/zulip,jphilipsen05/zulip,jessedhillon/zulip,ryansnowboarder/zulip,zorojean/zulip,susansls/zulip,jerryge/zulip,EasonYi/zulip,levixie/zulip,bowlofstew/zulip,bitemyapp/zulip,brainwane/zulip,joyhchen/zulip,shubhamdhama/zulip,natanovia/zulip,johnnygaddarr/zulip,m1ssou/zulip,DazWorrall/zulip,glovebx/zulip,huangkebo/zulip,kou/zulip,avastu/zulip,firstblade/zulip,atomic-labs/zulip,huangkebo/zulip,mohsenSy/zulip,umkay/zulip,Vallher/zulip,brockwhittaker/zulip,krtkmj/zulip,m1ssou/zulip,arpitpanwar/zulip,mohsenSy/zulip,tiansiyuan/zulip,hj3938/zulip,shubhamdhama/zulip,paxapy/zulip,TigorC/zulip,udxxabp/zulip,Cheppers/zulip,aakash-cr7/zulip,Batterfii/zulip,PaulPetring/zulip,umkay/zulip,MariaFaBella85/zulip,dwrpayne/zulip,so0k/zulip,xuanhan863/zulip,johnny9/zulip,timabbott/zulip,zulip/zulip,xuanhan863/zulip,hengqujushi/zulip,hengqujushi/zulip,reyha/zulip,peguin40/zulip,wangdeshui/zulip,Suninus/zulip,wangdeshui/zulip,gkotian/zulip,tommyip/zulip,so0k/zulip,proliming/zulip,ufosky-server/zulip,alliejones/zulip,suxinde2009/zulip,guiquanz/zulip,willingc/zulip,bssrdf/zulip,zhaoweigg/zulip,ahmadassaf/zulip,technicalpickles/zulip,sharmaeklavya2/zulip,PhilSk/zulip,dnmfarrell/zulip,seapasulli/zulip,thomasboyt/zulip,stamhe/zulip,ahmadassaf/zulip,eastlhu/zulip,thomasboyt/zulip,littledogboy/zulip,sup95/zulip,esander91/zulip,zofuthan/zulip,shubhamdhama/zulip,tiansiyuan/zulip,ryansnowboarder/zulip,littledogboy/zulip,dnmfarrell/zulip,shrikrishnaholla/zulip,niftynei/zulip,zacps/zulip,MayB/zulip,SmartPeople/zulip,saitodisse/zulip,luyifan/zulip,schatt/zulip,tbutter/zulip,seapasulli/zulip,bluesea/zulip,luyifan/zulip,saitodisse/zulip,fw1121/zulip,seapasulli/zulip,akuseru/zulip,hj3938/zulip,stamhe/zulip,Qgap/zulip,KJin99/zulip,joyhchen/zulip,codeKonami/zulip,jeffcao/zulip,vakila/zulip,reyha/zulip,sharmaeklavya2/zulip,hustlzp/zulip,ApsOps/zulip,joshisa/zulip,Gabriel0402/zulip,easyfmxu/zulip,krtkmj/zulip,Frouk/zulip,alliejones/zulip,Diptanshu8/zulip,cosmicAsymmetry/zulip,tiansiyuan/zulip,arpith/zulip,kaiyuanheshang/zulip,armooo/zulip,codeKonami/zulip,niftynei/zulip,dnmfarrell/zulip,PaulPetring/zulip,brockwhittaker/zulip,voidException/zulip,itnihao/zulip,ufosky-server/zulip,Suninus/zulip,Cheppers/zulip,ryansnowboarder/zulip,so0k/zulip,Suninus/zulip,sharmaeklavya2/zulip,yuvipanda/zulip,gigawhitlocks/zulip,gigawhitlocks/zulip,zwily/zulip,tommyip/zulip,j831/zulip,kokoar/zulip,zhaoweigg/zulip,dattatreya303/zulip,Frouk/zulip,brainwane/zulip,ericzhou2008/zulip,gkotian/zulip,Galexrt/zulip,mahim97/zulip,KJin99/zulip,jonesgithub/z
ulip,PhilSk/zulip,reyha/zulip,dawran6/zulip,mansilladev/zulip,tbutter/zulip,zofuthan/zulip,andersk/zulip,itnihao/zulip,themass/zulip,sonali0901/zulip,synicalsyntax/zulip,alliejones/zulip,hustlzp/zulip,brainwane/zulip,moria/zulip,EasonYi/zulip,karamcnair/zulip,vaidap/zulip,udxxabp/zulip,Frouk/zulip,littledogboy/zulip,souravbadami/zulip,wavelets/zulip,ufosky-server/zulip,guiquanz/zulip,johnnygaddarr/zulip,huangkebo/zulip,amallia/zulip,dattatreya303/zulip,dhcrzf/zulip,codeKonami/zulip,Qgap/zulip,hayderimran7/zulip,jainayush975/zulip,avastu/zulip,saitodisse/zulip,itnihao/zulip,aps-sids/zulip,jphilipsen05/zulip,mdavid/zulip,karamcnair/zulip,yocome/zulip,JanzTam/zulip,souravbadami/zulip,sup95/zulip,susansls/zulip,j831/zulip,jeffcao/zulip,Galexrt/zulip,gkotian/zulip,so0k/zulip,samatdav/zulip,tiansiyuan/zulip,jeffcao/zulip,blaze225/zulip,bastianh/zulip,johnnygaddarr/zulip,itnihao/zulip,ahmadassaf/zulip,swinghu/zulip,moria/zulip,alliejones/zulip,vakila/zulip,KingxBanana/zulip,themass/zulip,jerryge/zulip,mdavid/zulip,easyfmxu/zulip,showell/zulip,jerryge/zulip,adnanh/zulip,jessedhillon/zulip,shubhamdhama/zulip,jeffcao/zulip,bssrdf/zulip,zacps/zulip,joshisa/zulip,jessedhillon/zulip,LeeRisk/zulip,zachallaun/zulip,LAndreas/zulip,samatdav/zulip,peguin40/zulip,LAndreas/zulip,blaze225/zulip,nicholasbs/zulip,KJin99/zulip,praveenaki/zulip,zorojean/zulip,vakila/zulip,glovebx/zulip,umkay/zulip,bastianh/zulip,LAndreas/zulip,dawran6/zulip,firstblade/zulip,ashwinirudrappa/zulip,firstblade/zulip,dxq-git/zulip,suxinde2009/zulip,Vallher/zulip,dxq-git/zulip,bitemyapp/zulip,EasonYi/zulip,dnmfarrell/zulip,DazWorrall/zulip,shaunstanislaus/zulip,qq1012803704/zulip,JPJPJPOPOP/zulip,synicalsyntax/zulip,themass/zulip,zhaoweigg/zulip,eastlhu/zulip,susansls/zulip,arpith/zulip,bastianh/zulip,dwrpayne/zulip,shrikrishnaholla/zulip,KingxBanana/zulip,lfranchi/zulip,aps-sids/zulip,babbage/zulip,he15his/zulip,krtkmj/zulip,mohsenSy/zulip,esander91/zulip,joshisa/zulip,LeeRisk/zulip,Qgap/zulip,tdr130/zulip,brainwane/zulip,levixie/zulip,christi3k/zulip,moria/zulip,ericzhou2008/zulip,aps-sids/zulip,niftynei/zulip,hustlzp/zulip,andersk/zulip,Diptanshu8/zulip,ikasumiwt/zulip,Drooids/zulip,xuanhan863/zulip,luyifan/zulip,adnanh/zulip,wavelets/zulip,johnny9/zulip,mahim97/zulip,schatt/zulip,TigorC/zulip,yuvipanda/zulip,schatt/zulip,zhaoweigg/zulip,ahmadassaf/zulip,timabbott/zulip,schatt/zulip,arpitpanwar/zulip,he15his/zulip,natanovia/zulip,niftynei/zulip,wavelets/zulip,dattatreya303/zulip,vabs22/zulip,Vallher/zulip,paxapy/zulip,tdr130/zulip,RobotCaleb/zulip,luyifan/zulip,arpith/zulip,jimmy54/zulip,hafeez3000/zulip,Diptanshu8/zulip,vabs22/zulip,vaidap/zulip,joyhchen/zulip,Juanvulcano/zulip,brainwane/zulip,JPJPJPOPOP/zulip,Vallher/zulip,zofuthan/zulip,Suninus/zulip,zwily/zulip,hj3938/zulip,thomasboyt/zulip,adnanh/zulip,babbage/zulip,proliming/zulip,blaze225/zulip,qq1012803704/zulip,jessedhillon/zulip,fw1121/zulip,AZtheAsian/zulip,aliceriot/zulip,fw1121/zulip,nicholasbs/zulip,ufosky-server/zulip,DazWorrall/zulip,PaulPetring/zulip,amanharitsh123/zulip,kou/zulip,synicalsyntax/zulip,yocome/zulip,sharmaeklavya2/zulip,peiwei/zulip,stamhe/zulip,pradiptad/zulip,natanovia/zulip,akuseru/zulip,guiquanz/zulip,hackerkid/zulip,joshisa/zulip,kokoar/zulip,kou/zulip,jessedhillon/zulip,wavelets/zulip,ufosky-server/zulip,zulip/zulip,ikasumiwt/zulip,so0k/zulip,krtkmj/zulip,pradiptad/zulip,seapasulli/zulip,avastu/zulip,grave-w-grave/zulip,brockwhittaker/zulip,arpitpanwar/zulip,christi3k/zulip,wavelets/zulip,jrowan/zulip,esander91/zulip,jphilipsen05/zulip,hafeez3000/z
ulip,swinghu/zulip,SmartPeople/zulip,atomic-labs/zulip,KJin99/zulip,developerfm/zulip,firstblade/zulip,wdaher/zulip,KingxBanana/zulip,hackerkid/zulip,wweiradio/zulip,jainayush975/zulip,suxinde2009/zulip,voidException/zulip,MariaFaBella85/zulip,vaidap/zulip,timabbott/zulip,aakash-cr7/zulip,hayderimran7/zulip,karamcnair/zulip,isht3/zulip,RobotCaleb/zulip,dxq-git/zulip,dhcrzf/zulip,armooo/zulip,xuxiao/zulip,hayderimran7/zulip,praveenaki/zulip,noroot/zulip,Diptanshu8/zulip,Batterfii/zulip,andersk/zulip,Frouk/zulip,lfranchi/zulip,JPJPJPOPOP/zulip,itnihao/zulip,so0k/zulip,firstblade/zulip,willingc/zulip,Drooids/zulip,arpitpanwar/zulip,yocome/zulip,mansilladev/zulip,peguin40/zulip,cosmicAsymmetry/zulip,saitodisse/zulip,ipernet/zulip,MayB/zulip,he15his/zulip,MayB/zulip,vakila/zulip,LeeRisk/zulip,he15his/zulip,zorojean/zulip,samatdav/zulip,mohsenSy/zulip,dotcool/zulip,atomic-labs/zulip,ipernet/zulip,jeffcao/zulip,rishig/zulip,showell/zulip,pradiptad/zulip,shaunstanislaus/zulip,calvinleenyc/zulip,ipernet/zulip,calvinleenyc/zulip,hayderimran7/zulip,bitemyapp/zulip,peguin40/zulip,Cheppers/zulip,avastu/zulip,themass/zulip,RobotCaleb/zulip,jimmy54/zulip,levixie/zulip,shaunstanislaus/zulip,wangdeshui/zulip,hustlzp/zulip,cosmicAsymmetry/zulip,akuseru/zulip,m1ssou/zulip,ufosky-server/zulip,mahim97/zulip,jackrzhang/zulip,ericzhou2008/zulip,easyfmxu/zulip,susansls/zulip,paxapy/zulip,LAndreas/zulip,aliceriot/zulip,punchagan/zulip,themass/zulip,voidException/zulip,wweiradio/zulip,hafeez3000/zulip,tdr130/zulip,bastianh/zulip,sonali0901/zulip,PaulPetring/zulip,voidException/zulip,jrowan/zulip,deer-hope/zulip,paxapy/zulip,xuxiao/zulip,synicalsyntax/zulip,nicholasbs/zulip,avastu/zulip,hayderimran7/zulip,vaidap/zulip,moria/zulip,amanharitsh123/zulip,ericzhou2008/zulip,deer-hope/zulip,proliming/zulip,amyliu345/zulip,EasonYi/zulip,hengqujushi/zulip,ryansnowboarder/zulip,bssrdf/zulip,hengqujushi/zulip,themass/zulip,amanharitsh123/zulip,rishig/zulip,dxq-git/zulip,udxxabp/zulip,hafeez3000/zulip,eeshangarg/zulip,aakash-cr7/zulip,bastianh/zulip,gigawhitlocks/zulip,codeKonami/zulip,jerryge/zulip,souravbadami/zulip,JanzTam/zulip,vabs22/zulip,hj3938/zulip,arpitpanwar/zulip,Juanvulcano/zulip,armooo/zulip,shrikrishnaholla/zulip,joyhchen/zulip,bluesea/zulip,amyliu345/zulip,proliming/zulip,tdr130/zulip,vabs22/zulip,lfranchi/zulip,zacps/zulip,aps-sids/zulip,arpith/zulip,paxapy/zulip,timabbott/zulip,jonesgithub/zulip,kou/zulip,DazWorrall/zulip,andersk/zulip,SmartPeople/zulip,LAndreas/zulip,isht3/zulip,swinghu/zulip,qq1012803704/zulip,luyifan/zulip,natanovia/zulip,armooo/zulip,wangdeshui/zulip,ashwinirudrappa/zulip,tbutter/zulip,m1ssou/zulip,willingc/zulip,noroot/zulip,bowlofstew/zulip,ipernet/zulip,dotcool/zulip,levixie/zulip,tommyip/zulip,willingc/zulip,noroot/zulip,shrikrishnaholla/zulip,levixie/zulip,TigorC/zulip,MariaFaBella85/zulip,johnnygaddarr/zulip,KingxBanana/zulip,ericzhou2008/zulip,hustlzp/zulip,jerryge/zulip,aakash-cr7/zulip,Drooids/zulip,jonesgithub/zulip,calvinleenyc/zulip,yuvipanda/zulip,souravbadami/zulip,tbutter/zulip,johnnygaddarr/zulip,bssrdf/zulip,jonesgithub/zulip,vikas-parashar/zulip,Frouk/zulip,brainwane/zulip,amallia/zulip,kou/zulip,ashwinirudrappa/zulip,proliming/zulip,atomic-labs/zulip,praveenaki/zulip,stamhe/zulip,bitemyapp/zulip,gkotian/zulip,Vallher/zulip,jimmy54/zulip,akuseru/zulip,yocome/zulip,avastu/zulip,jphilipsen05/zulip,armooo/zulip,so0k/zulip,rht/zulip,mahim97/zulip,karamcnair/zulip,proliming/zulip,brockwhittaker/zulip,eeshangarg/zulip,dhcrzf/zulip,firstblade/zulip,voidException/zulip,JanzTam/zu
lip,swinghu/zulip,bowlofstew/zulip,Jianchun1/zulip,grave-w-grave/zulip,Gabriel0402/zulip,aps-sids/zulip,j831/zulip,zachallaun/zulip,xuxiao/zulip,lfranchi/zulip,mansilladev/zulip,hafeez3000/zulip,natanovia/zulip,Drooids/zulip,xuxiao/zulip,eastlhu/zulip,huangkebo/zulip,eastlhu/zulip,jimmy54/zulip,bowlofstew/zulip,amallia/zulip,alliejones/zulip,susansls/zulip,shrikrishnaholla/zulip,ryanbackman/zulip,blaze225/zulip,zwily/zulip,Suninus/zulip,umkay/zulip,Juanvulcano/zulip,brockwhittaker/zulip,easyfmxu/zulip,grave-w-grave/zulip,jrowan/zulip,amallia/zulip,blaze225/zulip,thomasboyt/zulip,jrowan/zulip,atomic-labs/zulip,krtkmj/zulip,guiquanz/zulip,babbage/zulip,dattatreya303/zulip,jonesgithub/zulip,bowlofstew/zulip,jrowan/zulip,voidException/zulip,levixie/zulip,ikasumiwt/zulip,ashwinirudrappa/zulip,zachallaun/zulip,rishig/zulip,LAndreas/zulip,jessedhillon/zulip,showell/zulip,AZtheAsian/zulip,Qgap/zulip,zwily/zulip,xuanhan863/zulip,dotcool/zulip,arpitpanwar/zulip,yocome/zulip,m1ssou/zulip,joshisa/zulip,developerfm/zulip,niftynei/zulip,shrikrishnaholla/zulip,j831/zulip,m1ssou/zulip,akuseru/zulip,karamcnair/zulip,christi3k/zulip,ryansnowboarder/zulip,zachallaun/zulip,peiwei/zulip,Batterfii/zulip,peiwei/zulip,kaiyuanheshang/zulip,arpith/zulip,developerfm/zulip,showell/zulip,bssrdf/zulip,PhilSk/zulip,amallia/zulip,stamhe/zulip,kokoar/zulip,andersk/zulip,mdavid/zulip,LeeRisk/zulip,luyifan/zulip,developerfm/zulip,amanharitsh123/zulip,easyfmxu/zulip,dhcrzf/zulip,wangdeshui/zulip,eeshangarg/zulip,Batterfii/zulip,tommyip/zulip,tbutter/zulip,ipernet/zulip,johnny9/zulip,verma-varsha/zulip,hustlzp/zulip,praveenaki/zulip,jimmy54/zulip,ryanbackman/zulip,yuvipanda/zulip,glovebx/zulip,pradiptad/zulip,tbutter/zulip,hengqujushi/zulip,aliceriot/zulip,shaunstanislaus/zulip,willingc/zulip,shubhamdhama/zulip,Cheppers/zulip,thomasboyt/zulip,kokoar/zulip,brainwane/zulip,noroot/zulip,developerfm/zulip,verma-varsha/zulip,AZtheAsian/zulip,krtkmj/zulip,lfranchi/zulip,Batterfii/zulip,alliejones/zulip,Batterfii/zulip,amanharitsh123/zulip,tdr130/zulip,verma-varsha/zulip,souravbadami/zulip,adnanh/zulip,dxq-git/zulip,dotcool/zulip,susansls/zulip,dawran6/zulip,hj3938/zulip,jainayush975/zulip,vakila/zulip,wweiradio/zulip,aliceriot/zulip,rht/zulip,jackrzhang/zulip,EasonYi/zulip,verma-varsha/zulip,showell/zulip,DazWorrall/zulip,brockwhittaker/zulip,praveenaki/zulip,adnanh/zulip,PaulPetring/zulip,huangkebo/zulip,grave-w-grave/zulip,wdaher/zulip,developerfm/zulip,Suninus/zulip,zhaoweigg/zulip,ashwinirudrappa/zulip,jackrzhang/zulip,atomic-labs/zulip,stamhe/zulip,wweiradio/zulip,grave-w-grave/zulip,dattatreya303/zulip,vikas-parashar/zulip,swinghu/zulip,deer-hope/zulip,ikasumiwt/zulip,schatt/zulip,timabbott/zulip,Jianchun1/zulip,hafeez3000/zulip,gkotian/zulip,KJin99/zulip,ryanbackman/zulip,ipernet/zulip,yuvipanda/zulip,tommyip/zulip,souravbadami/zulip,guiquanz/zulip,samatdav/zulip,aliceriot/zulip,Frouk/zulip,sharmaeklavya2/zulip,zwily/zulip,jeffcao/zulip,DazWorrall/zulip,TigorC/zulip,glovebx/zulip,eastlhu/zulip,hackerkid/zulip,seapasulli/zulip,peiwei/zulip,zofuthan/zulip,umkay/zulip,zachallaun/zulip,ericzhou2008/zulip,Qgap/zulip,saitodisse/zulip,dotcool/zulip,tiansiyuan/zulip,zachallaun/zulip,timabbott/zulip,moria/zulip,jonesgithub/zulip,ApsOps/zulip,thomasboyt/zulip,fw1121/zulip,Qgap/zulip,AZtheAsian/zulip,mahim97/zulip,peiwei/zulip,vabs22/zulip,Drooids/zulip,bowlofstew/zulip,mansilladev/zulip,atomic-labs/zulip,KJin99/zulip,umkay/zulip,hj3938/zulip,bastianh/zulip,littledogboy/zulip,kou/zulip,MariaFaBella85/zulip,codeKonami/zulip,kokoar/zulip,
noroot/zulip,kou/zulip,mansilladev/zulip,karamcnair/zulip,amyliu345/zulip,hafeez3000/zulip,Jianchun1/zulip,zachallaun/zulip,deer-hope/zulip,jphilipsen05/zulip,technicalpickles/zulip,dawran6/zulip,babbage/zulip,shrikrishnaholla/zulip,hackerkid/zulip,fw1121/zulip,verma-varsha/zulip,joshisa/zulip,eeshangarg/zulip,ryanbackman/zulip,kaiyuanheshang/zulip,jonesgithub/zulip,shaunstanislaus/zulip,dotcool/zulip,kokoar/zulip,glovebx/zulip,noroot/zulip,ericzhou2008/zulip,MayB/zulip,ryanbackman/zulip,andersk/zulip,ikasumiwt/zulip,sup95/zulip,mdavid/zulip,hengqujushi/zulip,andersk/zulip,Jianchun1/zulip,tdr130/zulip,natanovia/zulip,peiwei/zulip,jessedhillon/zulip,swinghu/zulip,levixie/zulip,gigawhitlocks/zulip,aakash-cr7/zulip,deer-hope/zulip,wdaher/zulip,mdavid/zulip,Cheppers/zulip,zulip/zulip,zofuthan/zulip,punchagan/zulip,wweiradio/zulip,wangdeshui/zulip,voidException/zulip,Galexrt/zulip,punchagan/zulip,dawran6/zulip,babbage/zulip,mahim97/zulip,tdr130/zulip,hackerkid/zulip,nicholasbs/zulip,suxinde2009/zulip,JanzTam/zulip,Cheppers/zulip,qq1012803704/zulip,dotcool/zulip,praveenaki/zulip,technicalpickles/zulip,Diptanshu8/zulip,Frouk/zulip,esander91/zulip,MayB/zulip,easyfmxu/zulip,guiquanz/zulip,codeKonami/zulip,lfranchi/zulip,easyfmxu/zulip,vikas-parashar/zulip,PaulPetring/zulip,amallia/zulip,grave-w-grave/zulip,sup95/zulip,saitodisse/zulip,noroot/zulip,Juanvulcano/zulip,ryanbackman/zulip,stamhe/zulip,ahmadassaf/zulip,vikas-parashar/zulip,MariaFaBella85/zulip,kokoar/zulip,wdaher/zulip,sonali0901/zulip,akuseru/zulip,technicalpickles/zulip,wweiradio/zulip,isht3/zulip,gkotian/zulip,jackrzhang/zulip,dwrpayne/zulip,yocome/zulip,johnny9/zulip,dhcrzf/zulip,aliceriot/zulip,dawran6/zulip,moria/zulip,isht3/zulip,samatdav/zulip,firstblade/zulip,itnihao/zulip,punchagan/zulip,jrowan/zulip,shubhamdhama/zulip,jimmy54/zulip,TigorC/zulip,schatt/zulip,Vallher/zulip,DazWorrall/zulip,j831/zulip,mansilladev/zulip,zofuthan/zulip,suxinde2009/zulip,dxq-git/zulip,KingxBanana/zulip,ashwinirudrappa/zulip,nicholasbs/zulip,bitemyapp/zulip,TigorC/zulip,jackrzhang/zulip,glovebx/zulip,m1ssou/zulip,SmartPeople/zulip,Galexrt/zulip,Galexrt/zulip,shaunstanislaus/zulip,synicalsyntax/zulip,zacps/zulip
|
Add a management command to bulk turn off digests.
(imported from commit 0ffb565ecc9be219807ae9a45abb7b0e3e940204)
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_enable_digest_emails
from zerver.models import Realm, UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Turn off digests for a domain or specified set of email addresses."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='Turn off digests for all users in this domain.'),
make_option('-u', '--users',
dest='users',
type='str',
help='Turn off digests for this comma-separated list of email addresses.'),
)
def handle(self, **options):
if options["domain"] is None and options["users"] is None:
self.print_help("python manage.py", "turn_off_digests")
exit(1)
if options["domain"]:
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
else:
emails = set([email.strip() for email in options["users"].split(",")])
user_profiles = []
for email in emails:
user_profiles.append(get_user_profile_by_email(email))
print "Turned off digest emails for:"
for user_profile in user_profiles:
already_disabled_prefix = ""
if user_profile.enable_digest_emails:
do_change_enable_digest_emails(user_profile, False)
else:
already_disabled_prefix = "(already off) "
print "%s%s <%s>" % (already_disabled_prefix, user_profile.full_name,
user_profile.email)
|
<commit_before><commit_msg>Add a management command to bulk turn off digests.
(imported from commit 0ffb565ecc9be219807ae9a45abb7b0e3e940204)<commit_after>
|
from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_enable_digest_emails
from zerver.models import Realm, UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Turn off digests for a domain or specified set of email addresses."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='Turn off digests for all users in this domain.'),
make_option('-u', '--users',
dest='users',
type='str',
help='Turn off digests for this comma-separated list of email addresses.'),
)
def handle(self, **options):
if options["domain"] is None and options["users"] is None:
self.print_help("python manage.py", "turn_off_digests")
exit(1)
if options["domain"]:
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
else:
emails = set([email.strip() for email in options["users"].split(",")])
user_profiles = []
for email in emails:
user_profiles.append(get_user_profile_by_email(email))
print "Turned off digest emails for:"
for user_profile in user_profiles:
already_disabled_prefix = ""
if user_profile.enable_digest_emails:
do_change_enable_digest_emails(user_profile, False)
else:
already_disabled_prefix = "(already off) "
print "%s%s <%s>" % (already_disabled_prefix, user_profile.full_name,
user_profile.email)
|
Add a management command to bulk turn off digests.
(imported from commit 0ffb565ecc9be219807ae9a45abb7b0e3e940204)from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_enable_digest_emails
from zerver.models import Realm, UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Turn off digests for a domain or specified set of email addresses."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='Turn off digests for all users in this domain.'),
make_option('-u', '--users',
dest='users',
type='str',
help='Turn off digests for this comma-separated list of email addresses.'),
)
def handle(self, **options):
if options["domain"] is None and options["users"] is None:
self.print_help("python manage.py", "turn_off_digests")
exit(1)
if options["domain"]:
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
else:
emails = set([email.strip() for email in options["users"].split(",")])
user_profiles = []
for email in emails:
user_profiles.append(get_user_profile_by_email(email))
print "Turned off digest emails for:"
for user_profile in user_profiles:
already_disabled_prefix = ""
if user_profile.enable_digest_emails:
do_change_enable_digest_emails(user_profile, False)
else:
already_disabled_prefix = "(already off) "
print "%s%s <%s>" % (already_disabled_prefix, user_profile.full_name,
user_profile.email)
|
<commit_before><commit_msg>Add a management command to bulk turn off digests.
(imported from commit 0ffb565ecc9be219807ae9a45abb7b0e3e940204)<commit_after>from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_enable_digest_emails
from zerver.models import Realm, UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Turn off digests for a domain or specified set of email addresses."""
option_list = BaseCommand.option_list + (
make_option('-d', '--domain',
dest='domain',
type='str',
help='Turn off digests for all users in this domain.'),
make_option('-u', '--users',
dest='users',
type='str',
help='Turn off digests for this comma-separated list of email addresses.'),
)
def handle(self, **options):
if options["domain"] is None and options["users"] is None:
self.print_help("python manage.py", "turn_off_digests")
exit(1)
if options["domain"]:
realm = Realm.objects.get(domain=options["domain"])
user_profiles = UserProfile.objects.filter(realm=realm)
else:
emails = set([email.strip() for email in options["users"].split(",")])
user_profiles = []
for email in emails:
user_profiles.append(get_user_profile_by_email(email))
print "Turned off digest emails for:"
for user_profile in user_profiles:
already_disabled_prefix = ""
if user_profile.enable_digest_emails:
do_change_enable_digest_emails(user_profile, False)
else:
already_disabled_prefix = "(already off) "
print "%s%s <%s>" % (already_disabled_prefix, user_profile.full_name,
user_profile.email)
|