Dataset columns, with the observed minimum and maximum string length for each:

| Column | Type | Min length | Max length |
|---|---|---|---|
| commit | string | 40 | 40 |
| old_file | string | 4 | 118 |
| new_file | string | 4 | 118 |
| old_contents | string | 0 | 2.94k |
| new_contents | string | 1 | 4.43k |
| subject | string | 15 | 444 |
| message | string | 16 | 3.45k |
| lang | string (1 class: Python) | | |
| license | string (13 classes) | | |
| repos | string | 5 | 43.2k |
| prompt | string | 17 | 4.58k |
| response | string | 1 | 4.43k |
| prompt_tagged | string | 58 | 4.62k |
| response_tagged | string | 1 | 4.43k |
| text | string | 132 | 7.29k |
| text_tagged | string | 173 | 7.33k |
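In the rows previewed below, `prompt` mirrors `message` and `response` / `response_tagged` mirror `new_contents`; `prompt_tagged` wraps the message in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers, while `text` and `text_tagged` concatenate the plain or tagged prompt with the new contents. As a quick orientation, here is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library; the identifier `your-org/commit-dataset` is a placeholder, not the real dataset path.

```python
from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path or local data files.
ds = load_dataset("your-org/commit-dataset", split="train")

# Each record describes one commit; the *_contents fields hold full file bodies.
row = ds[0]
print(row["commit"], row["new_file"])   # commit hash and the file it touches
print(row["subject"])                   # one-line commit subject
print(row["new_contents"][:200])        # first 200 characters of the new file
```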
| Field | Value |
|---|---|
| commit | `5e3f3b83974c4826cddcfdb73f2d4eb4abe2aca1` |
| old_file / new_file | `examples/test_download_files.py` |
| subject | Add test for asserting downloaded files |
| message | Add test for asserting downloaded files |
| lang | Python |
| license | mit |
| repos | seleniumbase/SeleniumBase, mdmintz/SeleniumBase |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Add test for asserting downloaded files<commit_after>` |

`new_contents` / `response`:

```python
from seleniumbase import BaseCase


class DownloadTests(BaseCase):
    def test_download_files(self):
        self.open("https://pypi.org/project/seleniumbase/#files")
        pkg_header = self.get_text("h1.package-header__name")
        pkg_name = pkg_header.replace(" ", "-")
        whl_file = pkg_name + "-py2.py3-none-any.whl"
        self.click('div#files a[href$="%s"]' % whl_file)
        self.assert_downloaded_file(whl_file)
        tar_gz_file = pkg_name + ".tar.gz"
        self.click('div#files a[href$="%s"]' % tar_gz_file)
        self.assert_downloaded_file(tar_gz_file)
```
| Field | Value |
|---|---|
| commit | `724e8303a80f17c83128b5876dbb3d95c106805c` |
| old_file / new_file | `segments/npm_version.py` |
| subject | Add segment for npm version |
| message | Add segment for npm version |
| lang | Python |
| license | mit |
| repos | tswsl1989/powerline-shell, bitIO/powerline-shell, milkbikis/powerline-shell, banga/powerline-shell, b-ryan/powerline-shell |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Add segment for npm version<commit_after>` |

`new_contents` / `response`:

```python
import subprocess


def add_npm_version_segment(powerline):
    try:
        p1 = subprocess.Popen(["npm", "--version"], stdout=subprocess.PIPE)
        version = p1.communicate()[0].decode("utf-8").rstrip()
        version = "npm " + version
        powerline.append(version, 15, 18)
    except OSError:
        return
```
| Field | Value |
|---|---|
| commit | `9b42c3553b6b55125d63c902de6f9a92bc7c1fc0` |
| old_file / new_file | `openprescribing/frontend/migrations/0007_auto_20160908_0811.py` |
| subject | Add file found on server |
| message | Add file found on server |
| lang | Python |
| license | mit |
| repos | ebmdatalab/openprescribing, annapowellsmith/openpresc |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Add file found on server<commit_after>` |

`new_contents` / `response`:

```python
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-09-08 07:14
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('frontend', '0006_importlog_populate'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='importlog',
            options={'ordering': ['-current_at']},
        ),
        migrations.AlterField(
            model_name='importlog',
            name='category',
            field=models.CharField(db_index=True, max_length=15),
        ),
    ]
```
| Field | Value |
|---|---|
| commit | `1c293752b03b74105ad48d4d9be0a38bec55ca9a` |
| old_file / new_file | `flexx/ui/examples/code_editor.py` |
| subject | Add example for code editor widget based on codemirror |
| message | Add example for code editor widget based on codemirror |
| lang | Python |
| license | bsd-2-clause |
| repos | jrversteegh/flexx, JohnLunzer/flexx, zoofIO/flexx |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Add example for code editor widget based on codemirror<commit_after>` |

`new_contents` / `response`:

```python
# doc-export: CodeEditor
"""
This example demonstrates a code editor widget based on CodeMirror.
"""
# todo: Maybe this should be a widget in the library (flexx.ui.CodeMirror) ?

from flexx import ui, app, event
from flexx.pyscript.stubs import window

# Associate CodeMirror's assets with this module so that Flexx will load
# them when (things from) this module is used.
base_url = 'https://cdnjs.cloudflare.com/ajax/libs/codemirror/'
app.assets.associate_asset(__name__, base_url + '5.21.0/codemirror.min.css')
app.assets.associate_asset(__name__, base_url + '5.21.0/codemirror.min.js')


class CodeEditor(ui.Widget):
    """ A CodeEditor widget based on CodeMirror.
    """

    CSS = """
    .flx-CodeEditor > .CodeMirror {
        width: 100%;
        height: 100%;
    }
    """

    class JS:

        def init(self):
            # https://codemirror.net/doc/manual.html
            options = dict(value='type code here ...',
                           mode='python',
                           theme='default',
                           indentUnit=4,
                           smartIndent=True,
                           lineWrapping=True,
                           lineNumbers=True,
                           firstLineNumber=1,
                           readOnly=False,
                           )
            self.cm = window.CodeMirror(self.node, options)

        @event.connect('size')
        def __on_size(self, *events):
            self.cm.refresh()


if __name__ == '__main__':
    app.launch(CodeEditor, 'xul')
    app.run()
```
| Field | Value |
|---|---|
| commit | `051e2ff67a87fbbf7229b8f04b70742d441b75fd` |
| old_file / new_file | `examples/freesolv/gather_data_for_analysis.py` |
| subject | Add beginning of simple script to gather data on cluster |
| message | Add beginning of simple script to gather data on cluster |
| lang | Python |
| license | mit |
| repos | choderalab/perses |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Add beginning of simple script to gather data on cluster<commit_after>` |

`new_contents` / `response`:

```python
import numpy as np
import yaml
from perses.analysis import Analysis
import glob


def collect_file_conditions(experiment_directory):
    """
    Collect the experiment files for each condition of phase, ncmc steps, sterics, and geometry intervals.
    This assumes there is one output for each experimental condition.

    Parameters
    ----------
    experiment_directory : str
        The path to where the experiments were conducted

    Returns
    -------
    condition_files : dict of tuple: str
        The filename for each condition
    """
    condition_files = {}
    yaml_filenames = glob.glob(experiment_directory)
    for filename in yaml_filenames:
        with open(filename, 'r') as yamlfile:
            experiment_options = yaml.load(yamlfile)
        phase = "explicit" if experiment_options['phase'] == "solvent" else "vacuum"
        data_filename = experiment_options['output_filename']
        ncmc_length = experiment_options['ncmc_switching_times'][phase]
        sterics = experiment_options['use_sterics'][phase]
        geometry_intervals = experiment_options['geometry_divisions'][phase]
        condition_files[(phase, ncmc_length, sterics, geometry_intervals)] = data_filename
    return condition_files


def collect_logP_accept(condition_files):
    """
    Given a set of files specifying conditions, extract the logP_accept of each and store in a data structure

    Parameters
    ----------
    condition_files : dict of tuple: str
        Should have the format (phase, ncmc_length, sterics, geometry_intervals) : filename

    Returns
    -------
    logP_accept_conditions: dict of tuple: np.array
        the logP_accept (minus sams weights) for each set of conditions
    """
    logP_accept_conditions = {}
    for condition, filename in condition_files.items():
        try:
            analyzer = Analysis(filename)
            logP_without_sams = analyzer.extract_logP_values(condition[0], "logP_accept", subtract_sams=True)
            logPs_flat_list = []
            for value in logP_without_sams.values():
                logPs_flat_list.extend(value)
            logP_array = np.array(logPs_flat_list)
            logP_accept_conditions[condition] = logP_array
        except Exception as e:
            print(str(e))
            print("Unable to process {}".format(filename))
            continue
    return logP_accept_conditions
```
| Field | Value |
|---|---|
| commit | `16d690031e3a95f636e1730ba59f8c91c0019f97` |
| old_file / new_file | `scripts/check_yaml_cde_calculation.py` |
| subject | Add a script for checking CDE calculations in rdrd repo |
| message | scripts: Add a script for checking CDE calculations in rdrd repo |
| lang | Python |
| license | agpl-3.0 |
| repos | muccg/rdrf |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>scripts: Add a script for checking CDE calculations in rdrd repo<commit_after>` |

`new_contents` / `response`:

```python
#!/usr/bin/env python
"""
Validates CDE calculation javascript in registry YAML files.
"""
from __future__ import print_function
import io
import sys
import yaml
from rdrf.utils import check_calculation

yaml.add_constructor(u'tag:yaml.org,2002:str',
                     yaml.constructor.Constructor.construct_python_unicode)


def main():
    for infile in sys.argv[1:]:
        check_file(infile)


def check_file(filename):
    data = yaml.load(io.open(filename, errors="replace"))
    for cde in data.get("cdes") or []:
        calc = cde.get("calculation")
        if calc:
            result = check_calculation(calc)
            for error in filter(None, result.strip().split("\n")):
                print("%s %s: '%s'" % (filename, cde.get("code", ""), error))


if __name__ == '__main__':
    main()
```
| Field | Value |
|---|---|
| commit | `25f0375683064d39fc460da6f42109a8b6b2e60c` |
| old_file / new_file | `migrations/versions/690_add_brief_length_to_published_briefs.py` |
| subject | Create migration for specialist requirementLengths |
| message | Create migration for specialist requirementLengths. Due to the new functionality for specialist briefs to be live for one or two weeks, it's necessary to migrate existing published briefs to have a requirementsLength of '2 weeks' so they don't fail validation. |
| lang | Python |
| license | mit |
| repos | alphagov/digitalmarketplace-api |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Create migration for specialist requirementLengths Due to the new functionality for specialist briefs to be live for one or two weeks, it's necessary to migrate existing published briefs to have a requirementsLength of '2 weeks' so they don't fail validation.<commit_after>` |

`new_contents` / `response`:

```python
"""Give published specialist briefs a requirementsLength of '2 weeks'

Revision ID: 690
Revises: 680
Create Date: 2016-07-28 12:30:11.406853

"""

# revision identifiers, used by Alembic.
revision = '690'
down_revision = '680'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql


briefs = table(
    'briefs',
    column('id', sa.Integer),
    column('published_at', sa.DateTime),
    column('data', postgresql.JSON),
    column('lot_id', sa.Integer)
)


def upgrade():
    conn = op.get_bind()
    for brief in conn.execute(briefs.select()):
        # skip briefs that are unpublished (&) not a specialist brief (&) have requirements length set
        if not brief.publishedAt or brief.lot_id != 6 or brief.data.get('requirementsLength') != None:
            continue
        brief.data['requirementsLength'] = '2 weeks'
        conn.execute(
            briefs.update().where(
                briefs.c.id == brief.id
            ).values(
                data=brief.data
            )
        )


def downgrade():
    pass
```
| Field | Value |
|---|---|
| commit | `a0bdba19b6f22363bed532a7872a1128679fafe6` |
| old_file / new_file | `scripts/remove_notification_subscriptions_from_registrations.py` |
| subject | Add script to remove NotificationSubscriptions on Registrations. |
| message | Add script to remove NotificationSubscriptions on Registrations. |
| lang | Python |
| license | apache-2.0 |
| repos | felliott/osf.io, brianjgeiger/osf.io, baylee-d/osf.io, aaxelb/osf.io, CenterForOpenScience/osf.io, Johnetordoff/osf.io, mattclark/osf.io, adlius/osf.io, mfraezz/osf.io, cslzchen/osf.io, saradbowman/osf.io |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Add script to remove NotificationSubscriptions on Registrations.<commit_after>` |

`new_contents` / `response`:

```python
""" Script for removing NotificationSubscriptions from registrations.

Registrations shouldn't have them!
"""
import logging
import sys

import django
django.setup()

from website.app import init_app
from django.apps import apps

logger = logging.getLogger(__name__)


def remove_notification_subscriptions_from_registrations(dry_run=True):
    Registration = apps.get_model('osf.Registration')
    NotificationSubscription = apps.get_model('osf.NotificationSubscription')

    notifications_to_delete = NotificationSubscription.objects.filter(node__type='osf.registration')
    registrations_affected = Registration.objects.filter(
        id__in=notifications_to_delete.values_list(
            'node_id', flat=True
        )
    )
    logger.info('{} NotificationSubscriptions will be deleted.'.format(notifications_to_delete.count()))
    logger.info('Registrations whose NodeSubscriptions are being deleted: {}'.format(registrations_affected.values_list('guids___id', flat=True)))

    if not dry_run:
        notifications_to_delete.delete()
        logger.info('Registration Notification Subscriptions removed.')


if __name__ == '__main__':
    dry_run = '--dry' in sys.argv
    init_app(routes=False)
    remove_notification_subscriptions_from_registrations(dry_run=dry_run)
```
| Field | Value |
|---|---|
| commit | `89423bdccbf5fbfc8a645e0066e1082de64a8eac` |
| old_file / new_file | `features/environment.py` |
| subject | Backup and restore config and journal files every time |
| message | Backup and restore config and journal files every time |
| lang | Python |
| license | mit |
| repos | cloudrave/jrnl-todos, flight16/jrnl, MinchinWeb/jrnl, philipsd6/jrnl, maebert/jrnl, notbalanced/jrnl, Shir0kamii/jrnl, MSylvia/jrnl, zdravi/jrnl, nikvdp/jrnl, dzeban/jrnl, beni55/jrnl, rzyns/jrnl |
| old_contents | (empty) |
| prompt_tagged | `<commit_before><commit_msg>Backup and restore config and journal files every time<commit_after>` |

`new_contents` / `response`:

```python
from behave import *
import shutil
import os


def before_scenario(context, scenario):
    """Before each scenario, backup all config and journal test data."""
    for folder in ("configs", "journals"):
        original = os.path.join("features", folder)
        backup = os.path.join("features", folder + "_backup")
        if not os.path.exists(backup):
            os.mkdir(backup)
        for filename in os.listdir(original):
            shutil.copy2(os.path.join(original, filename), backup)


def after_scenario(context, scenario):
    """After each scenario, restore all test data and remove backups."""
    for folder in ("configs", "journals"):
        original = os.path.join("features", folder)
        backup = os.path.join("features", folder + "_backup")
        for filename in os.listdir(backup):
            shutil.copy2(os.path.join(backup, filename), original)
        shutil.rmtree(backup)
```
f0b009494ba743272e555c87bc9d1ea99377371f
|
sra_status.py
|
sra_status.py
|
"""
Report the status of an SRA run
"""
import os
import sys
import argparse
import requests
import json
def get_status(runids, verbose, url='https://www.ncbi.nlm.nih.gov/Traces/sra/status/srastatrep.fcgi/acc-mirroring?acc='):
"""
Get the status of the run
:param runid: the set of run ids to get
:param verbose: more output
:param url: the base url to append the status to
:return:
"""
req = url + ",".join(runids)
if args.v:
sys.stderr.write("Getting {}\n".format(req))
r = requests.get(req)
if args.v:
sys.stderr.write("Status: {}\n".format(r.status_code))
d = json.loads(r.text)
return d
def print_full(runids, data, verbose):
"""
Print the full output
:param runids: the set of run ids to check
:param data: the json object
:param verbose: more output
:return:
"""
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
for i, j in zip(data['column_names'], r):
print("{}\t{}".format(i, j))
def print_status(runids, data, verbose):
"""
Print the status of the run
:param runids: the set of run ids to check
:param data:
:param verbose:
:return:
"""
s = data['column_names'].index('Status')
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
print("{}\t{}".format(r[0], r[s]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Return the status of an SRA run")
parser.add_argument('-r', help='SRA Run ID', required=True, action='append')
parser.add_argument('-f', help="full output", action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
allruns = set()
for r in args.r:
allruns.update(r.split(','))
data = get_status(allruns, args.v)
if args.f:
print_full(allruns, data, args.v)
else:
print_status(allruns, data, args.v)
|
Check the status of a read
|
Check the status of a read
|
Python
|
mit
|
linsalrob/partie,linsalrob/partie,linsalrob/partie,linsalrob/partie
|
Check the status of a read
|
"""
Report the status of an SRA run
"""
import os
import sys
import argparse
import requests
import json
def get_status(runids, verbose, url='https://www.ncbi.nlm.nih.gov/Traces/sra/status/srastatrep.fcgi/acc-mirroring?acc='):
"""
Get the status of the run
:param runid: the set of run ids to get
:param verbose: more output
:param url: the base url to append the status to
:return:
"""
req = url + ",".join(runids)
if args.v:
sys.stderr.write("Getting {}\n".format(req))
r = requests.get(req)
if args.v:
sys.stderr.write("Status: {}\n".format(r.status_code))
d = json.loads(r.text)
return d
def print_full(runids, data, verbose):
"""
Print the full output
:param runids: the set of run ids to check
:param data: the json object
:param verbose: more output
:return:
"""
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
for i, j in zip(data['column_names'], r):
print("{}\t{}".format(i, j))
def print_status(runids, data, verbose):
"""
Print the status of the run
:param runids: the set of run ids to check
:param data:
:param verbose:
:return:
"""
s = data['column_names'].index('Status')
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
print("{}\t{}".format(r[0], r[s]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Return the status of an SRA run")
parser.add_argument('-r', help='SRA Run ID', required=True, action='append')
parser.add_argument('-f', help="full output", action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
allruns = set()
for r in args.r:
allruns.update(r.split(','))
data = get_status(allruns, args.v)
if args.f:
print_full(allruns, data, args.v)
else:
print_status(allruns, data, args.v)
|
<commit_before><commit_msg>Check the status of a read<commit_after>
|
"""
Report the status of an SRA run
"""
import os
import sys
import argparse
import requests
import json
def get_status(runids, verbose, url='https://www.ncbi.nlm.nih.gov/Traces/sra/status/srastatrep.fcgi/acc-mirroring?acc='):
"""
Get the status of the run
:param runid: the set of run ids to get
:param verbose: more output
:param url: the base url to append the status to
:return:
"""
req = url + ",".join(runids)
if args.v:
sys.stderr.write("Getting {}\n".format(req))
r = requests.get(req)
if args.v:
sys.stderr.write("Status: {}\n".format(r.status_code))
d = json.loads(r.text)
return d
def print_full(runids, data, verbose):
"""
Print the full output
:param runids: the set of run ids to check
:param data: the json object
:param verbose: more output
:return:
"""
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
for i, j in zip(data['column_names'], r):
print("{}\t{}".format(i, j))
def print_status(runids, data, verbose):
"""
Print the status of the run
:param runids: the set of run ids to check
:param data:
:param verbose:
:return:
"""
s = data['column_names'].index('Status')
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
print("{}\t{}".format(r[0], r[s]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Return the status of an SRA run")
parser.add_argument('-r', help='SRA Run ID', required=True, action='append')
parser.add_argument('-f', help="full output", action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
allruns = set()
for r in args.r:
allruns.update(r.split(','))
data = get_status(allruns, args.v)
if args.f:
print_full(allruns, data, args.v)
else:
print_status(allruns, data, args.v)
|
Check the status of a read"""
Report the status of an SRA run
"""
import os
import sys
import argparse
import requests
import json
def get_status(runids, verbose, url='https://www.ncbi.nlm.nih.gov/Traces/sra/status/srastatrep.fcgi/acc-mirroring?acc='):
"""
Get the status of the run
:param runid: the set of run ids to get
:param verbose: more output
:param url: the base url to append the status to
:return:
"""
req = url + ",".join(runids)
if args.v:
sys.stderr.write("Getting {}\n".format(req))
r = requests.get(req)
if args.v:
sys.stderr.write("Status: {}\n".format(r.status_code))
d = json.loads(r.text)
return d
def print_full(runids, data, verbose):
"""
Print the full output
:param runids: the set of run ids to check
:param data: the json object
:param verbose: more output
:return:
"""
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
for i, j in zip(data['column_names'], r):
print("{}\t{}".format(i, j))
def print_status(runids, data, verbose):
"""
Print the status of the run
:param runids: the set of run ids to check
:param data:
:param verbose:
:return:
"""
s = data['column_names'].index('Status')
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
print("{}\t{}".format(r[0], r[s]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Return the status of an SRA run")
parser.add_argument('-r', help='SRA Run ID', required=True, action='append')
parser.add_argument('-f', help="full output", action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
allruns = set()
for r in args.r:
allruns.update(r.split(','))
data = get_status(allruns, args.v)
if args.f:
print_full(allruns, data, args.v)
else:
print_status(allruns, data, args.v)
|
<commit_before><commit_msg>Check the status of a read<commit_after>"""
Report the status of an SRA run
"""
import os
import sys
import argparse
import requests
import json
def get_status(runids, verbose, url='https://www.ncbi.nlm.nih.gov/Traces/sra/status/srastatrep.fcgi/acc-mirroring?acc='):
"""
Get the status of the run
:param runid: the set of run ids to get
:param verbose: more output
:param url: the base url to append the status to
:return:
"""
req = url + ",".join(runids)
    if verbose:
sys.stderr.write("Getting {}\n".format(req))
r = requests.get(req)
    if verbose:
sys.stderr.write("Status: {}\n".format(r.status_code))
d = json.loads(r.text)
return d
def print_full(runids, data, verbose):
"""
Print the full output
:param runids: the set of run ids to check
:param data: the json object
:param verbose: more output
:return:
"""
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
for i, j in zip(data['column_names'], r):
print("{}\t{}".format(i, j))
def print_status(runids, data, verbose):
"""
Print the status of the run
:param runids: the set of run ids to check
:param data:
:param verbose:
:return:
"""
s = data['column_names'].index('Status')
for r in data['rows']:
if r[0] not in runids:
sys.stderr.write("Expected an accession of {} but found {}\n".format(runid))
print("{}\t{}".format(r[0], r[s]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Return the status of an SRA run")
parser.add_argument('-r', help='SRA Run ID', required=True, action='append')
parser.add_argument('-f', help="full output", action='store_true')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
allruns = set()
for r in args.r:
allruns.update(r.split(','))
data = get_status(allruns, args.v)
if args.f:
print_full(allruns, data, args.v)
else:
print_status(allruns, data, args.v)
|
|
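Editor's sketch (not part of the record above): the script queries NCBI's srastatrep.fcgi endpoint and reads the 'column_names'/'rows' JSON it returns. The snippet below exercises the same endpoint directly; the accession ID is a placeholder and the response layout is assumed to match what the script expects.
import requests
SRA_STATUS_URL = ('https://www.ncbi.nlm.nih.gov/Traces/sra/status/'
                  'srastatrep.fcgi/acc-mirroring?acc=')
def fetch_status(run_ids):
    # Accessions are joined into one comma-separated query, as get_status() does above.
    response = requests.get(SRA_STATUS_URL + ",".join(run_ids))
    response.raise_for_status()
    return response.json()
if __name__ == '__main__':
    data = fetch_status(['SRR000001'])  # placeholder accession
    status_index = data['column_names'].index('Status')
    for row in data['rows']:
        print("{}\t{}".format(row[0], row[status_index]))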
69b3d3619ce08940277c811cb9e4c24a137831da
|
st2actions/tests/unit/test_action_runner_worker.py
|
st2actions/tests/unit/test_action_runner_worker.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from mock import Mock
from st2common.transport.consumers import ActionsQueueConsumer
from st2common.models.db.liveaction import LiveActionDB
from st2tests import config as test_config
test_config.parse_args()
__all__ = [
'ActionsQueueConsumerTestCase'
]
class ActionsQueueConsumerTestCase(TestCase):
def test_process_right_dispatcher_is_used(self):
handler = Mock()
handler.message_type = LiveActionDB
consumer = ActionsQueueConsumer(connection=None, queues=None, handler=handler)
# Non-workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=False)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 0)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 1)
# Workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=True)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 1)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 0)
|
Add test for ActionsQueueConsumer class which verifies that the right BufferedDispatcher class is used.
|
Add test for ActionsQueueConsumer class which verifies that the right
BufferedDispatcher class is used.
|
Python
|
apache-2.0
|
emedvedev/st2,nzlosh/st2,peak6/st2,punalpatel/st2,peak6/st2,Plexxi/st2,punalpatel/st2,Plexxi/st2,StackStorm/st2,nzlosh/st2,peak6/st2,punalpatel/st2,pixelrebel/st2,nzlosh/st2,tonybaloney/st2,tonybaloney/st2,Plexxi/st2,StackStorm/st2,pixelrebel/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,lakshmi-kannan/st2,pixelrebel/st2,emedvedev/st2,lakshmi-kannan/st2,emedvedev/st2,Plexxi/st2,tonybaloney/st2,lakshmi-kannan/st2
|
Add test for ActionsQueueConsumer class which verifies that the right
BufferedDispatcher class is used.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from mock import Mock
from st2common.transport.consumers import ActionsQueueConsumer
from st2common.models.db.liveaction import LiveActionDB
from st2tests import config as test_config
test_config.parse_args()
__all__ = [
'ActionsQueueConsumerTestCase'
]
class ActionsQueueConsumerTestCase(TestCase):
def test_process_right_dispatcher_is_used(self):
handler = Mock()
handler.message_type = LiveActionDB
consumer = ActionsQueueConsumer(connection=None, queues=None, handler=handler)
# Non-workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=False)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 0)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 1)
# Workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=True)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 1)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 0)
|
<commit_before><commit_msg>Add test for ActionsQueueConsumer class which verifies that the right
BufferedDispatcher class is used.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from mock import Mock
from st2common.transport.consumers import ActionsQueueConsumer
from st2common.models.db.liveaction import LiveActionDB
from st2tests import config as test_config
test_config.parse_args()
__all__ = [
'ActionsQueueConsumerTestCase'
]
class ActionsQueueConsumerTestCase(TestCase):
def test_process_right_dispatcher_is_used(self):
handler = Mock()
handler.message_type = LiveActionDB
consumer = ActionsQueueConsumer(connection=None, queues=None, handler=handler)
# Non-workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=False)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 0)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 1)
# Workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=True)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 1)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 0)
|
Add test for ActionsQueueConsumer class which verifies that the right
BufferedDispatcher class is used.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from mock import Mock
from st2common.transport.consumers import ActionsQueueConsumer
from st2common.models.db.liveaction import LiveActionDB
from st2tests import config as test_config
test_config.parse_args()
__all__ = [
'ActionsQueueConsumerTestCase'
]
class ActionsQueueConsumerTestCase(TestCase):
def test_process_right_dispatcher_is_used(self):
handler = Mock()
handler.message_type = LiveActionDB
consumer = ActionsQueueConsumer(connection=None, queues=None, handler=handler)
# Non-workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=False)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 0)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 1)
# Workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=True)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 1)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 0)
|
<commit_before><commit_msg>Add test for ActionsQueueConsumer class which verifies that the right
BufferedDispatcher class is used.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest2 import TestCase
from mock import Mock
from st2common.transport.consumers import ActionsQueueConsumer
from st2common.models.db.liveaction import LiveActionDB
from st2tests import config as test_config
test_config.parse_args()
__all__ = [
'ActionsQueueConsumerTestCase'
]
class ActionsQueueConsumerTestCase(TestCase):
def test_process_right_dispatcher_is_used(self):
handler = Mock()
handler.message_type = LiveActionDB
consumer = ActionsQueueConsumer(connection=None, queues=None, handler=handler)
# Non-workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=False)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 0)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 1)
# Workflow action
consumer._workflows_dispatcher = Mock()
consumer._actions_dispatcher = Mock()
body = LiveActionDB(status='scheduled', action='core.local', action_is_workflow=True)
message = Mock()
consumer.process(body=body, message=message)
self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 1)
self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 0)
|
|
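A stand-alone sketch of the testing pattern recorded above: replace an object's internal dispatchers with Mocks and assert which one received the call. SimpleConsumer below is hypothetical and only illustrates the routing check; it is not StackStorm code.
from unittest import TestCase
from unittest.mock import Mock
class SimpleConsumer(object):
    def __init__(self):
        # Production code would create real dispatchers; tests swap in Mocks.
        self._workflows_dispatcher = Mock()
        self._actions_dispatcher = Mock()
    def process(self, body):
        # Route workflow actions and plain actions to different dispatchers.
        if body.get('action_is_workflow'):
            self._workflows_dispatcher.dispatch(body)
        else:
            self._actions_dispatcher.dispatch(body)
class SimpleConsumerTestCase(TestCase):
    def test_right_dispatcher_is_used(self):
        consumer = SimpleConsumer()
        consumer.process({'action': 'core.local', 'action_is_workflow': False})
        self.assertEqual(consumer._workflows_dispatcher.dispatch.call_count, 0)
        self.assertEqual(consumer._actions_dispatcher.dispatch.call_count, 1)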
61d1020d1a96e6384426414ee122a013c6d75ea9
|
djconnectwise/migrations/0046_auto_20180104_1504.py
|
djconnectwise/migrations/0046_auto_20180104_1504.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0045_auto_20171222_1725'),
]
operations = [
migrations.AlterField(
model_name='scheduleentry',
name='name',
field=models.CharField(null=True, max_length=250, blank=True),
),
]
|
Add migration for last commit
|
Add migration for last commit
|
Python
|
mit
|
KerkhoffTechnologies/django-connectwise,KerkhoffTechnologies/django-connectwise
|
Add migration for last commit
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0045_auto_20171222_1725'),
]
operations = [
migrations.AlterField(
model_name='scheduleentry',
name='name',
field=models.CharField(null=True, max_length=250, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for last commit<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0045_auto_20171222_1725'),
]
operations = [
migrations.AlterField(
model_name='scheduleentry',
name='name',
field=models.CharField(null=True, max_length=250, blank=True),
),
]
|
Add migration for last commit# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0045_auto_20171222_1725'),
]
operations = [
migrations.AlterField(
model_name='scheduleentry',
name='name',
field=models.CharField(null=True, max_length=250, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for last commit<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0045_auto_20171222_1725'),
]
operations = [
migrations.AlterField(
model_name='scheduleentry',
name='name',
field=models.CharField(null=True, max_length=250, blank=True),
),
]
|
|
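For context, an AlterField migration like the one above is typically generated from a model change along these lines; the ScheduleEntry model shown here is assumed for illustration, not taken from the repository. Running makemigrations against such a change produces an equivalent AlterField operation.
from django.db import models
class ScheduleEntry(models.Model):
    # null=True and blank=True let schedule entries be saved without a name.
    name = models.CharField(max_length=250, null=True, blank=True)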
158d8722b8a232dceefc040cb2414201ddc7a059
|
test_all.py
|
test_all.py
|
import pytest
import random
import pymongo
from pymongo import MongoClient
_connection = None
class TempCollection:
def __init__(self, db, name, initial_data=None):
self.db = db
self.name = name
self.initial_data = initial_data
def __enter__(self):
self.col = self.db[self.name]
if self.initial_data is not None:
self.col.insert(self.initial_data)
return self.col
def __exit__(self, type, value, traceback):
self.col.drop()
class DbTests:
def __init__(self, connstr, testdb):
global _connection
if _connection is None:
_connection = MongoClient(connstr)
self.db = getattr(_connection, testdb)
def test_collection(self, initial_data=None):
name = 'test%d' % random.randint(10000, 99999)
if initial_data is not None:
pass
return TempCollection(self.db, name, initial_data)
@pytest.fixture(scope='module')
def db():
return DbTests('mongodb://127.0.0.1:27017', 'logstertest')
def test_db_fixture(db):
data = [{'test': 42}, {'test': 43}]
with db.test_collection(initial_data=data) as col:
assert isinstance(col, pymongo.collection.Collection)
doc = col.find_one({'test': 42})
assert doc is not None
assert doc['test'] == 42
|
Add fixture for database related testing
|
Add fixture for database related testing
|
Python
|
mit
|
irvind/logster,irvind/logster,irvind/logster
|
Add fixture for database related testing
|
import pytest
import random
import pymongo
from pymongo import MongoClient
_connection = None
class TempCollection:
def __init__(self, db, name, initial_data=None):
self.db = db
self.name = name
self.initial_data = initial_data
def __enter__(self):
self.col = self.db[self.name]
if self.initial_data is not None:
self.col.insert(self.initial_data)
return self.col
def __exit__(self, type, value, traceback):
self.col.drop()
class DbTests:
def __init__(self, connstr, testdb):
global _connection
if _connection is None:
_connection = MongoClient(connstr)
self.db = getattr(_connection, testdb)
def test_collection(self, initial_data=None):
name = 'test%d' % random.randint(10000, 99999)
if initial_data is not None:
pass
return TempCollection(self.db, name, initial_data)
@pytest.fixture(scope='module')
def db():
return DbTests('mongodb://127.0.0.1:27017', 'logstertest')
def test_db_fixture(db):
data = [{'test': 42}, {'test': 43}]
with db.test_collection(initial_data=data) as col:
assert isinstance(col, pymongo.collection.Collection)
doc = col.find_one({'test': 42})
assert doc is not None
assert doc['test'] == 42
|
<commit_before><commit_msg>Add fixture for database related testing<commit_after>
|
import pytest
import random
import pymongo
from pymongo import MongoClient
_connection = None
class TempCollection:
def __init__(self, db, name, initial_data=None):
self.db = db
self.name = name
self.initial_data = initial_data
def __enter__(self):
self.col = self.db[self.name]
if self.initial_data is not None:
self.col.insert(self.initial_data)
return self.col
def __exit__(self, type, value, traceback):
self.col.drop()
class DbTests:
def __init__(self, connstr, testdb):
global _connection
if _connection is None:
_connection = MongoClient(connstr)
self.db = getattr(_connection, testdb)
def test_collection(self, initial_data=None):
name = 'test%d' % random.randint(10000, 99999)
if initial_data is not None:
pass
return TempCollection(self.db, name, initial_data)
@pytest.fixture(scope='module')
def db():
return DbTests('mongodb://127.0.0.1:27017', 'logstertest')
def test_db_fixture(db):
data = [{'test': 42}, {'test': 43}]
with db.test_collection(initial_data=data) as col:
assert isinstance(col, pymongo.collection.Collection)
doc = col.find_one({'test': 42})
assert doc is not None
assert doc['test'] == 42
|
Add fixture for database related testingimport pytest
import random
import pymongo
from pymongo import MongoClient
_connection = None
class TempCollection:
def __init__(self, db, name, initial_data=None):
self.db = db
self.name = name
self.initial_data = initial_data
def __enter__(self):
self.col = self.db[self.name]
if self.initial_data is not None:
self.col.insert(self.initial_data)
return self.col
def __exit__(self, type, value, traceback):
self.col.drop()
class DbTests:
def __init__(self, connstr, testdb):
global _connection
if _connection is None:
_connection = MongoClient(connstr)
self.db = getattr(_connection, testdb)
def test_collection(self, initial_data=None):
name = 'test%d' % random.randint(10000, 99999)
if initial_data is not None:
pass
return TempCollection(self.db, name, initial_data)
@pytest.fixture(scope='module')
def db():
return DbTests('mongodb://127.0.0.1:27017', 'logstertest')
def test_db_fixture(db):
data = [{'test': 42}, {'test': 43}]
with db.test_collection(initial_data=data) as col:
assert isinstance(col, pymongo.collection.Collection)
doc = col.find_one({'test': 42})
assert doc is not None
assert doc['test'] == 42
|
<commit_before><commit_msg>Add fixture for database related testing<commit_after>import pytest
import random
import pymongo
from pymongo import MongoClient
_connection = None
class TempCollection:
def __init__(self, db, name, initial_data=None):
self.db = db
self.name = name
self.initial_data = initial_data
def __enter__(self):
self.col = self.db[self.name]
if self.initial_data is not None:
self.col.insert(self.initial_data)
return self.col
def __exit__(self, type, value, traceback):
self.col.drop()
class DbTests:
def __init__(self, connstr, testdb):
global _connection
if _connection is None:
_connection = MongoClient(connstr)
self.db = getattr(_connection, testdb)
def test_collection(self, initial_data=None):
name = 'test%d' % random.randint(10000, 99999)
if initial_data is not None:
pass
return TempCollection(self.db, name, initial_data)
@pytest.fixture(scope='module')
def db():
return DbTests('mongodb://127.0.0.1:27017', 'logstertest')
def test_db_fixture(db):
data = [{'test': 42}, {'test': 43}]
with db.test_collection(initial_data=data) as col:
assert isinstance(col, pymongo.collection.Collection)
doc = col.find_one({'test': 42})
assert doc is not None
assert doc['test'] == 42
|
|
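A usage sketch (assumed, not part of the commit): another test reusing the module-scoped `db` fixture and writing documents inside the managed collection; TempCollection.__exit__ drops the collection even if an assertion fails. The legacy pymongo calls mirror the ones used above.
def test_insert_and_find(db):
    with db.test_collection() as col:
        col.insert([{'n': i} for i in range(3)])
        assert col.count() == 3
        assert col.find_one({'n': 2}) is not None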
34f8c0a4a0a9f78c124cd07b121ce5b2fbf00136
|
onadata/libs/utils/csv_import.py
|
onadata/libs/utils/csv_import.py
|
import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
for row in csv_reader:
xml_file = StringIO(dict2xform(dict_lists2strings(row),
row.get('_uuid')))
safe_create_instance(username, xml_file, [], None, None)
|
import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
from django.db import transaction
class CSVImportException(Exception):
pass
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
with transaction.atomic():
for row in csv_reader:
# fetch submission uuid before nuking row metadata
_uuid = row.get('_uuid')
# nuke metadata (keys starting with '_')
for key in row.keys():
if key.startswith('_'):
del row[key]
xml_file = StringIO(dict2xform(dict_lists2strings(row), _uuid))
error, instance = safe_create_instance(
username, xml_file, [], None, None)
            if error is not None:
raise CSVImportException(error)
|
Implement atomicity for CSV imports
|
JZ: Implement atomicity for CSV imports
CSV imports should happen for all rows or nothing at all!
Use `django.transactions` for rollbacks on submission on errors
Also remove metadata from CSV rows before submitting
|
Python
|
bsd-2-clause
|
awemulya/fieldsight-kobocat,mainakibui/kobocat,qlands/onadata,smn/onadata,sounay/flaminggo-test,piqoni/onadata,qlands/onadata,jomolinare/kobocat,sounay/flaminggo-test,mainakibui/kobocat,kobotoolbox/kobocat,kobotoolbox/kobocat,smn/onadata,piqoni/onadata,hnjamba/onaclone,awemulya/fieldsight-kobocat,smn/onadata,jomolinare/kobocat,jomolinare/kobocat,smn/onadata,spatialdev/onadata,qlands/onadata,GeoODK/onadata,kobotoolbox/kobocat,spatialdev/onadata,hnjamba/onaclone,GeoODK/onadata,spatialdev/onadata,awemulya/fieldsight-kobocat,sounay/flaminggo-test,awemulya/fieldsight-kobocat,sounay/flaminggo-test,kobotoolbox/kobocat,piqoni/onadata,GeoODK/onadata,hnjamba/onaclone,qlands/onadata,hnjamba/onaclone,GeoODK/onadata,piqoni/onadata,spatialdev/onadata,mainakibui/kobocat,mainakibui/kobocat,jomolinare/kobocat
|
import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
for row in csv_reader:
xml_file = StringIO(dict2xform(dict_lists2strings(row),
row.get('_uuid')))
safe_create_instance(username, xml_file, [], None, None)
JZ: Implement atomicity for CSV imports
CSV imports should happen for all rows or nothing at all!
Use `django.transactions` for rollbacks on submission on errors
Also remove metadata from CSV rows before submitting
|
import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
from django.db import transaction
class CSVImportException(Exception):
pass
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
with transaction.atomic():
for row in csv_reader:
# fetch submission uuid before nuking row metadata
_uuid = row.get('_uuid')
# nuke metadata (keys starting with '_')
for key in row.keys():
if key.startswith('_'):
del row[key]
xml_file = StringIO(dict2xform(dict_lists2strings(row), _uuid))
error, instance = safe_create_instance(
username, xml_file, [], None, None)
            if error is not None:
raise CSVImportException(error)
|
<commit_before>import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
for row in csv_reader:
xml_file = StringIO(dict2xform(dict_lists2strings(row),
row.get('_uuid')))
safe_create_instance(username, xml_file, [], None, None)
<commit_msg>JZ: Implement atomicity for CSV imports
CSV imports should happen for all rows or nothing at all!
Use `django.transactions` for rollbacks on submission on errors
Also remove metadata from CSV rows before submitting<commit_after>
|
import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
from django.db import transaction
class CSVImportException(Exception):
pass
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
with transaction.atomic():
for row in csv_reader:
# fetch submission uuid before nuking row metadata
_uuid = row.get('_uuid')
# nuke metadata (keys starting with '_')
for key in row.keys():
if key.startswith('_'):
del row[key]
xml_file = StringIO(dict2xform(dict_lists2strings(row), _uuid))
error, instance = safe_create_instance(
username, xml_file, [], None, None)
            if error is not None:
raise CSVImportException(error)
|
import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
for row in csv_reader:
xml_file = StringIO(dict2xform(dict_lists2strings(row),
row.get('_uuid')))
safe_create_instance(username, xml_file, [], None, None)
JZ: Implement atomicity for CSV imports
CSV imports should happen for all rows or nothing at all!
Use `django.transactions` for rollbacks on submission on errors
Also remove metadata from CSV rows before submittingimport unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
from django.db import transaction
class CSVImportException(Exception):
pass
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
with transaction.atomic():
for row in csv_reader:
# fetch submission uuid before nuking row metadata
_uuid = row.get('_uuid')
# nuke metadata (keys starting with '_')
for key in row.keys():
if key.startswith('_'):
del row[key]
xml_file = StringIO(dict2xform(dict_lists2strings(row), _uuid))
error, instance = safe_create_instance(
username, xml_file, [], None, None)
            if error is not None:
raise CSVImportException(error)
|
<commit_before>import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
for row in csv_reader:
xml_file = StringIO(dict2xform(dict_lists2strings(row),
row.get('_uuid')))
safe_create_instance(username, xml_file, [], None, None)
<commit_msg>JZ: Implement atomicity for CSV imports
CSV imports should happen for all rows or nothing at all!
Use `django.transactions` for rollbacks on submission on errors
Also remove metadata from CSV rows before submitting<commit_after>import unicodecsv as ucsv
from cStringIO import StringIO
from ondata.apps.api.viewsets.xform_submission_api import dict_lists2strings
from onadata.libs.utils.logger_tools import dict2xform, safe_create_instance
from django.db import transaction
class CSVImportException(Exception):
pass
def submit_csv(username, request, csv_data):
if isinstance(csv_data, (str, unicode)):
csv_data = StringIO(csv_data)
elif not isinstance(csv_data, file):
raise TypeError(u'Invalid param type for `csv_data`. '
'Expected file, String or Unicode but '
'got {} instead.'.format(type(csv_data).__name__))
csv_reader = ucsv.DictReader(csv_data)
with transaction.atomic():
for row in csv_reader:
# fetch submission uuid before nuking row metadata
_uuid = row.get('_uuid')
# nuke metadata (keys starting with '_')
for key in row.keys():
if key.startswith('_'):
del row[key]
xml_file = StringIO(dict2xform(dict_lists2strings(row), _uuid))
error, instance = safe_create_instance(
username, xml_file, [], None, None)
            if error is not None:
raise CSVImportException(error)
|
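A minimal sketch of the all-or-nothing behaviour the commit relies on, assuming a configured Django project; save_row is a hypothetical callable that persists one row and returns an error string or None, mirroring safe_create_instance() above. Raising inside the atomic block rolls back every row saved within it.
from django.db import transaction
class CSVImportException(Exception):
    pass
def import_all_or_nothing(rows, save_row):
    with transaction.atomic():
        for row in rows:
            error = save_row(row)
            if error is not None:
                # Any single failure aborts the import and rolls back prior saves.
                raise CSVImportException(error)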
23d34308206013033f22204f8720ef01263ab07b
|
examples/plot_ransac.py
|
examples/plot_ransac.py
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
from sklearn.utils import ransac
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.LinearRegression()
inlier_mask = ransac(X, y, model_robust, min_n_samples=2, residual_threshold=2)
outlier_mask = ~inlier_mask
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
|
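Editor's note on the import above: sklearn.utils.ransac appears to come from the development branch this example was written against; released scikit-learn exposes the same functionality as linear_model.RANSACRegressor. A minimal sketch with the released estimator, on synthetic data and with parameters chosen to mirror the example:
import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor
rng = np.random.RandomState(1)
X = np.arange(-200, 200)[:, np.newaxis]
y = 0.2 * X.ravel() + 20 + rng.normal(size=X.shape[0])
ransac_model = RANSACRegressor(LinearRegression(), min_samples=2,
                               residual_threshold=2)
ransac_model.fit(X, y)
inlier_mask = ransac_model.inlier_mask_    # boolean mask of detected inliers
line_y_robust = ransac_model.predict(X)    # robustly fitted line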
Add example plot script for RANSAC
|
Add example plot script for RANSAC
|
Python
|
bsd-3-clause
|
maheshakya/scikit-learn,meduz/scikit-learn,tawsifkhan/scikit-learn,thilbern/scikit-learn,abimannans/scikit-learn,mikebenfield/scikit-learn,fyffyt/scikit-learn,xwolf12/scikit-learn,nhejazi/scikit-learn,btabibian/scikit-learn,Lawrence-Liu/scikit-learn,ngoix/OCRF,loli/semisupervisedforests,mugizico/scikit-learn,pnedunuri/scikit-learn,zhenv5/scikit-learn,tomlof/scikit-learn,q1ang/scikit-learn,Windy-Ground/scikit-learn,qifeigit/scikit-learn,zuku1985/scikit-learn,eickenberg/scikit-learn,liangz0707/scikit-learn,dsquareindia/scikit-learn,arabenjamin/scikit-learn,ankurankan/scikit-learn,JosmanPS/scikit-learn,espg/scikit-learn,yyjiang/scikit-learn,saiwing-yeung/scikit-learn,NunoEdgarGub1/scikit-learn,belltailjp/scikit-learn,RachitKansal/scikit-learn,untom/scikit-learn,akionakamura/scikit-learn,loli/sklearn-ensembletrees,pythonvietnam/scikit-learn,Garrett-R/scikit-learn,ilyes14/scikit-learn,wlamond/scikit-learn,wazeerzulfikar/scikit-learn,bigdataelephants/scikit-learn,mattilyra/scikit-learn,cauchycui/scikit-learn,xiaoxiamii/scikit-learn,PatrickChrist/scikit-learn,0x0all/scikit-learn,Adai0808/scikit-learn,djgagne/scikit-learn,HolgerPeters/scikit-learn,rahuldhote/scikit-learn,arahuja/scikit-learn,IndraVikas/scikit-learn,jkarnows/scikit-learn,schets/scikit-learn,betatim/scikit-learn,glemaitre/scikit-learn,xavierwu/scikit-learn,appapantula/scikit-learn,jseabold/scikit-learn,iismd17/scikit-learn,aewhatley/scikit-learn,rahul-c1/scikit-learn,wanggang3333/scikit-learn,lesteve/scikit-learn,hsiaoyi0504/scikit-learn,ilyes14/scikit-learn,fabioticconi/scikit-learn,fyffyt/scikit-learn,loli/semisupervisedforests,spallavolu/scikit-learn,deepesch/scikit-learn,potash/scikit-learn,Adai0808/scikit-learn,ivannz/scikit-learn,macks22/scikit-learn,huzq/scikit-learn,q1ang/scikit-learn,ankurankan/scikit-learn,vinayak-mehta/scikit-learn,mattgiguere/scikit-learn,AnasGhrab/scikit-learn,RPGOne/scikit-learn,pianomania/scikit-learn,ilo10/scikit-learn,costypetrisor/scikit-learn,dingocuster/scikit-learn,phdowling/scikit-learn,liberatorqjw/scikit-learn,wazeerzulfikar/scikit-learn,RayMick/scikit-learn,bikong2/scikit-learn,Akshay0724/scikit-learn,wlamond/scikit-learn,imaculate/scikit-learn,aabadie/scikit-learn,alexeyum/scikit-learn,shyamalschandra/scikit-learn,etkirsch/scikit-learn,pv/scikit-learn,gclenaghan/scikit-learn,liangz0707/scikit-learn,mattilyra/scikit-learn,nelson-liu/scikit-learn,clemkoa/scikit-learn,shikhardb/scikit-learn,ElDeveloper/scikit-learn,PrashntS/scikit-learn,fzalkow/scikit-learn,manashmndl/scikit-learn,mojoboss/scikit-learn,shenzebang/scikit-learn,clemkoa/scikit-learn,tosolveit/scikit-learn,anurag313/scikit-learn,rahul-c1/scikit-learn,shyamalschandra/scikit-learn,mikebenfield/scikit-learn,stylianos-kampakis/scikit-learn,kylerbrown/scikit-learn,0x0all/scikit-learn,fbagirov/scikit-learn,khkaminska/scikit-learn,nhejazi/scikit-learn,jblackburne/scikit-learn,Adai0808/scikit-learn,mjgrav2001/scikit-learn,Nyker510/scikit-learn,kevin-intel/scikit-learn,tmhm/scikit-learn,AIML/scikit-learn,treycausey/scikit-learn,jmschrei/scikit-learn,mattgiguere/scikit-learn,liberatorqjw/scikit-learn,aflaxman/scikit-learn,fredhusser/scikit-learn,r-mart/scikit-learn,pratapvardhan/scikit-learn,fabianp/scikit-learn,xubenben/scikit-learn,alexsavio/scikit-learn,eickenberg/scikit-learn,bigdataelephants/scikit-learn,procoder317/scikit-learn,nrhine1/scikit-learn,sergeyf/scikit-learn,pv/scikit-learn,sinhrks/scikit-learn,AlexRobson/scikit-learn,heli522/scikit-learn,vshtanko/scikit-learn,rrohan/scikit-learn,heli522/scikit-learn,cybernet14/scikit-learn,
abhishekkrthakur/scikit-learn,lesteve/scikit-learn,cl4rke/scikit-learn,akionakamura/scikit-learn,jmetzen/scikit-learn,treycausey/scikit-learn,Myasuka/scikit-learn,thilbern/scikit-learn,mhue/scikit-learn,samzhang111/scikit-learn,anntzer/scikit-learn,carrillo/scikit-learn,xiaoxiamii/scikit-learn,belltailjp/scikit-learn,MechCoder/scikit-learn,wzbozon/scikit-learn,jmschrei/scikit-learn,iismd17/scikit-learn,rexshihaoren/scikit-learn,vinayak-mehta/scikit-learn,mayblue9/scikit-learn,anntzer/scikit-learn,rrohan/scikit-learn,cauchycui/scikit-learn,amueller/scikit-learn,bikong2/scikit-learn,luo66/scikit-learn,jmetzen/scikit-learn,equialgo/scikit-learn,russel1237/scikit-learn,mugizico/scikit-learn,zaxtax/scikit-learn,aminert/scikit-learn,mhdella/scikit-learn,liyu1990/sklearn,rohanp/scikit-learn,cainiaocome/scikit-learn,ElDeveloper/scikit-learn,olologin/scikit-learn,ngoix/OCRF,lbishal/scikit-learn,anntzer/scikit-learn,themrmax/scikit-learn,MartinDelzant/scikit-learn,dsquareindia/scikit-learn,YinongLong/scikit-learn,themrmax/scikit-learn,ilo10/scikit-learn,vibhorag/scikit-learn,adamgreenhall/scikit-learn,ankurankan/scikit-learn,Windy-Ground/scikit-learn,mjudsp/Tsallis,roxyboy/scikit-learn,abimannans/scikit-learn,AlexanderFabisch/scikit-learn,robin-lai/scikit-learn,raghavrv/scikit-learn,vinayak-mehta/scikit-learn,Fireblend/scikit-learn,dhruv13J/scikit-learn,MohammedWasim/scikit-learn,jzt5132/scikit-learn,phdowling/scikit-learn,equialgo/scikit-learn,abhishekkrthakur/scikit-learn,tdhopper/scikit-learn,qifeigit/scikit-learn,Jimmy-Morzaria/scikit-learn,manashmndl/scikit-learn,hitszxp/scikit-learn,trankmichael/scikit-learn,hrjn/scikit-learn,ilo10/scikit-learn,etkirsch/scikit-learn,mhue/scikit-learn,schets/scikit-learn,ashhher3/scikit-learn,sumspr/scikit-learn,AnasGhrab/scikit-learn,zhenv5/scikit-learn,glemaitre/scikit-learn,alvarofierroclavero/scikit-learn,xwolf12/scikit-learn,ycaihua/scikit-learn,mwv/scikit-learn,elkingtonmcb/scikit-learn,maheshakya/scikit-learn,ndingwall/scikit-learn,icdishb/scikit-learn,mwv/scikit-learn,AlexandreAbraham/scikit-learn,sarahgrogan/scikit-learn,JosmanPS/scikit-learn,raghavrv/scikit-learn,abhishekkrthakur/scikit-learn,Nyker510/scikit-learn,ndingwall/scikit-learn,eg-zhang/scikit-learn,ZENGXH/scikit-learn,etkirsch/scikit-learn,pypot/scikit-learn,Myasuka/scikit-learn,krez13/scikit-learn,larsmans/scikit-learn,ahoyosid/scikit-learn,andaag/scikit-learn,ivannz/scikit-learn,chrsrds/scikit-learn,billy-inn/scikit-learn,MohammedWasim/scikit-learn,xzh86/scikit-learn,Obus/scikit-learn,tosolveit/scikit-learn,mattilyra/scikit-learn,moutai/scikit-learn,victorbergelin/scikit-learn,ndingwall/scikit-learn,fabianp/scikit-learn,walterreade/scikit-learn,rohanp/scikit-learn,PrashntS/scikit-learn,icdishb/scikit-learn,arahuja/scikit-learn,Vimos/scikit-learn,RPGOne/scikit-learn,tdhopper/scikit-learn,abhishekgahlot/scikit-learn,raghavrv/scikit-learn,maheshakya/scikit-learn,kagayakidan/scikit-learn,jpautom/scikit-learn,chrisburr/scikit-learn,roxyboy/scikit-learn,tawsifkhan/scikit-learn,alexeyum/scikit-learn,victorbergelin/scikit-learn,pnedunuri/scikit-learn,aflaxman/scikit-learn,mfjb/scikit-learn,nrhine1/scikit-learn,krez13/scikit-learn,sumspr/scikit-learn,liyu1990/sklearn,shahankhatch/scikit-learn,nmayorov/scikit-learn,hsiaoyi0504/scikit-learn,wlamond/scikit-learn,anurag313/scikit-learn,tmhm/scikit-learn,ZenDevelopmentSystems/scikit-learn,BiaDarkia/scikit-learn,MatthieuBizien/scikit-learn,hugobowne/scikit-learn,devanshdalal/scikit-learn,abhishekgahlot/scikit-learn,idlead/scikit-learn,elkingtonmcb/scikit-
learn,petosegan/scikit-learn,harshaneelhg/scikit-learn,ldirer/scikit-learn,nomadcube/scikit-learn,kevin-intel/scikit-learn,imaculate/scikit-learn,zhenv5/scikit-learn,andrewnc/scikit-learn,DonBeo/scikit-learn,heli522/scikit-learn,trungnt13/scikit-learn,liyu1990/sklearn,bhargav/scikit-learn,espg/scikit-learn,frank-tancf/scikit-learn,Lawrence-Liu/scikit-learn,treycausey/scikit-learn,kevin-intel/scikit-learn,moutai/scikit-learn,kjung/scikit-learn,bnaul/scikit-learn,idlead/scikit-learn,vortex-ape/scikit-learn,walterreade/scikit-learn,mjudsp/Tsallis,Titan-C/scikit-learn,smartscheduling/scikit-learn-categorical-tree,xiaoxiamii/scikit-learn,jseabold/scikit-learn,nikitasingh981/scikit-learn,tomlof/scikit-learn,akionakamura/scikit-learn,yask123/scikit-learn,rishikksh20/scikit-learn,mojoboss/scikit-learn,Garrett-R/scikit-learn,mattilyra/scikit-learn,treycausey/scikit-learn,ngoix/OCRF,quheng/scikit-learn,terkkila/scikit-learn,zorroblue/scikit-learn,ashhher3/scikit-learn,qifeigit/scikit-learn,hugobowne/scikit-learn,Clyde-fare/scikit-learn,michigraber/scikit-learn,bthirion/scikit-learn,thilbern/scikit-learn,joshloyal/scikit-learn,billy-inn/scikit-learn,dsullivan7/scikit-learn,herilalaina/scikit-learn,plissonf/scikit-learn,amueller/scikit-learn,meduz/scikit-learn,xuewei4d/scikit-learn,vivekmishra1991/scikit-learn,henridwyer/scikit-learn,pratapvardhan/scikit-learn,cybernet14/scikit-learn,giorgiop/scikit-learn,TomDLT/scikit-learn,khkaminska/scikit-learn,yunfeilu/scikit-learn,Windy-Ground/scikit-learn,xwolf12/scikit-learn,equialgo/scikit-learn,aetilley/scikit-learn,abhishekgahlot/scikit-learn,ltiao/scikit-learn,PatrickOReilly/scikit-learn,ningchi/scikit-learn,tosolveit/scikit-learn,nrhine1/scikit-learn,jblackburne/scikit-learn,evgchz/scikit-learn,vigilv/scikit-learn,jorge2703/scikit-learn,pompiduskus/scikit-learn,theoryno3/scikit-learn,dhruv13J/scikit-learn,OshynSong/scikit-learn,voxlol/scikit-learn,vybstat/scikit-learn,ClimbsRocks/scikit-learn,mxjl620/scikit-learn,jereze/scikit-learn,CforED/Machine-Learning,hugobowne/scikit-learn,sinhrks/scikit-learn,xavierwu/scikit-learn,ashhher3/scikit-learn,YinongLong/scikit-learn,procoder317/scikit-learn,deepesch/scikit-learn,f3r/scikit-learn,dingocuster/scikit-learn,roxyboy/scikit-learn,Nyker510/scikit-learn,pv/scikit-learn,murali-munna/scikit-learn,Barmaley-exe/scikit-learn,rahuldhote/scikit-learn,OshynSong/scikit-learn,jereze/scikit-learn,stylianos-kampakis/scikit-learn,vigilv/scikit-learn,alvarofierroclavero/scikit-learn,466152112/scikit-learn,mattgiguere/scikit-learn,devanshdalal/scikit-learn,0x0all/scikit-learn,jkarnows/scikit-learn,plissonf/scikit-learn,fzalkow/scikit-learn,robbymeals/scikit-learn,rahuldhote/scikit-learn,frank-tancf/scikit-learn,bnaul/scikit-learn,aetilley/scikit-learn,RachitKansal/scikit-learn,sarahgrogan/scikit-learn,jjx02230808/project0223,JPFrancoia/scikit-learn,fzalkow/scikit-learn,eickenberg/scikit-learn,IshankGulati/scikit-learn,PatrickChrist/scikit-learn,zorroblue/scikit-learn,lenovor/scikit-learn,btabibian/scikit-learn,CVML/scikit-learn,anirudhjayaraman/scikit-learn,untom/scikit-learn,xubenben/scikit-learn,jorik041/scikit-learn,jorge2703/scikit-learn,h2educ/scikit-learn,IssamLaradji/scikit-learn,IndraVikas/scikit-learn,RomainBrault/scikit-learn,samuel1208/scikit-learn,liangz0707/scikit-learn,robbymeals/scikit-learn,mwv/scikit-learn,h2educ/scikit-learn,RomainBrault/scikit-learn,robbymeals/scikit-learn,MohammedWasim/scikit-learn,AlexanderFabisch/scikit-learn,dhruv13J/scikit-learn,PatrickChrist/scikit-learn,cauchycui/scikit-learn,arahuja/sc
ikit-learn,JeanKossaifi/scikit-learn,cl4rke/scikit-learn,IssamLaradji/scikit-learn,3manuek/scikit-learn,nelson-liu/scikit-learn,spallavolu/scikit-learn,joernhees/scikit-learn,abimannans/scikit-learn,jaidevd/scikit-learn,OshynSong/scikit-learn,Vimos/scikit-learn,sinhrks/scikit-learn,RayMick/scikit-learn,tdhopper/scikit-learn,ElDeveloper/scikit-learn,lbishal/scikit-learn,alexsavio/scikit-learn,andrewnc/scikit-learn,mlyundin/scikit-learn,Barmaley-exe/scikit-learn,russel1237/scikit-learn,poryfly/scikit-learn,AlexRobson/scikit-learn,dingocuster/scikit-learn,wanggang3333/scikit-learn,bigdataelephants/scikit-learn,JeanKossaifi/scikit-learn,henrykironde/scikit-learn,icdishb/scikit-learn,petosegan/scikit-learn,pratapvardhan/scikit-learn,schets/scikit-learn,yunfeilu/scikit-learn,equialgo/scikit-learn,murali-munna/scikit-learn,jkarnows/scikit-learn,bthirion/scikit-learn,beepee14/scikit-learn,3manuek/scikit-learn,joernhees/scikit-learn,vibhorag/scikit-learn,0asa/scikit-learn,mehdidc/scikit-learn,zaxtax/scikit-learn,evgchz/scikit-learn,mjudsp/Tsallis,huobaowangxi/scikit-learn,amueller/scikit-learn,jakobworldpeace/scikit-learn,schets/scikit-learn,wanggang3333/scikit-learn,giorgiop/scikit-learn,ishanic/scikit-learn,kashif/scikit-learn,ldirer/scikit-learn,michigraber/scikit-learn,hsiaoyi0504/scikit-learn,Sentient07/scikit-learn,terkkila/scikit-learn,hsuantien/scikit-learn,Titan-C/scikit-learn,justincassidy/scikit-learn,Obus/scikit-learn,maheshakya/scikit-learn,PatrickOReilly/scikit-learn,ZenDevelopmentSystems/scikit-learn,luo66/scikit-learn,shyamalschandra/scikit-learn,IshankGulati/scikit-learn,jorge2703/scikit-learn,MohammedWasim/scikit-learn,HolgerPeters/scikit-learn,AlexRobson/scikit-learn,NunoEdgarGub1/scikit-learn,walterreade/scikit-learn,mayblue9/scikit-learn,xyguo/scikit-learn,wazeerzulfikar/scikit-learn,tosolveit/scikit-learn,victorbergelin/scikit-learn,CforED/Machine-Learning,Srisai85/scikit-learn,fabioticconi/scikit-learn,DSLituiev/scikit-learn,gclenaghan/scikit-learn,massmutual/scikit-learn,JosmanPS/scikit-learn,xavierwu/scikit-learn,mblondel/scikit-learn,466152112/scikit-learn,rajat1994/scikit-learn,LiaoPan/scikit-learn,nmayorov/scikit-learn,toastedcornflakes/scikit-learn,phdowling/scikit-learn,0asa/scikit-learn,cwu2011/scikit-learn,DSLituiev/scikit-learn,AnasGhrab/scikit-learn,larsmans/scikit-learn,rexshihaoren/scikit-learn,wanggang3333/scikit-learn,ephes/scikit-learn,vermouthmjl/scikit-learn,larsmans/scikit-learn,poryfly/scikit-learn,NelisVerhoef/scikit-learn,lazywei/scikit-learn,bnaul/scikit-learn,davidgbe/scikit-learn,mlyundin/scikit-learn,petosegan/scikit-learn,justincassidy/scikit-learn,ChanChiChoi/scikit-learn,shusenl/scikit-learn,MatthieuBizien/scikit-learn,kaichogami/scikit-learn,sarahgrogan/scikit-learn,ltiao/scikit-learn,Barmaley-exe/scikit-learn,michigraber/scikit-learn,ishanic/scikit-learn,saiwing-yeung/scikit-learn,shenzebang/scikit-learn,shusenl/scikit-learn,tomlof/scikit-learn,lbishal/scikit-learn,rvraghav93/scikit-learn,LiaoPan/scikit-learn,zihua/scikit-learn,massmutual/scikit-learn,pkruskal/scikit-learn,nhejazi/scikit-learn,kaichogami/scikit-learn,macks22/scikit-learn,espg/scikit-learn,iismd17/scikit-learn,IssamLaradji/scikit-learn,cl4rke/scikit-learn,macks22/scikit-learn,jm-begon/scikit-learn,nesterione/scikit-learn,ashhher3/scikit-learn,xubenben/scikit-learn,davidgbe/scikit-learn,ZenDevelopmentSystems/scikit-learn,nomadcube/scikit-learn,quheng/scikit-learn,zorroblue/scikit-learn,henridwyer/scikit-learn,toastedcornflakes/scikit-learn,jmschrei/scikit-learn,jereze/scikit-learn,
vibhorag/scikit-learn,trungnt13/scikit-learn,JsNoNo/scikit-learn,lazywei/scikit-learn,LiaoPan/scikit-learn,zihua/scikit-learn,mwv/scikit-learn,jlegendary/scikit-learn,samzhang111/scikit-learn,AIML/scikit-learn,MartinDelzant/scikit-learn,UNR-AERIAL/scikit-learn,marcocaccin/scikit-learn,michigraber/scikit-learn,ogrisel/scikit-learn,466152112/scikit-learn,NunoEdgarGub1/scikit-learn,arjoly/scikit-learn,alvarofierroclavero/scikit-learn,vybstat/scikit-learn,shikhardb/scikit-learn,r-mart/scikit-learn,hitszxp/scikit-learn,hitszxp/scikit-learn,3manuek/scikit-learn,robin-lai/scikit-learn,glennq/scikit-learn,alexsavio/scikit-learn,huobaowangxi/scikit-learn,NunoEdgarGub1/scikit-learn,ChanderG/scikit-learn,vivekmishra1991/scikit-learn,NelisVerhoef/scikit-learn,costypetrisor/scikit-learn,cainiaocome/scikit-learn,glennq/scikit-learn,djgagne/scikit-learn,procoder317/scikit-learn,q1ang/scikit-learn,ldirer/scikit-learn,justincassidy/scikit-learn,zuku1985/scikit-learn,fabioticconi/scikit-learn,mayblue9/scikit-learn,zaxtax/scikit-learn,rishikksh20/scikit-learn,Akshay0724/scikit-learn,dsquareindia/scikit-learn,vshtanko/scikit-learn,JeanKossaifi/scikit-learn,cainiaocome/scikit-learn,lesteve/scikit-learn,nesterione/scikit-learn,jakirkham/scikit-learn,pythonvietnam/scikit-learn,Clyde-fare/scikit-learn,yanlend/scikit-learn,saiwing-yeung/scikit-learn,chrsrds/scikit-learn,thientu/scikit-learn,eickenberg/scikit-learn,wazeerzulfikar/scikit-learn,sanketloke/scikit-learn,raghavrv/scikit-learn,ssaeger/scikit-learn,robin-lai/scikit-learn,yonglehou/scikit-learn,xiaoxiamii/scikit-learn,BiaDarkia/scikit-learn,AlexanderFabisch/scikit-learn,arabenjamin/scikit-learn,RPGOne/scikit-learn,clemkoa/scikit-learn,cauchycui/scikit-learn,pypot/scikit-learn,jmetzen/scikit-learn,ZenDevelopmentSystems/scikit-learn,spallavolu/scikit-learn,fyffyt/scikit-learn,0asa/scikit-learn,anirudhjayaraman/scikit-learn,AnasGhrab/scikit-learn,olologin/scikit-learn,terkkila/scikit-learn,djgagne/scikit-learn,TomDLT/scikit-learn,xuewei4d/scikit-learn,ogrisel/scikit-learn,ky822/scikit-learn,yask123/scikit-learn,Achuth17/scikit-learn,r-mart/scikit-learn,AlexandreAbraham/scikit-learn,chrisburr/scikit-learn,Aasmi/scikit-learn,shangwuhencc/scikit-learn,harshaneelhg/scikit-learn,aabadie/scikit-learn,ningchi/scikit-learn,DonBeo/scikit-learn,yyjiang/scikit-learn,hsuantien/scikit-learn,Obus/scikit-learn,heli522/scikit-learn,JsNoNo/scikit-learn,eickenberg/scikit-learn,davidgbe/scikit-learn,HolgerPeters/scikit-learn,yanlend/scikit-learn,sergeyf/scikit-learn,anirudhjayaraman/scikit-learn,466152112/scikit-learn,andaag/scikit-learn,vigilv/scikit-learn,Vimos/scikit-learn,tawsifkhan/scikit-learn,potash/scikit-learn,kagayakidan/scikit-learn,ilyes14/scikit-learn,vortex-ape/scikit-learn,ChanChiChoi/scikit-learn,ishanic/scikit-learn,Sentient07/scikit-learn,andaag/scikit-learn,fengzhyuan/scikit-learn,mjgrav2001/scikit-learn,AlexRobson/scikit-learn,idlead/scikit-learn,wzbozon/scikit-learn,vibhorag/scikit-learn,glouppe/scikit-learn,kevin-intel/scikit-learn,aewhatley/scikit-learn,samzhang111/scikit-learn,ndingwall/scikit-learn,mehdidc/scikit-learn,ycaihua/scikit-learn,ZENGXH/scikit-learn,ivannz/scikit-learn,kylerbrown/scikit-learn,hsuantien/scikit-learn,moutai/scikit-learn,kaichogami/scikit-learn,mattgiguere/scikit-learn,ilyes14/scikit-learn,tomlof/scikit-learn,cybernet14/scikit-learn,massmutual/scikit-learn,hrjn/scikit-learn,rexshihaoren/scikit-learn,jpautom/scikit-learn,q1ang/scikit-learn,cainiaocome/scikit-learn,ldirer/scikit-learn,ChanderG/scikit-learn,glemaitre/scikit-learn,lol
i/semisupervisedforests,bnaul/scikit-learn,hainm/scikit-learn,Fireblend/scikit-learn,cl4rke/scikit-learn,xubenben/scikit-learn,TomDLT/scikit-learn,kashif/scikit-learn,RomainBrault/scikit-learn,mjgrav2001/scikit-learn,phdowling/scikit-learn,dsullivan7/scikit-learn,IshankGulati/scikit-learn,siutanwong/scikit-learn,vybstat/scikit-learn,procoder317/scikit-learn,0x0all/scikit-learn,ycaihua/scikit-learn,ahoyosid/scikit-learn,mfjb/scikit-learn,russel1237/scikit-learn,manhhomienbienthuy/scikit-learn,Jimmy-Morzaria/scikit-learn,mojoboss/scikit-learn,zorroblue/scikit-learn,aetilley/scikit-learn,glemaitre/scikit-learn,dsquareindia/scikit-learn,aewhatley/scikit-learn,Jimmy-Morzaria/scikit-learn,waterponey/scikit-learn,gotomypc/scikit-learn,wzbozon/scikit-learn,rajat1994/scikit-learn,arjoly/scikit-learn,jaidevd/scikit-learn,jorik041/scikit-learn,r-mart/scikit-learn,Djabbz/scikit-learn,ogrisel/scikit-learn,deepesch/scikit-learn,ycaihua/scikit-learn,f3r/scikit-learn,abimannans/scikit-learn,sergeyf/scikit-learn,DonBeo/scikit-learn,ChanChiChoi/scikit-learn,RayMick/scikit-learn,aflaxman/scikit-learn,simon-pepin/scikit-learn,Myasuka/scikit-learn,rajat1994/scikit-learn,ephes/scikit-learn,ClimbsRocks/scikit-learn,fyffyt/scikit-learn,xyguo/scikit-learn,hrjn/scikit-learn,BiaDarkia/scikit-learn,devanshdalal/scikit-learn,herilalaina/scikit-learn,plissonf/scikit-learn,fbagirov/scikit-learn,Djabbz/scikit-learn,mfjb/scikit-learn,rvraghav93/scikit-learn,sanketloke/scikit-learn,potash/scikit-learn,andrewnc/scikit-learn,siutanwong/scikit-learn,plissonf/scikit-learn,btabibian/scikit-learn,iismd17/scikit-learn,UNR-AERIAL/scikit-learn,chrsrds/scikit-learn,sumspr/scikit-learn,JeanKossaifi/scikit-learn,dsullivan7/scikit-learn,Garrett-R/scikit-learn,pkruskal/scikit-learn,mayblue9/scikit-learn,mhue/scikit-learn,untom/scikit-learn,pianomania/scikit-learn,jlegendary/scikit-learn,scikit-learn/scikit-learn,theoryno3/scikit-learn,IssamLaradji/scikit-learn,siutanwong/scikit-learn,voxlol/scikit-learn,imaculate/scikit-learn,zhenv5/scikit-learn,LohithBlaze/scikit-learn,gotomypc/scikit-learn,thilbern/scikit-learn,henridwyer/scikit-learn,ClimbsRocks/scikit-learn,khkaminska/scikit-learn,evgchz/scikit-learn,trankmichael/scikit-learn,waterponey/scikit-learn,rohanp/scikit-learn,Titan-C/scikit-learn,fabioticconi/scikit-learn,Barmaley-exe/scikit-learn,jayflo/scikit-learn,Sentient07/scikit-learn,sergeyf/scikit-learn,frank-tancf/scikit-learn,trankmichael/scikit-learn,mxjl620/scikit-learn,sonnyhu/scikit-learn,alvarofierroclavero/scikit-learn,espg/scikit-learn,simon-pepin/scikit-learn,0asa/scikit-learn,kylerbrown/scikit-learn,tmhm/scikit-learn,ChanChiChoi/scikit-learn,adamgreenhall/scikit-learn,dhruv13J/scikit-learn,kagayakidan/scikit-learn,anurag313/scikit-learn,JosmanPS/scikit-learn,pv/scikit-learn,mhdella/scikit-learn,pnedunuri/scikit-learn,siutanwong/scikit-learn,toastedcornflakes/scikit-learn,AlexanderFabisch/scikit-learn,Windy-Ground/scikit-learn,nikitasingh981/scikit-learn,betatim/scikit-learn,aminert/scikit-learn,appapantula/scikit-learn,PrashntS/scikit-learn,billy-inn/scikit-learn,zuku1985/scikit-learn,samuel1208/scikit-learn,robbymeals/scikit-learn,hlin117/scikit-learn,jpautom/scikit-learn,loli/sklearn-ensembletrees,mblondel/scikit-learn,sonnyhu/scikit-learn,samuel1208/scikit-learn,AlexandreAbraham/scikit-learn,kjung/scikit-learn,jm-begon/scikit-learn,joernhees/scikit-learn,amueller/scikit-learn,rrohan/scikit-learn,mjudsp/Tsallis,JsNoNo/scikit-learn,bikong2/scikit-learn,eg-zhang/scikit-learn,samzhang111/scikit-learn,madjelan/scikit-learn,
nomadcube/scikit-learn,madjelan/scikit-learn,samuel1208/scikit-learn,Garrett-R/scikit-learn,PatrickChrist/scikit-learn,jakirkham/scikit-learn,arabenjamin/scikit-learn,fzalkow/scikit-learn,mxjl620/scikit-learn,arjoly/scikit-learn,themrmax/scikit-learn,Vimos/scikit-learn,hrjn/scikit-learn,lenovor/scikit-learn,Srisai85/scikit-learn,mblondel/scikit-learn,smartscheduling/scikit-learn-categorical-tree,zorojean/scikit-learn,pratapvardhan/scikit-learn,aflaxman/scikit-learn,appapantula/scikit-learn,MartinDelzant/scikit-learn,RachitKansal/scikit-learn,theoryno3/scikit-learn,jayflo/scikit-learn,chrsrds/scikit-learn,lazywei/scikit-learn,jjx02230808/project0223,h2educ/scikit-learn,zorojean/scikit-learn,lenovor/scikit-learn,hsiaoyi0504/scikit-learn,madjelan/scikit-learn,zihua/scikit-learn,ngoix/OCRF,jlegendary/scikit-learn,petosegan/scikit-learn,adamgreenhall/scikit-learn,vshtanko/scikit-learn,harshaneelhg/scikit-learn,fredhusser/scikit-learn,mugizico/scikit-learn,evgchz/scikit-learn,giorgiop/scikit-learn,ningchi/scikit-learn,yunfeilu/scikit-learn,joshloyal/scikit-learn,altairpearl/scikit-learn,joshloyal/scikit-learn,UNR-AERIAL/scikit-learn,herilalaina/scikit-learn,sonnyhu/scikit-learn,Obus/scikit-learn,cwu2011/scikit-learn,andrewnc/scikit-learn,PrashntS/scikit-learn,appapantula/scikit-learn,nvoron23/scikit-learn,hitszxp/scikit-learn,ishanic/scikit-learn,wlamond/scikit-learn,Nyker510/scikit-learn,IndraVikas/scikit-learn,betatim/scikit-learn,xuewei4d/scikit-learn,cybernet14/scikit-learn,mhdella/scikit-learn,hdmetor/scikit-learn,jzt5132/scikit-learn,NelisVerhoef/scikit-learn,jlegendary/scikit-learn,glennq/scikit-learn,shusenl/scikit-learn,yonglehou/scikit-learn,harshaneelhg/scikit-learn,3manuek/scikit-learn,yonglehou/scikit-learn,jmschrei/scikit-learn,nmayorov/scikit-learn,giorgiop/scikit-learn,nmayorov/scikit-learn,RPGOne/scikit-learn,CVML/scikit-learn,AIML/scikit-learn,rrohan/scikit-learn,eg-zhang/scikit-learn,elkingtonmcb/scikit-learn,shangwuhencc/scikit-learn,vigilv/scikit-learn,pompiduskus/scikit-learn,saiwing-yeung/scikit-learn,vybstat/scikit-learn,devanshdalal/scikit-learn,jseabold/scikit-learn,evgchz/scikit-learn,nomadcube/scikit-learn,anntzer/scikit-learn,beepee14/scikit-learn,JsNoNo/scikit-learn,glouppe/scikit-learn,Titan-C/scikit-learn,frank-tancf/scikit-learn,altairpearl/scikit-learn,Aasmi/scikit-learn,fbagirov/scikit-learn,fengzhyuan/scikit-learn,yonglehou/scikit-learn,mjudsp/Tsallis,vshtanko/scikit-learn,ltiao/scikit-learn,loli/sklearn-ensembletrees,loli/semisupervisedforests,huzq/scikit-learn,justincassidy/scikit-learn,carrillo/scikit-learn,lazywei/scikit-learn,nvoron23/scikit-learn,simon-pepin/scikit-learn,UNR-AERIAL/scikit-learn,ElDeveloper/scikit-learn,rahuldhote/scikit-learn,lin-credible/scikit-learn,sumspr/scikit-learn,smartscheduling/scikit-learn-categorical-tree,kjung/scikit-learn,LohithBlaze/scikit-learn,akionakamura/scikit-learn,victorbergelin/scikit-learn,lesteve/scikit-learn,meduz/scikit-learn,hdmetor/scikit-learn,fredhusser/scikit-learn,elkingtonmcb/scikit-learn,dingocuster/scikit-learn,ky822/scikit-learn,shenzebang/scikit-learn,abhishekgahlot/scikit-learn,ilo10/scikit-learn,jm-begon/scikit-learn,nelson-liu/scikit-learn,gclenaghan/scikit-learn,cwu2011/scikit-learn,jakirkham/scikit-learn,madjelan/scikit-learn,jkarnows/scikit-learn,yask123/scikit-learn,MartinDelzant/scikit-learn,Lawrence-Liu/scikit-learn,ephes/scikit-learn,abhishekkrthakur/scikit-learn,pkruskal/scikit-learn,krez13/scikit-learn,MechCoder/scikit-learn,fbagirov/scikit-learn,AIML/scikit-learn,MatthieuBizien/scikit-lear
n,shusenl/scikit-learn,billy-inn/scikit-learn,LiaoPan/scikit-learn,khkaminska/scikit-learn,fabianp/scikit-learn,HolgerPeters/scikit-learn,trungnt13/scikit-learn,arahuja/scikit-learn,yanlend/scikit-learn,kaichogami/scikit-learn,marcocaccin/scikit-learn,quheng/scikit-learn,pompiduskus/scikit-learn,Clyde-fare/scikit-learn,rahul-c1/scikit-learn,shikhardb/scikit-learn,mattilyra/scikit-learn,xwolf12/scikit-learn,Adai0808/scikit-learn,nikitasingh981/scikit-learn,thientu/scikit-learn,ClimbsRocks/scikit-learn,ephes/scikit-learn,massmutual/scikit-learn,olologin/scikit-learn,rvraghav93/scikit-learn,hsuantien/scikit-learn,shenzebang/scikit-learn,JPFrancoia/scikit-learn,russel1237/scikit-learn,jjx02230808/project0223,zuku1985/scikit-learn,lbishal/scikit-learn,stylianos-kampakis/scikit-learn,joshloyal/scikit-learn,zaxtax/scikit-learn,rvraghav93/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jzt5132/scikit-learn,rohanp/scikit-learn,mugizico/scikit-learn,davidgbe/scikit-learn,herilalaina/scikit-learn,NelisVerhoef/scikit-learn,yyjiang/scikit-learn,robin-lai/scikit-learn,theoryno3/scikit-learn,pypot/scikit-learn,yask123/scikit-learn,jseabold/scikit-learn,vermouthmjl/scikit-learn,jaidevd/scikit-learn,henrykironde/scikit-learn,ltiao/scikit-learn,Achuth17/scikit-learn,Srisai85/scikit-learn,luo66/scikit-learn,idlead/scikit-learn,Djabbz/scikit-learn,ivannz/scikit-learn,larsmans/scikit-learn,huzq/scikit-learn,0asa/scikit-learn,CforED/Machine-Learning,mojoboss/scikit-learn,treycausey/scikit-learn,pypot/scikit-learn,bhargav/scikit-learn,alexsavio/scikit-learn,shangwuhencc/scikit-learn,vermouthmjl/scikit-learn,jereze/scikit-learn,Aasmi/scikit-learn,xuewei4d/scikit-learn,hdmetor/scikit-learn,PatrickOReilly/scikit-learn,jblackburne/scikit-learn,altairpearl/scikit-learn,zorojean/scikit-learn,vivekmishra1991/scikit-learn,mikebenfield/scikit-learn,pompiduskus/scikit-learn,maheshakya/scikit-learn,Clyde-fare/scikit-learn,MechCoder/scikit-learn,waterponey/scikit-learn,sanketloke/scikit-learn,PatrickOReilly/scikit-learn,olologin/scikit-learn,MartinSavc/scikit-learn,sonnyhu/scikit-learn,IndraVikas/scikit-learn,cwu2011/scikit-learn,loli/sklearn-ensembletrees,RachitKansal/scikit-learn,Lawrence-Liu/scikit-learn,poryfly/scikit-learn,nesterione/scikit-learn,jorik041/scikit-learn,ankurankan/scikit-learn,pythonvietnam/scikit-learn,manashmndl/scikit-learn,nesterione/scikit-learn,ssaeger/scikit-learn,icdishb/scikit-learn,bthirion/scikit-learn,belltailjp/scikit-learn,bhargav/scikit-learn,hainm/scikit-learn,liberatorqjw/scikit-learn,jakobworldpeace/scikit-learn,pianomania/scikit-learn,djgagne/scikit-learn,Fireblend/scikit-learn,kylerbrown/scikit-learn,terkkila/scikit-learn,jakirkham/scikit-learn,waterponey/scikit-learn,aewhatley/scikit-learn,manhhomienbienthuy/scikit-learn,altairpearl/scikit-learn,manashmndl/scikit-learn,aetilley/scikit-learn,pkruskal/scikit-learn,manhhomienbienthuy/scikit-learn,liberatorqjw/scikit-learn,aminert/scikit-learn,Myasuka/scikit-learn,jaidevd/scikit-learn,betatim/scikit-learn,ChanderG/scikit-learn,rishikksh20/scikit-learn,gclenaghan/scikit-learn,henrykironde/scikit-learn,shahankhatch/scikit-learn,jzt5132/scikit-learn,tdhopper/scikit-learn,ahoyosid/scikit-learn,nelson-liu/scikit-learn,DSLituiev/scikit-learn,hlin117/scikit-learn,vermouthmjl/scikit-learn,AlexandreAbraham/scikit-learn,zihua/scikit-learn,f3r/scikit-learn,anirudhjayaraman/scikit-learn,xzh86/scikit-learn,dsullivan7/scikit-learn,chrisburr/scikit-learn,xavierwu/scikit-learn,mblondel/scikit-learn,jmetzen/scikit-learn,kagayakidan/scikit-learn
,ngoix/OCRF,roxyboy/scikit-learn,henrykironde/scikit-learn,liangz0707/scikit-learn,Akshay0724/scikit-learn,mjgrav2001/scikit-learn,clemkoa/scikit-learn,CVML/scikit-learn,themrmax/scikit-learn,manhhomienbienthuy/scikit-learn,ankurankan/scikit-learn,mlyundin/scikit-learn,aabadie/scikit-learn,zorojean/scikit-learn,shahankhatch/scikit-learn,Jimmy-Morzaria/scikit-learn,vinayak-mehta/scikit-learn,thientu/scikit-learn,CVML/scikit-learn,andaag/scikit-learn,jblackburne/scikit-learn,Sentient07/scikit-learn,shangwuhencc/scikit-learn,h2educ/scikit-learn,moutai/scikit-learn,jm-begon/scikit-learn,ycaihua/scikit-learn,Fireblend/scikit-learn,BiaDarkia/scikit-learn,sarahgrogan/scikit-learn,jayflo/scikit-learn,TomDLT/scikit-learn,tmhm/scikit-learn,mhdella/scikit-learn,xzh86/scikit-learn,mikebenfield/scikit-learn,jjx02230808/project0223,ngoix/OCRF,ogrisel/scikit-learn,nrhine1/scikit-learn,jayflo/scikit-learn,OshynSong/scikit-learn,rahul-c1/scikit-learn,pythonvietnam/scikit-learn,adamgreenhall/scikit-learn,kjung/scikit-learn,gotomypc/scikit-learn,bikong2/scikit-learn,simon-pepin/scikit-learn,scikit-learn/scikit-learn,MartinSavc/scikit-learn,xyguo/scikit-learn,Srisai85/scikit-learn,huobaowangxi/scikit-learn,DSLituiev/scikit-learn,MartinSavc/scikit-learn,Garrett-R/scikit-learn,sanketloke/scikit-learn,ky822/scikit-learn,meduz/scikit-learn,trankmichael/scikit-learn,kashif/scikit-learn,wzbozon/scikit-learn,murali-munna/scikit-learn,hdmetor/scikit-learn,eg-zhang/scikit-learn,CforED/Machine-Learning,RayMick/scikit-learn,scikit-learn/scikit-learn,mhue/scikit-learn,xyguo/scikit-learn,huobaowangxi/scikit-learn,gotomypc/scikit-learn,bigdataelephants/scikit-learn,larsmans/scikit-learn,MatthieuBizien/scikit-learn,voxlol/scikit-learn,kashif/scikit-learn,LohithBlaze/scikit-learn,bhargav/scikit-learn,glouppe/scikit-learn,JPFrancoia/scikit-learn,shyamalschandra/scikit-learn,etkirsch/scikit-learn,alexeyum/scikit-learn,potash/scikit-learn,mxjl620/scikit-learn,joernhees/scikit-learn,ky822/scikit-learn,ahoyosid/scikit-learn,rajat1994/scikit-learn,untom/scikit-learn,vortex-ape/scikit-learn,macks22/scikit-learn,fabianp/scikit-learn,MartinSavc/scikit-learn,ZENGXH/scikit-learn,yanlend/scikit-learn,JPFrancoia/scikit-learn,thientu/scikit-learn,scikit-learn/scikit-learn,ningchi/scikit-learn,spallavolu/scikit-learn,nhejazi/scikit-learn,beepee14/scikit-learn,henridwyer/scikit-learn,huzq/scikit-learn,costypetrisor/scikit-learn,marcocaccin/scikit-learn,yyjiang/scikit-learn,xzh86/scikit-learn,yunfeilu/scikit-learn,voxlol/scikit-learn,ZENGXH/scikit-learn,jorge2703/scikit-learn,YinongLong/scikit-learn,aminert/scikit-learn,lin-credible/scikit-learn,murali-munna/scikit-learn,belltailjp/scikit-learn,IshankGulati/scikit-learn,nikitasingh981/scikit-learn,carrillo/scikit-learn,mlyundin/scikit-learn,RomainBrault/scikit-learn,rishikksh20/scikit-learn,ssaeger/scikit-learn,mehdidc/scikit-learn,DonBeo/scikit-learn,pnedunuri/scikit-learn,deepesch/scikit-learn,qifeigit/scikit-learn,glennq/scikit-learn,lin-credible/scikit-learn,fengzhyuan/scikit-learn,jpautom/scikit-learn,YinongLong/scikit-learn,abhishekgahlot/scikit-learn,liyu1990/sklearn,lin-credible/scikit-learn,hlin117/scikit-learn,Achuth17/scikit-learn,arjoly/scikit-learn,trungnt13/scikit-learn,nvoron23/scikit-learn,ssaeger/scikit-learn,0x0all/scikit-learn,glouppe/scikit-learn,nvoron23/scikit-learn,mfjb/scikit-learn,rexshihaoren/scikit-learn,loli/sklearn-ensembletrees,imaculate/scikit-learn,quheng/scikit-learn,shahankhatch/scikit-learn,lenovor/scikit-learn,btabibian/scikit-learn,carrillo/scikit-learn,ar
abenjamin/scikit-learn,shikhardb/scikit-learn,fengzhyuan/scikit-learn,Akshay0724/scikit-learn,marcocaccin/scikit-learn,hitszxp/scikit-learn,jakobworldpeace/scikit-learn,MechCoder/scikit-learn,aabadie/scikit-learn,alexeyum/scikit-learn,pianomania/scikit-learn,vivekmishra1991/scikit-learn,luo66/scikit-learn,hlin117/scikit-learn,Djabbz/scikit-learn,mehdidc/scikit-learn,sinhrks/scikit-learn,Achuth17/scikit-learn,hainm/scikit-learn,Aasmi/scikit-learn,hugobowne/scikit-learn,anurag313/scikit-learn,costypetrisor/scikit-learn,jakobworldpeace/scikit-learn,chrisburr/scikit-learn,stylianos-kampakis/scikit-learn,beepee14/scikit-learn,toastedcornflakes/scikit-learn,walterreade/scikit-learn,f3r/scikit-learn,fredhusser/scikit-learn,LohithBlaze/scikit-learn,vortex-ape/scikit-learn,tawsifkhan/scikit-learn,ChanderG/scikit-learn,poryfly/scikit-learn,bthirion/scikit-learn,krez13/scikit-learn,jorik041/scikit-learn,hainm/scikit-learn
|
Add example plot script for RANSAC
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
from sklearn.utils import ransac
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.LinearRegression()
inlier_mask = ransac(X, y, model_robust, min_n_samples=2, residual_threshold=2)
outlier_mask = ~inlier_mask
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
|
<commit_before><commit_msg>Add example plot script for RANSAC<commit_after>
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
from sklearn.utils import ransac
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.LinearRegression()
inlier_mask = ransac(X, y, model_robust, min_n_samples=2, residual_threshold=2)
outlier_mask = ~inlier_mask
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
|
Add example plot script for RANSAC"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
from sklearn.utils import ransac
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.LinearRegression()
inlier_mask = ransac(X, y, model_robust, min_n_samples=2, residual_threshold=2)
outlier_mask = ~inlier_mask
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
|
<commit_before><commit_msg>Add example plot script for RANSAC<commit_after>"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
from sklearn.utils import ransac
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.LinearRegression()
inlier_mask = ransac(X, y, model_robust, min_n_samples=2, residual_threshold=2)
outlier_mask = ~inlier_mask
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
|
|
a6407fe9a3b77372ed3e93b92f77bcea32e77393
|
kive/archive/migrations/0014_dataset_name_length.py
|
kive/archive/migrations/0014_dataset_name_length.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('archive', '0013_methodoutput_are_checksums_ok'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='name',
field=models.CharField(help_text='Name of this Dataset.', max_length=260),
preserve_default=True,
),
]
|
Add a migration for dataset name length.
|
Add a migration for dataset name length.
|
Python
|
bsd-3-clause
|
cfe-lab/Kive,cfe-lab/Kive,cfe-lab/Kive,cfe-lab/Kive,cfe-lab/Kive
|
Add a migration for dataset name length.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('archive', '0013_methodoutput_are_checksums_ok'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='name',
field=models.CharField(help_text='Name of this Dataset.', max_length=260),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration for dataset name length.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('archive', '0013_methodoutput_are_checksums_ok'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='name',
field=models.CharField(help_text='Name of this Dataset.', max_length=260),
preserve_default=True,
),
]
|
Add a migration for dataset name length.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('archive', '0013_methodoutput_are_checksums_ok'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='name',
field=models.CharField(help_text='Name of this Dataset.', max_length=260),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration for dataset name length.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('archive', '0013_methodoutput_are_checksums_ok'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='name',
field=models.CharField(help_text='Name of this Dataset.', max_length=260),
preserve_default=True,
),
]
|
|
db20a15c34fc6bb43ad8e0a4860dd252e49a8033
|
Regression/MultipleLinearRegression/regularMultipleRegression.py
|
Regression/MultipleLinearRegression/regularMultipleRegression.py
|
# -*- coding: utf-8 -*-
"""Multiple linear regression for machine learning.
A linear regression model that contains more than one predictor variable is
called a multiple linear regression model. It is basically the same as Simple
Linear regression, but with more predictor variables (features). The idea is
that linearly related predictor variables can approximate the labels with a
'best fitted' hyperplane or surface.
Example:
$ python regularMultipleRegression.py
Todo:
*
"""
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import (LabelEncoder,
OneHotEncoder)
from sklearn.model_selection import train_test_split
# importing the dataset
dataset = pd.read_csv('50_Startups.csv')
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, 4].values
# encode State column
labelencoder_features = LabelEncoder()
features[:, 3] = labelencoder_features.fit_transform(features[:, 3])
onehotencoder = OneHotEncoder(categorical_features=[3])
features = onehotencoder.fit_transform(features).toarray()
# Avoiding the Dummy variable trap
features = features[:, 1:] # is done automatically, but just to show I know
# Splitting the Dataset into a Training set and a Test set
feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
# Fit the training set naively with the multiple linear regression model
regressor = LinearRegression()
regressor.fit(feature_train, label_train)
# Predict the test set
label_pred = regressor.predict(feature_test)
# Building the optimal model using the Backward Elimination method
# Due to statsmodels we need to add an intercept column
features = np.append(arr=np.ones((50, 1)).astype(int), values=features, axis=1)
columnlist = list(range(features.shape[1]))  # list of column indices
significant = 0.05
while True:
features_opt = features[:, columnlist]
regressor_OLS = sm.OLS(endog=labels, exog=features_opt).fit()
pvalues = regressor_OLS.pvalues
if (np.max(pvalues) > significant):
i = int(np.where(pvalues == np.max(pvalues))[0])
columnlist.pop(i)
else:
break
regressor_OLS.summary()
|
Add Python file for Multiple linear regression
|
Add Python file for Multiple linear regression
|
Python
|
mit
|
a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms
|
Add Python file for Multiple linear regression
|
# -*- coding: utf-8 -*-
"""Multiple linear regression for machine learning.
A linear regression model that contains more than one predictor variable is
called a multiple linear regression model. It is basically the same as Simple
Linear regression, but with more predictor variables (features). The idea is
that linearly related predictor variables can approximate the labels with a
'best fitted' hyperplane or surface.
Example:
$ python regularMultipleRegression.py
Todo:
*
"""
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import (LabelEncoder,
OneHotEncoder)
from sklearn.model_selection import train_test_split
# importing the dataset
dataset = pd.read_csv('50_Startups.csv')
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, 4].values
# encode State column
labelencoder_features = LabelEncoder()
features[:, 3] = labelencoder_features.fit_transform(features[:, 3])
onehotencoder = OneHotEncoder(categorical_features=[3])
features = onehotencoder.fit_transform(features).toarray()
# Avoiding the Dummy variable trap
features = features[:, 1:] # is done automatically, but just to show I know
# Splitting the Dataset into a Training set and a Test set
feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
# Fit the training set naively with the multiple linear regression model
regressor = LinearRegression()
regressor.fit(feature_train, label_train)
# Predict the test set
label_pred = regressor.predict(feature_test)
# Building the optimal model using the Backward Elimination method
# Due to statsmodels we need to add an intercept column
features = np.append(arr=np.ones((50, 1)).astype(int), values=features, axis=1)
columnlist = list(range(features.shape[1]))  # list of column indices
significant = 0.05
while True:
features_opt = features[:, columnlist]
regressor_OLS = sm.OLS(endog=labels, exog=features_opt).fit()
pvalues = regressor_OLS.pvalues
if (np.max(pvalues) > significant):
i = int(np.where(pvalues == np.max(pvalues))[0])
columnlist.pop(i)
else:
break
regressor_OLS.summary()
|
<commit_before><commit_msg>Add Python file for Multiple linear regression<commit_after>
|
# -*- coding: utf-8 -*-
"""Multiple linear regression for machine learning.
A linear regression model that contains more than one predictor variable is
called a multiple linear regression model. It is basically the same as Simple
Linear regression, but with more predictor variables (features). The idea is
that linearly related predictor variables can approximate the labels with a
'best fitted' hyperplane or surface.
Example:
$ python regularMultipleRegression.py
Todo:
*
"""
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import (LabelEncoder,
OneHotEncoder)
from sklearn.model_selection import train_test_split
# importing the dataset
dataset = pd.read_csv('50_Startups.csv')
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, 4].values
# encode State column
labelencoder_features = LabelEncoder()
features[:, 3] = labelencoder_features.fit_transform(features[:, 3])
onehotencoder = OneHotEncoder(categorical_features=[3])
features = onehotencoder.fit_transform(features).toarray()
# Avoiding the Dummy variable trap
features = features[:, 1:] # is done automatically, but just to show I know
# Splitting the Dataset into a Training set and a Test set
feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
# Fit the training set naively with the multiple linear regression model
regressor = LinearRegression()
regressor.fit(feature_train, label_train)
# Predict the test set
label_pred = regressor.predict(feature_test)
# Building the optimal model using the Backward Elimination method
# Due to statsmodels we need to add an intercept column
features = np.append(arr=np.ones((50, 1)).astype(int), values=features, axis=1)
columnlist = list(range(features.shape[1]))  # list of column indices
significant = 0.05
while True:
features_opt = features[:, columnlist]
regressor_OLS = sm.OLS(endog=labels, exog=features_opt).fit()
pvalues = regressor_OLS.pvalues
if (np.max(pvalues) > significant):
i = int(np.where(pvalues == np.max(pvalues))[0])
columnlist.pop(i)
else:
break
regressor_OLS.summary()
|
Add Python file for Multiple linear regression# -*- coding: utf-8 -*-
"""Multiple linear regression for machine learning.
A linear regression model that contains more than one predictor variable is
called a multiple linear regression model. It is basically the same as Simple
Linear regression, but with more predictor variables (features). The idea is
that linearly related predictor variables can approximate the labels with a
'best fitted' hyperplane or surface.
Example:
$ python regularMultipleRegression.py
Todo:
*
"""
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import (LabelEncoder,
OneHotEncoder)
from sklearn.model_selection import train_test_split
# importing the dataset
dataset = pd.read_csv('50_Startups.csv')
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, 4].values
# encode State column
labelencoder_features = LabelEncoder()
features[:, 3] = labelencoder_features.fit_transform(features[:, 3])
onehotencoder = OneHotEncoder(categorical_features=[3])
features = onehotencoder.fit_transform(features).toarray()
# Avoiding the Dummy variable trap
features = features[:, 1:] # is done automatically, but just to show I know
# Splitting the Dataset into a Training set and a Test set
feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
# Fit the training set naively with the multiple linear regression model
regressor = LinearRegression()
regressor.fit(feature_train, label_train)
# Predict the test set
label_pred = regressor.predict(feature_test)
# Building the optimal model using the Backward Elimination method
# Due to statsmodels we need to add an intercept column
features = np.append(arr=np.ones((50, 1)).astype(int), values=features, axis=1)
columnlist = list(range(features.shape[1]))  # list of column indices
significant = 0.05
while True:
features_opt = features[:, columnlist]
regressor_OLS = sm.OLS(endog=labels, exog=features_opt).fit()
pvalues = regressor_OLS.pvalues
if (np.max(pvalues) > significant):
i = int(np.where(pvalues == np.max(pvalues))[0])
columnlist.pop(i)
else:
break
regressor_OLS.summary()
|
<commit_before><commit_msg>Add Python file for Multiple linear regression<commit_after># -*- coding: utf-8 -*-
"""Multiple linear regression for machine learning.
A linear regression model that contains more than one predictor variable is
called a multiple linear regression model. It is basically the same as Simple
Linear regression, but with more predictor variables (features). The idea is
that linearly related predictor variables can approximate the labels with a
'best fitted' hyperplane or surface.
Example:
$ python regularMultipleRegression.py
Todo:
*
"""
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import (LabelEncoder,
OneHotEncoder)
from sklearn.model_selection import train_test_split
# importing the dataset
dataset = pd.read_csv('50_Startups.csv')
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, 4].values
# encode State column
labelencoder_features = LabelEncoder()
features[:, 3] = labelencoder_features.fit_transform(features[:, 3])
onehotencoder = OneHotEncoder(categorical_features=[3])
features = onehotencoder.fit_transform(features).toarray()
# Avoiding the Dummy variable trap
features = features[:, 1:] # is done automatically, but just to show I know
# Splitting the Dataset into a Training set and a Test set
feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)
# Fit the training set naively with the multiple linear regression model
regressor = LinearRegression()
regressor.fit(feature_train, label_train)
# Predict the test set
label_pred = regressor.predict(feature_test)
# Building the optimal model using the Backward Elimination method
# Due to statsmodels we need to add an intercept column
features = np.append(arr=np.ones((50, 1)).astype(int), values=features, axis=1)
columnlist = list(range(features.shape[1]))  # list of column indices
significant = 0.05
while True:
features_opt = features[:, columnlist]
regressor_OLS = sm.OLS(endog=labels, exog=features_opt).fit()
pvalues = regressor_OLS.pvalues
if (np.max(pvalues) > significant):
i = int(np.where(pvalues == np.max(pvalues))[0])
columnlist.pop(i)
else:
break
regressor_OLS.summary()
|
|
5fee82b0ef269993ebc4147bfd825718a460616c
|
neural_style/convert_model_cpu.py
|
neural_style/convert_model_cpu.py
|
from transformer_net import TransformerNet
import argparse
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu-model", type=str, required=True)
parser.add_argument("--cpu-model", type=str, required=True)
args = parser.parse_args()
tr = torch.load(args.gpu_model)
tr.eval()
tr.cpu()
torch.save(tr, args.cpu_model)
if __name__ == "__main__":
main()
|
Convert gpu model to cpu model
|
Convert gpu model to cpu model
|
Python
|
mit
|
onai/fast-neural-style
|
Convert gpu model to cpu model
|
from transformer_net import TransformerNet
import argparse
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu-model", type=str, required=True)
parser.add_argument("--cpu-model", type=str, required=True)
args = parser.parse_args()
tr = torch.load(args.gpu_model)
tr.eval()
tr.cpu()
torch.save(tr, args.cpu_model)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Convert gpu model to cpu model<commit_after>
|
from transformer_net import TransformerNet
import argparse
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu-model", type=str, required=True)
parser.add_argument("--cpu-model", type=str, required=True)
args = parser.parse_args()
tr = torch.load(args.gpu_model)
tr.eval()
tr.cpu()
torch.save(tr, args.cpu_model)
if __name__ == "__main__":
main()
|
Convert gpu model to cpu modelfrom transformer_net import TransformerNet
import argparse
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu-model", type=str, required=True)
parser.add_argument("--cpu-model", type=str, required=True)
args = parser.parse_args()
tr = torch.load(args.gpu_model)
tr.eval()
tr.cpu()
torch.save(tr, args.cpu_model)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Convert gpu model to cpu model<commit_after>from transformer_net import TransformerNet
import argparse
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu-model", type=str, required=True)
parser.add_argument("--cpu-model", type=str, required=True)
args = parser.parse_args()
tr = torch.load(args.gpu_model)
tr.eval()
tr.cpu()
torch.save(tr, args.cpu_model)
if __name__ == "__main__":
main()
|
|
a7c952cce7c006913727adf1be29e6d94e3b9f6a
|
orges/test/unit/test_pluggable.py
|
orges/test/unit/test_pluggable.py
|
from mock import Mock
from orges.invoker.pluggable import PluggableInvoker
from orges.args import ArgsCreator
import orges.param as param
@param.int("a", interval=(0, 1))
def f(a):
return a
def test_before_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.before_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.before_invoke.called
def test_on_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.on_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_invoke.called
def test_on_result_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_result = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_result(0, fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_result.called
def test_on_error_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_error = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_error(fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_error.called
if __name__ == '__main__':
import nose
nose.runmodule()
|
Add unit tests for invocation plugin hooks
|
Add unit tests for invocation plugin hooks
|
Python
|
bsd-3-clause
|
cigroup-ol/metaopt,cigroup-ol/metaopt,cigroup-ol/metaopt
|
Add unit tests for invocation plugin hooks
|
from mock import Mock
from orges.invoker.pluggable import PluggableInvoker
from orges.args import ArgsCreator
import orges.param as param
@param.int("a", interval=(0, 1))
def f(a):
return a
def test_before_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.before_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.before_invoke.called
def test_on_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.on_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_invoke.called
def test_on_result_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_result = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_result(0, fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_result.called
def test_on_error_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_error = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_error(fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_error.called
if __name__ == '__main__':
import nose
nose.runmodule()
|
<commit_before><commit_msg>Add unit tests for invocation plugin hooks<commit_after>
|
from mock import Mock
from orges.invoker.pluggable import PluggableInvoker
from orges.args import ArgsCreator
import orges.param as param
@param.int("a", interval=(0, 1))
def f(a):
return a
def test_before_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.before_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.before_invoke.called
def test_on_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.on_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_invoke.called
def test_on_result_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_result = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_result(0, fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_result.called
def test_on_error_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_error = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_error(fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_error.called
if __name__ == '__main__':
import nose
nose.runmodule()
|
Add unit tests for invocation plugin hooksfrom mock import Mock
from orges.invoker.pluggable import PluggableInvoker
from orges.args import ArgsCreator
import orges.param as param
@param.int("a", interval=(0, 1))
def f(a):
return a
def test_before_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.before_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.before_invoke.called
def test_on_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.on_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_invoke.called
def test_on_result_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_result = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_result(0, fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_result.called
def test_on_error_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_error = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_error(fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_error.called
if __name__ == '__main__':
import nose
nose.runmodule()
|
<commit_before><commit_msg>Add unit tests for invocation plugin hooks<commit_after>from mock import Mock
from orges.invoker.pluggable import PluggableInvoker
from orges.args import ArgsCreator
import orges.param as param
@param.int("a", interval=(0, 1))
def f(a):
return a
def test_before_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.before_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.before_invoke.called
def test_on_invoke_calls_plugins():
mock_plugin = Mock()
mock_plugin.on_invoke = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_invoke.called
def test_on_result_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_result = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_result(0, fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_result.called
def test_on_error_calls_plugins():
stub_caller = Mock()
mock_plugin = Mock()
mock_plugin.on_error = Mock(spec=[])
plugins = [mock_plugin]
stub_invoker = Mock()
invoker = PluggableInvoker(None, stub_invoker, plugins=plugins)
invoker.caller = stub_caller
def stub_invoke(f, fargs, **kwargs):
invoker.on_error(fargs, **kwargs)
stub_invoker.invoke = Mock(spec=[])
stub_invoker.invoke.side_effect = stub_invoke
args = ArgsCreator(f.param_spec).args()
invoker.invoke(f, args)
assert mock_plugin.on_error.called
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
45ef1d56e77b1b5414c7c5d596441295c8cef497
|
scripts/generate_csv_files.py
|
scripts/generate_csv_files.py
|
# -*- coding: utf-8 -*-
import pandas as pd
df = pd.read_csv("data/all.anonymes.csv", dtype=object, encoding='utf-8')
df['DECL_AVANT_MONTANT'] = df.DECL_AVANT_MONTANT.astype('float32')
# by LABO
labos = df.groupby(['LABO', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
labos.columns = labos.columns.droplevel(0)
labos.to_csv('public/labos.csv', encoding='utf-8')
# by ORIGIN
origins = df.groupby(['ORIGIN', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
origins.columns = origins.columns.droplevel(0)
origins.to_csv('public/origins.csv', encoding='utf-8')
# by DECL_AVANT_NATURE
natures = df.groupby(['DECL_AVANT_NATURE', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
natures.columns = natures.columns.droplevel(0)
natures.to_csv('public/natures.csv', encoding='utf-8')
|
Add script to generate csv for dataviz
|
Add script to generate csv for dataviz
|
Python
|
agpl-3.0
|
regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data
|
Add script to generate csv for dataviz
|
# -*- coding: utf-8 -*-
import pandas as pd
df = pd.read_csv("data/all.anonymes.csv", dtype=object, encoding='utf-8')
df['DECL_AVANT_MONTANT'] = df.DECL_AVANT_MONTANT.astype('float32')
# by LABO
labos = df.groupby(['LABO', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
labos.columns = labos.columns.droplevel(0)
labos.to_csv('public/labos.csv', encoding='utf-8')
# by ORIGIN
origins = df.groupby(['ORIGIN', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
origins.columns = origins.columns.droplevel(0)
origins.to_csv('public/origins.csv', encoding='utf-8')
# by DECL_AVANT_NATURE
natures = df.groupby(['DECL_AVANT_NATURE', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
natures.columns = natures.columns.droplevel(0)
natures.to_csv('public/natures.csv', encoding='utf-8')
|
<commit_before><commit_msg>Add script to generate csv for dataviz<commit_after>
|
# -*- coding: utf-8 -*-
import pandas as pd
df = pd.read_csv("data/all.anonymes.csv", dtype=object, encoding='utf-8')
df['DECL_AVANT_MONTANT'] = df.DECL_AVANT_MONTANT.astype('float32')
# by LABO
labos = df.groupby(['LABO', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
labos.columns = labos.columns.droplevel(0)
labos.to_csv('public/labos.csv', encoding='utf-8')
# by ORIGIN
origins = df.groupby(['ORIGIN', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
origins.columns = origins.columns.droplevel(0)
origins.to_csv('public/origins.csv', encoding='utf-8')
# by DECL_AVANT_NATURE
natures = df.groupby(['DECL_AVANT_NATURE', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
natures.columns = natures.columns.droplevel(0)
natures.to_csv('public/natures.csv', encoding='utf-8')
|
Add script to generate csv for dataviz# -*- coding: utf-8 -*-
import pandas as pd
df = pd.read_csv("data/all.anonymes.csv", dtype=object, encoding='utf-8')
df['DECL_AVANT_MONTANT'] = df.DECL_AVANT_MONTANT.astype('float32')
# by LABO
labos = df.groupby(['LABO', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
labos.columns = labos.columns.droplevel(0)
labos.to_csv('public/labos.csv', encoding='utf-8')
# by ORIGIN
origins = df.groupby(['ORIGIN', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
origins.columns = origins.columns.droplevel(0)
origins.to_csv('public/origins.csv', encoding='utf-8')
# by DECL_AVANT_NATURE
natures = df.groupby(['DECL_AVANT_NATURE', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
natures.columns = natures.columns.droplevel(0)
natures.to_csv('public/natures.csv', encoding='utf-8')
|
<commit_before><commit_msg>Add script to generate csv for dataviz<commit_after># -*- coding: utf-8 -*-
import pandas as pd
df = pd.read_csv("data/all.anonymes.csv", dtype=object, encoding='utf-8')
df['DECL_AVANT_MONTANT'] = df.DECL_AVANT_MONTANT.astype('float32')
# by LABO
labos = df.groupby(['LABO', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
labos.columns = labos.columns.droplevel(0)
labos.to_csv('public/labos.csv', encoding='utf-8')
# by ORIGIN
origins = df.groupby(['ORIGIN', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
origins.columns = origins.columns.droplevel(0)
origins.to_csv('public/origins.csv', encoding='utf-8')
# by DECL_AVANT_NATURE
natures = df.groupby(['DECL_AVANT_NATURE', 'BENEF_PS_DEPARTEMENT', 'DECL_TYPE']).agg({'DECL_AVANT_MONTANT': {'DECL_AVANT_SOMME': 'sum', 'DECL_AVANT_NOMBRE': 'count'}})
natures.columns = natures.columns.droplevel(0)
natures.to_csv('public/natures.csv', encoding='utf-8')
|
|
dedfec08bbec4c97ff8e7e1242ac8d406cc73b0b
|
init.py
|
init.py
|
url = "https://pub.orcid.org/0000-0002-2907-3313"
import requests
import json
resp = requests.get(url,
headers={'Accept':'application/orcid+json'})
print json.dumps(resp.json(),
sort_keys=True,
indent=4, separators=(',', ': '))
|
Load JSON of Melodee ORCID
|
Load JSON of Melodee ORCID
|
Python
|
mit
|
njall/Orctrix,njall/Orctrix,njall/Orctrix
|
Load JSON of Melodee ORCID
|
url = "https://pub.orcid.org/0000-0002-2907-3313"
import requests
import json
resp = requests.get(url,
headers={'Accept':'application/orcid+json'})
print json.dumps(resp.json(),
sort_keys=True,
indent=4, separators=(',', ': '))
|
<commit_before><commit_msg>Load JSON of Melodee ORCID<commit_after>
|
url = "https://pub.orcid.org/0000-0002-2907-3313"
import requests
import json
resp = requests.get(url,
headers={'Accept':'application/orcid+json'})
print json.dumps(resp.json(),
sort_keys=True,
indent=4, separators=(',', ': '))
|
Load JSON of Melodee ORCIDurl = "https://pub.orcid.org/0000-0002-2907-3313"
import requests
import json
resp = requests.get(url,
headers={'Accept':'application/orcid+json'})
print json.dumps(resp.json(),
sort_keys=True,
indent=4, separators=(',', ': '))
|
<commit_before><commit_msg>Load JSON of Melodee ORCID<commit_after>url = "https://pub.orcid.org/0000-0002-2907-3313"
import requests
import json
resp = requests.get(url,
headers={'Accept':'application/orcid+json'})
print json.dumps(resp.json(),
sort_keys=True,
indent=4, separators=(',', ': '))
|
|
1b8bf0b171532d366f0823ea9afb9ac500262488
|
python/array/RemoveElement.py
|
python/array/RemoveElement.py
|
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
result = len(A)
start = 0
for i in xrange(len(A)):
A[start] = A[i]
if A[i] != elem:
start += 1
else:
result -= 1
return result
|
Remove Element -- keep streak!
|
Remove Element -- keep streak!
|
Python
|
mit
|
sureleo/leetcode,sureleo/leetcode,lsingal/leetcode,sureleo/leetcode,lsingal/leetcode,lsingal/leetcode
|
Remove Element -- keep streak!
|
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
result = len(A)
start = 0
for i in xrange(len(A)):
A[start] = A[i]
if A[i] != elem:
start += 1
else:
result -= 1
return result
|
<commit_before><commit_msg>Remove Element -- keep streak!<commit_after>
|
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
result = len(A)
start = 0
for i in xrange(len(A)):
A[start] = A[i]
if A[i] != elem:
start += 1
else:
result -= 1
return result
|
Remove Element -- keep streak!class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
result = len(A)
start = 0
for i in xrange(len(A)):
A[start] = A[i]
if A[i] != elem:
start += 1
else:
result -= 1
return result
|
<commit_before><commit_msg>Remove Element -- keep streak!<commit_after>class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
result = len(A)
start = 0
for i in xrange(len(A)):
A[start] = A[i]
if A[i] != elem:
start += 1
else:
result -= 1
return result
|
|
0f5c38b1c7bcd8a26c415c8d0c93edc132263087
|
CodeFights/firstDigit.py
|
CodeFights/firstDigit.py
|
#!/usr/local/bin/python
# Code Fights First Digit Problem
import re
def firstDigit(inputString):
match = re.search(r'\d', inputString)
return match.group(0)
def main():
tests = [
["var_1__Int", "1"],
["q2q-q", "2"],
["0ss", "0"]
]
for t in tests:
res = firstDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: firstDigit({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: firstDigit({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights first digit problem
|
Solve Code Fights first digit problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights first digit problem
|
#!/usr/local/bin/python
# Code Fights First Digit Problem
import re
def firstDigit(inputString):
match = re.search(r'\d', inputString)
return match.group(0)
def main():
tests = [
["var_1__Int", "1"],
["q2q-q", "2"],
["0ss", "0"]
]
for t in tests:
res = firstDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: firstDigit({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: firstDigit({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights first digit problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights First Digit Problem
import re
def firstDigit(inputString):
match = re.search(r'\d', inputString)
return match.group(0)
def main():
tests = [
["var_1__Int", "1"],
["q2q-q", "2"],
["0ss", "0"]
]
for t in tests:
res = firstDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: firstDigit({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: firstDigit({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights first digit problem#!/usr/local/bin/python
# Code Fights First Digit Problem
import re
def firstDigit(inputString):
match = re.search(r'\d', inputString)
return match.group(0)
def main():
tests = [
["var_1__Int", "1"],
["q2q-q", "2"],
["0ss", "0"]
]
for t in tests:
res = firstDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: firstDigit({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: firstDigit({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights first digit problem<commit_after>#!/usr/local/bin/python
# Code Fights First Digit Problem
import re
def firstDigit(inputString):
match = re.search(r'\d', inputString)
return match.group(0)
def main():
tests = [
["var_1__Int", "1"],
["q2q-q", "2"],
["0ss", "0"]
]
for t in tests:
res = firstDigit(t[0])
ans = t[1]
if ans == res:
print("PASSED: firstDigit({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: firstDigit({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
dcd80232189743962d12a3df15bbb2708b1966b8
|
tests/test_decider.py
|
tests/test_decider.py
|
from unittest.mock import MagicMock
import boto.swf.layer2 as swf
from boto.swf import layer1
import pytest
from garcon import activity
from garcon import decider
def mock(monkeypatch):
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
|
Add some tests for the decider.
|
Add some tests for the decider.
|
Python
|
mit
|
xethorn/garcon,theorchard/garcon,pkuong/garcon,rantonmattei/garcon,someboredkiddo/garcon
|
Add some tests for the decider.
|
from unittest.mock import MagicMock
import boto.swf.layer2 as swf
from boto.swf import layer1
import pytest
from garcon import activity
from garcon import decider
def mock(monkeypatch):
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
|
<commit_before><commit_msg>Add some tests for the decider.<commit_after>
|
from unittest.mock import MagicMock
import boto.swf.layer2 as swf
from boto.swf import layer1
import pytest
from garcon import activity
from garcon import decider
def mock(monkeypatch):
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
|
Add some tests for the decider.from unittest.mock import MagicMock
import boto.swf.layer2 as swf
from boto.swf import layer1
import pytest
from garcon import activity
from garcon import decider
def mock(monkeypatch):
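# stub out the boto.swf base classes so these tests never touch the real AWS SWF API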
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
|
<commit_before><commit_msg>Add some tests for the decider.<commit_after>from unittest.mock import MagicMock
import boto.swf.layer2 as swf
from boto.swf import layer1
import pytest
from garcon import activity
from garcon import decider
def mock(monkeypatch):
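# stub out the boto.swf base classes so these tests never touch the real AWS SWF API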
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
|
|
1ec0c1d949e9379fc2a01bf480a782bb4d75afb9
|
test.py
|
test.py
|
#!/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the 'send_morse' module.
"""
import sys
import os
import getopt
import threading
sys.path.append('..')
from sound_morse import SoundMorse
# get program name from sys.argv
prog_name = sys.argv[0]
if prog_name.endswith('.py'):
prog_name = prog_name[:-3]
def usage(msg=None):
if msg:
print(('*'*80 + '\n%s\n' + '*'*80) % msg)
print("\n"
"CLI program to continually send a morse string.\n\n"
"Usage: %s [-h] [-s c,w] <string>\n\n"
"where -h means print this help and stop\n"
" -s c,w means set char and word speeds\n\n"
"and <string> is the morse string to repeatedly send\n"
"The morse sound is created in a separate thread." % prog_name)
def send_morse(string, sound_object):
# sound each character in the string
# we do this in this strange way so setting a global from main code will stop the thread
for ch in string:
if StopThread:
return
sound_object.send(ch)
# parse the CLI params
argv = sys.argv[1:]
try:
(opts, args) = getopt.getopt(argv, 'hs:', ['help', 'speed='])
except getopt.GetoptError as err:
usage(err)
sys.exit(1)
morse_string = ''.join(args)
cwpm = 25
wpm = 15
for (opt, param) in opts:
if opt in ['-h', '--help']:
usage()
sys.exit(0)
elif opt in ['-s', '--speed']:
speeds = param.split(',')
if len(speeds) not in (1, 2):
usage("-s option must be followed by one or two speeds, eg: '-s 20' or '- 10,5'")
cwpm = speeds[0]
wpm = cwpm
if len(speeds) == 2:
(cwpm, wpm) = speeds
cwpm = int(cwpm)
wpm = int(wpm)
morse = SoundMorse()
morse.set_speeds(cwpm=cwpm, wpm=wpm)
StopThread = False
while True:
for ch in morse_string:
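# play each character in its own worker thread so that Ctrl-C in the main thread can set StopThread and stop it cleanly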
try:
thread = threading.Thread(target=send_morse, args=(ch, morse))
thread.start()
thread.join()
thread = None
except KeyboardInterrupt:
print('')
StopThread = True
if thread:
thread.join()
break
print('*', end='')
sys.stdout.flush()
|
Test program for thread debugging
|
Test program for thread debugging
|
Python
|
mit
|
rzzzwilson/morse_trainer,rzzzwilson/morse_trainer
|
Test program for thread debugging
|
#!/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the 'send_morse' module.
"""
import sys
import os
import getopt
import threading
sys.path.append('..')
from sound_morse import SoundMorse
# get program name from sys.argv
prog_name = sys.argv[0]
if prog_name.endswith('.py'):
prog_name = prog_name[:-3]
def usage(msg=None):
if msg:
print(('*'*80 + '\n%s\n' + '*'*80) % msg)
print("\n"
"CLI program to continually send a morse string.\n\n"
"Usage: %s [-h] [-s c,w] <string>\n\n"
"where -h means print this help and stop\n"
" -s c,w means set char and word speeds\n\n"
"and <string> is the morse string to repeatedly send\n"
"The morse sound is created in a separate thread." % prog_name)
def send_morse(string, sound_object):
# sound each character in the string
# we do this in this strange way so setting a global from main code will stop the thread
for ch in string:
if StopThread:
return
sound_object.send(ch)
# parse the CLI params
argv = sys.argv[1:]
try:
(opts, args) = getopt.getopt(argv, 'hs:', ['help', 'speed='])
except getopt.GetoptError as err:
usage(err)
sys.exit(1)
morse_string = ''.join(args)
cwpm = 25
wpm = 15
for (opt, param) in opts:
if opt in ['-h', '--help']:
usage()
sys.exit(0)
elif opt in ['-s', '--speed']:
speeds = param.split(',')
if len(speeds) not in (1, 2):
usage("-s option must be followed by one or two speeds, eg: '-s 20' or '- 10,5'")
cwpm = speeds[0]
wpm = cwpm
if len(speeds) == 2:
(cwpm, wpm) = speeds
cwpm = int(cwpm)
wpm = int(wpm)
morse = SoundMorse()
morse.set_speeds(cwpm=cwpm, wpm=wpm)
StopThread = False
while True:
for ch in morse_string:
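# play each character in its own worker thread so that Ctrl-C in the main thread can set StopThread and stop it cleanly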
try:
thread = threading.Thread(target=send_morse, args=(ch, morse))
thread.start()
thread.join()
thread = None
except KeyboardInterrupt:
print('')
StopThread = True
if thread:
thread.join()
break
print('*', end='')
sys.stdout.flush()
|
<commit_before><commit_msg>Test program for thread debugging<commit_after>
|
#!/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the 'send_morse' module.
"""
import sys
import os
import getopt
import threading
sys.path.append('..')
from sound_morse import SoundMorse
# get program name from sys.argv
prog_name = sys.argv[0]
if prog_name.endswith('.py'):
prog_name = prog_name[:-3]
def usage(msg=None):
if msg:
print(('*'*80 + '\n%s\n' + '*'*80) % msg)
print("\n"
"CLI program to continually send a morse string.\n\n"
"Usage: %s [-h] [-s c,w] <string>\n\n"
"where -h means print this help and stop\n"
" -s c,w means set char and word speeds\n\n"
"and <string> is the morse string to repeatedly send\n"
"The morse sound is created in a separate thread." % prog_name)
def send_morse(string, sound_object):
# sound each character in the string
# we do this in this strange way so setting a global from main code will stop the thread
for ch in string:
if StopThread:
return
sound_object.send(ch)
# parse the CLI params
argv = sys.argv[1:]
try:
(opts, args) = getopt.getopt(argv, 'hs:', ['help', 'speed='])
except getopt.GetoptError as err:
usage(err)
sys.exit(1)
morse_string = ''.join(args)
cwpm = 25
wpm = 15
for (opt, param) in opts:
if opt in ['-h', '--help']:
usage()
sys.exit(0)
elif opt in ['-s', '--speed']:
speeds = param.split(',')
if len(speeds) not in (1, 2):
usage("-s option must be followed by one or two speeds, eg: '-s 20' or '- 10,5'")
cwpm = speeds[0]
wpm = cwpm
if len(speeds) == 2:
(cwpm, wpm) = speeds
cwpm = int(cwpm)
wpm = int(wpm)
morse = SoundMorse()
morse.set_speeds(cwpm=cwpm, wpm=wpm)
StopThread = False
while True:
for ch in morse_string:
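# play each character in its own worker thread so that Ctrl-C in the main thread can set StopThread and stop it cleanly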
try:
thread = threading.Thread(target=send_morse, args=(ch, morse))
thread.start()
thread.join()
thread = None
except KeyboardInterrupt:
print('')
StopThread = True
if thread:
thread.join()
break
print('*', end='')
sys.stdout.flush()
|
Test program for thread debugging#!/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the 'send_morse' module.
"""
import sys
import os
import getopt
import threading
sys.path.append('..')
from sound_morse import SoundMorse
# get program name from sys.argv
prog_name = sys.argv[0]
if prog_name.endswith('.py'):
prog_name = prog_name[:-3]
def usage(msg=None):
if msg:
print(('*'*80 + '\n%s\n' + '*'*80) % msg)
print("\n"
"CLI program to continually send a morse string.\n\n"
"Usage: %s [-h] [-s c,w] <string>\n\n"
"where -h means print this help and stop\n"
" -s c,w means set char and word speeds\n\n"
"and <string> is the morse string to repeatedly send\n"
"The morse sound is created in a separate thread." % prog_name)
def send_morse(string, sound_object):
# sound each character in the string
# we do this in this strange way so setting a global from main code will stop the thread
for ch in string:
if StopThread:
return
sound_object.send(ch)
# parse the CLI params
argv = sys.argv[1:]
try:
(opts, args) = getopt.getopt(argv, 'hs:', ['help', 'speed='])
except getopt.GetoptError as err:
usage(err)
sys.exit(1)
morse_string = ''.join(args)
cwpm = 25
wpm = 15
for (opt, param) in opts:
if opt in ['-h', '--help']:
usage()
sys.exit(0)
elif opt in ['-s', '--speed']:
speeds = param.split(',')
if len(speeds) not in (1, 2):
usage("-s option must be followed by one or two speeds, eg: '-s 20' or '- 10,5'")
cwpm = speeds[0]
wpm = cwpm
if len(speeds) == 2:
(cwpm, wpm) = speeds
cwpm = int(cwpm)
wpm = int(wpm)
morse = SoundMorse()
morse.set_speeds(cwpm=cwpm, wpm=wpm)
StopThread = False
while True:
for ch in morse_string:
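# play each character in its own worker thread so that Ctrl-C in the main thread can set StopThread and stop it cleanly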
try:
thread = threading.Thread(target=send_morse, args=(ch, morse))
thread.start()
thread.join()
thread = None
except KeyboardInterrupt:
print('')
StopThread = True
if thread:
thread.join()
break
print('*', end='')
sys.stdout.flush()
|
<commit_before><commit_msg>Test program for thread debugging<commit_after>#!/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the 'send_morse' module.
"""
import sys
import os
import getopt
import threading
sys.path.append('..')
from sound_morse import SoundMorse
# get program name from sys.argv
prog_name = sys.argv[0]
if prog_name.endswith('.py'):
prog_name = prog_name[:-3]
def usage(msg=None):
if msg:
print(('*'*80 + '\n%s\n' + '*'*80) % msg)
print("\n"
"CLI program to continually send a morse string.\n\n"
"Usage: %s [-h] [-s c,w] <string>\n\n"
"where -h means print this help and stop\n"
" -s c,w means set char and word speeds\n\n"
"and <string> is the morse string to repeatedly send\n"
"The morse sound is created in a separate thread." % prog_name)
def send_morse(string, sound_object):
# sound each character in the string
# we do this in this strange way so setting a global from main code will stop the thread
for ch in string:
if StopThread:
return
sound_object.send(ch)
# parse the CLI params
argv = sys.argv[1:]
try:
(opts, args) = getopt.getopt(argv, 'hs:', ['help', 'speed='])
except getopt.GetoptError as err:
usage(err)
sys.exit(1)
morse_string = ''.join(args)
cwpm = 25
wpm = 15
for (opt, param) in opts:
if opt in ['-h', '--help']:
usage()
sys.exit(0)
elif opt in ['-s', '--speed']:
speeds = param.split(',')
if len(speeds) not in (1, 2):
usage("-s option must be followed by one or two speeds, eg: '-s 20' or '- 10,5'")
cwpm = speeds[0]
wpm = cwpm
if len(speeds) == 2:
(cwpm, wpm) = speeds
cwpm = int(cwpm)
wpm = int(wpm)
morse = SoundMorse()
morse.set_speeds(cwpm=cwpm, wpm=wpm)
StopThread = False
while True:
for ch in morse_string:
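# play each character in its own worker thread so that Ctrl-C in the main thread can set StopThread and stop it cleanly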
try:
thread = threading.Thread(target=send_morse, args=(ch, morse))
thread.start()
thread.join()
thread = None
except KeyboardInterrupt:
print('')
StopThread = True
if thread:
thread.join()
break
print('*', end='')
sys.stdout.flush()
|
|
fa87821a4a4b282e5cc0d9311a0c4dfb5fbc37db
|
comics/aggregator/utils.py
|
comics/aggregator/utils.py
|
from comics.comics import get_comic_module
SCHEDULE_DAYS = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
def get_comic_schedule(comic):
module = get_comic_module(comic.slug)
schedule = module.Crawler(comic).schedule
if not schedule:
return []
return [SCHEDULE_DAYS.index(day) for day in schedule.split(',')]
|
Add helper for getting schedule
|
Add helper for getting schedule
|
Python
|
agpl-3.0
|
jodal/comics,klette/comics,klette/comics,jodal/comics,klette/comics,datagutten/comics,jodal/comics,datagutten/comics,datagutten/comics,jodal/comics,datagutten/comics
|
Add helper for getting schedule
|
from comics.comics import get_comic_module
SCHEDULE_DAYS = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
def get_comic_schedule(comic):
module = get_comic_module(comic.slug)
schedule = module.Crawler(comic).schedule
if not schedule:
return []
return [SCHEDULE_DAYS.index(day) for day in schedule.split(',')]
|
<commit_before><commit_msg>Add helper for getting schedule<commit_after>
|
from comics.comics import get_comic_module
SCHEDULE_DAYS = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
def get_comic_schedule(comic):
module = get_comic_module(comic.slug)
schedule = module.Crawler(comic).schedule
if not schedule:
return []
return [SCHEDULE_DAYS.index(day) for day in schedule.split(',')]
|
Add helper for getting schedulefrom comics.comics import get_comic_module
SCHEDULE_DAYS = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
def get_comic_schedule(comic):
module = get_comic_module(comic.slug)
schedule = module.Crawler(comic).schedule
if not schedule:
return []
return [SCHEDULE_DAYS.index(day) for day in schedule.split(',')]
|
<commit_before><commit_msg>Add helper for getting schedule<commit_after>from comics.comics import get_comic_module
SCHEDULE_DAYS = ['Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa']
def get_comic_schedule(comic):
module = get_comic_module(comic.slug)
schedule = module.Crawler(comic).schedule
if not schedule:
return []
return [SCHEDULE_DAYS.index(day) for day in schedule.split(',')]
|
|
6db2c7298a4111ce540743099cabed7aed4439c8
|
dbaas/workflow/steps/mysql/region_migration/remove_nfs_snapshot.py
|
dbaas/workflow/steps/mysql/region_migration/remove_nfs_snapshot.py
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from workflow.steps.util.base import BaseStep
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RemoveNfsSnapshot(BaseStep):
def __unicode__(self):
return "Removing nfs snapshot..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
instance = workflow_dict['source_instances'][0]
NfsaasProvider.remove_snapshot(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=instance.hostname,
snapshopt=workflow_dict['snapshopt_id'])
del workflow_dict['snapshopt_id']
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
Add step to remove nfs snapshot
|
Add step to remove nfs snapshot
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Add step to remove nfs snapshot
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from workflow.steps.util.base import BaseStep
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RemoveNfsSnapshot(BaseStep):
def __unicode__(self):
return "Removing nfs snapshot..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
instance = workflow_dict['source_instances'][0]
NfsaasProvider.remove_snapshot(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=instance.hostname,
snapshopt=workflow_dict['snapshopt_id'])
del workflow_dict['snapshopt_id']
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
<commit_before><commit_msg>Add step to remove nfs snapshot<commit_after>
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from workflow.steps.util.base import BaseStep
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RemoveNfsSnapshot(BaseStep):
def __unicode__(self):
return "Removing nfs snapshot..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
instance = workflow_dict['source_instances'][0]
NfsaasProvider.remove_snapshot(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=instance.hostname,
snapshopt=workflow_dict['snapshopt_id'])
del workflow_dict['snapshopt_id']
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
Add step to remove nfs snapshot# -*- coding: utf-8 -*-
import logging
from util import full_stack
from workflow.steps.util.base import BaseStep
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RemoveNfsSnapshot(BaseStep):
def __unicode__(self):
return "Removing nfs snapshot..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
instance = workflow_dict['source_instances'][0]
NfsaasProvider.remove_snapshot(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=instance.hostname,
snapshopt=workflow_dict['snapshopt_id'])
del workflow_dict['snapshopt_id']
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
<commit_before><commit_msg>Add step to remove nfs snapshot<commit_after># -*- coding: utf-8 -*-
import logging
from util import full_stack
from workflow.steps.util.base import BaseStep
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RemoveNfsSnapshot(BaseStep):
def __unicode__(self):
return "Removing nfs snapshot..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
instance = workflow_dict['source_instances'][0]
NfsaasProvider.remove_snapshot(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=instance.hostname,
snapshopt=workflow_dict['snapshopt_id'])
del workflow_dict['snapshopt_id']
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
|
27439c063c51015c1405c3c855ce96157892da72
|
opps/channel/search_indexes.py
|
opps/channel/search_indexes.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from haystack.indexes import SearchIndex, CharField, DateTimeField
from haystack import site
from .models import Channel
class ChannelIndex(SearchIndex):
text = CharField(document=True, use_template=True)
date_available = DateTimeField(model_attr='date_available')
def get_updated_field(self):
return 'date_available'
def index_queryset(self):
return Channel.objects.filter(
date_available__lte=datetime.now(),
published=True)
site.register(Channel, ChannelIndex)
|
Create channel search index, indexed all channel name
|
Create channel search index, indexed all channel name
|
Python
|
mit
|
williamroot/opps,YACOWS/opps,opps/opps,jeanmask/opps,jeanmask/opps,YACOWS/opps,opps/opps,williamroot/opps,williamroot/opps,YACOWS/opps,opps/opps,opps/opps,jeanmask/opps,jeanmask/opps,williamroot/opps,YACOWS/opps
|
Create channel search index, indexed all channel name
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from haystack.indexes import SearchIndex, CharField, DateTimeField
from haystack import site
from .models import Channel
class ChannelIndex(SearchIndex):
text = CharField(document=True, use_template=True)
date_available = DateTimeField(model_attr='date_available')
def get_updated_field(self):
return 'date_available'
def index_queryset(self):
return Channel.objects.filter(
date_available__lte=datetime.now(),
published=True)
site.register(Channel, ChannelIndex)
|
<commit_before><commit_msg>Create channel search index, indexed all channel name<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from haystack.indexes import SearchIndex, CharField, DateTimeField
from haystack import site
from .models import Channel
class ChannelIndex(SearchIndex):
text = CharField(document=True, use_template=True)
date_available = DateTimeField(model_attr='date_available')
def get_updated_field(self):
return 'date_available'
def index_queryset(self):
return Channel.objects.filter(
date_available__lte=datetime.now(),
published=True)
site.register(Channel, ChannelIndex)
|
Create channel search index, indexed all channel name#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from haystack.indexes import SearchIndex, CharField, DateTimeField
from haystack import site
from .models import Channel
class ChannelIndex(SearchIndex):
text = CharField(document=True, use_template=True)
date_available = DateTimeField(model_attr='date_available')
def get_updated_field(self):
return 'date_available'
def index_queryset(self):
return Channel.objects.filter(
date_available__lte=datetime.now(),
published=True)
site.register(Channel, ChannelIndex)
|
<commit_before><commit_msg>Create channel search index, indexed all channel name<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from haystack.indexes import SearchIndex, CharField, DateTimeField
from haystack import site
from .models import Channel
class ChannelIndex(SearchIndex):
text = CharField(document=True, use_template=True)
date_available = DateTimeField(model_attr='date_available')
def get_updated_field(self):
return 'date_available'
def index_queryset(self):
return Channel.objects.filter(
date_available__lte=datetime.now(),
published=True)
site.register(Channel, ChannelIndex)
|
|
f4bb9685d4ba6661cdda34635c772fb39dc3e246
|
wsgi.py
|
wsgi.py
|
""" WSGI Entry Point
"""
from portal.app import create_app
from werkzeug.contrib.fixers import ProxyFix
app = create_app()
if app.config.get('PREFERRED_URL_SCHEME', '').lower() == 'https':
app.wsgi_app = ProxyFix(app.wsgi_app)
|
Add proxy-fixing middleware if serving over HTTPS proxy
|
Add proxy-fixing middleware if serving over HTTPS proxy
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Add proxy-fixing middleware if serving over HTTPS proxy
|
""" WSGI Entry Point
"""
from portal.app import create_app
from werkzeug.contrib.fixers import ProxyFix
app = create_app()
if app.config.get('PREFERRED_URL_SCHEME', '').lower() == 'https':
app.wsgi_app = ProxyFix(app.wsgi_app)
|
<commit_before><commit_msg>Add proxy-fixing middleware if serving over HTTPS proxy<commit_after>
|
""" WSGI Entry Point
"""
from portal.app import create_app
from werkzeug.contrib.fixers import ProxyFix
app = create_app()
if app.config.get('PREFERRED_URL_SCHEME', '').lower() == 'https':
app.wsgi_app = ProxyFix(app.wsgi_app)
|
Add proxy-fixing middleware if serving over HTTPS proxy""" WSGI Entry Point
"""
from portal.app import create_app
from werkzeug.contrib.fixers import ProxyFix
app = create_app()
if app.config.get('PREFERRED_URL_SCHEME', '').lower() == 'https':
app.wsgi_app = ProxyFix(app.wsgi_app)
|
<commit_before><commit_msg>Add proxy-fixing middleware if serving over HTTPS proxy<commit_after>""" WSGI Entry Point
"""
from portal.app import create_app
from werkzeug.contrib.fixers import ProxyFix
app = create_app()
if app.config.get('PREFERRED_URL_SCHEME', '').lower() == 'https':
app.wsgi_app = ProxyFix(app.wsgi_app)
|
|
6c53c55204955cf0682c655321219b4e67481030
|
lab/10/template_10_c.py
|
lab/10/template_10_c.py
|
# Template Binary Search Tree
class Node:
'''
Class untuk node dari Binary Search Tree
Terdiri dari value dari node
dan reference ke left child dan right child
'''
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Benny-ry Search Tree
# *binary
class BST:
'''
Class untuk Binary Search Tree
Terdiri dari kumpulan-kumpulan node
yang sudah tersusun dalam bentuk tree
'''
def __init__(self):
self.root = None
'''
Method untuk set root dari tree
'''
def set_root(self, value):
self.root = Node(value)
'''
Method insert
Digunakan untuk memasukkan nilai ke dalam tree
Jika tree masih kosong, nilai yang dimasukkan menjadi root
Jika tree sudah terisi, nilai yang dimasukkan akan dicek,
kalau lebih kecil daripada elemen yang akan dibandingkan akan dimasukkan
sebagai left child, sebaliknya akan ke right child
-------------------------------------------------------------------------
@param value
'''
def insert(self, value):
if(self.root is None):
self.set_root(value)
else:
self.insert_node(self.root, value)
def insert_node(self, current_node, value):
if(value < current_node.value):
if(current_node.left):
self.insert_node(current_node.left, value)
else:
current_node.left = Node(value)
elif(value > current_node.value):
if(current_node.right):
self.insert_node(current_node.right, value)
else:
current_node.right = Node(value)
'''
Method find
Digunakan untuk mencari sebuah nilai di dalam tree
--------------------------------------------------
@param value
@return boolean
'''
def find(self, value):
return self.find_node(self.root, value)
def find_node(self, current_node, value):
# TODO implementasikan code ini menggunakan recursion
# current_node adalah node yang sedang ditunjuk
# value adalah nilai yang dicari
# method ini mengembalikan True jika value terdapat di dalam BST,
# atau False jika value tidak terdapat di dalam BST
pass
def main():
# Inisiasi binary search tree
bst = BST()
# Memasukkan elemen ke dalam tree
angka = [50,40,60,30,45,57,75,21,44,47,65,90]
for i in angka:
bst.insert(i)
while(True):
# Meminta input dari user, angka yang yang akan dicek apakah ada dalam tree
x = int(input('Masukkan sebuah integer yang ingin dicari di dalam tree: '))
if(bst.find(x)):
print("{} terdapat dalam tree".format(x))
else:
print("{} tidak ada dalam tree".format(x))
main()
|
Add lab 10 template for class C
|
Add lab 10 template for class C
|
Python
|
mit
|
giovanism/TarungLab,laymonage/TarungLab
|
Add lab 10 template for class C
|
# Template Binary Search Tree
class Node:
'''
Class untuk node dari Binary Search Tree
Terdiri dari value dari node
dan reference ke left child dan right child
'''
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Benny-ry Search Tree
# *binary
class BST:
'''
Class untuk Binary Search Tree
Terdiri dari kumpulan-kumpulan node
yang sudah tersusun dalam bentuk tree
'''
def __init__(self):
self.root = None
'''
Method untuk set root dari tree
'''
def set_root(self, value):
self.root = Node(value)
'''
Method insert
Digunakan untuk memasukkan nilai ke dalam tree
Jika tree masih kosong, nilai yang dimasukkan menjadi root
Jika tree sudah terisi, nilai yang dimasukkan akan dicek,
kalau lebih kecil daripada elemen yang akan dibandingkan akan dimasukkan
sebagai left child, sebaliknya akan ke right child
-------------------------------------------------------------------------
@param value
'''
def insert(self, value):
if(self.root is None):
self.set_root(value)
else:
self.insert_node(self.root, value)
def insert_node(self, current_node, value):
if(value < current_node.value):
if(current_node.left):
self.insert_node(current_node.left, value)
else:
current_node.left = Node(value)
elif(value > current_node.value):
if(current_node.right):
self.insert_node(current_node.right, value)
else:
current_node.right = Node(value)
'''
Method find
Digunakan untuk mencari sebuah nilai di dalam tree
--------------------------------------------------
@param value
@return boolean
'''
def find(self, value):
return self.find_node(self.root, value)
def find_node(self, current_node, value):
# TODO implementasikan code ini menggunakan recursion
# current_node adalah node yang sedang ditunjuk
# value adalah nilai yang dicari
# method ini mengembalikan True jika value terdapat di dalam BST,
# atau False jika value tidak terdapat di dalam BST
pass
def main():
# Inisiasi binary search tree
bst = BST()
# Memasukkan elemen ke dalam tree
angka = [50,40,60,30,45,57,75,21,44,47,65,90]
for i in angka:
bst.insert(i)
while(True):
# Meminta input dari user, angka yang yang akan dicek apakah ada dalam tree
x = int(input('Masukkan sebuah integer yang ingin dicari di dalam tree: '))
if(bst.find(x)):
print("{} terdapat dalam tree".format(x))
else:
print("{} tidak ada dalam tree".format(x))
main()
|
<commit_before><commit_msg>Add lab 10 template for class C<commit_after>
|
# Template Binary Search Tree
class Node:
'''
Class untuk node dari Binary Search Tree
Terdiri dari value dari node
dan reference ke left child dan right child
'''
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Benny-ry Search Tree
# *binary
class BST:
'''
Class untuk Binary Search Tree
Terdiri dari kumpulan-kumpulan node
yang sudah tersusun dalam bentuk tree
'''
def __init__(self):
self.root = None
'''
Method untuk set root dari tree
'''
def set_root(self, value):
self.root = Node(value)
'''
Method insert
Digunakan untuk memasukkan nilai ke dalam tree
Jika tree masih kosong, nilai yang dimasukkan menjadi root
Jika tree sudah terisi, nilai yang dimasukkan akan dicek,
kalau lebih kecil daripada elemen yang akan dibandingkan akan dimasukkan
sebagai left child, sebaliknya akan ke right child
-------------------------------------------------------------------------
@param value
'''
def insert(self, value):
if(self.root is None):
self.set_root(value)
else:
self.insert_node(self.root, value)
def insert_node(self, current_node, value):
if(value < current_node.value):
if(current_node.left):
self.insert_node(current_node.left, value)
else:
current_node.left = Node(value)
elif(value > current_node.value):
if(current_node.right):
self.insert_node(current_node.right, value)
else:
current_node.right = Node(value)
'''
Method find
Digunakan untuk mencari sebuah nilai di dalam tree
--------------------------------------------------
@param value
@return boolean
'''
def find(self, value):
return self.find_node(self.root, value)
def find_node(self, current_node, value):
# TODO implementasikan code ini menggunakan recursion
# current_node adalah node yang sedang ditunjuk
# value adalah nilai yang dicari
# method ini mengembalikan True jika value terdapat di dalam BST,
# atau False jika value tidak terdapat di dalam BST
pass
def main():
# Inisiasi binary search tree
bst = BST()
# Memasukkan elemen ke dalam tree
angka = [50,40,60,30,45,57,75,21,44,47,65,90]
for i in angka:
bst.insert(i)
while(True):
# Meminta input dari user, angka yang yang akan dicek apakah ada dalam tree
x = int(input('Masukkan sebuah integer yang ingin dicari di dalam tree: '))
if(bst.find(x)):
print("{} terdapat dalam tree".format(x))
else:
print("{} tidak ada dalam tree".format(x))
main()
|
Add lab 10 template for class C# Template Binary Search Tree
class Node:
'''
Class untuk node dari Binary Search Tree
Terdiri dari value dari node
dan reference ke left child dan right child
'''
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Benny-ry Search Tree
# *binary
class BST:
'''
Class untuk Binary Search Tree
Terdiri dari kumpulan-kumpulan node
yang sudah tersusun dalam bentuk tree
'''
def __init__(self):
self.root = None
'''
Method untuk set root dari tree
'''
def set_root(self, value):
self.root = Node(value)
'''
Method insert
Digunakan untuk memasukkan nilai ke dalam tree
Jika tree masih kosong, nilai yang dimasukkan menjadi root
Jika tree sudah terisi, nilai yang dimasukkan akan dicek,
kalau lebih kecil daripada elemen yang akan dibandingkan akan dimasukkan
sebagai left child, sebaliknya akan ke right child
-------------------------------------------------------------------------
@param value
'''
def insert(self, value):
if(self.root is None):
self.set_root(value)
else:
self.insert_node(self.root, value)
def insert_node(self, current_node, value):
if(value < current_node.value):
if(current_node.left):
self.insert_node(current_node.left, value)
else:
current_node.left = Node(value)
elif(value > current_node.value):
if(current_node.right):
self.insert_node(current_node.right, value)
else:
current_node.right = Node(value)
'''
Method find
Digunakan untuk mencari sebuah nilai di dalam tree
--------------------------------------------------
@param value
@return boolean
'''
def find(self, value):
return self.find_node(self.root, value)
def find_node(self, current_node, value):
# TODO implementasikan code ini menggunakan recursion
# current_node adalah node yang sedang ditunjuk
# value adalah nilai yang dicari
# method ini mengembalikan True jika value terdapat di dalam BST,
# atau False jika value tidak terdapat di dalam BST
pass
def main():
# Inisiasi binary search tree
bst = BST()
# Memasukkan elemen ke dalam tree
angka = [50,40,60,30,45,57,75,21,44,47,65,90]
for i in angka:
bst.insert(i)
while(True):
# Meminta input dari user, angka yang yang akan dicek apakah ada dalam tree
x = int(input('Masukkan sebuah integer yang ingin dicari di dalam tree: '))
if(bst.find(x)):
print("{} terdapat dalam tree".format(x))
else:
print("{} tidak ada dalam tree".format(x))
main()
|
<commit_before><commit_msg>Add lab 10 template for class C<commit_after># Template Binary Search Tree
class Node:
'''
Class untuk node dari Binary Search Tree
Terdiri dari value dari node
dan reference ke left child dan right child
'''
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Benny-ry Search Tree
# *binary
class BST:
'''
Class untuk Binary Search Tree
Terdiri dari kumpulan-kumpulan node
yang sudah tersusun dalam bentuk tree
'''
def __init__(self):
self.root = None
'''
Method untuk set root dari tree
'''
def set_root(self, value):
self.root = Node(value)
'''
Method insert
Digunakan untuk memasukkan nilai ke dalam tree
Jika tree masih kosong, nilai yang dimasukkan menjadi root
Jika tree sudah terisi, nilai yang dimasukkan akan dicek,
kalau lebih kecil daripada elemen yang akan dibandingkan akan dimasukkan
sebagai left child, sebaliknya akan ke right child
-------------------------------------------------------------------------
@param value
'''
def insert(self, value):
if(self.root is None):
self.set_root(value)
else:
self.insert_node(self.root, value)
def insert_node(self, current_node, value):
if(value < current_node.value):
if(current_node.left):
self.insert_node(current_node.left, value)
else:
current_node.left = Node(value)
elif(value > current_node.value):
if(current_node.right):
self.insert_node(current_node.right, value)
else:
current_node.right = Node(value)
'''
Method find
Digunakan untuk mencari sebuah nilai di dalam tree
--------------------------------------------------
@param value
@return boolean
'''
def find(self, value):
return self.find_node(self.root, value)
def find_node(self, current_node, value):
# TODO implementasikan code ini menggunakan recursion
# current_node adalah node yang sedang ditunjuk
# value adalah nilai yang dicari
# method ini mengembalikan True jika value terdapat di dalam BST,
# atau False jika value tidak terdapat di dalam BST
pass
def main():
# Inisiasi binary search tree
bst = BST()
# Memasukkan elemen ke dalam tree
angka = [50,40,60,30,45,57,75,21,44,47,65,90]
for i in angka:
bst.insert(i)
while(True):
# Meminta input dari user, angka yang yang akan dicek apakah ada dalam tree
x = int(input('Masukkan sebuah integer yang ingin dicari di dalam tree: '))
if(bst.find(x)):
print("{} terdapat dalam tree".format(x))
else:
print("{} tidak ada dalam tree".format(x))
main()
|
|
127338c9584b6ac0e74ef0009d2769dd43d080f9
|
particle-beam/particle-beam.py
|
particle-beam/particle-beam.py
|
#!/usr/bin/env python3
import random
import const
from particle import Particle, propagate
from detector import Detector
from initial import Beam, Profile, Energy
random.seed(91400)
N = 1000
beam = Beam(
profile=Profile(
centre=0,
diameter=50,
shape=const.UNIFORM,
),
energy=Energy(
mean=25,
width=5,
shape=const.GAUSSIAN
),
divergence=30
)
detector = Detector(
voxels=5,
size=100
)
for _ in range(N):
particle = Particle(beam)
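# propagate the particle step by step; it is killed once its position falls outside every detector voxel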
while particle.alive:
particle = propagate(particle, detector)
if detector.voxel(particle.position) is None:
particle.alive = False
detector.output()
|
Add the 'shell' for the MC simulation.
|
Add the 'shell' for the MC simulation.
|
Python
|
mpl-2.0
|
DanielBrookRoberge/MonteCarloExamples
|
Add the 'shell' for the MC simulation.
|
#!/usr/bin/env python3
import random
import const
from particle import Particle, propagate
from detector import Detector
from initial import Beam, Profile, Energy
random.seed(91400)
N = 1000
beam = Beam(
profile=Profile(
centre=0,
diameter=50,
shape=const.UNIFORM,
),
energy=Energy(
mean=25,
width=5,
shape=const.GAUSSIAN
),
divergence=30
)
detector = Detector(
voxels=5,
size=100
)
for _ in range(N):
particle = Particle(beam)
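# propagate the particle step by step; it is killed once its position falls outside every detector voxel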
while particle.alive:
particle = propagate(particle, detector)
if detector.voxel(particle.position) is None:
particle.alive = False
detector.output()
|
<commit_before><commit_msg>Add the 'shell' for the MC simulation.<commit_after>
|
#!/usr/bin/env python3
import random
import const
from particle import Particle, propagate
from detector import Detector
from initial import Beam, Profile, Energy
random.seed(91400)
N = 1000
beam = Beam(
profile=Profile(
centre=0,
diameter=50,
shape=const.UNIFORM,
),
energy=Energy(
mean=25,
width=5,
shape=const.GAUSSIAN
),
divergence=30
)
detector = Detector(
voxels=5,
size=100
)
for _ in range(N):
particle = Particle(beam)
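# propagate the particle step by step; it is killed once its position falls outside every detector voxel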
while particle.alive:
particle = propagate(particle, detector)
if detector.voxel(particle.position) is None:
particle.alive = False
detector.output()
|
Add the 'shell' for the MC simulation.#!/usr/bin/env python3
import random
import const
from particle import Particle, propagate
from detector import Detector
from initial import Beam, Profile, Energy
random.seed(91400)
N = 1000
beam = Beam(
profile=Profile(
centre=0,
diameter=50,
shape=const.UNIFORM,
),
energy=Energy(
mean=25,
width=5,
shape=const.GAUSSIAN
),
divergence=30
)
detector = Detector(
voxels=5,
size=100
)
for _ in range(N):
particle = Particle(beam)
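# propagate the particle step by step; it is killed once its position falls outside every detector voxel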
while particle.alive:
particle = propagate(particle, detector)
if detector.voxel(particle.position) is None:
particle.alive = False
detector.output()
|
<commit_before><commit_msg>Add the 'shell' for the MC simulation.<commit_after>#!/usr/bin/env python3
import random
import const
from particle import Particle, propagate
from detector import Detector
from initial import Beam, Profile, Energy
random.seed(91400)
N = 1000
beam = Beam(
profile=Profile(
centre=0,
diameter=50,
shape=const.UNIFORM,
),
energy=Energy(
mean=25,
width=5,
shape=const.GAUSSIAN
),
divergence=30
)
detector = Detector(
voxels=5,
size=100
)
for _ in range(N):
particle = Particle(beam)
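# propagate the particle step by step; it is killed once its position falls outside every detector voxel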
while particle.alive:
particle = propagate(particle, detector)
if detector.voxel(particle.position) is None:
particle.alive = False
detector.output()
|
|
8fb60650f8ff1da16d537402e7227f78667b434e
|
tests/test_schema_loader.py
|
tests/test_schema_loader.py
|
import contextlib
import json
import os
import tempfile
import unittest
from faker_schema.schema_loader import load_json_from_file, load_json_from_string
class TestFakerSchema(unittest.TestCase):
def test_load_json_from_string(self):
schema_json_string = '{"Full Name": "name", "Address": "address", "Email": "email"}'
schema = load_json_from_string(schema_json_string)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_string_incorrect_json(self):
schema_json_string = '{"Full Name": "name", }'
with self.assertRaises(ValueError):
load_json_from_string(schema_json_string)
@contextlib.contextmanager
def _write_to_temp_file(self, data, write_to_json=False):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
if write_to_json:
json.dump(data, temp_file)
else:
temp_file.write(data)
try:
yield temp_file.name
finally:
os.remove(temp_file.name)
def test_load_json_from_file(self):
schema = {'Full Name': 'name', 'Address': 'address', 'Email': 'email'}
with self._write_to_temp_file(schema, write_to_json=True) as temp_file:
schema = load_json_from_file(temp_file)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_file_incorrect_json(self):
schema = '{"Full Name": ["name", "place", ]}'
with self._write_to_temp_file(schema) as temp_file:
with self.assertRaises(ValueError):
load_json_from_file(temp_file)
|
Add unit tests for schema loader module
|
Add unit tests for schema loader module
|
Python
|
mit
|
ueg1990/faker-schema
|
Add unit tests for schema loader module
|
import contextlib
import json
import os
import tempfile
import unittest
from faker_schema.schema_loader import load_json_from_file, load_json_from_string
class TestFakerSchema(unittest.TestCase):
def test_load_json_from_string(self):
schema_json_string = '{"Full Name": "name", "Address": "address", "Email": "email"}'
schema = load_json_from_string(schema_json_string)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_string_incorrect_json(self):
schema_json_string = '{"Full Name": "name", }'
with self.assertRaises(ValueError):
load_json_from_string(schema_json_string)
@contextlib.contextmanager
def _write_to_temp_file(self, data, write_to_json=False):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
if write_to_json:
json.dump(data, temp_file)
else:
temp_file.write(data)
try:
yield temp_file.name
finally:
os.remove(temp_file.name)
def test_load_json_from_file(self):
schema = {'Full Name': 'name', 'Address': 'address', 'Email': 'email'}
with self._write_to_temp_file(schema, write_to_json=True) as temp_file:
schema = load_json_from_file(temp_file)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_file_incorrect_json(self):
schema = '{"Full Name": ["name", "place", ]}'
with self._write_to_temp_file(schema) as temp_file:
with self.assertRaises(ValueError):
load_json_from_file(temp_file)
|
<commit_before><commit_msg>Add unit tests for schema loader module<commit_after>
|
import contextlib
import json
import os
import tempfile
import unittest
from faker_schema.schema_loader import load_json_from_file, load_json_from_string
class TestFakerSchema(unittest.TestCase):
def test_load_json_from_string(self):
schema_json_string = '{"Full Name": "name", "Address": "address", "Email": "email"}'
schema = load_json_from_string(schema_json_string)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_string_incorrect_json(self):
schema_json_string = '{"Full Name": "name", }'
with self.assertRaises(ValueError):
load_json_from_string(schema_json_string)
@contextlib.contextmanager
def _write_to_temp_file(self, data, write_to_json=False):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
if write_to_json:
json.dump(data, temp_file)
else:
temp_file.write(data)
try:
yield temp_file.name
finally:
os.remove(temp_file.name)
def test_load_json_from_file(self):
schema = {'Full Name': 'name', 'Address': 'address', 'Email': 'email'}
with self._write_to_temp_file(schema, write_to_json=True) as temp_file:
schema = load_json_from_file(temp_file)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_file_incorrect_json(self):
schema = '{"Full Name": ["name", "place", ]}'
with self._write_to_temp_file(schema) as temp_file:
with self.assertRaises(ValueError):
load_json_from_file(temp_file)
|
Add unit tests for schema loader moduleimport contextlib
import json
import os
import tempfile
import unittest
from faker_schema.schema_loader import load_json_from_file, load_json_from_string
class TestFakerSchema(unittest.TestCase):
def test_load_json_from_string(self):
schema_json_string = '{"Full Name": "name", "Address": "address", "Email": "email"}'
schema = load_json_from_string(schema_json_string)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_string_incorrect_json(self):
schema_json_string = '{"Full Name": "name", }'
with self.assertRaises(ValueError):
load_json_from_string(schema_json_string)
@contextlib.contextmanager
def _write_to_temp_file(self, data, write_to_json=False):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
if write_to_json:
json.dump(data, temp_file)
else:
temp_file.write(data)
try:
yield temp_file.name
finally:
os.remove(temp_file.name)
def test_load_json_from_file(self):
schema = {'Full Name': 'name', 'Address': 'address', 'Email': 'email'}
with self._write_to_temp_file(schema, write_to_json=True) as temp_file:
schema = load_json_from_file(temp_file)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_file_incorrect_json(self):
schema = '{"Full Name": ["name", "place", ]}'
with self._write_to_temp_file(schema) as temp_file:
with self.assertRaises(ValueError):
load_json_from_file(temp_file)
|
<commit_before><commit_msg>Add unit tests for schema loader module<commit_after>import contextlib
import json
import os
import tempfile
import unittest
from faker_schema.schema_loader import load_json_from_file, load_json_from_string
class TestFakerSchema(unittest.TestCase):
def test_load_json_from_string(self):
schema_json_string = '{"Full Name": "name", "Address": "address", "Email": "email"}'
schema = load_json_from_string(schema_json_string)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_string_incorrect_json(self):
schema_json_string = '{"Full Name": "name", }'
with self.assertRaises(ValueError):
load_json_from_string(schema_json_string)
@contextlib.contextmanager
def _write_to_temp_file(self, data, write_to_json=False):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
if write_to_json:
json.dump(data, temp_file)
else:
temp_file.write(data)
try:
yield temp_file.name
finally:
os.remove(temp_file.name)
def test_load_json_from_file(self):
schema = {'Full Name': 'name', 'Address': 'address', 'Email': 'email'}
with self._write_to_temp_file(schema, write_to_json=True) as temp_file:
schema = load_json_from_file(temp_file)
self.assertEqual(schema, {'Full Name': 'name', 'Address': 'address', 'Email': 'email'})
def test_load_json_from_file_incorrect_json(self):
schema = '{"Full Name": ["name", "place", ]}'
with self._write_to_temp_file(schema) as temp_file:
with self.assertRaises(ValueError):
load_json_from_file(temp_file)
|
|
b38edc2a192151324855d50e9e172f0fd96b9064
|
mda/finance.py
|
mda/finance.py
|
from __future__ import division
import numpy as np
import pandas as pd
import urllib2
# __author__ = 'mattmcd'
class LseReader:
def __init__(self):
dataLoc = '/home/mattmcd/Work/Data/'
ftseFile = dataLoc + 'FTSE100.csv'
self.ftse100 = pd.read_csv( ftseFile )
self.prefixURL = 'https://www.google.com/finance/getprices?'
def read_history(self, ticker, interval=300, period=10):
"""Read intraday history for selected ticker on LSE"""
txt = urllib2.urlopen(self.prefixURL +
'q={Ticker}&x=LON&i={Interval}&p={Period}d&f=d,o,h,l,c,v'.format(
Ticker=ticker, Interval=interval, Period=period)).read()
return txt
|
Read prices from Google Finance
|
Read prices from Google Finance
|
Python
|
apache-2.0
|
mattmcd/PyAnalysis
|
Read prices from Google Finance
|
from __future__ import division
import numpy as np
import pandas as pd
import urllib2
# __author__ = 'mattmcd'
class LseReader:
def __init__(self):
dataLoc = '/home/mattmcd/Work/Data/'
ftseFile = dataLoc + 'FTSE100.csv'
self.ftse100 = pd.read_csv( ftseFile )
self.prefixURL = 'https://www.google.com/finance/getprices?'
def read_history(self, ticker, interval=300, period=10):
"""Read intraday history for selected ticker on LSE"""
txt = urllib2.urlopen(self.prefixURL +
'q={Ticker}&x=LON&i={Interval}&p={Period}d&f=d,o,h,l,c,v'.format(
Ticker=ticker, Interval=interval, Period=period)).read()
return txt
|
<commit_before><commit_msg>Read prices from Google Finance<commit_after>
|
from __future__ import division
import numpy as np
import pandas as pd
import urllib2
# __author__ = 'mattmcd'
class LseReader:
def __init__(self):
dataLoc = '/home/mattmcd/Work/Data/'
ftseFile = dataLoc + 'FTSE100.csv'
self.ftse100 = pd.read_csv( ftseFile )
self.prefixURL = 'https://www.google.com/finance/getprices?'
def read_history(self, ticker, interval=300, period=10):
"""Read intraday history for selected ticker on LSE"""
txt = urllib2.urlopen(self.prefixURL +
'q={Ticker}&x=LON&i={Interval}&p={Period}d&f=d,o,h,l,c,v'.format(
Ticker=ticker, Interval=interval, Period=period)).read()
return txt
|
Read prices from Google Financefrom __future__ import division
import numpy as np
import pandas as pd
import urllib2
# __author__ = 'mattmcd'
class LseReader:
def __init__(self):
dataLoc = '/home/mattmcd/Work/Data/'
ftseFile = dataLoc + 'FTSE100.csv'
self.ftse100 = pd.read_csv( ftseFile )
self.prefixURL = 'https://www.google.com/finance/getprices?'
def read_history(self, ticker, interval=300, period=10):
"""Read intraday history for selected ticker on LSE"""
txt = urllib2.urlopen(self.prefixURL +
'q={Ticker}&x=LON&i={Interval}&p={Period}d&f=d,o,h,l,c,v'.format(
Ticker=ticker, Interval=interval, Period=period)).read()
return txt
|
<commit_before><commit_msg>Read prices from Google Finance<commit_after>from __future__ import division
import numpy as np
import pandas as pd
import urllib2
# __author__ = 'mattmcd'
class LseReader:
def __init__(self):
dataLoc = '/home/mattmcd/Work/Data/'
ftseFile = dataLoc + 'FTSE100.csv'
self.ftse100 = pd.read_csv( ftseFile )
self.prefixURL = 'https://www.google.com/finance/getprices?'
def read_history(self, ticker, interval=300, period=10):
"""Read intraday history for selected ticker on LSE"""
txt = urllib2.urlopen(self.prefixURL +
'q={Ticker}&x=LON&i={Interval}&p={Period}d&f=d,o,h,l,c,v'.format(
Ticker=ticker, Interval=interval, Period=period)).read()
return txt
|
|
0dfe3084cf7d4832d14c027e646bfd74cc096177
|
mongonaut/views.py
|
mongonaut/views.py
|
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"
|
import importlib
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
queryset = []
def get_queryset(self):
queryset = super(DocumentListView, self).get_queryset()
app_label = self.kwargs.get('app_label')
document_name = self.kwargs.get('document_name')
# TODO Allow this to be assigned via url variable
models_name = self.kwargs.get('models_name', 'models')
# import the app's models module
model_name = "{0}.{1}".format(app_label, models_name)
models = importlib.import_module(model_name)
# now get the document
document = getattr(models, document_name)
return document.objects.all()
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"
|
Work on the document list view
|
Work on the document list view
|
Python
|
mit
|
jazzband/django-mongonaut,pydanny/django-mongonaut,jazzband/django-mongonaut,lchsk/django-mongonaut,jazzband/django-mongonaut,lchsk/django-mongonaut,pydanny/django-mongonaut,lchsk/django-mongonaut,pydanny/django-mongonaut
|
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"Work on the document list view
|
import importlib
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
queryset = []
def get_queryset(self):
queryset = super(DocumentListView, self).get_queryset()
app_label = self.kwargs.get('app_label')
document_name = self.kwargs.get('document_name')
# TODO Allow this to be assigned via url variable
models_name = self.kwargs.get('models_name', 'models')
# import the app's models module
model_name = "{0}.{1}".format(app_label, models_name)
models = importlib.import_module(model_name)
# now get the document
document = getattr(models, document_name)
return document.objects.all()
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"
|
<commit_before>from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"<commit_msg>Work on the document list view<commit_after>
|
import importlib
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
queryset = []
def get_queryset(self):
queryset = super(DocumentListView, self).get_queryset()
app_label = self.kwargs.get('app_label')
document_name = self.kwargs.get('document_name')
# TODO Allow this to be assigned via url variable
models_name = self.kwargs.get('models_name', 'models.py')
# import the models file
model_name = "{0}.{1}".format(document_name, models_name)
models = importlib.import_module(model_name)
# now get the document
document = getattr(models, document_name)
return document.objects.all()
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"
|
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"Work on the document list viewimport importlib
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
queryset = []
def get_queryset(self):
queryset = super(DocumentListView, self).get_queryset()
app_label = self.kwargs.get('app_label')
document_name = self.kwargs.get('document_name')
# TODO Allow this to be assigned via url variable
models_name = self.kwargs.get('models_name', 'models.py')
# import the models file
model_name = "{0}.{1}".format(document_name, models_name)
models = importlib.import_module(model_name)
# now get the document
document = getattr(models, document_name)
return document.objects.all()
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"
|
<commit_before>from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"<commit_msg>Work on the document list view<commit_after>import importlib
from django.views.generic import DetailView
from django.views.generic import ListView
from mongonaut.sites import NautSite
class IndexView(ListView):
queryset = NautSite._registry.iteritems()
template_name = "mongonaut/index.html"
class AppListView(ListView):
""" :args: <app_label> """
template_name = "mongonaut/app_list.html"
class DocumentListView(ListView):
""" :args: <app_label> <document_name> """
template_name = "mongonaut/document_list.html"
queryset = []
def get_queryset(self):
queryset = super(DocumentListView, self).get_queryset()
app_label = self.kwargs.get('app_label')
document_name = self.kwargs.get('document_name')
# TODO Allow this to be assigned via url variable
models_name = self.kwargs.get('models_name', 'models.py')
# import the models file
model_name = "{0}.{1}".format(document_name, models_name)
models = importlib.import_module(model_name)
# now get the document
document = getattr(models, document_name)
return document.objects.all()
class DocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> """
template_name = "mongonaut/document_detail.html"
class EmbeddedDocumentDetailView(DetailView):
""" :args: <app_label> <document_name> <id> <???> """
template_name = "mongonaut/embedded_document_detail.html"
|
40f57a73adadf08e497464990a34860d03e04d39
|
mezzanine/core/urls.py
|
mezzanine/core/urls.py
|
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^static_proxy/$", "static_proxy", name="static_proxy"),
)
|
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
_proxy_url = getattr(settings, "STATIC_PROXY_URL", "static_proxy").strip("/")
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^%s/$" % _proxy_url, "static_proxy", name="static_proxy"),
)
|
Allow static proxy URL to be configured.
|
Allow static proxy URL to be configured.
|
Python
|
bsd-2-clause
|
scarcry/snm-mezzanine,vladir/mezzanine,fusionbox/mezzanine,jerivas/mezzanine,molokov/mezzanine,frankchin/mezzanine,adrian-the-git/mezzanine,fusionbox/mezzanine,mush42/mezzanine,damnfine/mezzanine,molokov/mezzanine,Kniyl/mezzanine,cccs-web/mezzanine,orlenko/sfpirg,sjdines/mezzanine,adrian-the-git/mezzanine,scarcry/snm-mezzanine,christianwgd/mezzanine,SoLoHiC/mezzanine,wyzex/mezzanine,spookylukey/mezzanine,ryneeverett/mezzanine,orlenko/plei,Cajoline/mezzanine,viaregio/mezzanine,eino-makitalo/mezzanine,eino-makitalo/mezzanine,tuxinhang1989/mezzanine,wbtuomela/mezzanine,douglaskastle/mezzanine,dustinrb/mezzanine,orlenko/sfpirg,jerivas/mezzanine,viaregio/mezzanine,sjuxax/mezzanine,dovydas/mezzanine,Skytorn86/mezzanine,ZeroXn/mezzanine,biomassives/mezzanine,scarcry/snm-mezzanine,ryneeverett/mezzanine,sjuxax/mezzanine,christianwgd/mezzanine,promil23/mezzanine,geodesign/mezzanine,Kniyl/mezzanine,stbarnabas/mezzanine,frankier/mezzanine,gbosh/mezzanine,dovydas/mezzanine,stephenmcd/mezzanine,Kniyl/mezzanine,joshcartme/mezzanine,nikolas/mezzanine,Cicero-Zhao/mezzanine,gradel/mezzanine,PegasusWang/mezzanine,eino-makitalo/mezzanine,readevalprint/mezzanine,cccs-web/mezzanine,PegasusWang/mezzanine,SoLoHiC/mezzanine,agepoly/mezzanine,stephenmcd/mezzanine,douglaskastle/mezzanine,geodesign/mezzanine,jjz/mezzanine,vladir/mezzanine,orlenko/sfpirg,vladir/mezzanine,dustinrb/mezzanine,orlenko/plei,saintbird/mezzanine,geodesign/mezzanine,gradel/mezzanine,emile2016/mezzanine,AlexHill/mezzanine,sjdines/mezzanine,webounty/mezzanine,industrydive/mezzanine,jerivas/mezzanine,readevalprint/mezzanine,damnfine/mezzanine,gbosh/mezzanine,molokov/mezzanine,wbtuomela/mezzanine,AlexHill/mezzanine,webounty/mezzanine,frankier/mezzanine,Skytorn86/mezzanine,nikolas/mezzanine,agepoly/mezzanine,mush42/mezzanine,ryneeverett/mezzanine,emile2016/mezzanine,ZeroXn/mezzanine,industrydive/mezzanine,mush42/mezzanine,spookylukey/mezzanine,douglaskastle/mezzanine,wrwrwr/mezzanine,saintbird/mezzanine,dsanders11/mezzanine,Cajoline/mezzanine,theclanks/mezzanine,dovydas/mezzanine,readevalprint/mezzanine,tuxinhang1989/mezzanine,damnfine/mezzanine,wyzex/mezzanine,spookylukey/mezzanine,joshcartme/mezzanine,SoLoHiC/mezzanine,sjdines/mezzanine,jjz/mezzanine,orlenko/plei,batpad/mezzanine,industrydive/mezzanine,jjz/mezzanine,gbosh/mezzanine,theclanks/mezzanine,Cicero-Zhao/mezzanine,frankier/mezzanine,batpad/mezzanine,dsanders11/mezzanine,frankchin/mezzanine,ZeroXn/mezzanine,sjuxax/mezzanine,stephenmcd/mezzanine,dsanders11/mezzanine,webounty/mezzanine,viaregio/mezzanine,dustinrb/mezzanine,adrian-the-git/mezzanine,guibernardino/mezzanine,stbarnabas/mezzanine,promil23/mezzanine,emile2016/mezzanine,dekomote/mezzanine-modeltranslation-backport,gradel/mezzanine,wbtuomela/mezzanine,agepoly/mezzanine,frankchin/mezzanine,biomassives/mezzanine,guibernardino/mezzanine,PegasusWang/mezzanine,nikolas/mezzanine,biomassives/mezzanine,joshcartme/mezzanine,christianwgd/mezzanine,dekomote/mezzanine-modeltranslation-backport,Cajoline/mezzanine,tuxinhang1989/mezzanine,theclanks/mezzanine,wyzex/mezzanine,dekomote/mezzanine-modeltranslation-backport,Skytorn86/mezzanine,promil23/mezzanine,saintbird/mezzanine,wrwrwr/mezzanine
|
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^static_proxy/$", "static_proxy", name="static_proxy"),
)
Allow static proxy URL to be configured.
|
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
_proxy_url = getattr(settings, "STATIC_PROXY_URL", "static_proxy").strip("/")
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^%s/$" % _proxy_url, "static_proxy", name="static_proxy"),
)
|
<commit_before>
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^static_proxy/$", "static_proxy", name="static_proxy"),
)
<commit_msg>Allow static proxy URL to be configured.<commit_after>
|
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
_proxy_url = getattr(settings, "STATIC_PROXY_URL", "static_proxy").strip("/")
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^%s/$" % _proxy_url, "static_proxy", name="static_proxy"),
)
|
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^static_proxy/$", "static_proxy", name="static_proxy"),
)
Allow static proxy URL to be configured.
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
_proxy_url = getattr(settings, "STATIC_PROXY_URL", "static_proxy").strip("/")
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^%s/$" % _proxy_url, "static_proxy", name="static_proxy"),
)
|
<commit_before>
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^static_proxy/$", "static_proxy", name="static_proxy"),
)
<commit_msg>Allow static proxy URL to be configured.<commit_after>
from django.conf.urls.defaults import patterns, url
from mezzanine.conf import settings
urlpatterns = []
if "django.contrib.admin" in settings.INSTALLED_APPS:
urlpatterns += patterns("django.contrib.auth.views",
url("^password_reset/$", "password_reset", name="password_reset"),
("^password_reset/done/$", "password_reset_done"),
("^reset/(?P<uidb36>[-\w]+)/(?P<token>[-\w]+)/$",
"password_reset_confirm"),
("^reset/done/$", "password_reset_complete"),
)
_proxy_url = getattr(settings, "STATIC_PROXY_URL", "static_proxy").strip("/")
urlpatterns += patterns("mezzanine.core.views",
url("^edit/$", "edit", name="edit"),
url("^search/$", "search", name="search"),
url("^set_site/$", "set_site", name="set_site"),
url("^set_device/(?P<device>.*)/$", "set_device", name="set_device"),
url("^%s/$" % _proxy_url, "static_proxy", name="static_proxy"),
)
|
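Illustrative note on the setting introduced above: the getattr fallback means a project can override the proxy path from its own settings module. A minimal sketch, assuming a hypothetical value of "asset-proxy":
# settings.py (hypothetical override; any path segment works)
STATIC_PROXY_URL = "asset-proxy"
# the proxy view is then routed at ^asset-proxy/$ instead of the default ^static_proxy/$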
d3367d32dbc080d8e963542351c427fe2da48f18
|
talempd/amazing/MaxProfit.py
|
talempd/amazing/MaxProfit.py
|
def maxprofit(stockvals):
maxval, profit = 0, 0
for stockval in stockvals[::-1]:
maxval = max(maxval, stockval)
profit += maxval - stockval
return profit
stockvalues = [1, 3, 1, 2, 4]
print "Profit was: " + str(maxprofit(stockvalues))
|
Change name for maxprofit in amazing
|
Change name for maxprofit in amazing
|
Python
|
mit
|
cc13ny/Allin,Chasego/cod,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,Chasego/codi,cc13ny/algo,cc13ny/algo,Chasego/cod,Chasego/codi,cc13ny/Allin,cc13ny/algo,Chasego/cod,Chasego/codirit,cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/codi,Chasego/codi,Chasego/codirit,Chasego/codirit,Chasego/cod
|
Change name for maxprofit in amazing
|
def maxprofit(stockvals):
maxval, profit = 0, 0
for stockval in stockvals[::-1]:
maxval = max(maxval, stockval)
profit += maxval - stockval
return profit
stockvalues = [1, 3, 1, 2, 4]
print "Profit was: " + str(maxprofit(stockvalues))
|
<commit_before><commit_msg>Change name for maxprofit in amazing<commit_after>
|
def maxprofit(stockvals):
maxval, profit = 0, 0
for stockval in stockvals[::-1]:
maxval = max(maxval, stockval)
profit += maxval - stockval
return profit
stockvalues = [1, 3, 1, 2, 4]
print "Profit was: " + str(maxprofit(stockvalues))
|
Change name for maxprofit in amazingdef maxprofit(stockvals):
maxval, profit = 0, 0
for stockval in stockvals[::-1]:
maxval = max(maxval, stockval)
profit += maxval - stockval
return profit
stockvalues = [1, 3, 1, 2, 4]
print "Profit was: " + str(maxprofit(stockvalues))
|
<commit_before><commit_msg>Change name for maxprofit in amazing<commit_after>def maxprofit(stockvals):
maxval, profit = 0, 0
for stockval in stockvals[::-1]:
maxval = max(maxval, stockval)
profit += maxval - stockval
return profit
stockvalues = [1, 3, 1, 2, 4]
print "Profit was: " + str(maxprofit(stockvalues))
|
|
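Worked trace of the example input above: scanning stockvals right-to-left, maxval becomes 4 on the first step and never changes, so the accumulated profit is (4-4)+(4-2)+(4-1)+(4-3)+(4-1) = 0+2+3+1+3 = 9.
print maxprofit([1, 3, 1, 2, 4]) # prints 9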
272f285f1c9caa19c7c05bf92bd362cda9f762d1
|
experimental/directshow.py
|
experimental/directshow.py
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
Move win32 audio experiment to trunk.
|
Move win32 audio experiment to trunk.
|
Python
|
bsd-3-clause
|
mammadori/pyglet,oktayacikalin/pyglet,theblacklion/pyglet,theblacklion/pyglet,oktayacikalin/pyglet,oktayacikalin/pyglet,mammadori/pyglet,oktayacikalin/pyglet,oktayacikalin/pyglet,mammadori/pyglet,theblacklion/pyglet,theblacklion/pyglet,mammadori/pyglet,theblacklion/pyglet
|
Move win32 audio experiment to trunk.
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.<commit_after>
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
Move win32 audio experiment to trunk.#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.<commit_after>#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
|
4af2f405c1c09737bdb32842f8e87a730d9bedd3
|
photutils/psf/building/epsf.py
|
photutils/psf/building/epsf.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tools to build an ePSF.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import SigmaClip
__all__ = ['EPSFFitter', 'EPSFBuilder']
class EPSFFitter(object):
def __init__(self, psf, psf_fit_box=5, fitter=LevMarLSQFitter,
residuals=False, **kwargs):
self.psf = psf
self.psf_fit_box = psf_fit_box
self.fitter = fitter
self.residuals = residuals
self.fitter_kwargs = kwargs
def __call__(self, data, psf, star_table):
return self.fit_psf(data, psf, star_table)
def fit_psf(self, data, psf, star_table):
pass
class EPSFBuilder(object):
def __init__(self, peak_fit_box=5, peak_search_box='fitbox',
recenter_accuracy=1.0e-4, recenter_max_iters=1000,
ignore_badfit_stars=True, stat='median',
sigma_clip=SigmaClip(sigma=3., iters=10),
smoothing_kernel='quar', fitter=EPSFFitter(residuals=True),
max_iters=50, accuracy=1e-4, epsf=None):
self.peak_fit_box = peak_fit_box
self.peak_search_box = peak_search_box
self.recenter_accuracy = recenter_accuracy
self.recenter_max_iters = recenter_max_iters
self.ignore_badfit_stars = ignore_badfit_stars
self.stat = stat
self.sigma_clip = sigma_clip
self.smoothing_kernel = smoothing_kernel
self.fitter = fitter
self.max_iters = max_iters
self.accuracy = accuracy
self.epsf = epsf
def __call__(self, data, star_table):
return self.build_psf(data, star_table)
def build_psf(self, data, star_table):
pass
|
Add initial EPSFBuilder and EPSFFitter classes
|
Add initial EPSFBuilder and EPSFFitter classes
|
Python
|
bsd-3-clause
|
astropy/photutils,larrybradley/photutils
|
Add initial EPSFBuilder and EPSFFitter classes
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tools to build an ePSF.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import SigmaClip
__all__ = ['EPSFFitter', 'EPSFBuilder']
class EPSFFitter(object):
def __init__(self, psf, psf_fit_box=5, fitter=LevMarLSQFitter,
residuals=False, **kwargs):
self.psf = psf
self.psf_fit_box = psf_fit_box
self.fitter = fitter
self.residuals = residuals
self.fitter_kwargs = kwargs
def __call__(self, data, psf, star_table):
return self.fit_psf(data, psf, star_table)
def fit_psf(self, data, psf, star_table):
pass
class EPSFBuilder(object):
def __init__(self, peak_fit_box=5, peak_search_box='fitbox',
recenter_accuracy=1.0e-4, recenter_max_iters=1000,
ignore_badfit_stars=True, stat='median',
sigma_clip=SigmaClip(sigma=3., iters=10),
smoothing_kernel='quar', fitter=EPSFFitter(residuals=True),
max_iters=50, accuracy=1e-4, epsf=None):
self.peak_fit_box = peak_fit_box
self.peak_search_box = peak_search_box
self.recenter_accuracy = recenter_accuracy
self.recenter_max_iters = recenter_max_iters
self.ignore_badfit_stars = ignore_badfit_stars
self.stat = stat
self.sigma_clip = sigma_clip
self.smoothing_kernel = smoothing_kernel
self.fitter = fitter
self.max_iters = max_iters
self.accuracy = accuracy
self.epsf = epsf
def __call__(self, data, star_table):
return self.build_psf(data, star_table)
def build_psf(self, data, star_table):
pass
|
<commit_before><commit_msg>Add initial EPSFBuilder and EPSFFitter classes<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tools to build an ePSF.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import SigmaClip
__all__ = ['EPSFFitter', 'EPSFBuilder']
class EPSFFitter(object):
def __init__(self, psf, psf_fit_box=5, fitter=LevMarLSQFitter,
residuals=False, **kwargs):
self.psf = psf
self.psf_fit_box = psf_fit_box
self.fitter = fitter
self.residuals = residuals
self.fitter_kwargs = kwargs
def __call__(self, data, psf, star_table):
return self.fit_psf(data, psf, star_table)
def fit_psf(self, data, psf, star_table):
pass
class EPSFBuilder(object):
def __init__(self, peak_fit_box=5, peak_search_box='fitbox',
recenter_accuracy=1.0e-4, recenter_max_iters=1000,
ignore_badfit_stars=True, stat='median',
sigma_clip=SigmaClip(sigma=3., iters=10),
smoothing_kernel='quar', fitter=EPSFFitter(residuals=True),
max_iters=50, accuracy=1e-4, epsf=None):
self.peak_fit_box = peak_fit_box
self.peak_search_box = peak_search_box
self.recenter_accuracy = recenter_accuracy
self.recenter_max_iters = recenter_max_iters
self.ignore_badfit_stars = ignore_badfit_stars
self.stat = stat
self.sigma_clip = sigma_clip
self.smoothing_kernel = smoothing_kernel
self.fitter = fitter
self.max_iters = max_iters
self.accuracy = accuracy
self.epsf = epsf
def __call__(self, data, star_table):
return self.build_psf(data, star_table)
def build_psf(self, data, star_table):
pass
|
Add initial EPSFBuilder and EPSFFitter classes# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tools to build an ePSF.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import SigmaClip
__all__ = ['EPSFFitter', 'EPSFBuilder']
class EPSFFitter(object):
def __init__(self, psf, psf_fit_box=5, fitter=LevMarLSQFitter,
residuals=False, **kwargs):
self.psf = psf
self.psf_fit_box = psf_fit_box
self.fitter = fitter
self.residuals = residuals
self.fitter_kwargs = kwargs
def __call__(self, data, psf, star_table):
return self.fit_psf(data, psf, star_table)
def fit_psf(self, data, psf, star_table):
pass
class EPSFBuilder(object):
def __init__(self, peak_fit_box=5, peak_search_box='fitbox',
recenter_accuracy=1.0e-4, recenter_max_iters=1000,
ignore_badfit_stars=True, stat='median',
sigma_clip=SigmaClip(sigma=3., iters=10),
smoothing_kernel='quar', fitter=EPSFFitter(residuals=True),
max_iters=50, accuracy=1e-4, epsf=None):
self.peak_fit_box = peak_fit_box
self.peak_search_box = peak_search_box
self.recenter_accuracy = recenter_accuracy
self.recenter_max_iters = recenter_max_iters
self.ignore_badfit_stars = ignore_badfit_stars
self.stat = stat
self.sigma_clip = sigma_clip
self.smoothing_kernel = smoothing_kernel
self.fitter = fitter
self.max_iters = max_iters
self.accuracy = accuracy
self.epsf = epsf
def __call__(self, data, star_table):
return self.build_psf(data, star_table)
def build_psf(self, data, star_table):
pass
|
<commit_before><commit_msg>Add initial EPSFBuilder and EPSFFitter classes<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tools to build an ePSF.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.stats import SigmaClip
__all__ = ['EPSFFitter', 'EPSFBuilder']
class EPSFFitter(object):
def __init__(self, psf, psf_fit_box=5, fitter=LevMarLSQFitter,
residuals=False, **kwargs):
self.psf = psf
self.psf_fit_box = psf_fit_box
self.fitter = fitter
self.residuals = residuals
self.fitter_kwargs = kwargs
def __call__(self, data, psf, star_table):
return self.fit_psf(data, psf, star_table)
def fit_psf(self, data, psf, star_table):
pass
class EPSFBuilder(object):
def __init__(self, peak_fit_box=5, peak_search_box='fitbox',
recenter_accuracy=1.0e-4, recenter_max_iters=1000,
ignore_badfit_stars=True, stat='median',
sigma_clip=SigmaClip(sigma=3., iters=10),
smoothing_kernel='quar', fitter=EPSFFitter(residuals=True),
max_iters=50, accuracy=1e-4, epsf=None):
self.peak_fit_box = peak_fit_box
self.peak_search_box = peak_search_box
self.recenter_accuracy = recenter_accuracy
self.recenter_max_iters = recenter_max_iters
self.ignore_badfit_stars = ignore_badfit_stars
self.stat = stat
self.sigma_clip = sigma_clip
self.smoothing_kernel = smoothing_kernel
self.fitter = fitter
self.max_iters = max_iters
self.accuracy = accuracy
self.epsf = epsf
def __call__(self, data, star_table):
return self.build_psf(data, star_table)
def build_psf(self, data, star_table):
pass
|
|
96af482f65385ebe9a4da1606d12875c7eaf320f
|
pombola/south_africa/management/commands/south_africa_restart_constituency_contacts.py
|
pombola/south_africa/management/commands/south_africa_restart_constituency_contacts.py
|
from datetime import date
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import PositionTitle
# A few days before the election:
date_for_last_active_check = date(2014, 5, 1)
# The date of the final results being announced:
date_to_start_new_positions = date(2014, 5, 10)
class Command(NoArgsCommand):
"""Restart constituency contact positions for re-elected MPs and MPLs"""
help = 'Restart constituency contact positions for re-elected MPs and MPLs'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
pt = PositionTitle.objects.get(name='Constituency Contact')
for old_position in pt.position_set.all(). \
currently_active(date_for_last_active_check):
person = old_position.person
print "Considering", old_position
active_positions = person.position_set.all().currently_active()
# Are they currently an MP or an MPL?
na_memberships = active_positions.filter(
organisation__slug='national-assembly',
title__slug='member')
# FIXME: Why are there two representations of MPLs?
pl_memberships = active_positions.filter(
title__slug='member',
organisation__kind__slug='provincial-legislature')
pl_memberships2 = active_positions.filter(
title__slug='member-of-the-provincial-legislature')
restart = False
if na_memberships:
print " Restarting because", person, "is currently a Member of the National Assembly"
restart = True
if pl_memberships or pl_memberships2:
print " Restarting because", person, "is currently a Member of a Provincial Legislature"
restart = True
if restart:
# Set the primary key to None so that when we save it,
# that creates a new row:
old_position.pk = None
old_position.start_date = ApproximateDate(
*date_to_start_new_positions.timetuple()[0:3]
)
old_position.end_date = ApproximateDate(future=True)
if options['commit']:
print " Saving the new position"
old_position.save()
else:
print " Not saving the new position (--commit not specified)"
|
Add a script to restart constituency contact positions
|
Add a script to restart constituency contact positions
We need this because it will be some time before we have new
constituency contact information. It's reasonable to guess, however,
that people who were previously MPs or MPLs and constituency contacts
who have been re-elected will still be constituency contacts. This
script finds those people and restarts their constituency contact
positions.
This is part of the fix for #1466.
|
Python
|
agpl-3.0
|
hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,hzj123/56th,patricmutwiri/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,ken-muturi/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola
|
Add a script to restart constituency contact positions
We need this because it will be some time before we have new
constituency contact information. It's reasonable to guess, however,
that people who were previously MPs or MPLs and constituency contacts
who have been re-elected will still be constituency contacts. This
script finds those people and restarts their constituency contact
positions.
This is part of the fix for #1466.
|
from datetime import date
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import PositionTitle
# A few days before the election:
date_for_last_active_check = date(2014, 5, 1)
# The date of the final results being announced:
date_to_start_new_positions = date(2014, 5, 10)
class Command(NoArgsCommand):
"""Restart constituency contact positions for re-elected MPs and MPLs"""
help = 'Restart constituency contact positions for re-elected MPs and MPLs'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
pt = PositionTitle.objects.get(name='Constituency Contact')
for old_position in pt.position_set.all(). \
currently_active(date_for_last_active_check):
person = old_position.person
print "Considering", old_position
active_positions = person.position_set.all().currently_active()
# Are they currently an MP or an MPL?
na_memberships = active_positions.filter(
organisation__slug='national-assembly',
title__slug='member')
# FIXME: Why are there two representations of MPLs?
pl_memberships = active_positions.filter(
title__slug='member',
organisation__kind__slug='provincial-legislature')
pl_memberships2 = active_positions.filter(
title__slug='member-of-the-provincial-legislature')
restart = False
if na_memberships:
print " Restarting because", person, "is currently a Member of the National Assembly"
restart = True
if pl_memberships or pl_memberships2:
print " Restarting because", person, "is currently a Member of a Provincial Legislature"
restart = True
if restart:
# Set the primary key to None so that when we save it,
# that creates a new row:
old_position.pk = None
old_position.start_date = ApproximateDate(
*date_to_start_new_positions.timetuple()[0:3]
)
old_position.end_date = ApproximateDate(future=True)
if options['commit']:
print " Saving the new position"
old_position.save()
else:
print " Not saving the new position (--commit not specified)"
|
<commit_before><commit_msg>Add a script to restart constituency contact positions
We need this because it will be some time before we have new
constituency contact information. It's reasonable to guess, however,
that people who were previously MPs or MPLs and constituency contacts
who have been re-elected will still be constituency contacts. This
script finds those people and restarts their constituency contact
positions.
This is part of the fix for #1466.<commit_after>
|
from datetime import date
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import PositionTitle
# A few days before the election:
date_for_last_active_check = date(2014, 5, 1)
# The date of the final results being announced:
date_to_start_new_positions = date(2014, 5, 10)
class Command(NoArgsCommand):
"""Restart constituency contact positions for re-elected MPs and MPLs"""
help = 'Restart constituency contact positions for re-elected MPs and MPLs'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
pt = PositionTitle.objects.get(name='Constituency Contact')
for old_position in pt.position_set.all(). \
currently_active(date_for_last_active_check):
person = old_position.person
print "Considering", old_position
active_positions = person.position_set.all().currently_active()
# Are they currently an MP or an MPL?
na_memberships = active_positions.filter(
organisation__slug='national-assembly',
title__slug='member')
# FIXME: Why are there two representations of MPLs?
pl_memberships = active_positions.filter(
title__slug='member',
organisation__kind__slug='provincial-legislature')
pl_memberships2 = active_positions.filter(
title__slug='member-of-the-provincial-legislature')
restart = False
if na_memberships:
print " Restarting because", person, "is currently a Member of the National Assembly"
restart = True
if pl_memberships or pl_memberships2:
print " Restarting because", person, "is currently a Member of a Provincial Legislature"
restart = True
if restart:
# Set the primary key to None so that when we save it,
# that creates a new row:
old_position.pk = None
old_position.start_date = ApproximateDate(
*date_to_start_new_positions.timetuple()[0:3]
)
old_position.end_date = ApproximateDate(future=True)
if options['commit']:
print " Saving the new position"
old_position.save()
else:
print " Not saving the new position (--commit not specified)"
|
Add a script to restart constituency contact positions
We need this because it will be some time before we have new
constituency contact information. It's reasonable to guess, however,
that people who were previously MPs or MPLs and constituency contacts
who have been re-elected will still be constituency contacts. This
script finds those people and restarts their constituency contact
positions.
This is part of the fix for #1466.from datetime import date
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import PositionTitle
# A few days before the election:
date_for_last_active_check = date(2014, 5, 1)
# The date of the final results being announced:
date_to_start_new_positions = date(2014, 5, 10)
class Command(NoArgsCommand):
"""Restart constituency contact positions for re-elected MPs and MPLs"""
help = 'Restart constituency contact positions for re-elected MPs and MPLs'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
pt = PositionTitle.objects.get(name='Constituency Contact')
for old_position in pt.position_set.all(). \
currently_active(date_for_last_active_check):
person = old_position.person
print "Considering", old_position
active_positions = person.position_set.all().currently_active()
# Are they currently an MP or an MPL?
na_memberships = active_positions.filter(
organisation__slug='national-assembly',
title__slug='member')
# FIXME: Why are there two representations of MPLs?
pl_memberships = active_positions.filter(
title__slug='member',
organisation__kind__slug='provincial-legislature')
pl_memberships2 = active_positions.filter(
title__slug='member-of-the-provincial-legislature')
restart = False
if na_memberships:
print " Restarting because", person, "is currently a Member of the National Assembly"
restart = True
if pl_memberships or pl_memberships2:
print " Restarting because", person, "is currently a Member of a Provincial Legislature"
restart = True
if restart:
# Set the primary key to None so that when we save it,
# that creates a new row:
old_position.pk = None
old_position.start_date = ApproximateDate(
*date_to_start_new_positions.timetuple()[0:3]
)
old_position.end_date = ApproximateDate(future=True)
if options['commit']:
print " Saving the new position"
old_position.save()
else:
print " Not saving the new position (--commit not specified)"
|
<commit_before><commit_msg>Add a script to restart constituency contact positions
We need this because it will be some time before we have new
constituency contact information. It's reasonable to guess, however,
that people who were previously MPs or MPLs and constituency contacts
who have been re-elected will still be constituency contacts. This
script finds those people and restarts their constituency contact
positions.
This is part of the fix for #1466.<commit_after>from datetime import date
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import PositionTitle
# A few days before the election:
date_for_last_active_check = date(2014, 5, 1)
# The date of the final results being announced:
date_to_start_new_positions = date(2014, 5, 10)
class Command(NoArgsCommand):
"""Restart constituency contact positions for re-elected MPs and MPLs"""
help = 'Restart constituency contact positions for re-elected MPs and MPLs'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
pt = PositionTitle.objects.get(name='Constituency Contact')
for old_position in pt.position_set.all(). \
currently_active(date_for_last_active_check):
person = old_position.person
print "Considering", old_position
active_positions = person.position_set.all().currently_active()
# Are they currently an MP or an MPL?
na_memberships = active_positions.filter(
organisation__slug='national-assembly',
title__slug='member')
# FIXME: Why are there two representations of MPLs?
pl_memberships = active_positions.filter(
title__slug='member',
organisation__kind__slug='provincial-legislature')
pl_memberships2 = active_positions.filter(
title__slug='member-of-the-provincial-legislature')
restart = False
if na_memberships:
print " Restarting because", person, "is currently a Member of the National Assembly"
restart = True
if pl_memberships or pl_memberships2:
print " Restarting because", person, "is currently a Member of a Provincial Legislature"
restart = True
if restart:
# Set the primary key to None so that when we save it,
# that creates a new row:
old_position.pk = None
old_position.start_date = ApproximateDate(
*date_to_start_new_positions.timetuple()[0:3]
)
old_position.end_date = ApproximateDate(future=True)
if options['commit']:
print " Saving the new position"
old_position.save()
else:
print " Not saving the new position (--commit not specified)"
|
|
0aa1f60fc00d488cf662f16cf966e7a6b6af43a0
|
scripts/collapse_xls_to_csv.py
|
scripts/collapse_xls_to_csv.py
|
#!/usr/bin/env python3
import os
import logging
import csv
import argparse
import xlrd
logger = logging.getLogger()
def main(args):
infile_path = os.path.abspath(args.infile)
if infile_path.endswith('.xls') or infile_path.endswith('.xlsx'):
book = xlrd.open_workbook(infile_path)
sheet_names = book.sheet_names()
logger.info(", ".format(sheet_names))
else:
logger.error("Input file should be an Excel file ending with .xls or .xlsx")
writer = csv.writer(args.outfile, quoting=csv.QUOTE_NONNUMERIC) if args.outfile else None
headers_written = False
for name in sheet_names:
sheet = book.sheet_by_name(name)
if not headers_written:
headerrow = sheet.row(0)
headers = [h.value for h in headerrow]
if writer:
writer.writerow(headers)
else:
logger.info("Headers: {}".format("|".join(headers)))
headers_written = True
print("Processing: '{}'".format(name))
for rowx in range(1, sheet.nrows):
row = sheet.row(rowx)
row_vals = [r.value for r in row]
if writer:
writer.writerow(row_vals)
else:
logger.info("|{}|".format("|".join(row_vals)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert an Excel file of sheets with uniform columns to a single CSV file')
parser.add_argument('infile', type=str, help='Path to the Excel file to convert')
parser.add_argument('outfile', type=argparse.FileType('w'))
args = parser.parse_args()
main(args)
|
Add script to convert a multi-sheet Excel to a single csv.
|
Add script to convert a multi-sheet Excel to a single csv.
|
Python
|
bsd-3-clause
|
dmc2015/hall-of-justice,dmc2015/hall-of-justice,sunlightlabs/hall-of-justice,dmc2015/hall-of-justice,sunlightlabs/hall-of-justice,sunlightlabs/hall-of-justice
|
Add script to convert a multi-sheet Excel to a single csv.
|
#!/usr/bin/env python3
import os
import logging
import csv
import argparse
import xlrd
logger = logging.getLogger()
def main(args):
infile_path = os.path.abspath(args.infile)
if infile_path.endswith('.xls') or infile_path.endswith('.xlsx'):
book = xlrd.open_workbook(infile_path)
sheet_names = book.sheet_names()
logger.info(", ".format(sheet_names))
else:
logger.error("Input file should be an Excel file ending with .xls or .xlsx")
writer = csv.writer(args.outfile, quoting=csv.QUOTE_NONNUMERIC) if args.outfile else None
headers_written = False
for name in sheet_names:
sheet = book.sheet_by_name(name)
if not headers_written:
headerrow = sheet.row(0)
headers = [h.value for h in headerrow]
if writer:
writer.writerow(headers)
else:
logger.info("Headers: {}".format("|".join(headers)))
headers_written = True
print("Processing: '{}'".format(name))
for rowx in range(1, sheet.nrows):
row = sheet.row(rowx)
row_vals = [r.value for r in row]
if writer:
writer.writerow(row_vals)
else:
logger.info("|{}|".format("|".join(row_vals)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert an Excel file of sheets with uniform columns to a single CSV file')
parser.add_argument('infile', type=str, help='Path to the Excel file to convert')
parser.add_argument('outfile', type=argparse.FileType('w'))
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add script to convert a multi-sheet Excel to a single csv.<commit_after>
|
#!/usr/bin/env python3
import os
import logging
import csv
import argparse
import xlrd
logger = logging.getLogger()
def main(args):
infile_path = os.path.abspath(args.infile)
if infile_path.endswith('.xls') or infile_path.endswith('.xlsx'):
book = xlrd.open_workbook(infile_path)
sheet_names = book.sheet_names()
logger.info(", ".format(sheet_names))
else:
logger.error("Input file should be an Excel file ending with .xls or .xlsx")
writer = csv.writer(args.outfile, quoting=csv.QUOTE_NONNUMERIC) if args.outfile else None
headers_written = False
for name in sheet_names:
sheet = book.sheet_by_name(name)
if not headers_written:
headerrow = sheet.row(0)
headers = [h.value for h in headerrow]
if writer:
writer.writerow(headers)
else:
logger.info("Headers: {}".format("|".join(headers)))
headers_written = True
print("Processing: '{}'".format(name))
for rowx in range(1, sheet.nrows):
row = sheet.row(rowx)
row_vals = [r.value for r in row]
if writer:
writer.writerow(row_vals)
else:
logger.info("|{}|".format("|".join(row_vals)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert an Excel file of sheets with uniform columns to a single CSV file')
parser.add_argument('infile', type=str, help='Path to the Excel file to convert')
parser.add_argument('outfile', type=argparse.FileType('w'))
args = parser.parse_args()
main(args)
|
Add script to convert a multi-sheet Excel to a single csv.#!/usr/bin/env python3
import os
import logging
import csv
import argparse
import xlrd
logger = logging.getLogger()
def main(args):
infile_path = os.path.abspath(args.infile)
if infile_path.endswith('.xls') or infile_path.endswith('.xlsx'):
book = xlrd.open_workbook(infile_path)
sheet_names = book.sheet_names()
logger.info(", ".format(sheet_names))
else:
logger.error("Input file should be an Excel file ending with .xls or .xlsx")
writer = csv.writer(args.outfile, quoting=csv.QUOTE_NONNUMERIC) if args.outfile else None
headers_written = False
for name in sheet_names:
sheet = book.sheet_by_name(name)
if not headers_written:
headerrow = sheet.row(0)
headers = [h.value for h in headerrow]
if writer:
writer.writerow(headers)
else:
logger.info("Headers: {}".format("|".join(headers)))
headers_written = True
print("Processing: '{}'".format(name))
for rowx in range(1, sheet.nrows):
row = sheet.row(rowx)
row_vals = [r.value for r in row]
if writer:
writer.writerow(row_vals)
else:
logger.info("|{}|".format("|".join(row_vals)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert an Excel file of sheets with uniform columns to a single CSV file')
parser.add_argument('infile', type=str, help='Path to the Excel file to convert')
parser.add_argument('outfile', type=argparse.FileType('w'))
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add script to convert a multi-sheet Excel to a single csv.<commit_after>#!/usr/bin/env python3
import os
import logging
import csv
import argparse
import xlrd
logger = logging.getLogger()
def main(args):
infile_path = os.path.abspath(args.infile)
if infile_path.endswith('.xls') or infile_path.endswith('.xlsx'):
book = xlrd.open_workbook(infile_path)
sheet_names = book.sheet_names()
logger.info(", ".format(sheet_names))
else:
logger.error("Input file should be an Excel file ending with .xls or .xlsx")
writer = csv.writer(args.outfile, quoting=csv.QUOTE_NONNUMERIC) if args.outfile else None
headers_written = False
for name in sheet_names:
sheet = book.sheet_by_name(name)
if not headers_written:
headerrow = sheet.row(0)
headers = [h.value for h in headerrow]
if writer:
writer.writerow(headers)
else:
logger.info("Headers: {}".format("|".join(headers)))
headers_written = True
print("Processing: '{}'".format(name))
for rowx in range(1, sheet.nrows):
row = sheet.row(rowx)
row_vals = [r.value for r in row]
if writer:
writer.writerow(row_vals)
else:
logger.info("|{}|".format("|".join(row_vals)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert an Excel file of sheets with uniform columns to a single CSV file')
parser.add_argument('infile', type=str, help='Path to the Excel file to convert')
parser.add_argument('outfile', type=argparse.FileType('w'))
args = parser.parse_args()
main(args)
|
|
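A usage sketch for the script above (file names are invented; infile and outfile are the only arguments argparse defines):
python3 collapse_xls_to_csv.py workbook.xlsx combined.csv
# the header row is written once from the first sheet; the remaining rows of every sheet are appended to combined.csv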
4c008042d01be6020a863855f74d8ee931dab46e
|
pdsspect/transforms.py
|
pdsspect/transforms.py
|
from qtpy import QtWidgets
from .pdsspect_image_set import PDSSpectImageSetViewBase
class TransformsController(object):
def __init__(self, image_set, view):
self.image_set = image_set
self.view = view
def set_flip_x(self, flip_x):
self.image_set.flip_x = flip_x
def set_flip_y(self, flip_y):
self.image_set.flip_y = flip_y
def set_swap_xy(self, swap_xy):
self.image_set.swap_xy = swap_xy
class Transforms(QtWidgets.QDialog, PDSSpectImageSetViewBase):
def __init__(self, image_set, view_canvas):
super(Transforms, self).__init__()
self.image_set = image_set
self.view_canvas = view_canvas
self.image_set.register(self)
self.controller = TransformsController(image_set, self)
self.flip_x_label = QtWidgets.QLabel("Flip X Axes")
self.flip_x_box = QtWidgets.QCheckBox()
self.flip_x_box.stateChanged.connect(self.flip_x_checked)
self.flip_y_label = QtWidgets.QLabel("Flip Y Axes")
self.flip_y_box = QtWidgets.QCheckBox()
self.flip_y_box.stateChanged.connect(self.flip_y_checked)
self.swap_xy_label = QtWidgets.QLabel("Swap X and Y Axes")
self.swap_xy_box = QtWidgets.QCheckBox()
self.swap_xy_box.stateChanged.connect(self.swap_xy_checked)
self.layout = QtWidgets.QGridLayout()
self.layout.addWidget(self.flip_x_label, 0, 0)
self.layout.addWidget(self.flip_x_box, 0, 1)
self.layout.addWidget(self.flip_y_label, 1, 0)
self.layout.addWidget(self.flip_y_box, 1, 1)
self.layout.addWidget(self.swap_xy_label, 2, 0)
self.layout.addWidget(self.swap_xy_box, 2, 1)
self.setWindowTitle("Transformations")
self.setLayout(self.layout)
def flip_x_checked(self, state):
if self.flip_x_box.isChecked():
self.controller.set_flip_x(True)
else:
self.controller.set_flip_x(False)
def flip_y_checked(self, state):
if self.flip_y_box.isChecked():
self.controller.set_flip_y(True)
else:
self.controller.set_flip_y(False)
def swap_xy_checked(self, state):
if self.swap_xy_box.isChecked():
self.controller.set_swap_xy(True)
else:
self.controller.set_swap_xy(False)
|
Handle flipping the image with check boxes
|
Handle flipping the image with check boxes
|
Python
|
bsd-3-clause
|
planetarypy/pdsspect
|
Handle flipping the image with check boxes
|
from qtpy import QtWidgets
from .pdsspect_image_set import PDSSpectImageSetViewBase
class TransformsController(object):
def __init__(self, image_set, view):
self.image_set = image_set
self.view = view
def set_flip_x(self, flip_x):
self.image_set.flip_x = flip_x
def set_flip_y(self, flip_y):
self.image_set.flip_y = flip_y
def set_swap_xy(self, swap_xy):
self.image_set.swap_xy = swap_xy
class Transforms(QtWidgets.QDialog, PDSSpectImageSetViewBase):
def __init__(self, image_set, view_canvas):
super(Transforms, self).__init__()
self.image_set = image_set
self.view_canvas = view_canvas
self.image_set.register(self)
self.controller = TransformsController(image_set, self)
self.flip_x_label = QtWidgets.QLabel("Flip X Axes")
self.flip_x_box = QtWidgets.QCheckBox()
self.flip_x_box.stateChanged.connect(self.flip_x_checked)
self.flip_y_label = QtWidgets.QLabel("Flip Y Axes")
self.flip_y_box = QtWidgets.QCheckBox()
self.flip_y_box.stateChanged.connect(self.flip_y_checked)
self.swap_xy_label = QtWidgets.QLabel("Swap X and Y Axes")
self.swap_xy_box = QtWidgets.QCheckBox()
self.swap_xy_box.stateChanged.connect(self.swap_xy_checked)
self.layout = QtWidgets.QGridLayout()
self.layout.addWidget(self.flip_x_label, 0, 0)
self.layout.addWidget(self.flip_x_box, 0, 1)
self.layout.addWidget(self.flip_y_label, 1, 0)
self.layout.addWidget(self.flip_y_box, 1, 1)
self.layout.addWidget(self.swap_xy_label, 2, 0)
self.layout.addWidget(self.swap_xy_box, 2, 1)
self.setWindowTitle("Transformations")
self.setLayout(self.layout)
def flip_x_checked(self, state):
if self.flip_x_box.isChecked():
self.controller.set_flip_x(True)
else:
self.controller.set_flip_x(False)
def flip_y_checked(self, state):
if self.flip_y_box.isChecked():
self.controller.set_flip_y(True)
else:
self.controller.set_flip_y(False)
def swap_xy_checked(self, state):
if self.swap_xy_box.isChecked():
self.controller.set_swap_xy(True)
else:
self.controller.set_swap_xy(False)
|
<commit_before><commit_msg>Handle flipping the image with check boxes<commit_after>
|
from qtpy import QtWidgets
from .pdsspect_image_set import PDSSpectImageSetViewBase
class TransformsController(object):
def __init__(self, image_set, view):
self.image_set = image_set
self.view = view
def set_flip_x(self, flip_x):
self.image_set.flip_x = flip_x
def set_flip_y(self, flip_y):
self.image_set.flip_y = flip_y
def set_swap_xy(self, swap_xy):
self.image_set.swap_xy = swap_xy
class Transforms(QtWidgets.QDialog, PDSSpectImageSetViewBase):
def __init__(self, image_set, view_canvas):
super(Transforms, self).__init__()
self.image_set = image_set
self.view_canvas = view_canvas
self.image_set.register(self)
self.controller = TransformsController(image_set, self)
self.flip_x_label = QtWidgets.QLabel("Flip X Axes")
self.flip_x_box = QtWidgets.QCheckBox()
self.flip_x_box.stateChanged.connect(self.flip_x_checked)
self.flip_y_label = QtWidgets.QLabel("Flip Y Axes")
self.flip_y_box = QtWidgets.QCheckBox()
self.flip_y_box.stateChanged.connect(self.flip_y_checked)
self.swap_xy_label = QtWidgets.QLabel("Swap X and Y Axes")
self.swap_xy_box = QtWidgets.QCheckBox()
self.swap_xy_box.stateChanged.connect(self.swap_xy_checked)
self.layout = QtWidgets.QGridLayout()
self.layout.addWidget(self.flip_x_label, 0, 0)
self.layout.addWidget(self.flip_x_box, 0, 1)
self.layout.addWidget(self.flip_y_label, 1, 0)
self.layout.addWidget(self.flip_y_box, 1, 1)
self.layout.addWidget(self.swap_xy_label, 2, 0)
self.layout.addWidget(self.swap_xy_box, 2, 1)
self.setWindowTitle("Transformations")
self.setLayout(self.layout)
def flip_x_checked(self, state):
if self.flip_x_box.isChecked():
self.controller.set_flip_x(True)
else:
self.controller.set_flip_x(False)
def flip_y_checked(self, state):
if self.flip_y_box.isChecked():
self.controller.set_flip_y(True)
else:
self.controller.set_flip_y(False)
def swap_xy_checked(self, state):
if self.swap_xy_box.isChecked():
self.controller.set_swap_xy(True)
else:
self.controller.set_swap_xy(False)
|
Handle flipping the image with check boxesfrom qtpy import QtWidgets
from .pdsspect_image_set import PDSSpectImageSetViewBase
class TransformsController(object):
def __init__(self, image_set, view):
self.image_set = image_set
self.view = view
def set_flip_x(self, flip_x):
self.image_set.flip_x = flip_x
def set_flip_y(self, flip_y):
self.image_set.flip_y = flip_y
def set_swap_xy(self, swap_xy):
self.image_set.swap_xy = swap_xy
class Transforms(QtWidgets.QDialog, PDSSpectImageSetViewBase):
def __init__(self, image_set, view_canvas):
super(Transforms, self).__init__()
self.image_set = image_set
self.view_canvas = view_canvas
self.image_set.register(self)
self.controller = TransformsController(image_set, self)
self.flip_x_label = QtWidgets.QLabel("Flip X Axes")
self.flip_x_box = QtWidgets.QCheckBox()
self.flip_x_box.stateChanged.connect(self.flip_x_checked)
self.flip_y_label = QtWidgets.QLabel("Flip Y Axes")
self.flip_y_box = QtWidgets.QCheckBox()
self.flip_y_box.stateChanged.connect(self.flip_y_checked)
self.swap_xy_label = QtWidgets.QLabel("Swap X and Y Axes")
self.swap_xy_box = QtWidgets.QCheckBox()
self.swap_xy_box.stateChanged.connect(self.swap_xy_checked)
self.layout = QtWidgets.QGridLayout()
self.layout.addWidget(self.flip_x_label, 0, 0)
self.layout.addWidget(self.flip_x_box, 0, 1)
self.layout.addWidget(self.flip_y_label, 1, 0)
self.layout.addWidget(self.flip_y_box, 1, 1)
self.layout.addWidget(self.swap_xy_label, 2, 0)
self.layout.addWidget(self.swap_xy_box, 2, 1)
self.setWindowTitle("Transformations")
self.setLayout(self.layout)
def flip_x_checked(self, state):
if self.flip_x_box.isChecked():
self.controller.set_flip_x(True)
else:
self.controller.set_flip_x(False)
def flip_y_checked(self, state):
if self.flip_y_box.isChecked():
self.controller.set_flip_y(True)
else:
self.controller.set_flip_y(False)
def swap_xy_checked(self, state):
if self.swap_xy_box.isChecked():
self.controller.set_swap_xy(True)
else:
self.controller.set_swap_xy(False)
|
<commit_before><commit_msg>Handle flipping the image with check boxes<commit_after>from qtpy import QtWidgets
from .pdsspect_image_set import PDSSpectImageSetViewBase
class TransformsController(object):
def __init__(self, image_set, view):
self.image_set = image_set
self.view = view
def set_flip_x(self, flip_x):
self.image_set.flip_x = flip_x
def set_flip_y(self, flip_y):
self.image_set.flip_y = flip_y
def set_swap_xy(self, swap_xy):
self.image_set.swap_xy = swap_xy
class Transforms(QtWidgets.QDialog, PDSSpectImageSetViewBase):
def __init__(self, image_set, view_canvas):
super(Transforms, self).__init__()
self.image_set = image_set
self.view_canvas = view_canvas
self.image_set.register(self)
self.controller = TransformsController(image_set, self)
self.flip_x_label = QtWidgets.QLabel("Flip X Axes")
self.flip_x_box = QtWidgets.QCheckBox()
self.flip_x_box.stateChanged.connect(self.flip_x_checked)
self.flip_y_label = QtWidgets.QLabel("Flip Y Axes")
self.flip_y_box = QtWidgets.QCheckBox()
self.flip_y_box.stateChanged.connect(self.flip_y_checked)
self.swap_xy_label = QtWidgets.QLabel("Swap X and Y Axes")
self.swap_xy_box = QtWidgets.QCheckBox()
self.swap_xy_box.stateChanged.connect(self.swap_xy_checked)
self.layout = QtWidgets.QGridLayout()
self.layout.addWidget(self.flip_x_label, 0, 0)
self.layout.addWidget(self.flip_x_box, 0, 1)
self.layout.addWidget(self.flip_y_label, 1, 0)
self.layout.addWidget(self.flip_y_box, 1, 1)
self.layout.addWidget(self.swap_xy_label, 2, 0)
self.layout.addWidget(self.swap_xy_box, 2, 1)
self.setWindowTitle("Transformations")
self.setLayout(self.layout)
def flip_x_checked(self, state):
if self.flip_x_box.isChecked():
self.controller.set_flip_x(True)
else:
self.controller.set_flip_x(False)
def flip_y_checked(self, state):
if self.flip_y_box.isChecked():
self.controller.set_flip_y(True)
else:
self.controller.set_flip_y(False)
def swap_xy_checked(self, state):
if self.swap_xy_box.isChecked():
self.controller.set_swap_xy(True)
else:
self.controller.set_swap_xy(False)
|
|
0cb5f00975570a115322ab028dcb93c92c2e0872
|
tests/packets/test_message.py
|
tests/packets/test_message.py
|
from cactusbot.packets import MessagePacket
def _split(text, *args, **kwargs):
return [
component.text
for component in
MessagePacket(text).split(*args, **kwargs)
]
def test_split():
assert _split("0 1 2 3") == ['0', '1', '2', '3']
assert _split("0 1 2 3", "2") == ['0 1 ', ' 3']
assert _split("0 1 2 3", maximum=2) == ['0', '1', '2 3']
assert _split("0 1 2 3 ") == ['0', '1', '2', '3']
|
Add simple tests for MessagePacket
|
Add simple tests for MessagePacket
Currently only MessagePacket.split(). Needs to be improved.
|
Python
|
mit
|
CactusDev/CactusBot
|
Add simple tests for MessagePacket
Currently only MessagePacket.split(). Needs to be improved.
|
from cactusbot.packets import MessagePacket
def _split(text, *args, **kwargs):
return [
component.text
for component in
MessagePacket(text).split(*args, **kwargs)
]
def test_split():
assert _split("0 1 2 3") == ['0', '1', '2', '3']
assert _split("0 1 2 3", "2") == ['0 1 ', ' 3']
assert _split("0 1 2 3", maximum=2) == ['0', '1', '2 3']
assert _split("0 1 2 3 ") == ['0', '1', '2', '3']
|
<commit_before><commit_msg>Add simple tests for MessagePacket
Currently only MessagePacket.split(). Needs to be improved.<commit_after>
|
from cactusbot.packets import MessagePacket
def _split(text, *args, **kwargs):
return [
component.text
for component in
MessagePacket(text).split(*args, **kwargs)
]
def test_split():
assert _split("0 1 2 3") == ['0', '1', '2', '3']
assert _split("0 1 2 3", "2") == ['0 1 ', ' 3']
assert _split("0 1 2 3", maximum=2) == ['0', '1', '2 3']
assert _split("0 1 2 3 ") == ['0', '1', '2', '3']
|
Add simple tests for MessagePacket
Currently only MessagePacket.split(). Needs to be improved.from cactusbot.packets import MessagePacket
def _split(text, *args, **kwargs):
return [
component.text
for component in
MessagePacket(text).split(*args, **kwargs)
]
def test_split():
assert _split("0 1 2 3") == ['0', '1', '2', '3']
assert _split("0 1 2 3", "2") == ['0 1 ', ' 3']
assert _split("0 1 2 3", maximum=2) == ['0', '1', '2 3']
assert _split("0 1 2 3 ") == ['0', '1', '2', '3']
|
<commit_before><commit_msg>Add simple tests for MessagePacket
Currently only MessagePacket.split(). Needs to be improved.<commit_after>from cactusbot.packets import MessagePacket
def _split(text, *args, **kwargs):
return [
component.text
for component in
MessagePacket(text).split(*args, **kwargs)
]
def test_split():
assert _split("0 1 2 3") == ['0', '1', '2', '3']
assert _split("0 1 2 3", "2") == ['0 1 ', ' 3']
assert _split("0 1 2 3", maximum=2) == ['0', '1', '2 3']
assert _split("0 1 2 3 ") == ['0', '1', '2', '3']
|
|
cd121a2466887999062e4e674998af971cd416e2
|
check-wayback-machine.py
|
check-wayback-machine.py
|
#!/usr/bin/env python3
from datetime import datetime, timezone, timedelta
import json
import re
import sys
import traceback
import feeds
import util
import web_cache
BLOG_POSTS = json.loads(util.get_file_text("blog.json"))
for post in BLOG_POSTS:
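# assumes at most 200 comments are displayed per page, so round up to get the number of comment pages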
page_count = (len(post["comments"]) + 199) // 200
print("DEBUG:", post["url"], len(post["comments"]), page_count)
for page in range(1, page_count + 1):
url = post["url"] if page == 1 else ("%s?commentPage=%d" % (post["url"], page))
print("DEBUG:", url)
obj = json.loads(web_cache.get("https://archive.org/wayback/available?url=" + url).decode("utf8"))
try:
snap = obj["archived_snapshots"]["closest"]
assert snap["available"] == True
assert snap["status"] == "200"
ts = re.match(r"^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)$", snap["timestamp"])
assert ts
m = re.match(r"^http://web\.archive\.org/web/(\d+)/https?:(//.*)$", snap["url"])
if not m:
print(snap["url"])
assert False
assert m.group(1) == snap["timestamp"]
assert m.group(2) == re.sub(r"^https://", "//", url)
comment_latest = feeds.parse_timestamp(post["comments"][-1]["updated"])
archive_latest = datetime(*[int(ts.group(i)) for i in range(1, 7)], tzinfo=timezone.utc)
if archive_latest - comment_latest < timedelta(days=3):
print("WARNING: archive is recent:", (archive_latest - comment_latest))
except:
sys.stdout.write("WARNING: EXCEPTION RAISED: ")
traceback.print_exc(file=sys.stdout)
|
Add script to verify that all pages(+comments) are in the Internet Archive
|
Add script to verify that all pages(+comments) are in the Internet Archive
|
Python
|
mit
|
squirrel2038/thearchdruidreport-archive,squirrel2038/thearchdruidreport-archive,squirrel2038/thearchdruidreport-archive
|
Add script to verify that all pages(+comments) are in the Internet Archive
|
#!/usr/bin/env python3
from datetime import datetime, timezone, timedelta
import json
import re
import sys
import traceback
import feeds
import util
import web_cache
BLOG_POSTS = json.loads(util.get_file_text("blog.json"))
for post in BLOG_POSTS:
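# assumes at most 200 comments are displayed per page, so round up to get the number of comment pages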
page_count = (len(post["comments"]) + 199) // 200
print("DEBUG:", post["url"], len(post["comments"]), page_count)
for page in range(1, page_count + 1):
url = post["url"] if page == 1 else ("%s?commentPage=%d" % (post["url"], page))
print("DEBUG:", url)
obj = json.loads(web_cache.get("https://archive.org/wayback/available?url=" + url).decode("utf8"))
try:
snap = obj["archived_snapshots"]["closest"]
assert snap["available"] == True
assert snap["status"] == "200"
ts = re.match(r"^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)$", snap["timestamp"])
assert ts
m = re.match(r"^http://web\.archive\.org/web/(\d+)/https?:(//.*)$", snap["url"])
if not m:
print(snap["url"])
assert False
assert m.group(1) == snap["timestamp"]
assert m.group(2) == re.sub(r"^https://", "//", url)
comment_latest = feeds.parse_timestamp(post["comments"][-1]["updated"])
archive_latest = datetime(*[int(ts.group(i)) for i in range(1, 7)], tzinfo=timezone.utc)
if archive_latest - comment_latest < timedelta(days=3):
print("WARNING: archive is recent:", (archive_latest - comment_latest))
except:
sys.stdout.write("WARNING: EXCEPTION RAISED: ")
traceback.print_exc(file=sys.stdout)
|
<commit_before><commit_msg>Add script to verify that all pages(+comments) are in the Internet Archive<commit_after>
|
#!/usr/bin/env python3
from datetime import datetime, timezone, timedelta
import json
import re
import sys
import traceback
import feeds
import util
import web_cache
BLOG_POSTS = json.loads(util.get_file_text("blog.json"))
for post in BLOG_POSTS:
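# assumes at most 200 comments are displayed per page, so round up to get the number of comment pages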
page_count = (len(post["comments"]) + 199) // 200
print("DEBUG:", post["url"], len(post["comments"]), page_count)
for page in range(1, page_count + 1):
url = post["url"] if page == 1 else ("%s?commentPage=%d" % (post["url"], page))
print("DEBUG:", url)
obj = json.loads(web_cache.get("https://archive.org/wayback/available?url=" + url).decode("utf8"))
try:
snap = obj["archived_snapshots"]["closest"]
assert snap["available"] == True
assert snap["status"] == "200"
ts = re.match(r"^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)$", snap["timestamp"])
assert ts
m = re.match(r"^http://web\.archive\.org/web/(\d+)/https?:(//.*)$", snap["url"])
if not m:
print(snap["url"])
assert False
assert m.group(1) == snap["timestamp"]
assert m.group(2) == re.sub(r"^https://", "//", url)
comment_latest = feeds.parse_timestamp(post["comments"][-1]["updated"])
archive_latest = datetime(*[int(ts.group(i)) for i in range(1, 7)], tzinfo=timezone.utc)
if archive_latest - comment_latest < timedelta(days=3):
print("WARNING: archive is recent:", (archive_latest - comment_latest))
except:
sys.stdout.write("WARNING: EXCEPTION RAISED: ")
traceback.print_exc(file=sys.stdout)
|
Add script to verify that all pages(+comments) are in the Internet Archive#!/usr/bin/env python3
from datetime import datetime, timezone, timedelta
import json
import re
import sys
import traceback
import feeds
import util
import web_cache
BLOG_POSTS = json.loads(util.get_file_text("blog.json"))
for post in BLOG_POSTS:
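# assumes at most 200 comments are displayed per page, so round up to get the number of comment pages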
page_count = (len(post["comments"]) + 199) // 200
print("DEBUG:", post["url"], len(post["comments"]), page_count)
for page in range(1, page_count + 1):
url = post["url"] if page == 1 else ("%s?commentPage=%d" % (post["url"], page))
print("DEBUG:", url)
obj = json.loads(web_cache.get("https://archive.org/wayback/available?url=" + url).decode("utf8"))
try:
snap = obj["archived_snapshots"]["closest"]
assert snap["available"] == True
assert snap["status"] == "200"
ts = re.match(r"^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)$", snap["timestamp"])
assert ts
m = re.match(r"^http://web\.archive\.org/web/(\d+)/https?:(//.*)$", snap["url"])
if not m:
print(snap["url"])
assert False
assert m.group(1) == snap["timestamp"]
assert m.group(2) == re.sub(r"^https://", "//", url)
comment_latest = feeds.parse_timestamp(post["comments"][-1]["updated"])
archive_latest = datetime(*[int(ts.group(i)) for i in range(1, 7)], tzinfo=timezone.utc)
if archive_latest - comment_latest < timedelta(days=3):
print("WARNING: archive is recent:", (archive_latest - comment_latest))
except:
sys.stdout.write("WARNING: EXCEPTION RAISED: ")
traceback.print_exc(file=sys.stdout)
|
<commit_before><commit_msg>Add script to verify that all pages(+comments) are in the Internet Archive<commit_after>#!/usr/bin/env python3
from datetime import datetime, timezone, timedelta
import json
import re
import sys
import traceback
import feeds
import util
import web_cache
BLOG_POSTS = json.loads(util.get_file_text("blog.json"))
for post in BLOG_POSTS:
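# assumes at most 200 comments are displayed per page, so round up to get the number of comment pages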
page_count = (len(post["comments"]) + 199) // 200
print("DEBUG:", post["url"], len(post["comments"]), page_count)
for page in range(1, page_count + 1):
url = post["url"] if page == 1 else ("%s?commentPage=%d" % (post["url"], page))
print("DEBUG:", url)
obj = json.loads(web_cache.get("https://archive.org/wayback/available?url=" + url).decode("utf8"))
try:
snap = obj["archived_snapshots"]["closest"]
assert snap["available"] == True
assert snap["status"] == "200"
ts = re.match(r"^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)$", snap["timestamp"])
assert ts
m = re.match(r"^http://web\.archive\.org/web/(\d+)/https?:(//.*)$", snap["url"])
if not m:
print(snap["url"])
assert False
assert m.group(1) == snap["timestamp"]
assert m.group(2) == re.sub(r"^https://", "//", url)
comment_latest = feeds.parse_timestamp(post["comments"][-1]["updated"])
archive_latest = datetime(*[int(ts.group(i)) for i in range(1, 7)], tzinfo=timezone.utc)
if archive_latest - comment_latest < timedelta(days=3):
print("WARNING: archive is recent:", (archive_latest - comment_latest))
except:
sys.stdout.write("WARNING: EXCEPTION RAISED: ")
traceback.print_exc(file=sys.stdout)
|
|
9fa3e7161764d1bc5a812bccc27837c9ddabef23
|
sleep_wake_hourly_pie_chart.py
|
sleep_wake_hourly_pie_chart.py
|
import plotly as py
import plotly.graph_objs as go
import plotly.tools as tools
from datetime import datetime, time, timedelta
from sys import argv
import utils.names as names
from utils.csvparser import parse
from utils.exporter import export
# load data from csv into an OrderedDict
data_file = argv[1]
raw_data = parse(data_file)
dates = list(raw_data.keys())
ndays = (dates[-1] - dates[0]).days + 2
# create the timeline
t0 = datetime.combine(dates[0], time(hour=0))
asleep = [False] * 24 * ndays
def datetime_to_index(dt):
return round((dt - t0).total_seconds() / 3600)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
st = datetime_to_index(rest)
en = datetime_to_index(wake)
asleep[st:en] = [True] * (en - st)
# creating the pie chart
labels = ['Asleep', 'Awake']
values = [[0, 0] for i in range(0, 24)]
traces = []
rows = 4
cols = 6
gridw = 1 / cols
gridh = 1 / rows
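# values[i] tallies [days asleep, days awake] for hour-of-day i across all days in the range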
for i in range(0, 24):
for j in range(0, ndays):
if asleep[i + 24 * j]:
values[i][0] += 1
else:
values[i][1] += 1
name = '{:02}:00'.format(i)
x = i % cols
y = rows - i // cols - 1
traces.append(go.Pie(labels=labels,
values=values[i],
hole=0.5,
textposition='inside',
name=name,
text=name,
domain={'x': [x * gridw, (x + 1) * gridw],
'y': [y * gridh, (y + 1) * gridh]}))
layout = go.Layout(title=names.graph_title('Sleep/Wake Activity for each Hour of the Day', dates))
figure = go.Figure(data=traces, layout=layout)
export(figure, __file__, dates)
|
Add hourly sleep wake pie chart
|
Add hourly sleep wake pie chart
|
Python
|
mit
|
f-jiang/sleep-pattern-grapher
|
Add hourly sleep wake pie chart
|
import plotly as py
import plotly.graph_objs as go
import plotly.tools as tools
from datetime import datetime, time, timedelta
from sys import argv
import utils.names as names
from utils.csvparser import parse
from utils.exporter import export
# load data from csv into an OrderedDict
data_file = argv[1]
raw_data = parse(data_file)
dates = list(raw_data.keys())
ndays = (dates[-1] - dates[0]).days + 2
# create the timeline
t0 = datetime.combine(dates[0], time(hour=0))
asleep = [False] * 24 * ndays
def datetime_to_index(dt):
return round((dt - t0).total_seconds() / 3600)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
st = datetime_to_index(rest)
en = datetime_to_index(wake)
asleep[st:en] = [True] * (en - st)
# creating the pie chart
labels = ['Asleep', 'Awake']
values = [[0, 0] for i in range(0, 24)]
traces = []
rows = 4
cols = 6
gridw = 1 / cols
gridh = 1 / rows
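# values[i] tallies [days asleep, days awake] for hour-of-day i across all days in the range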
for i in range(0, 24):
for j in range(0, ndays):
if asleep[i + 24 * j]:
values[i][0] += 1
else:
values[i][1] += 1
name = '{:02}:00'.format(i)
x = i % cols
y = rows - i // cols - 1
traces.append(go.Pie(labels=labels,
values=values[i],
hole=0.5,
textposition='inside',
name=name,
text=name,
domain={'x': [x * gridw, (x + 1) * gridw],
'y': [y * gridh, (y + 1) * gridh]}))
layout = go.Layout(title=names.graph_title('Sleep/Wake Activity for each Hour of the Day', dates))
figure = go.Figure(data=traces, layout=layout)
export(figure, __file__, dates)
|
<commit_before><commit_msg>Add hourly sleep wake pie chart<commit_after>
|
import plotly as py
import plotly.graph_objs as go
import plotly.tools as tools
from datetime import datetime, time, timedelta
from sys import argv
import utils.names as names
from utils.csvparser import parse
from utils.exporter import export
# load data from csv into an OrderedDict
data_file = argv[1]
raw_data = parse(data_file)
dates = list(raw_data.keys())
ndays = (dates[-1] - dates[0]).days + 2
# create the timeline
t0 = datetime.combine(dates[0], time(hour=0))
asleep = [False] * 24 * ndays
def datetime_to_index(dt):
return round((dt - t0).total_seconds() / 3600)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
st = datetime_to_index(rest)
en = datetime_to_index(wake)
asleep[st:en] = [True] * (en - st)
# creating the pie chart
labels = ['Asleep', 'Awake']
values = [[0, 0] for i in range(0, 24)]
traces = []
rows = 4
cols = 6
gridw = 1 / cols
gridh = 1 / rows
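# values[i] tallies [days asleep, days awake] for hour-of-day i across all days in the range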
for i in range(0, 24):
for j in range(0, ndays):
if asleep[i + 24 * j]:
values[i][0] += 1
else:
values[i][1] += 1
name = '{:02}:00'.format(i)
x = i % cols
y = rows - i // cols - 1
traces.append(go.Pie(labels=labels,
values=values[i],
hole=0.5,
textposition='inside',
name=name,
text=name,
domain={'x': [x * gridw, (x + 1) * gridw],
'y': [y * gridh, (y + 1) * gridh]}))
layout = go.Layout(title=names.graph_title('Sleep/Wake Activity for each Hour of the Day', dates))
figure = go.Figure(data=traces, layout=layout)
export(figure, __file__, dates)
|
Add hourly sleep wake pie chartimport plotly as py
import plotly.graph_objs as go
import plotly.tools as tools
from datetime import datetime, time, timedelta
from sys import argv
import utils.names as names
from utils.csvparser import parse
from utils.exporter import export
# load data from csv into an OrderedDict
data_file = argv[1]
raw_data = parse(data_file)
dates = list(raw_data.keys())
ndays = (dates[-1] - dates[0]).days + 2
# create the timeline
t0 = datetime.combine(dates[0], time(hour=0))
asleep = [False] * 24 * ndays
def datetime_to_index(dt):
return round((dt - t0).total_seconds() / 3600)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
st = datetime_to_index(rest)
en = datetime_to_index(wake)
asleep[st:en] = [True] * (en - st)
# creating the pie chart
labels = ['Asleep', 'Awake']
values = [[0, 0] for i in range(0, 24)]
traces = []
rows = 4
cols = 6
gridw = 1 / cols
gridh = 1 / rows
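# values[i] tallies [days asleep, days awake] for hour-of-day i across all days in the range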
for i in range(0, 24):
for j in range(0, ndays):
if asleep[i + 24 * j]:
values[i][0] += 1
else:
values[i][1] += 1
name = '{:02}:00'.format(i)
x = i % cols
y = rows - i // cols - 1
traces.append(go.Pie(labels=labels,
values=values[i],
hole=0.5,
textposition='inside',
name=name,
text=name,
domain={'x': [x * gridw, (x + 1) * gridw],
'y': [y * gridh, (y + 1) * gridh]}))
layout = go.Layout(title=names.graph_title('Sleep/Wake Activity for each Hour of the Day', dates))
figure = go.Figure(data=traces, layout=layout)
export(figure, __file__, dates)
|
<commit_before><commit_msg>Add hourly sleep wake pie chart<commit_after>import plotly as py
import plotly.graph_objs as go
import plotly.tools as tools
from datetime import datetime, time, timedelta
from sys import argv
import utils.names as names
from utils.csvparser import parse
from utils.exporter import export
# load data from csv into an OrderedDict
data_file = argv[1]
raw_data = parse(data_file)
dates = list(raw_data.keys())
ndays = (dates[-1] - dates[0]).days + 2
# create the timeline
t0 = datetime.combine(dates[0], time(hour=0))
asleep = [False] * 24 * ndays
def datetime_to_index(dt):
return round((dt - t0).total_seconds() / 3600)
for date, rests in raw_data.items():
for r in rests:
rest, wake, is_nap = r
st = datetime_to_index(rest)
en = datetime_to_index(wake)
asleep[st:en] = [True] * (en - st)
# creating the pie chart
labels = ['Asleep', 'Awake']
values = [[0, 0] for i in range(0, 24)]
traces = []
rows = 4
cols = 6
gridw = 1 / cols
gridh = 1 / rows
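# values[i] tallies [days asleep, days awake] for hour-of-day i across all days in the range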
for i in range(0, 24):
for j in range(0, ndays):
if asleep[i + 24 * j]:
values[i][0] += 1
else:
values[i][1] += 1
name = '{:02}:00'.format(i)
x = i % cols
y = rows - i // cols - 1
traces.append(go.Pie(labels=labels,
values=values[i],
hole=0.5,
textposition='inside',
name=name,
text=name,
domain={'x': [x * gridw, (x + 1) * gridw],
'y': [y * gridh, (y + 1) * gridh]}))
layout = go.Layout(title=names.graph_title('Sleep/Wake Activity for each Hour of the Day', dates))
figure = go.Figure(data=traces, layout=layout)
export(figure, __file__, dates)
|
|
488cf1d22504346df0c4d4cebdb27e792d241c8d
|
learning/tests/test_dsbn.py
|
learning/tests/test_dsbn.py
|
import unittest
import numpy as np
import theano
import theano.tensor as T
import testing
from test_rws import RWSLayerTest, RWSTopLayerTest
# Unit Under Test
from learning.dsbn import DSBN
#-----------------------------------------------------------------------------
class TestDSBN(RWSLayerTest, unittest.TestCase):
def setUp(self):
self.n_samples = 10
self.layer = DSBN(
n_X=16,
n_Y=8,
n_hid=12,
)
self.layer.setup()
|
Add unit test for DSBN
|
Add unit test for DSBN
|
Python
|
agpl-3.0
|
lenovor/reweighted-ws,yanweifu/reweighted-ws,skaasj/reweighted-ws,yanweifu/reweighted-ws,jbornschein/y2k,jbornschein/y2k,codeaudit/reweighted-ws,jbornschein/reweighted-ws,skaasj/reweighted-ws,lenovor/reweighted-ws,codeaudit/reweighted-ws,jbornschein/reweighted-ws
|
Add unit test for DSBN
|
import unittest
import numpy as np
import theano
import theano.tensor as T
import testing
from test_rws import RWSLayerTest, RWSTopLayerTest
# Unit Under Test
from learning.dsbn import DSBN
#-----------------------------------------------------------------------------
class TestDSBN(RWSLayerTest, unittest.TestCase):
def setUp(self):
self.n_samples = 10
self.layer = DSBN(
n_X=16,
n_Y=8,
n_hid=12,
)
self.layer.setup()
|
<commit_before><commit_msg>Add unit test for DSBN<commit_after>
|
import unittest
import numpy as np
import theano
import theano.tensor as T
import testing
from test_rws import RWSLayerTest, RWSTopLayerTest
# Unit Under Test
from learning.dsbn import DSBN
#-----------------------------------------------------------------------------
class TestDSBN(RWSLayerTest, unittest.TestCase):
def setUp(self):
self.n_samples = 10
self.layer = DSBN(
n_X=16,
n_Y=8,
n_hid=12,
)
self.layer.setup()
|
Add unit test for DSBNimport unittest
import numpy as np
import theano
import theano.tensor as T
import testing
from test_rws import RWSLayerTest, RWSTopLayerTest
# Unit Under Test
from learning.dsbn import DSBN
#-----------------------------------------------------------------------------
class TestDSBN(RWSLayerTest, unittest.TestCase):
def setUp(self):
self.n_samples = 10
self.layer = DSBN(
n_X=16,
n_Y=8,
n_hid=12,
)
self.layer.setup()
|
<commit_before><commit_msg>Add unit test for DSBN<commit_after>import unittest
import numpy as np
import theano
import theano.tensor as T
import testing
from test_rws import RWSLayerTest, RWSTopLayerTest
# Unit Under Test
from learning.dsbn import DSBN
#-----------------------------------------------------------------------------
class TestDSBN(RWSLayerTest, unittest.TestCase):
def setUp(self):
self.n_samples = 10
self.layer = DSBN(
n_X=16,
n_Y=8,
n_hid=12,
)
self.layer.setup()
|
|
09011ddc407ec5d9ff0535b043f9630fa76d2dfc
|
ELiDE/ELiDE/game.py
|
ELiDE/ELiDE/game.py
|
import os
from importlib import import_module
from kivy.properties import (
AliasProperty,
ObjectProperty
)
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import ScreenManager, Screen
from LiSE.engine import Engine
import LiSE.proxy
class GameScreen(Screen):
switch_screen = ObjectProperty()
engine = ObjectProperty()
shutdown = ObjectProperty()
class Screens(Widget):
app = ObjectProperty()
def add_widget(self, wid, index=0):
wid.engine = self.app.engine
wid.switch_screen = self.app.screen_manager.setter('screen')
wid.shutdown = self.app.stop
super().add_widget(wid, index)
class GameApp(App):
modules = []
engine = ObjectProperty()
worlddb = AliasProperty(
lambda self: self.name + 'World.db' if self.name else 'LiSEWorld.db',
lambda self, v: None
)
codedb = AliasProperty(
lambda self: self.name + 'Code.db' if self.name else 'LiSECode.db',
lambda self, v: None
)
screens = ObjectProperty()
def build(self):
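# First-run bootstrap: if either database is missing, build it with a temporary Engine, install the configured modules, run the game's __init__ function, then start the engine through the process manager.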
have_world = have_code = False
try:
os.stat(self.worlddb)
have_world = True
except FileNotFoundError:
pass
try:
os.stat(self.codedb)
have_code = True
except FileNotFoundError:
pass
if not (have_world and have_code):
engine = Engine(self.worlddb, self.codedb)
if not have_code:
for module in self.modules:
import_module(module).install(engine)
if not have_world:
engine.function['__init__'](engine)
engine.close()
self.procman = LiSE.proxy.EngineProcessManager()
self.engine = self.procman.start(self.worlddb, self.codedb)
self.screen_manager = ScreenManager()
self.screens = Screens(app=self)
self.screens.bind(children=self.pull_screens)
self.pull_screens()
return self.screen_manager
def pull_screens(self, *args):
for screen in reversed(self.screens.children):
print('pulling screen ' + screen.name)
self.screens.remove_widget(screen)
self.screen_manager.add_widget(screen)
def on_pause(self):
"""Sync the database with the current state of the game."""
self.engine.commit()
self.config.write()
def on_stop(self, *largs):
"""Sync the database, wrap up the game, and halt."""
self.procman.shutdown()
self.config.write()
|
Implement App and Screen subclasses for developer's convenience
|
Implement App and Screen subclasses for developer's convenience
|
Python
|
agpl-3.0
|
LogicalDash/LiSE,LogicalDash/LiSE
|
Implement App and Screen subclasses for developer's convenience
|
import os
from importlib import import_module
from kivy.properties import (
AliasProperty,
ObjectProperty
)
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import ScreenManager, Screen
from LiSE.engine import Engine
import LiSE.proxy
class GameScreen(Screen):
switch_screen = ObjectProperty()
engine = ObjectProperty()
shutdown = ObjectProperty()
class Screens(Widget):
app = ObjectProperty()
def add_widget(self, wid, index=0):
wid.engine = self.app.engine
wid.switch_screen = self.app.screen_manager.setter('screen')
wid.shutdown = self.app.stop
super().add_widget(wid, index)
class GameApp(App):
modules = []
engine = ObjectProperty()
worlddb = AliasProperty(
lambda self: self.name + 'World.db' if self.name else 'LiSEWorld.db',
lambda self, v: None
)
codedb = AliasProperty(
lambda self: self.name + 'Code.db' if self.name else 'LiSECode.db',
lambda self, v: None
)
screens = ObjectProperty()
def build(self):
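# First-run bootstrap: if either database is missing, build it with a temporary Engine, install the configured modules, run the game's __init__ function, then start the engine through the process manager.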
have_world = have_code = False
try:
os.stat(self.worlddb)
have_world = True
except FileNotFoundError:
pass
try:
os.stat(self.codedb)
have_code = True
except FileNotFoundError:
pass
if not (have_world and have_code):
engine = Engine(self.worlddb, self.codedb)
if not have_code:
for module in self.modules:
import_module(module).install(engine)
if not have_world:
engine.function['__init__'](engine)
engine.close()
self.procman = LiSE.proxy.EngineProcessManager()
self.engine = self.procman.start(self.worlddb, self.codedb)
self.screen_manager = ScreenManager()
self.screens = Screens(app=self)
self.screens.bind(children=self.pull_screens)
self.pull_screens()
return self.screen_manager
def pull_screens(self, *args):
for screen in reversed(self.screens.children):
print('pulling screen ' + screen.name)
self.screens.remove_widget(screen)
self.screen_manager.add_widget(screen)
def on_pause(self):
"""Sync the database with the current state of the game."""
self.engine.commit()
self.config.write()
def on_stop(self, *largs):
"""Sync the database, wrap up the game, and halt."""
self.procman.shutdown()
self.config.write()
|
<commit_before><commit_msg>Implement App and Screen subclasses for developer's convenience<commit_after>
|
import os
from importlib import import_module
from kivy.properties import (
AliasProperty,
ObjectProperty
)
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import ScreenManager, Screen
from LiSE.engine import Engine
import LiSE.proxy
class GameScreen(Screen):
switch_screen = ObjectProperty()
engine = ObjectProperty()
shutdown = ObjectProperty()
class Screens(Widget):
app = ObjectProperty()
def add_widget(self, wid, index=0):
wid.engine = self.app.engine
wid.switch_screen = self.app.screen_manager.setter('screen')
wid.shutdown = self.app.stop
super().add_widget(wid, index)
class GameApp(App):
modules = []
engine = ObjectProperty()
worlddb = AliasProperty(
lambda self: self.name + 'World.db' if self.name else 'LiSEWorld.db',
lambda self, v: None
)
codedb = AliasProperty(
lambda self: self.name + 'Code.db' if self.name else 'LiSECode.db',
lambda self, v: None
)
screens = ObjectProperty()
def build(self):
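# First-run bootstrap: if either database is missing, build it with a temporary Engine, install the configured modules, run the game's __init__ function, then start the engine through the process manager.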
have_world = have_code = False
try:
os.stat(self.worlddb)
have_world = True
except FileNotFoundError:
pass
try:
os.stat(self.codedb)
have_code = True
except FileNotFoundError:
pass
if not (have_world and have_code):
engine = Engine(self.worlddb, self.codedb)
if not have_code:
for module in self.modules:
import_module(module).install(engine)
if not have_world:
engine.function['__init__'](engine)
engine.close()
self.procman = LiSE.proxy.EngineProcessManager()
self.engine = self.procman.start(self.worlddb, self.codedb)
self.screen_manager = ScreenManager()
self.screens = Screens(app=self)
self.screens.bind(children=self.pull_screens)
self.pull_screens()
return self.screen_manager
def pull_screens(self, *args):
for screen in reversed(self.screens.children):
print('pulling screen ' + screen.name)
self.screens.remove_widget(screen)
self.screen_manager.add_widget(screen)
def on_pause(self):
"""Sync the database with the current state of the game."""
self.engine.commit()
self.config.write()
def on_stop(self, *largs):
"""Sync the database, wrap up the game, and halt."""
self.procman.shutdown()
self.config.write()
|
Implement App and Screen subclasses for developer's convenienceimport os
from importlib import import_module
from kivy.properties import (
AliasProperty,
ObjectProperty
)
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import ScreenManager, Screen
from LiSE.engine import Engine
import LiSE.proxy
class GameScreen(Screen):
switch_screen = ObjectProperty()
engine = ObjectProperty()
shutdown = ObjectProperty()
class Screens(Widget):
app = ObjectProperty()
def add_widget(self, wid, index=0):
wid.engine = self.app.engine
wid.switch_screen = self.app.screen_manager.setter('screen')
wid.shutdown = self.app.stop
super().add_widget(wid, index)
class GameApp(App):
modules = []
engine = ObjectProperty()
worlddb = AliasProperty(
lambda self: self.name + 'World.db' if self.name else 'LiSEWorld.db',
lambda self, v: None
)
codedb = AliasProperty(
lambda self: self.name + 'Code.db' if self.name else 'LiSECode.db',
lambda self, v: None
)
screens = ObjectProperty()
def build(self):
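# First-run bootstrap: if either database is missing, build it with a temporary Engine, install the configured modules, run the game's __init__ function, then start the engine through the process manager.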
have_world = have_code = False
try:
os.stat(self.worlddb)
have_world = True
except FileNotFoundError:
pass
try:
os.stat(self.codedb)
have_code = True
except FileNotFoundError:
pass
if not (have_world and have_code):
engine = Engine(self.worlddb, self.codedb)
if not have_code:
for module in self.modules:
import_module(module).install(engine)
if not have_world:
engine.function['__init__'](engine)
engine.close()
self.procman = LiSE.proxy.EngineProcessManager()
self.engine = self.procman.start(self.worlddb, self.codedb)
self.screen_manager = ScreenManager()
self.screens = Screens(app=self)
self.screens.bind(children=self.pull_screens)
self.pull_screens()
return self.screen_manager
def pull_screens(self, *args):
for screen in reversed(self.screens.children):
print('pulling screen ' + screen.name)
self.screens.remove_widget(screen)
self.screen_manager.add_widget(screen)
def on_pause(self):
"""Sync the database with the current state of the game."""
self.engine.commit()
self.config.write()
def on_stop(self, *largs):
"""Sync the database, wrap up the game, and halt."""
self.procman.shutdown()
self.config.write()
|
<commit_before><commit_msg>Implement App and Screen subclasses for developer's convenience<commit_after>import os
from importlib import import_module
from kivy.properties import (
AliasProperty,
ObjectProperty
)
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.screenmanager import ScreenManager, Screen
from LiSE.engine import Engine
import LiSE.proxy
class GameScreen(Screen):
switch_screen = ObjectProperty()
engine = ObjectProperty()
shutdown = ObjectProperty()
class Screens(Widget):
app = ObjectProperty()
def add_widget(self, wid, index=0):
wid.engine = self.app.engine
wid.switch_screen = self.app.screen_manager.setter('screen')
wid.shutdown = self.app.stop
super().add_widget(wid, index)
class GameApp(App):
modules = []
engine = ObjectProperty()
worlddb = AliasProperty(
lambda self: self.name + 'World.db' if self.name else 'LiSEWorld.db',
lambda self, v: None
)
codedb = AliasProperty(
lambda self: self.name + 'Code.db' if self.name else 'LiSECode.db',
lambda self, v: None
)
screens = ObjectProperty()
def build(self):
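# First-run bootstrap: if either database is missing, build it with a temporary Engine, install the configured modules, run the game's __init__ function, then start the engine through the process manager.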
have_world = have_code = False
try:
os.stat(self.worlddb)
have_world = True
except FileNotFoundError:
pass
try:
os.stat(self.codedb)
have_code = True
except FileNotFoundError:
pass
if not (have_world and have_code):
engine = Engine(self.worlddb, self.codedb)
if not have_code:
for module in self.modules:
import_module(module).install(engine)
if not have_world:
engine.function['__init__'](engine)
engine.close()
self.procman = LiSE.proxy.EngineProcessManager()
self.engine = self.procman.start(self.worlddb, self.codedb)
self.screen_manager = ScreenManager()
self.screens = Screens(app=self)
self.screens.bind(children=self.pull_screens)
self.pull_screens()
return self.screen_manager
def pull_screens(self, *args):
for screen in reversed(self.screens.children):
print('pulling screen ' + screen.name)
self.screens.remove_widget(screen)
self.screen_manager.add_widget(screen)
def on_pause(self):
"""Sync the database with the current state of the game."""
self.engine.commit()
self.config.write()
def on_stop(self, *largs):
"""Sync the database, wrap up the game, and halt."""
self.procman.shutdown()
self.config.write()
|
|
100e0a406551707e92826c2374f9c135613f6858
|
bin/index_to_contig.py
|
bin/index_to_contig.py
|
"""
Given a tuple of index1, index2, correlation and a tuple of index, contig
rewrite the correlation to be contig1, contig2, correlation
"""
import os
import sys
import argparse
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-i', help='index file', required=True)
parser.add_argument('-c', help='correlation file', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
i2c = {}
with open(args.i, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n")
sys.exit(1)
i2c[p[0]] = p[1]  # map index -> contig name
with open(args.c, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n")
sys.exit(1)
if p[0] not in i2c:
sys.stderr.write(f"{p[0]} not found in the index file\n")
sys.exit(1)
if p[1] not in i2c:
sys.stderr.write(f"{p[1]} not found in the index file\n")
sys.exit(1)
print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}")
|
Convert an index to a contig
|
Convert an index to a contig
|
Python
|
mit
|
linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab
|
Convert an index to a contig
|
"""
Given a tuple of index1, index2, correlation and a tuple of index, contig
rewrite the correlation to be contig1, contig2, correlation
"""
import os
import sys
import argparse
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-i', help='index file', required=True)
parser.add_argument('-c', help='correlation file', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
i2c = {}
with open(args.i, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n")
sys.exit(1)
i2c[p[0]] = p[1]  # map index -> contig name
with open(args.c, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n")
sys.exit(1)
if p[0] not in i2c:
sys.stderr.write(f"{p[0]} not found in the index file\n")
sys.exit(1)
if p[1] not in i2c:
sys.stderr.write(f"{p[1]} not found in the index file\n")
sys.exit(1)
print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}")
|
<commit_before><commit_msg>Convert an index to a contig<commit_after>
|
"""
Given a tuple of index1, index2, correlation and a tuple of index, contig
rewrite the correlation to be contig1, contig2, correlation
"""
import os
import sys
import argparse
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-i', help='index file', required=True)
parser.add_argument('-c', help='correlation file', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
i2c = {}
with open(args.i, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n")
sys.exit(1)
i2c[p[0]] = p[1]  # map index -> contig name
with open(args.c, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n")
sys.exit(1)
if p[0] not in i2c:
sys.stderr.write(f"{p[0]} not found in the index file\n")
sys.exit(1)
if p[1] not in i2c:
sys.stderr.write(f"{p[1]} not found in the index file\n")
sys.exit(1)
print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}")
|
Convert an index to a contig"""
Given a tuple of index1, index2, correlation and a tuple of index, contig
rewrite the correlation to be contig1, contig2, correlation
"""
import os
import sys
import argparse
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-i', help='index file', required=True)
parser.add_argument('-c', help='correlation file', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
i2c = {}
with open(args.i, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n")
sys.exit(1)
i2c[p[0]] = p[1]  # map index -> contig name
with open(args.c, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n")
sys.exit(1)
if p[0] not in i2c:
sys.stderr.write(f"{p[0]} not found in the index file\n")
sys.exit(1)
if p[1] not in i2c:
sys.stderr.write(f"{p[1]} not found in the index file\n")
sys.exit(1)
print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}")
|
<commit_before><commit_msg>Convert an index to a contig<commit_after>"""
Given a tuple of index1, index2, correlation and a tuple of index, contig
rewrite the correlation to be contig1, contig2, correlation
"""
import os
import sys
import argparse
__author__ = 'Rob Edwards'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-i', help='index file', required=True)
parser.add_argument('-c', help='correlation file', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
i2c = {}
with open(args.i, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n")
sys.exit(1)
i2c[p[0]] = p[1]  # map index -> contig name
with open(args.c, 'r') as fin:
for l in fin:
if ',' in l:
p = l.strip().split(',')
elif "\t" in l:
p = l.strip().split("\t")
else:
sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n")
sys.exit(1)
if p[0] not in i2c:
sys.stderr.write(f"{p[0]} not found in the index file\n")
sys.exit(1)
if p[1] not in i2c:
sys.stderr.write(f"{p[1]} not found in the index file\n")
sys.exit(1)
print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}")
|
|
733d24512b40bc5737704f715cea792fa5a702f0
|
script/covall.py
|
script/covall.py
|
#!/usr/bin/env python3
"""
Parse a coverage.info file,
and report success if all files have 100% test coverage.
"""
import sys
if len(sys.argv) != 2:
sys.exit("Usage: covall.py <file>")
covinfo = sys.argv[1]
COV = dict()
with open(covinfo) as fin:
for line in fin:
if line.startswith("SF:"):
fname = line[3:-1]
elif line.startswith("FNF:"):
fnf = int(line[4:-1])
elif line.startswith("FNH:"):
fnh = int(line[4:-1])
elif line.startswith("LF:"):
lf = int(line[3:-1])
elif line.startswith("LH:"):
lh = int(line[3:-1])
elif line.startswith("BRF:"):
brf = int(line[4:-1])
elif line.startswith("BRH:"):
brh = int(line[4:-1])
elif line.startswith("end_of_record"):
COV[fname] = (fnf, fnh, lf, lh, brf, brh)
status = 0
for fname, entry in COV.items():
fnf, fnh, lf, lh, brf, brh = entry
if fnf != fnh or lf != lh:
print("Incomplete:", fname)
status = 1
sys.exit(status)
|
Add script to test for 100% coverage
|
Add script to test for 100% coverage
|
Python
|
apache-2.0
|
cjdrake/boolexpr,cjdrake/boolexpr
|
Add script to test for 100% coverage
|
#!/usr/bin/env python3
"""
Parse a coverage.info file,
and report success if all files have 100% test coverage.
"""
import sys
if len(sys.argv) != 2:
sys.exit("Usage: covall.py <file>")
covinfo = sys.argv[1]
COV = dict()
with open(covinfo) as fin:
for line in fin:
if line.startswith("SF:"):
fname = line[3:-1]
elif line.startswith("FNF:"):
fnf = int(line[4:-1])
elif line.startswith("FNH:"):
fnh = int(line[4:-1])
elif line.startswith("LF:"):
lf = int(line[3:-1])
elif line.startswith("LH:"):
lh = int(line[3:-1])
elif line.startswith("BRF:"):
brf = int(line[4:-1])
elif line.startswith("BRH:"):
brh = int(line[4:-1])
elif line.startswith("end_of_record"):
COV[fname] = (fnf, fnh, lf, lh, brf, brh)
status = 0
for fname, entry in COV.items():
fnf, fnh, lf, lh, brf, brh = entry
if fnf != fnh or lf != lh:
print("Incomplete:", fname)
status = 1
sys.exit(status)
|
<commit_before><commit_msg>Add script to test for 100% coverage<commit_after>
|
#!/usr/bin/env python3
"""
Parse a coverage.info file,
and report success if all files have 100% test coverage.
"""
import sys
if len(sys.argv) != 2:
sys.exit("Usage: covall.py <file>")
covinfo = sys.argv[1]
COV = dict()
with open(covinfo) as fin:
for line in fin:
if line.startswith("SF:"):
fname = line[3:-1]
elif line.startswith("FNF:"):
fnf = int(line[4:-1])
elif line.startswith("FNH:"):
fnh = int(line[4:-1])
elif line.startswith("LF:"):
lf = int(line[3:-1])
elif line.startswith("LH:"):
lh = int(line[3:-1])
elif line.startswith("BRF:"):
brf = int(line[4:-1])
elif line.startswith("BRH:"):
brh = int(line[4:-1])
elif line.startswith("end_of_record"):
COV[fname] = (fnf, fnh, lf, lh, brf, brh)
status = 0
for fname, entry in COV.items():
fnf, fnh, lf, lh, brf, brh = entry
if fnf != fnh or lf != lh:
print("Incomplete:", fname)
status = 1
sys.exit(status)
|
Add script to test for 100% coverage#!/usr/bin/env python3
"""
Parse a coverage.info file,
and report success if all files have 100% test coverage.
"""
import sys
if len(sys.argv) != 2:
sys.exit("Usage: covall.py <file>")
covinfo = sys.argv[1]
COV = dict()
with open(covinfo) as fin:
for line in fin:
if line.startswith("SF:"):
fname = line[3:-1]
elif line.startswith("FNF:"):
fnf = int(line[4:-1])
elif line.startswith("FNH:"):
fnh = int(line[4:-1])
elif line.startswith("LF:"):
lf = int(line[3:-1])
elif line.startswith("LH:"):
lh = int(line[3:-1])
elif line.startswith("BRF:"):
brf = int(line[4:-1])
elif line.startswith("BRH:"):
brh = int(line[4:-1])
elif line.startswith("end_of_record"):
COV[fname] = (fnf, fnh, lf, lh, brf, brh)
status = 0
for fname, entry in COV.items():
fnf, fnh, lf, lh, brf, brh = entry
if fnf != fnh or lf != lh:
print("Incomplete:", fname)
status = 1
sys.exit(status)
|
<commit_before><commit_msg>Add script to test for 100% coverage<commit_after>#!/usr/bin/env python3
"""
Parse a coverage.info file,
and report success if all files have 100% test coverage.
"""
import sys
if len(sys.argv) != 2:
sys.exit("Usage: covall.py <file>")
covinfo = sys.argv[1]
COV = dict()
with open(covinfo) as fin:
for line in fin:
if line.startswith("SF:"):
fname = line[3:-1]
elif line.startswith("FNF:"):
fnf = int(line[4:-1])
elif line.startswith("FNH:"):
fnh = int(line[4:-1])
elif line.startswith("LF:"):
lf = int(line[3:-1])
elif line.startswith("LH:"):
lh = int(line[3:-1])
elif line.startswith("BRF:"):
brf = int(line[4:-1])
elif line.startswith("BRH:"):
brh = int(line[4:-1])
elif line.startswith("end_of_record"):
COV[fname] = (fnf, fnh, lf, lh, brf, brh)
status = 0
for fname, entry in COV.items():
fnf, fnh, lf, lh, brf, brh = entry
if fnf != fnh or lf != lh:
print("Incomplete:", fname)
status = 1
sys.exit(status)
|
|
5622b1c0ad7708b1d3df72da9caa888d111f6156
|
greatbigcrane/job_queue/management/commands/job_server.py
|
greatbigcrane/job_queue/management/commands/job_server.py
|
import Queue
import zmq
addr = 'tcp://127.0.0.1:5555'
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REP) # Receives job requests from application server
socket.bind(addr)
jobs = Queue.Queue()
print("Job Server Is Running")
while True:
request = socket.recv()
if request == "GET":
# Request came from a worker send it the next available job
try:
job = jobs.get_nowait()
socket.send(job)
except Queue.Empty, e:
socket.send("EMPTY")
else:
# Request came from the django app, queue the job
jobs.put(request)
socket.send("ACK")
|
Move the job server into a django management command.
|
Move the job server into a django management command.
|
Python
|
apache-2.0
|
pnomolos/greatbigcrane,pnomolos/greatbigcrane
|
Move the job server into a django management command.
|
import Queue
import zmq
addr = 'tcp://127.0.0.1:5555'
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REP) # Receives job requests from application server
socket.bind(addr)
jobs = Queue.Queue()
print("Job Server Is Running")
while True:
request = socket.recv()
if request == "GET":
# Request came from a worker send it the next available job
try:
job = jobs.get_nowait()
socket.send(job)
except Queue.Empty, e:
socket.send("EMPTY")
else:
# Request came from the django app, queue the job
jobs.put(request)
socket.send("ACK")
|
<commit_before><commit_msg>Move the job server into a django management command.<commit_after>
|
import Queue
import zmq
addr = 'tcp://127.0.0.1:5555'
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REP) # Receives job requests from application server
socket.bind(addr)
jobs = Queue.Queue()
print("Job Server Is Running")
while True:
request = socket.recv()
if request == "GET":
# Request came from a worker send it the next available job
try:
job = jobs.get_nowait()
socket.send(job)
except Queue.Empty, e:
socket.send("EMPTY")
else:
# Request came from the django app, queue the job
jobs.put(request)
socket.send("ACK")
|
Move the job server into a django management command.import Queue
import zmq
addr = 'tcp://127.0.0.1:5555'
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REP) # Receives job requests from application server
socket.bind(addr)
jobs = Queue.Queue()
print("Job Server Is Running")
while True:
request = socket.recv()
if request == "GET":
# Request came from a worker send it the next available job
try:
job = jobs.get_nowait()
socket.send(job)
except Queue.Empty, e:
socket.send("EMPTY")
else:
# Request came from the django app, queue the job
jobs.put(request)
socket.send("ACK")
|
<commit_before><commit_msg>Move the job server into a django management command.<commit_after>import Queue
import zmq
addr = 'tcp://127.0.0.1:5555'
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle(self, **options):
context = zmq.Context()
socket = context.socket(zmq.REP) # Receives job requests from application server
socket.bind(addr)
jobs = Queue.Queue()
print("Job Server Is Running")
while True:
request = socket.recv()
if request == "GET":
# Request came from a worker send it the next available job
try:
job = jobs.get_nowait()
socket.send(job)
except Queue.Empty, e:
socket.send("EMPTY")
else:
# Request came from the django app, queue the job
jobs.put(request)
socket.send("ACK")
|
|
9539a6874af98e437cc17ba04c7f0bdfd0a81c5c
|
cltk/tokenize/utils.py
|
cltk/tokenize/utils.py
|
""" Tokenization utilities
"""
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>']
__license__ = 'MIT License.'
import pickle
from abc import abstractmethod
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.corpus.latin.readers import latinlibrary
from cltk.tokenize.latin.params import ABBREVIATIONS
class BaseSentenceTokenizerTrainer(object):
""" Train sentence tokenizer
"""
def __init__(self, language=None):
""" Initialize stoplist builder with option for language specific parameters
:type language: str
:param language : text from which to build the stoplist
"""
if language:
self.language = language.lower()
def _tokenizer_setup(self):
self.punctuation = []
self.strict = []
def pickle_sentence_tokenizer(self, filename, tokenizer):
# Dump pickled tokenizer
with open(filename, 'wb') as f:
pickle.dump(tokenizer, f)
def train_sentence_tokenizer(self, text, punctuation=[], strict=[]):
"""
Train sentence tokenizer.
"""
self._tokenizer_setup()
if punctuation:
self.punctuation = punctuation
if strict:
self.strict = strict
# Set punctuation
language_punkt_vars = PunktLanguageVars
language_punkt_vars.sent_end_chars = self.punctuation+self.strict
# Set abbreviations
trainer = PunktTrainer(text, language_punkt_vars)
trainer.INCLUDE_ALL_COLLOCS = True
trainer.INCLUDE_ABBREV_COLLOCS = True
tokenizer = PunktSentenceTokenizer(trainer.get_params())
for abbreviation in ABBREVIATIONS:
tokenizer._params.abbrev_types.add(abbreviation)
return tokenizer
|
Add utilities file for tokenize
|
Add utilities file for tokenize
|
Python
|
mit
|
TylerKirby/cltk,diyclassics/cltk,TylerKirby/cltk,kylepjohnson/cltk,cltk/cltk,D-K-E/cltk
|
Add utilities file for tokenize
|
""" Tokenization utilities
"""
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>']
__license__ = 'MIT License.'
import pickle
from abc import abstractmethod
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.corpus.latin.readers import latinlibrary
from cltk.tokenize.latin.params import ABBREVIATIONS
class BaseSentenceTokenizerTrainer(object):
""" Train sentence tokenizer
"""
def __init__(self, language=None):
""" Initialize stoplist builder with option for language specific parameters
:type language: str
:param language : text from which to build the stoplist
"""
if language:
self.language = language.lower()
def _tokenizer_setup(self):
self.punctuation = []
self.strict = []
def pickle_sentence_tokenizer(self, filename, tokenizer):
# Dump pickled tokenizer
with open(filename, 'wb') as f:
pickle.dump(tokenizer, f)
def train_sentence_tokenizer(self, text, punctuation=[], strict=[]):
"""
Train sentence tokenizer.
"""
self._tokenizer_setup()
if punctuation:
self.punctuation = punctuation
if strict:
self.strict = strict
# Set punctuation
language_punkt_vars = PunktLanguageVars
language_punkt_vars.sent_end_chars = self.punctuation+self.strict
# Set abbreviations
trainer = PunktTrainer(text, language_punkt_vars)
trainer.INCLUDE_ALL_COLLOCS = True
trainer.INCLUDE_ABBREV_COLLOCS = True
tokenizer = PunktSentenceTokenizer(trainer.get_params())
for abbreviation in ABBREVIATIONS:
tokenizer._params.abbrev_types.add(abbreviation)
return tokenizer
|
<commit_before><commit_msg>Add utilities file for tokenize<commit_after>
|
""" Tokenization utilities
"""
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>']
__license__ = 'MIT License.'
import pickle
from abc import abstractmethod
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.corpus.latin.readers import latinlibrary
from cltk.tokenize.latin.params import ABBREVIATIONS
class BaseSentenceTokenizerTrainer(object):
""" Train sentence tokenizer
"""
def __init__(self, language=None):
""" Initialize stoplist builder with option for language specific parameters
:type language: str
:param language : text from which to build the stoplist
"""
if language:
self.language = language.lower()
def _tokenizer_setup(self):
self.punctuation = []
self.strict = []
def pickle_sentence_tokenizer(self, filename, tokenizer):
# Dump pickled tokenizer
with open(filename, 'wb') as f:
pickle.dump(tokenizer, f)
def train_sentence_tokenizer(self, text, punctuation=[], strict=[]):
"""
Train sentence tokenizer.
"""
self._tokenizer_setup()
if punctuation:
self.punctuation = punctuation
if strict:
self.strict = strict
# Set punctuation
language_punkt_vars = PunktLanguageVars
language_punkt_vars.sent_end_chars = self.punctuation+self.strict
# Set abbreviations
trainer = PunktTrainer(text, language_punkt_vars)
trainer.INCLUDE_ALL_COLLOCS = True
trainer.INCLUDE_ABBREV_COLLOCS = True
tokenizer = PunktSentenceTokenizer(trainer.get_params())
for abbreviation in ABBREVIATIONS:
tokenizer._params.abbrev_types.add(abbreviation)
return tokenizer
|
Add utilities file for tokenize""" Tokenization utilities
"""
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>']
__license__ = 'MIT License.'
import pickle
from abc import abstractmethod
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.corpus.latin.readers import latinlibrary
from cltk.tokenize.latin.params import ABBREVIATIONS
class BaseSentenceTokenizerTrainer(object):
""" Train sentence tokenizer
"""
def __init__(self, language=None):
""" Initialize stoplist builder with option for language specific parameters
:type language: str
:param language : text from which to build the stoplist
"""
if language:
self.language = language.lower()
def _tokenizer_setup(self):
self.punctuation = []
self.strict = []
def pickle_sentence_tokenizer(self, filename, tokenizer):
# Dump pickled tokenizer
with open(filename, 'wb') as f:
pickle.dump(tokenizer, f)
def train_sentence_tokenizer(self, text, punctuation=[], strict=[]):
"""
Train sentence tokenizer.
"""
self._tokenizer_setup()
if punctuation:
self.punctuation = punctuation
if strict:
self.strict = strict
# Set punctuation
language_punkt_vars = PunktLanguageVars
language_punkt_vars.sent_end_chars = self.punctuation+self.strict
# Set abbreviations
trainer = PunktTrainer(text, language_punkt_vars)
trainer.INCLUDE_ALL_COLLOCS = True
trainer.INCLUDE_ABBREV_COLLOCS = True
tokenizer = PunktSentenceTokenizer(trainer.get_params())
for abbreviation in ABBREVIATIONS:
tokenizer._params.abbrev_types.add(abbreviation)
return tokenizer
|
<commit_before><commit_msg>Add utilities file for tokenize<commit_after>""" Tokenization utilities
"""
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>']
__license__ = 'MIT License.'
import pickle
from abc import abstractmethod
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.corpus.latin.readers import latinlibrary
from cltk.tokenize.latin.params import ABBREVIATIONS
class BaseSentenceTokenizerTrainer(object):
""" Train sentence tokenizer
"""
def __init__(self, language=None):
""" Initialize stoplist builder with option for language specific parameters
:type language: str
:param language : text from which to build the stoplist
"""
if language:
self.language = language.lower()
def _tokenizer_setup(self):
self.punctuation = []
self.strict = []
def pickle_sentence_tokenizer(self, filename, tokenizer):
# Dump pickled tokenizer
with open(filename, 'wb') as f:
pickle.dump(tokenizer, f)
def train_sentence_tokenizer(self, text, punctuation=[], strict=[]):
"""
Train sentence tokenizer.
"""
self._tokenizer_setup()
if punctuation:
self.punctuation = punctuation
if strict:
self.strict = strict
# Set punctuation
language_punkt_vars = PunktLanguageVars
language_punkt_vars.sent_end_chars = self.punctuation+self.strict
# Set abbreviations
trainer = PunktTrainer(text, language_punkt_vars)
trainer.INCLUDE_ALL_COLLOCS = True
trainer.INCLUDE_ABBREV_COLLOCS = True
tokenizer = PunktSentenceTokenizer(trainer.get_params())
for abbreviation in ABBREVIATIONS:
tokenizer._params.abbrev_types.add(abbreviation)
return tokenizer
|
|
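A rough usage sketch for the trainer above, assuming the module is importable as cltk.tokenize.utils and that a plain-text Latin corpus is available locally; the file names, punctuation choices, and sample sentence are illustrative only, not part of the commit:

from cltk.tokenize.utils import BaseSentenceTokenizerTrainer

trainer = BaseSentenceTokenizerTrainer(language='latin')

with open('latin_corpus.txt') as f:          # any plain-text training corpus
    training_text = f.read()

tokenizer = trainer.train_sentence_tokenizer(
    training_text,
    punctuation=['.', '?', '!'],
    strict=[';', ':'])

# Persist the trained Punkt model for later reuse.
trainer.pickle_sentence_tokenizer('latin_punkt.pickle', tokenizer)

print(tokenizer.tokenize('Gallia est omnis divisa in partes tres. '
                         'Quarum unam incolunt Belgae.'))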
5000a069b0a7ab32eefafc7acef64946450837cb
|
setup.py
|
setup.py
|
from setuptools import setup
setup(name='',
version='0.1',
description='',
url='',
author='',
author_email='',
packages=[''],
install_requires=[
'numpy',
'biopython'
],
zip_safe=False)
|
Test install numpy and biopython for RTD
|
Test install numpy and biopython for RTD
|
Python
|
mit
|
m4rx9/rna-pdb-tools,m4rx9/rna-pdb-tools
|
Test install numpy and biopython for RTD
|
from setuptools import setup
setup(name='',
version='0.1',
description='',
url='',
author='',
author_email='',
packages=[''],
install_requires=[
'numpy',
'biopython'
],
zip_safe=False)
|
<commit_before><commit_msg>Test install numpy and biopython for RTD<commit_after>
|
from setuptools import setup
setup(name='',
version='0.1',
description='',
url='',
author='',
author_email='',
packages=[''],
install_requires=[
'numpy',
'biopython'
],
zip_safe=False)
|
Test install numpy and biopython for RTDfrom setuptools import setup
setup(name='',
version='0.1',
description='',
url='',
author='',
author_email='',
packages=[''],
install_requires=[
'numpy',
'biopython'
],
zip_safe=False)
|
<commit_before><commit_msg>Test install numpy and biopython for RTD<commit_after>from setuptools import setup
setup(name='',
version='0.1',
description='',
url='',
author='',
author_email='',
packages=[''],
install_requires=[
'numpy',
'biopython'
],
zip_safe=False)
|
|
bcbf11a44074d3c027d523e97d274f7838969d65
|
tests.py
|
tests.py
|
#!/usr/bin/python -O
import sqlite3
from parser import SQLITE3_DB_NAME
from random import randint, randrange
def main():
""" Ouputs a random clue (with game ID) from 10 random games for checking. """
sql = sqlite3.connect(SQLITE3_DB_NAME)
# list of random game id numbers
gids = [randint(1, 3790) for i in xrange(10)]
# output format
print "GID".rjust(5), "R -> Clue text -> Answer"
for gid in gids:
rows = sql.execute("select * from clues where game = ?", (gid, ))
rows = rows.fetchall()
# some games were skipped over
if len(rows) > 0:
meta = "#%d" % gid
print meta.rjust(5),
row = randrange(0, len(rows))
print rows[row][2], "->", rows[row][5], "->", rows[row][6]
if __name__ == "__main__":
main()
|
Test to check the db.
|
Test to check the db.
|
Python
|
mit
|
whymarrh/jeopardy-parser,dangoldin/jeopardy-parser,dangoldin/jeopardy-parser
|
Test to check the db.
|
#!/usr/bin/python -O
import sqlite3
from parser import SQLITE3_DB_NAME
from random import randint, randrange
def main():
""" Ouputs a random clue (with game ID) from 10 random games for checking. """
sql = sqlite3.connect(SQLITE3_DB_NAME)
# list of random game id numbers
gids = [randint(1, 3790) for i in xrange(10)]
# output format
print "GID".rjust(5), "R -> Clue text -> Answer"
for gid in gids:
rows = sql.execute("select * from clues where game = ?", (gid, ))
rows = rows.fetchall()
# some games were skipped over
if len(rows) > 0:
meta = "#%d" % gid
print meta.rjust(5),
row = randrange(0, len(rows))
print rows[row][2], "->", rows[row][5], "->", rows[row][6]
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Test to check the db.<commit_after>
|
#!/usr/bin/python -O
import sqlite3
from parser import SQLITE3_DB_NAME
from random import randint, randrange
def main():
""" Ouputs a random clue (with game ID) from 10 random games for checking. """
sql = sqlite3.connect(SQLITE3_DB_NAME)
# list of random game id numbers
gids = [randint(1, 3790) for i in xrange(10)]
# output format
print "GID".rjust(5), "R -> Clue text -> Answer"
for gid in gids:
rows = sql.execute("select * from clues where game = ?", (gid, ))
rows = rows.fetchall()
# some games were skipped over
if len(rows) > 0:
meta = "#%d" % gid
print meta.rjust(5),
row = randrange(0, len(rows))
print rows[row][2], "->", rows[row][5], "->", rows[row][6]
if __name__ == "__main__":
main()
|
Test to check the db.#!/usr/bin/python -O
import sqlite3
from parser import SQLITE3_DB_NAME
from random import randint, randrange
def main():
""" Ouputs a random clue (with game ID) from 10 random games for checking. """
sql = sqlite3.connect(SQLITE3_DB_NAME)
# list of random game id numbers
gids = [randint(1, 3790) for i in xrange(10)]
# output format
print "GID".rjust(5), "R -> Clue text -> Answer"
for gid in gids:
rows = sql.execute("select * from clues where game = ?", (gid, ))
rows = rows.fetchall()
# some games were skipped over
if len(rows) > 0:
meta = "#%d" % gid
print meta.rjust(5),
row = randrange(0, len(rows))
print rows[row][2], "->", rows[row][5], "->", rows[row][6]
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Test to check the db.<commit_after>#!/usr/bin/python -O
import sqlite3
from parser import SQLITE3_DB_NAME
from random import randint, randrange
def main():
""" Ouputs a random clue (with game ID) from 10 random games for checking. """
sql = sqlite3.connect(SQLITE3_DB_NAME)
# list of random game id numbers
gids = [randint(1, 3790) for i in xrange(10)]
# output format
print "GID".rjust(5), "R -> Clue text -> Answer"
for gid in gids:
rows = sql.execute("select * from clues where game = ?", (gid, ))
rows = rows.fetchall()
# some games were skipped over
if len(rows) > 0:
meta = "#%d" % gid
print meta.rjust(5),
row = randrange(0, len(rows))
print rows[row][2], "->", rows[row][5], "->", rows[row][6]
if __name__ == "__main__":
main()
|
|
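Sampling one clue per game can also be pushed into SQLite itself, which avoids fetching every clue for a game just to print one of them. A hedged alternative for the loop body, assuming the same clues table and column order:

row = sql.execute(
    "SELECT * FROM clues WHERE game = ? ORDER BY RANDOM() LIMIT 1",
    (gid,)).fetchone()
if row is not None:                 # some games were skipped over
    print("#{0} {1} -> {2} -> {3}".format(gid, row[2], row[5], row[6]))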
1cd9e2ececc47eddf5089596b35797ec4e7562de
|
test/merge_test.py
|
test/merge_test.py
|
import unittest, pyPdf, sys, os.path
from mock import Mock
SRC = os.path.join(os.path.dirname(__file__), '..', 'src')
sys.path.append(SRC)
import merge
class MockPdfReader:
def __init__(self):
self.pages = [None] * 3
def getNumPages(self):
return len(self.pages)
def getPage(self, page_num): pass
class MockPdfWriter:
def __init__(self):
self.pages = []
def write(self, a_file): pass
def addPage(self, page): self.pages.append(page)
class MergeTest(unittest.TestCase):
def setUp(self):
# Stub the global open method inside the merge module
merge.open = Mock(return_value=True)
self.front_pages = MockPdfReader()
self.back_pages = MockPdfReader()
self.outfile = MockPdfWriter()
merge.PdfFileReader = Mock(side_effect=[self.front_pages, self.back_pages])
merge.PdfFileWriter = Mock(return_value=self.outfile)
def test_merged_file_contains_all_pages(self):
merge.merge('fake_doc1', 'fake_doc2', 'fake_out', True, False)
expected_len = len(self.front_pages.pages) + len(self.back_pages.pages)
self.assertEqual(expected_len, len(self.outfile.pages))
if __name__ == '__main__':
unittest.main()
|
Create test class for merge function
|
Create test class for merge function
|
Python
|
bsd-2-clause
|
mgarriott/PDFMerger
|
Create test class for merge function
|
import unittest, pyPdf, sys, os.path
from mock import Mock
SRC = os.path.join(os.path.dirname(__file__), '..', 'src')
sys.path.append(SRC)
import merge
class MockPdfReader:
def __init__(self):
self.pages = [None] * 3
def getNumPages(self):
return len(self.pages)
def getPage(self, page_num): pass
class MockPdfWriter:
def __init__(self):
self.pages = []
def write(self, a_file): pass
def addPage(self, page): self.pages.append(page)
class MergeTest(unittest.TestCase):
def setUp(self):
# Stub the global open method inside the merge module
merge.open = Mock(return_value=True)
self.front_pages = MockPdfReader()
self.back_pages = MockPdfReader()
self.outfile = MockPdfWriter()
merge.PdfFileReader = Mock(side_effect=[self.front_pages, self.back_pages])
merge.PdfFileWriter = Mock(return_value=self.outfile)
def test_merged_file_contains_all_pages(self):
merge.merge('fake_doc1', 'fake_doc2', 'fake_out', True, False)
expected_len = len(self.front_pages.pages) + len(self.back_pages.pages)
self.assertEqual(expected_len, len(self.outfile.pages))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test class for merge function<commit_after>
|
import unittest, pyPdf, sys, os.path
from mock import Mock
SRC = os.path.join(os.path.dirname(__file__), '..', 'src')
sys.path.append(SRC)
import merge
class MockPdfReader:
def __init__(self):
self.pages = [None] * 3
def getNumPages(self):
return len(self.pages)
def getPage(self, page_num): pass
class MockPdfWriter:
def __init__(self):
self.pages = []
def write(self, a_file): pass
def addPage(self, page): self.pages.append(page)
class MergeTest(unittest.TestCase):
def setUp(self):
# Stub the global open method inside the merge module
merge.open = Mock(return_value=True)
self.front_pages = MockPdfReader()
self.back_pages = MockPdfReader()
self.outfile = MockPdfWriter()
merge.PdfFileReader = Mock(side_effect=[self.front_pages, self.back_pages])
merge.PdfFileWriter = Mock(return_value=self.outfile)
def test_merged_file_contains_all_pages(self):
merge.merge('fake_doc1', 'fake_doc2', 'fake_out', True, False)
expected_len = len(self.front_pages.pages) + len(self.back_pages.pages)
self.assertEqual(expected_len, len(self.outfile.pages))
if __name__ == '__main__':
unittest.main()
|
Create test class for merge functionimport unittest, pyPdf, sys, os.path
from mock import Mock
SRC = os.path.join(os.path.dirname(__file__), '..', 'src')
sys.path.append(SRC)
import merge
class MockPdfReader:
def __init__(self):
self.pages = [None] * 3
def getNumPages(self):
return len(self.pages)
def getPage(self, page_num): pass
class MockPdfWriter:
def __init__(self):
self.pages = []
def write(self, a_file): pass
def addPage(self, page): self.pages.append(page)
class MergeTest(unittest.TestCase):
def setUp(self):
# Stub the global open method inside the merge module
merge.open = Mock(return_value=True)
self.front_pages = MockPdfReader()
self.back_pages = MockPdfReader()
self.outfile = MockPdfWriter()
merge.PdfFileReader = Mock(side_effect=[self.front_pages, self.back_pages])
merge.PdfFileWriter = Mock(return_value=self.outfile)
def test_merged_file_contains_all_pages(self):
merge.merge('fake_doc1', 'fake_doc2', 'fake_out', True, False)
expected_len = len(self.front_pages.pages) + len(self.back_pages.pages)
self.assertEqual(expected_len, len(self.outfile.pages))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test class for merge function<commit_after>import unittest, pyPdf, sys, os.path
from mock import Mock
SRC = os.path.join(os.path.dirname(__file__), '..', 'src')
sys.path.append(SRC)
import merge
class MockPdfReader:
def __init__(self):
self.pages = [None] * 3
def getNumPages(self):
return len(self.pages)
def getPage(self, page_num): pass
class MockPdfWriter:
def __init__(self):
self.pages = []
def write(self, a_file): pass
def addPage(self, page): self.pages.append(page)
class MergeTest(unittest.TestCase):
def setUp(self):
# Stub the global open method inside the merge module
merge.open = Mock(return_value=True)
self.front_pages = MockPdfReader()
self.back_pages = MockPdfReader()
self.outfile = MockPdfWriter()
merge.PdfFileReader = Mock(side_effect=[self.front_pages, self.back_pages])
merge.PdfFileWriter = Mock(return_value=self.outfile)
def test_merged_file_contains_all_pages(self):
merge.merge('fake_doc1', 'fake_doc2', 'fake_out', True, False)
expected_len = len(self.front_pages.pages) + len(self.back_pages.pages)
self.assertEqual(expected_len, len(self.outfile.pages))
if __name__ == '__main__':
unittest.main()
|
|
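The merge module exercised by this test is not part of the commit; the mocks only pin down its observable contract (read the two inputs through PdfFileReader, add every page of both to one PdfFileWriter, write the result). One sketch that would satisfy the page-count assertion is below; the parameter names and the meaning of the two boolean flags are guesses, not the project's actual code:

from pyPdf import PdfFileReader, PdfFileWriter

def merge(front_path, back_path, out_path, interleave=True, reverse_back=False):
    front = PdfFileReader(open(front_path, 'rb'))
    back = PdfFileReader(open(back_path, 'rb'))
    writer = PdfFileWriter()

    back_pages = list(range(back.getNumPages()))
    if reverse_back:
        back_pages.reverse()        # e.g. a rear-fed duplex scan

    if interleave:
        for i in range(max(front.getNumPages(), back.getNumPages())):
            if i < front.getNumPages():
                writer.addPage(front.getPage(i))
            if i < len(back_pages):
                writer.addPage(back.getPage(back_pages[i]))
    else:
        for i in range(front.getNumPages()):
            writer.addPage(front.getPage(i))
        for i in back_pages:
            writer.addPage(back.getPage(i))

    with open(out_path, 'wb') as out_file:
        writer.write(out_file)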
8ed2ef198b5b28f7d4661ea9c50e5076273b6c97
|
CodeFights/alphabeticShift.py
|
CodeFights/alphabeticShift.py
|
#!/usr/local/bin/python
# Code Fights Alternating Sums Problem
def alphabeticShift(inputString):
test = [chr((ord(c) - 96) % 26 + 97) for c in inputString]
return ''.join(test)
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = alphabeticShift(t[0])
if t[1] == res:
print("PASSED: alphabeticShift({}) returned {}"
.format(t[0], res))
else:
print("FAILED: alphabeticShift({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights alphabetic shift problem
|
Solve Code Fights alphabetic shift problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights alphabetic shift problem
|
#!/usr/local/bin/python
# Code Fights Alternating Sums Problem
def alphabeticShift(inputString):
test = [chr((ord(c) - 96) % 26 + 97) for c in inputString]
return ''.join(test)
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = alphabeticShift(t[0])
if t[1] == res:
print("PASSED: alphabeticShift({}) returned {}"
.format(t[0], res))
else:
print("FAILED: alphabeticShift({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights alphabetic shift problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Alternating Sums Problem
def alphabeticShift(inputString):
test = [chr((ord(c) - 96) % 26 + 97) for c in inputString]
return ''.join(test)
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = alphabeticShift(t[0])
if t[1] == res:
print("PASSED: alphabeticShift({}) returned {}"
.format(t[0], res))
else:
print("FAILED: alphabeticShift({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights alphabetic shift problem#!/usr/local/bin/python
# Code Fights Alternating Sums Problem
def alphabeticShift(inputString):
test = [chr((ord(c) - 96) % 26 + 97) for c in inputString]
return ''.join(test)
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = alphabeticShift(t[0])
if t[1] == res:
print("PASSED: alphabeticShift({}) returned {}"
.format(t[0], res))
else:
print("FAILED: alphabeticShift({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights alphabetic shift problem<commit_after>#!/usr/local/bin/python
# Code Fights Alternating Sums Problem
def alphabeticShift(inputString):
test = [chr((ord(c) - 96) % 26 + 97) for c in inputString]
return ''.join(test)
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = alphabeticShift(t[0])
if t[1] == res:
print("PASSED: alphabeticShift({}) returned {}"
.format(t[0], res))
else:
print("FAILED: alphabeticShift({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
|
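The one-liner works because ord('a') is 97: subtracting 96 maps 'a' through 'z' onto 1 through 26, the modulo folds 26 (that is, 'z') back to 0, and adding 97 lands on the following letter, so 'z' wraps around to 'a'. Worked through for the two test cases (the header comment still says "Alternating Sums", apparently left over from another problem):

# 'c' -> ord('c') = 99  -> (99 - 96)  % 26 = 3 -> chr(3 + 97) = 'd'
# 'z' -> ord('z') = 122 -> (122 - 96) % 26 = 0 -> chr(0 + 97) = 'a'
assert alphabeticShift("crazy") == "dsbaz"
assert alphabeticShift("z") == "a"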
a45d3def93e78d9fc26fadf27d83d5a4de44ddc4
|
playserver/trackchecker.py
|
playserver/trackchecker.py
|
from threading import Timer
import track
class TrackChecker():
currentSong = ""
currentArtist = ""
currentAlbum = ""
timer = Timer(interval, checkSong)
listeners = []
@staticmethod
def checkSong():
song = track.getCurrentSong()
artist = track.getCurrentArtist()
album = track.getCurrentAlbum()
if (song != currentSong or artist != currentArtist
or album != currentAlbum):
currentSong = song
currentArtist = artist
currentAlbum = album
_callListeners()
@staticmethod
def registerListener(func):
listeners.append(func)
@staticmethod
def _callListeners():
for listener in listeners:
listener(currentSong, currentArtist, currentAlbum)
|
Add TrackChecker to allow checking of changing tracks
|
Add TrackChecker to allow checking of changing tracks
|
Python
|
mit
|
ollien/playserver,ollien/playserver,ollien/playserver
|
Add TrackChecker to allow checking of changing tracks
|
from threading import Timer
import track
class TrackChecker():
currentSong = ""
currentArtist = ""
currentAlbum = ""
timer = Timer(interval, checkSong)
listeners = []
@staticmethod
def checkSong():
song = track.getCurrentSong()
artist = track.getCurrentArtist()
album = track.getCurrentAlbum()
if (song != currentSong or artist != currentArtist
or album != currentAlbum):
currentSong = song
currentArtist = artist
currentAlbum = album
_callListeners()
@staticmethod
def registerListener(func):
listeners.append(func)
@staticmethod
def _callListeners():
for listener in listeners:
listener(currentSong, currentArtist, currentAlbum)
|
<commit_before><commit_msg>Add TrackChecker to allow checking of changing tracks<commit_after>
|
from threading import Timer
import track
class TrackChecker():
currentSong = ""
currentArtist = ""
currentAlbum = ""
timer = Timer(interval, checkSong)
listeners = []
@staticmethod
def checkSong():
song = track.getCurrentSong()
artist = track.getCurrentArtist()
album = track.getCurrentAlbum()
if (song != currentSong or artist != currentArtist
or album != currentAlbum):
currentSong = song
currentArtist = artist
currentAlbum = album
_callListeners()
@staticmethod
def registerListener(func):
listeners.append(func)
@staticmethod
def _callListeners():
for listener in listeners:
listener(currentSong, currentArtist, currentAlbum)
|
Add TrackChecker to allow checking of changing tracksfrom threading import Timer
import track
class TrackChecker():
currentSong = ""
currentArtist = ""
currentAlbum = ""
timer = Timer(interval, checkSong)
listeners = []
@staticmethod
def checkSong():
song = track.getCurrentSong()
artist = track.getCurrentArtist()
album = track.getCurrentAlbum()
if (song != currentSong or artist != currentArtist
or album != currentAlbum):
currentSong = song
currentArtist = artist
currentAlbum = album
_callListeners()
@staticmethod
def registerListener(func):
listeners.append(func)
@staticmethod
def _callListeners():
for listener in listeners:
listener(currentSong, currentArtist, currentAlbum)
|
<commit_before><commit_msg>Add TrackChecker to allow checking of changing tracks<commit_after>from threading import Timer
import track
class TrackChecker():
currentSong = ""
currentArtist = ""
currentAlbum = ""
timer = Timer(interval, checkSong)
listeners = []
@staticmethod
def checkSong():
song = track.getCurrentSong()
artist = track.getCurrentArtist()
album = track.getCurrentAlbum()
if (song != currentSong or artist != currentArtist
or album != currentAlbum):
currentSong = song
currentArtist = artist
currentAlbum = album
_callListeners()
@staticmethod
def registerListener(func):
listeners.append(func)
@staticmethod
def _callListeners():
for listener in listeners:
listener(currentSong, currentArtist, currentAlbum)
|
|
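As committed, this class will not import: interval is undefined, checkSong is referenced at class-definition time before it exists, and the staticmethods read and rebind bare names such as currentSong rather than the class attributes. A hedged sketch of an instance-based version of the same idea follows; the track module API is taken from the code above, while the polling interval and method names are assumptions:

from threading import Timer

import track   # assumed to expose getCurrentSong/getCurrentArtist/getCurrentAlbum


class TrackChecker(object):
    def __init__(self, interval=5.0):
        self.interval = interval
        self.current_song = ""
        self.current_artist = ""
        self.current_album = ""
        self.listeners = []

    def start(self):
        # threading.Timer fires once, so re-arm it after every check.
        self._timer = Timer(self.interval, self._tick)
        self._timer.daemon = True
        self._timer.start()

    def _tick(self):
        self.check_song()
        self.start()

    def check_song(self):
        song = track.getCurrentSong()
        artist = track.getCurrentArtist()
        album = track.getCurrentAlbum()
        if (song, artist, album) != (self.current_song, self.current_artist, self.current_album):
            self.current_song, self.current_artist, self.current_album = song, artist, album
            self._call_listeners()

    def register_listener(self, func):
        self.listeners.append(func)

    def _call_listeners(self):
        for listener in self.listeners:
            listener(self.current_song, self.current_artist, self.current_album)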
842064d8b4852232c19df9d176fb50a0c302d867
|
update-manifest.py
|
update-manifest.py
|
#!/usr/bin/env python
import os
import re
repo = os.path.dirname(os.path.realpath(__file__))
code = 'android:versionCode="%s"'
name = 'android:versionName="%s"'
in_code = code % r'(\d+)'
in_name = name % r'([^"]+)'
new_code = None
new_name = None
for dirpath, dirnames, filenames in os.walk(repo):
for filename in filenames:
if filename == 'AndroidManifest.xml':
filepath = os.path.join(dirpath, filename)
with open(filepath) as f:
contents = f.read()
if new_code is None:
print('Current version code: ' + re.search(in_code, contents).group(1))
new_code = raw_input('New version code: ')
print('Current version name: ' + re.search(in_name, contents).group(1))
new_name = raw_input('New version name: ')
contents = re.sub(in_code, code % new_code, contents)
contents = re.sub(in_name, name % new_name, contents)
with open(filepath, 'w') as f:
f.write(contents)
|
Add super-simple automatic AndroidManifest.xml updater.
|
Add super-simple automatic AndroidManifest.xml updater.
|
Python
|
apache-2.0
|
msdgwzhy6/ActionBarSherlock,caobaibing/ActionBarSherlock,msdgwzhy6/ActionBarSherlock,ubreader/ActionBarSherlock,msdgwzhy6/ActionBarSherlock,mercadolibre/ActionBarSherlock,mxn21/ActionBarSherlock,vimalrajpara2006/Lib-Droid-ActionbarSherlock,zhaokidd/ActionBarSherlock,zhupengGitHub/ActionBarSherlock,SpeedSolutions/ActionBarSherlock,MarkMjw/ActionBarSherlock,Xomo/ActionBarSherlock-v4,ftdWang/ActionBarSherlock,SunnyLy/ActionBarSherlock,caobaibing/ActionBarSherlock,chengkaizone/ActionBarSherlock,Madioter/ActionBarSherlock,beshkenadze/ActionBarSherlock,gaojinhua/ActionBarSherlock,cuckata23/ActionBarSherlock,lzheng571/ActionBarSherlock,charlialiang/ActionBarSherlock,ywang2014/ActionBarSherlock,ftdWang/ActionBarSherlock,zhanggangbz/ActionBarSherlock,zhupengGitHub/ActionBarSherlock,GeekHades/ActionBarSherlock,kuailexs/ActionBarSherlock,walkinghorse/ActionBarSherlock,chengkaizone/ActionBarSherlock,amirul10022/ActionBarSampleProject,AppFellas/ActionBarSherlockGradle,yummy222/ActionBarSherlock,DreamRoad/ActionBarSherlock,charlialiang/ActionBarSherlock,vimalrajpara2006/Lib-Droid-ActionbarSherlock,mxn21/ActionBarSherlock,lauren7249/ActionBarSherlock,ftdWang/ActionBarSherlock,bupthcp/ActionBarSherlock,lzy-h2o2/ActionBarSherlock,yoome/ActionBarSherlock,kevinsawicki/ActionBarSherlock,RyanLee7/ActionBarSherlock,newcoderzhang/ActionBarSherlock,Xomo/ActionBarSherlock,Metaswitch/ActionBarSherlock,sunzongju/ActionBarSherlock,Xomo/ActionBarSherlock,danDingCongRong/ActionBarSherlock,hgl888/ActionBarSherlock,zhaoyuqing/ActionBarSherlock,cncomer/ActionBarSherlock,zhaoyuqing/SlidingMenu,pixmob/freemobilenetstat,RyanLee7/ActionBarSherlock,HamzaHasan90/ActionBarSherlock,liuzwei/ActionBarSherlock,zhaokidd/ActionBarSherlock,osoft/ActionBarSherlock,zhaoyuqing/SlidingMenu,wenwenhappy/ActionBarSherlock,worker8/actionbarsherlock_fork,18611480882/ActionBarSherlock,Gigithecode/ActionBarSherlock,sharpdeep/ActionBarSherlock,DreamRoad/ActionBarSherlock,the-diamond-dogs-group-oss/ActionBarSherlock,gaojinhua/ActionBarSherlock,mxm2005/ActionBarSherlock,sharpdeep/ActionBarSherlock,codersplatform/android,mxm2005/ActionBarSherlock,danDingCongRong/ActionBarSherlock,yummy222/ActionBarSherlock,kangdawei/ActionBarSherlock,kevinsawicki/ActionBarSherlock,maxi182/ActionBarSherlock,jameswald/ActionBarSherlock,mikandi/ActionBarSherlock,zhaokidd/ActionBarSherlock,androidgilbert/ActionBarSherlock,AppFellas/ActionBarSherlockGradle,pixmob/httpclient,GeekHades/ActionBarSherlock,vimalrajpara2006/Lib-Droid-ActionbarSherlock,tonycheng93/ActionBarSherlock,nizamsikder/ActionBarSherlock,VladislavLipskiy/ActionBarSherlock,osoft/ActionBarSherlock,rao1219/ActionBarSherlock,MarkMjw/ActionBarSherlock,DavidWangTM/ActionBarSherlock,beamly/ActionBarSherlock,MarkMjw/ActionBarSherlock,jameswald/ActionBarSherlock,androidgilbert/ActionBarSherlock,Liyue1314/ActionBarSherlock,SunnyLy/ActionBarSherlock,practo/ActionBarSherlock,zhupengGitHub/ActionBarSherlock,hgl888/ActionBarSherlock,Liyue1314/ActionBarSherlock,cncomer/ActionBarSherlock,mxn21/ActionBarSherlock,mikandi/ActionBarSherlock,wenwenhappy/ActionBarSherlock,Madioter/ActionBarSherlock,nizamsikder/ActionBarSherlock,cuckata23/ActionBarSherlock,JakeWharton/ActionBarSherlock,yummy222/ActionBarSherlock,wenwenhappy/ActionBarSherlock,mikandi/ActionBarSherlock,bumptech/ActionBarSherlock,r3gis3r/ActionBarSherlock,maxi182/ActionBarSherlock,tonycheng93/ActionBarSherlock,DavidWangTM/ActionBarSherlock,caobaibing/ActionBarSherlock,VladislavLipskiy/ActionBarSherlock,Liyue1314/ActionBarSherlock,amirul10022/ActionBarSampleProject,
kangdawei/ActionBarSherlock,danDingCongRong/ActionBarSherlock,maxi182/ActionBarSherlock,Metaswitch/ActionBarSherlock,cloud9IM/ActionBarSherlock,DavidWangTM/ActionBarSherlock,androidgilbert/ActionBarSherlock,JakeWharton/ActionBarSherlock,HamzaHasan90/ActionBarSherlock,zhangyihao/ActionBarSherlock,fengnanyue/ActionBarSherlock,kangdawei/ActionBarSherlock,mxn21/ActionBarSherlock,lauren7249/ActionBarSherlock,Xomo/ActionBarSherlock,ftdWang/ActionBarSherlock,gaojinhua/ActionBarSherlock,rao1219/ActionBarSherlock,newcoderzhang/ActionBarSherlock,lzheng571/ActionBarSherlock,GeekHades/ActionBarSherlock,mxm2005/ActionBarSherlock,bupthcp/ActionBarSherlock,SpeedSolutions/ActionBarSherlock,beamly/ActionBarSherlock,pixmob/httpclient,charlialiang/ActionBarSherlock,zhaoyuqing/ActionBarSherlock,chengkaizone/ActionBarSherlock,worker8/actionbarsherlock_fork,perrystreetsoftware/ActionBarSherlock,newcoderzhang/ActionBarSherlock,liuzwei/ActionBarSherlock,lzy-h2o2/ActionBarSherlock,cuckata23/ActionBarSherlock,GrioSF/ActionBarSherlock-grio,codersplatform/android,danDingCongRong/ActionBarSherlock,tonycheng93/ActionBarSherlock,walkinghorse/ActionBarSherlock,sunzongju/ActionBarSherlock,walkinghorse/ActionBarSherlock,rao1219/ActionBarSherlock,mlabraca/ActionBarSherlock,lyxwll/ActionBarSherlock,r3gis3r/ActionBarSherlock,msdgwzhy6/ActionBarSherlock,GrioSF/ActionBarSherlock-grio,yihr/ActionBarSherlock,GrioSF/ActionBarSherlock-grio,ubreader/ActionBarSherlock,zhangyihao/ActionBarSherlock,caobaibing/ActionBarSherlock,bumptech/ActionBarSherlock,GeekHades/ActionBarSherlock,beamly/ActionBarSherlock,the-diamond-dogs-group-oss/ActionBarSherlock,newcoderzhang/ActionBarSherlock,ywang2014/ActionBarSherlock,osoft/ActionBarSherlock,DavidWangTM/ActionBarSherlock,Gigithecode/ActionBarSherlock,mikandi/ActionBarSherlock,beshkenadze/ActionBarSherlock,VladislavLipskiy/ActionBarSherlock,SpeedSolutions/ActionBarSherlock,walkinghorse/ActionBarSherlock,androidgilbert/ActionBarSherlock,liuzwei/ActionBarSherlock,Gigithecode/ActionBarSherlock,hgl888/ActionBarSherlock,yihr/ActionBarSherlock,RyanLee7/ActionBarSherlock,zhanggangbz/ActionBarSherlock,zhaoyuqing/ActionBarSherlock,codersplatform/android,lzheng571/ActionBarSherlock,rao1219/ActionBarSherlock,zhangyihao/ActionBarSherlock,sunzongju/ActionBarSherlock,ywang2014/ActionBarSherlock,mlabraca/ActionBarSherlock,charlialiang/ActionBarSherlock,18611480882/ActionBarSherlock,sunzongju/ActionBarSherlock,zhupengGitHub/ActionBarSherlock,lyxwll/ActionBarSherlock,nizamsikder/ActionBarSherlock,chengkaizone/ActionBarSherlock,zhaoyuqing/SlidingMenu,the-diamond-dogs-group-oss/ActionBarSherlock,yoome/ActionBarSherlock,lauren7249/ActionBarSherlock,zhanggangbz/ActionBarSherlock,perrystreetsoftware/ActionBarSherlock,zhangyihao/ActionBarSherlock,SpeedSolutions/ActionBarSherlock,MarkMjw/ActionBarSherlock,mxm2005/ActionBarSherlock,yoome/ActionBarSherlock,18611480882/ActionBarSherlock,lzheng571/ActionBarSherlock,SunnyLy/ActionBarSherlock,ubreader/ActionBarSherlock,beshkenadze/ActionBarSherlock,tonycheng93/ActionBarSherlock,kangdawei/ActionBarSherlock,cncomer/ActionBarSherlock,cuckata23/ActionBarSherlock,practo/ActionBarSherlock,lyxwll/ActionBarSherlock,petedoyle/ActionBarSherlock,kuailexs/ActionBarSherlock,lzy-h2o2/ActionBarSherlock,12307/ActionBarSherlock,HamzaHasan90/ActionBarSherlock,nizamsikder/ActionBarSherlock,yihr/ActionBarSherlock,AppFellas/ActionBarSherlockGradle,fengnanyue/ActionBarSherlock,maxi182/ActionBarSherlock,yummy222/ActionBarSherlock,lyxwll/ActionBarSherlock,petedoyle/ActionBarSherlock,jameswald/ActionB
arSherlock,cloud9IM/ActionBarSherlock,fengnanyue/ActionBarSherlock,lzy-h2o2/ActionBarSherlock,petedoyle/ActionBarSherlock,yihr/ActionBarSherlock,sharpdeep/ActionBarSherlock,12307/ActionBarSherlock,yoome/ActionBarSherlock,HamzaHasan90/ActionBarSherlock,codersplatform/android,gaojinhua/ActionBarSherlock,ywang2014/ActionBarSherlock,mlabraca/ActionBarSherlock,fengnanyue/ActionBarSherlock,wenwenhappy/ActionBarSherlock,adamsp/ActionBarSherlock,DreamRoad/ActionBarSherlock,kuailexs/ActionBarSherlock,12307/ActionBarSherlock,zhanggangbz/ActionBarSherlock,sharpdeep/ActionBarSherlock,Liyue1314/ActionBarSherlock,mercadolibre/ActionBarSherlock,hgl888/ActionBarSherlock,zhaokidd/ActionBarSherlock,Madioter/ActionBarSherlock,amirul10022/ActionBarSampleProject,amirul10022/ActionBarSampleProject,Madioter/ActionBarSherlock,ubreader/ActionBarSherlock,adamsp/ActionBarSherlock,kuailexs/ActionBarSherlock,Xomo/ActionBarSherlock-v4,Metaswitch/ActionBarSherlock,DreamRoad/ActionBarSherlock,worker8/actionbarsherlock_fork,perrystreetsoftware/ActionBarSherlock,osoft/ActionBarSherlock,JakeWharton/ActionBarSherlock,SunnyLy/ActionBarSherlock
|
Add super-simple automatic AndroidManifest.xml updater.
|
#!/usr/bin/env python
import os
import re
repo = os.path.dirname(os.path.realpath(__file__))
code = 'android:versionCode="%s"'
name = 'android:versionName="%s"'
in_code = code % r'(\d+)'
in_name = name % r'([^"]+)'
new_code = None
new_name = None
for dirpath, dirnames, filenames in os.walk(repo):
for filename in filenames:
if filename == 'AndroidManifest.xml':
filepath = os.path.join(dirpath, filename)
with open(filepath) as f:
contents = f.read()
if new_code is None:
print('Current version code: ' + re.search(in_code, contents).group(1))
new_code = raw_input('New version code: ')
print('Current version name: ' + re.search(in_name, contents).group(1))
new_name = raw_input('New version name: ')
contents = re.sub(in_code, code % new_code, contents)
contents = re.sub(in_name, name % new_name, contents)
with open(filepath, 'w') as f:
f.write(contents)
|
<commit_before><commit_msg>Add super-simple automatic AndroidManifest.xml updater.<commit_after>
|
#!/usr/bin/env python
import os
import re
repo = os.path.dirname(os.path.realpath(__file__))
code = 'android:versionCode="%s"'
name = 'android:versionName="%s"'
in_code = code % r'(\d+)'
in_name = name % r'([^"]+)'
new_code = None
new_name = None
for dirpath, dirnames, filenames in os.walk(repo):
for filename in filenames:
if filename == 'AndroidManifest.xml':
filepath = os.path.join(dirpath, filename)
with open(filepath) as f:
contents = f.read()
if new_code is None:
print('Current version code: ' + re.search(in_code, contents).group(1))
new_code = raw_input('New version code: ')
print('Current version name: ' + re.search(in_name, contents).group(1))
new_name = raw_input('New version name: ')
contents = re.sub(in_code, code % new_code, contents)
contents = re.sub(in_name, name % new_name, contents)
with open(filepath, 'w') as f:
f.write(contents)
|
Add super-simple automatic AndroidManifest.xml updater.#!/usr/bin/env python
import os
import re
repo = os.path.dirname(os.path.realpath(__file__))
code = 'android:versionCode="%s"'
name = 'android:versionName="%s"'
in_code = code % r'(\d+)'
in_name = name % r'([^"]+)'
new_code = None
new_name = None
for dirpath, dirnames, filenames in os.walk(repo):
for filename in filenames:
if filename == 'AndroidManifest.xml':
filepath = os.path.join(dirpath, filename)
with open(filepath) as f:
contents = f.read()
if new_code is None:
print('Current version code: ' + re.search(in_code, contents).group(1))
new_code = raw_input('New version code: ')
print('Current version name: ' + re.search(in_name, contents).group(1))
new_name = raw_input('New version name: ')
contents = re.sub(in_code, code % new_code, contents)
contents = re.sub(in_name, name % new_name, contents)
with open(filepath, 'w') as f:
f.write(contents)
|
<commit_before><commit_msg>Add super-simple automatic AndroidManifest.xml updater.<commit_after>#!/usr/bin/env python
import os
import re
repo = os.path.dirname(os.path.realpath(__file__))
code = 'android:versionCode="%s"'
name = 'android:versionName="%s"'
in_code = code % r'(\d+)'
in_name = name % r'([^"]+)'
new_code = None
new_name = None
for dirpath, dirnames, filenames in os.walk(repo):
for filename in filenames:
if filename == 'AndroidManifest.xml':
filepath = os.path.join(dirpath, filename)
with open(filepath) as f:
contents = f.read()
if new_code is None:
print('Current version code: ' + re.search(in_code, contents).group(1))
new_code = raw_input('New version code: ')
print('Current version name: ' + re.search(in_name, contents).group(1))
new_name = raw_input('New version name: ')
contents = re.sub(in_code, code % new_code, contents)
contents = re.sub(in_name, name % new_name, contents)
with open(filepath, 'w') as f:
f.write(contents)
|
|
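The trick is that the same template string serves both as the search pattern (with a capture group substituted in) and as the replacement (with the new value substituted in), so each AndroidManifest.xml line is rewritten in place. A small illustration of the substitution on a made-up manifest line; note that raw_input means the script itself targets Python 2:

import re

code = 'android:versionCode="%s"'
line = '<manifest android:versionCode="41" android:versionName="4.1.0">'

print(re.search(code % r'(\d+)', line).group(1))
# -> 41
print(re.sub(code % r'(\d+)', code % '42', line))
# -> <manifest android:versionCode="42" android:versionName="4.1.0">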
c94aefed7b260b7ae05c56f02253cba44f03b602
|
scripts/migrate_boxfiles.py
|
scripts/migrate_boxfiles.py
|
import logging
from website.app import init_app
from website.addons.box.model import BoxFile
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in BoxFile.find():
new_path = '/' + file.path.split('/')[1]
logger.info(u'{} -> {}'.format(file.path, new_path))
file.path = new_path
file.save()
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
Add a migration for box file guids
|
Add a migration for box file guids
|
Python
|
apache-2.0
|
brandonPurvis/osf.io,rdhyee/osf.io,mattclark/osf.io,alexschiller/osf.io,adlius/osf.io,TomHeatwole/osf.io,sbt9uc/osf.io,MerlinZhang/osf.io,petermalcolm/osf.io,mfraezz/osf.io,brandonPurvis/osf.io,aaxelb/osf.io,crcresearch/osf.io,sbt9uc/osf.io,adlius/osf.io,lyndsysimon/osf.io,ZobairAlijan/osf.io,cosenal/osf.io,laurenrevere/osf.io,rdhyee/osf.io,emetsger/osf.io,amyshi188/osf.io,HarryRybacki/osf.io,laurenrevere/osf.io,hmoco/osf.io,baylee-d/osf.io,crcresearch/osf.io,mfraezz/osf.io,jolene-esposito/osf.io,cslzchen/osf.io,mattclark/osf.io,arpitar/osf.io,caneruguz/osf.io,alexschiller/osf.io,chrisseto/osf.io,ticklemepierce/osf.io,zkraime/osf.io,jolene-esposito/osf.io,ZobairAlijan/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,abought/osf.io,leb2dg/osf.io,Nesiehr/osf.io,doublebits/osf.io,fabianvf/osf.io,hmoco/osf.io,KAsante95/osf.io,lyndsysimon/osf.io,kch8qx/osf.io,samanehsan/osf.io,barbour-em/osf.io,caseyrollins/osf.io,njantrania/osf.io,bdyetton/prettychart,alexschiller/osf.io,jeffreyliu3230/osf.io,petermalcolm/osf.io,njantrania/osf.io,binoculars/osf.io,arpitar/osf.io,acshi/osf.io,acshi/osf.io,caseyrygt/osf.io,cslzchen/osf.io,ticklemepierce/osf.io,zachjanicki/osf.io,DanielSBrown/osf.io,arpitar/osf.io,amyshi188/osf.io,GageGaskins/osf.io,TomHeatwole/osf.io,haoyuchen1992/osf.io,felliott/osf.io,monikagrabowska/osf.io,brianjgeiger/osf.io,chennan47/osf.io,mluke93/osf.io,mluke93/osf.io,kch8qx/osf.io,haoyuchen1992/osf.io,caseyrygt/osf.io,jmcarp/osf.io,Johnetordoff/osf.io,chennan47/osf.io,HarryRybacki/osf.io,crcresearch/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,reinaH/osf.io,danielneis/osf.io,acshi/osf.io,kwierman/osf.io,erinspace/osf.io,caneruguz/osf.io,emetsger/osf.io,mfraezz/osf.io,jinluyuan/osf.io,ckc6cz/osf.io,ticklemepierce/osf.io,haoyuchen1992/osf.io,HalcyonChimera/osf.io,bdyetton/prettychart,barbour-em/osf.io,DanielSBrown/osf.io,zamattiac/osf.io,chrisseto/osf.io,revanthkolli/osf.io,RomanZWang/osf.io,SSJohns/osf.io,pattisdr/osf.io,MerlinZhang/osf.io,asanfilippo7/osf.io,doublebits/osf.io,jeffreyliu3230/osf.io,monikagrabowska/osf.io,kwierman/osf.io,leb2dg/osf.io,adlius/osf.io,sloria/osf.io,billyhunt/osf.io,cosenal/osf.io,RomanZWang/osf.io,zamattiac/osf.io,ckc6cz/osf.io,aaxelb/osf.io,MerlinZhang/osf.io,samchrisinger/osf.io,erinspace/osf.io,GaryKriebel/osf.io,Nesiehr/osf.io,jeffreyliu3230/osf.io,arpitar/osf.io,felliott/osf.io,njantrania/osf.io,TomBaxter/osf.io,cosenal/osf.io,samanehsan/osf.io,rdhyee/osf.io,acshi/osf.io,binoculars/osf.io,jolene-esposito/osf.io,samchrisinger/osf.io,danielneis/osf.io,GageGaskins/osf.io,icereval/osf.io,sbt9uc/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,cwisecarver/osf.io,mluke93/osf.io,jeffreyliu3230/osf.io,fabianvf/osf.io,KAsante95/osf.io,ckc6cz/osf.io,dplorimer/osf,Nesiehr/osf.io,caseyrygt/osf.io,SSJohns/osf.io,danielneis/osf.io,CenterForOpenScience/osf.io,billyhunt/osf.io,HalcyonChimera/osf.io,jmcarp/osf.io,wearpants/osf.io,alexschiller/osf.io,GageGaskins/osf.io,abought/osf.io,mfraezz/osf.io,rdhyee/osf.io,zamattiac/osf.io,ZobairAlijan/osf.io,reinaH/osf.io,GaryKriebel/osf.io,dplorimer/osf,cldershem/osf.io,billyhunt/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,KAsante95/osf.io,lyndsysimon/osf.io,fabianvf/osf.io,mattclark/osf.io,DanielSBrown/osf.io,zachjanicki/osf.io,monikagrabowska/osf.io,KAsante95/osf.io,samchrisinger/osf.io,pattisdr/osf.io,kwierman/osf.io,chennan47/osf.io,brianjgeiger/osf.io,reinaH/osf.io,saradbowman/osf.io,GageGaskins/osf.io,bdyetton/prettychart,icereval/osf.io,asanfilippo7/osf.io,bdyetton/prettychart,pattisdr/osf.io,abought/osf.io,cwis
ecarver/osf.io,zkraime/osf.io,kch8qx/osf.io,ZobairAlijan/osf.io,amyshi188/osf.io,asanfilippo7/osf.io,sloria/osf.io,HarryRybacki/osf.io,Ghalko/osf.io,zachjanicki/osf.io,HarryRybacki/osf.io,zamattiac/osf.io,brianjgeiger/osf.io,RomanZWang/osf.io,chrisseto/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,zkraime/osf.io,haoyuchen1992/osf.io,emetsger/osf.io,wearpants/osf.io,kwierman/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,petermalcolm/osf.io,mluo613/osf.io,cosenal/osf.io,revanthkolli/osf.io,aaxelb/osf.io,zkraime/osf.io,hmoco/osf.io,binoculars/osf.io,wearpants/osf.io,fabianvf/osf.io,kch8qx/osf.io,lyndsysimon/osf.io,GaryKriebel/osf.io,revanthkolli/osf.io,erinspace/osf.io,brandonPurvis/osf.io,doublebits/osf.io,mluo613/osf.io,barbour-em/osf.io,SSJohns/osf.io,samchrisinger/osf.io,felliott/osf.io,jnayak1/osf.io,chrisseto/osf.io,jnayak1/osf.io,zachjanicki/osf.io,jnayak1/osf.io,leb2dg/osf.io,hmoco/osf.io,GaryKriebel/osf.io,monikagrabowska/osf.io,Ghalko/osf.io,jinluyuan/osf.io,jinluyuan/osf.io,icereval/osf.io,brandonPurvis/osf.io,adlius/osf.io,jinluyuan/osf.io,cwisecarver/osf.io,cwisecarver/osf.io,samanehsan/osf.io,wearpants/osf.io,samanehsan/osf.io,kch8qx/osf.io,Ghalko/osf.io,monikagrabowska/osf.io,amyshi188/osf.io,jmcarp/osf.io,mluo613/osf.io,aaxelb/osf.io,ckc6cz/osf.io,mluke93/osf.io,felliott/osf.io,cslzchen/osf.io,dplorimer/osf,reinaH/osf.io,laurenrevere/osf.io,cldershem/osf.io,danielneis/osf.io,abought/osf.io,acshi/osf.io,MerlinZhang/osf.io,SSJohns/osf.io,emetsger/osf.io,saradbowman/osf.io,DanielSBrown/osf.io,sloria/osf.io,cldershem/osf.io,njantrania/osf.io,doublebits/osf.io,billyhunt/osf.io,barbour-em/osf.io,Johnetordoff/osf.io,KAsante95/osf.io,jmcarp/osf.io,caseyrygt/osf.io,caseyrollins/osf.io,asanfilippo7/osf.io,alexschiller/osf.io,sbt9uc/osf.io,billyhunt/osf.io,baylee-d/osf.io,GageGaskins/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,brandonPurvis/osf.io,petermalcolm/osf.io,jolene-esposito/osf.io,cslzchen/osf.io,caneruguz/osf.io,TomHeatwole/osf.io,doublebits/osf.io,ticklemepierce/osf.io,mluo613/osf.io,revanthkolli/osf.io,CenterForOpenScience/osf.io,jnayak1/osf.io,Ghalko/osf.io,leb2dg/osf.io,RomanZWang/osf.io,dplorimer/osf,mluo613/osf.io,TomBaxter/osf.io,cldershem/osf.io
|
Add a migration for box file guids
|
import logging
from website.app import init_app
from website.addons.box.model import BoxFile
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in BoxFile.find():
new_path = '/' + file.path.split('/')[1]
logger.info(u'{} -> {}'.format(file.path, new_path))
file.path = new_path
file.save()
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
<commit_before><commit_msg>Add a migration for box file guids<commit_after>
|
import logging
from website.app import init_app
from website.addons.box.model import BoxFile
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in BoxFile.find():
new_path = '/' + file.path.split('/')[1]
logger.info(u'{} -> {}'.format(file.path, new_path))
file.path = new_path
file.save()
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
Add a migration for box file guidsimport logging
from website.app import init_app
from website.addons.box.model import BoxFile
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in BoxFile.find():
new_path = '/' + file.path.split('/')[1]
logger.info(u'{} -> {}'.format(file.path, new_path))
file.path = new_path
file.save()
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
<commit_before><commit_msg>Add a migration for box file guids<commit_after>import logging
from website.app import init_app
from website.addons.box.model import BoxFile
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in BoxFile.find():
new_path = '/' + file.path.split('/')[1]
logger.info(u'{} -> {}'.format(file.path, new_path))
file.path = new_path
file.save()
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
|
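The migration keeps only the first path component: '/' + file.path.split('/')[1] reduces a nested path to its leading segment, presumably the Box file id (the exact shape of the stored paths is not visible in this script). A quick illustration with a made-up path; the rewrite is also idempotent, so re-running the script leaves already-migrated records unchanged:

path = '/12345/some/nested/name.txt'
new_path = '/' + path.split('/')[1]
print(new_path)                       # -> /12345
print('/' + new_path.split('/')[1])   # running it again is a no-op -> /12345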
8db409d7d63135c81daa5cc556269599b362b525
|
affiliate-builder/build_recipes.py
|
affiliate-builder/build_recipes.py
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL
def main(recipe_dir=RECIPE_FOLDER):
builder = Builder(recipe_dir, BINSTAR_CHANNEL, 'main')
builder.main()
print('moo')
if __name__ == '__main__':
main()
|
Add script for building recipes
|
Add script for building recipes
|
Python
|
bsd-3-clause
|
astropy/conda-builder-affiliated,Cadair/conda-builder-affiliated,mwcraig/conda-builder-affiliated,kbarbary/conda-builder-affiliated,zblz/conda-builder-affiliated,kbarbary/conda-builder-affiliated,cdeil/conda-builder-affiliated,cdeil/conda-builder-affiliated,astropy/conda-build-tools,bmorris3/conda-builder-affiliated,Cadair/conda-builder-affiliated,astropy/conda-builder-affiliated,bmorris3/conda-builder-affiliated,astropy/conda-build-tools,zblz/conda-builder-affiliated,mwcraig/conda-builder-affiliated
|
Add script for building recipes
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL
def main(recipe_dir=RECIPE_FOLDER):
builder = Builder(recipe_dir, BINSTAR_CHANNEL, 'main')
builder.main()
print('moo')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for building recipes<commit_after>
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL
def main(recipe_dir=RECIPE_FOLDER):
builder = Builder(recipe_dir, BINSTAR_CHANNEL, 'main')
builder.main()
print('moo')
if __name__ == '__main__':
main()
|
Add script for building recipesfrom __future__ import (division, print_function, absolute_import,
unicode_literals)
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL
def main(recipe_dir=RECIPE_FOLDER):
builder = Builder(recipe_dir, BINSTAR_CHANNEL, 'main')
builder.main()
print('moo')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for building recipes<commit_after>from __future__ import (division, print_function, absolute_import,
unicode_literals)
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL
def main(recipe_dir=RECIPE_FOLDER):
builder = Builder(recipe_dir, BINSTAR_CHANNEL, 'main')
builder.main()
print('moo')
if __name__ == '__main__':
main()
|
|
3d04954345b15527707d119939b8f79a761e7782
|
pyaml.py
|
pyaml.py
|
import yaml
sample_yaml_as_dict = '''
first_dict_key: some value
second_dict_key: some other value
'''
sample_yaml_as_list = '''
# Notice here how I don't need quotes. Read the Wikipedia page for more info!
- list item 1
- list item 2
'''
my_config_dict = yaml.load(sample_yaml_as_dict)
print my_config_dict
# Will print:
# {'second_dict_key': 'some other value', 'first_dict_key': 'some value'}
my_config_list = yaml.load(sample_yaml_as_list)
print my_config_list
# Will print:
# ['list item 1', 'list item 2']
# Load some external config file
with open('~/my_config.yaml') as fp:
my_configuration = yaml.load(fp)
print my_configuration_dict
|
Add yaml file to python dict example
|
Add yaml file to python dict example
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add yaml file to python dict example
|
import yaml
sample_yaml_as_dict = '''
first_dict_key: some value
second_dict_key: some other value
'''
sample_yaml_as_list = '''
# Notice here how I don't need quotes. Read the Wikipedia page for more info!
- list item 1
- list item 2
'''
my_config_dict = yaml.load(sample_yaml_as_dict)
print my_config_dict
# Will print:
# {'second_dict_key': 'some other value', 'first_dict_key': 'some value'}
my_config_list = yaml.load(sample_yaml_as_list)
print my_config_list
# Will print:
# ['list item 1', 'list item 2']
# Load some external config file
with open('~/my_config.yaml') as fp:
my_configuration = yaml.load(fp)
print my_configuration_dict
|
<commit_before><commit_msg>Add yaml file to python dict example<commit_after>
|
import yaml
sample_yaml_as_dict = '''
first_dict_key: some value
second_dict_key: some other value
'''
sample_yaml_as_list = '''
# Notice here how I don't need quotes. Read the Wikipedia page for more info!
- list item 1
- list item 2
'''
my_config_dict = yaml.load(sample_yaml_as_dict)
print my_config_dict
# Will print:
# {'second_dict_key': 'some other value', 'first_dict_key': 'some value'}
my_config_list = yaml.load(sample_yaml_as_list)
print my_config_list
# Will print:
# ['list item 1', 'list item 2']
# Load some external config file
with open('~/my_config.yaml') as fp:
my_configuration = yaml.load(fp)
print my_configuration_dict
|
Add yaml file to python dict exampleimport yaml
sample_yaml_as_dict = '''
first_dict_key: some value
second_dict_key: some other value
'''
sample_yaml_as_list = '''
# Notice here how I don't need quotes. Read the Wikipedia page for more info!
- list item 1
- list item 2
'''
my_config_dict = yaml.load(sample_yaml_as_dict)
print my_config_dict
# Will print:
# {'second_dict_key': 'some other value', 'first_dict_key': 'some value'}
my_config_list = yaml.load(sample_yaml_as_list)
print my_config_list
# Will print:
# ['list item 1', 'list item 2']
# Load some external config file
with open('~/my_config.yaml') as fp:
my_configuration = yaml.load(fp)
print my_configuration_dict
|
<commit_before><commit_msg>Add yaml file to python dict example<commit_after>import yaml
sample_yaml_as_dict = '''
first_dict_key: some value
second_dict_key: some other value
'''
sample_yaml_as_list = '''
# Notice here how I don't need quotes. Read the Wikipedia page for more info!
- list item 1
- list item 2
'''
my_config_dict = yaml.load(sample_yaml_as_dict)
print my_config_dict
# Will print:
# {'second_dict_key': 'some other value', 'first_dict_key': 'some value'}
my_config_list = yaml.load(sample_yaml_as_list)
print my_config_list
# Will print:
# ['list item 1', 'list item 2']
# Load some external config file
with open('~/my_config.yaml') as fp:
my_configuration = yaml.load(fp)
print my_configuration_dict
|
|
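Three details worth flagging if this snippet is reused: open('~/my_config.yaml') will not expand the tilde (Python leaves that to the shell), the final print refers to my_configuration_dict although the variable was bound as my_configuration, and on current PyYAML yaml.safe_load is the recommended entry point when the input is not fully trusted. A Python 3 version of the file-loading block might look like:

import os

import yaml

config_path = os.path.expanduser('~/my_config.yaml')   # expand ~ explicitly
with open(config_path) as fp:
    my_configuration = yaml.safe_load(fp)               # avoids constructing arbitrary objects
print(my_configuration)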
0c0cbd2a289a651a5247b7c378d70370b76a35c2
|
app/soc/logic/helper/convert_db.py
|
app/soc/logic/helper/convert_db.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
|
Add a script to normalize user accounts
|
Add a script to normalize user accounts
Patch by: Sverre Rabbelier
--HG--
extra : convert_revision : svn%3A32761e7d-7263-4528-b7be-7235b26367ec/trunk%402240
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Add a script to normalize user accounts
Patch by: Sverre Rabbelier
--HG--
extra : convert_revision : svn%3A32761e7d-7263-4528-b7be-7235b26367ec/trunk%402240
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
|
<commit_before><commit_msg>Add a script to normalize user accounts
Patch by: Sverre Rabbelier
--HG--
extra : convert_revision : svn%3A32761e7d-7263-4528-b7be-7235b26367ec/trunk%402240<commit_after>
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
|
Add a script to normalize user accounts
Patch by: Sverre Rabbelier
--HG--
extra : convert_revision : svn%3A32761e7d-7263-4528-b7be-7235b26367ec/trunk%402240#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
|
<commit_before><commit_msg>Add a script to normalize user accounts
Patch by: Sverre Rabbelier
--HG--
extra : convert_revision : svn%3A32761e7d-7263-4528-b7be-7235b26367ec/trunk%402240<commit_after>#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the DB from an old scheme to a new one.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import users
from django import http
from soc.models import user as user_model
from soc.logic import accounts
from soc.logic.models.user import logic as user_logic
def convert_user_accounts(*args, **kwargs):
"""Converts all current user accounts to normalized form.
"""
data = user_logic.getAll(user_model.User.all())
for user in data:
normalized = accounts.normalizeAccount(user.account)
if user.account != normalized:
user.account = normalized
user.put()
return http.HttpResponse('Done')
|
|
55d299d88358bd1e106e96b4475a268cdfe581fb
|
setup.py
|
setup.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
Split functions to avoid eventlet import.
|
Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d
|
Python
|
apache-2.0
|
openstack-attic/oslo.version,emonty/oslo.version
|
Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
<commit_before><commit_msg>Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
<commit_before><commit_msg>Split functions to avoid eventlet import.
Some of these functions are used in setup.py. In a virtualenv
based workflow, python setup.py sdist is called to create a
tarball which is then installed into the virtualenv. These
functions need to be in a separate file so that they can be
imported by setup.py without eventlet needing to be installed.
Change-Id: I6f7dc9614895b8c91135c62373b98afe55e1fc7d<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import os
import re
import subprocess
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
|
|
3b79b49b08c35a1e8d57fa9774027bdeda4e8e83
|
test_transformations.py
|
test_transformations.py
|
import unittest
from transformations import drop_vowel, words_with_ck, repeat_to_single, vowel_expand, get_matched_letters_indices, l33t
class MyTestCase(unittest.TestCase):
def test_drop_one_vowel(self):
bad_words = drop_vowel('duck')
expected = ['duck', 'dck']
self.assertEquals(expected, bad_words)
def test_drop_multiple_vowels(self):
bad_words = drop_vowel('saman')
expected = ['saman', 'samn', 'sman', 'smn']
self.assertEquals(expected, bad_words)
def test_drop_vowel_minimum(self):
bad_words = drop_vowel('bad')
expected = ['bad']
self.assertEquals(expected, bad_words)
def test_words_with_ck(self):
bad_words = words_with_ck('duck')
expected = ['duck', 'duc', 'ducc', 'duk', 'dukk']
self.assertEquals(expected, bad_words)
def test_l33t(self):
bad_words = l33t('bad')
expected = ['bad', 'b4d']
self.assertEquals(expected, bad_words)
def test_get_matched_letter_indices(self):
indices = get_matched_letters_indices('saman', 'aeiou')
expected = [(1, 'a'), (3, 'a')]
self.assertEquals(indices, expected)
def test_double_to_single(self):
bad_words = repeat_to_single('coffee')
self.assertIn('coffee', bad_words)
self.assertIn('coffe', bad_words)
self.assertIn('cofee', bad_words)
self.assertIn('cofe', bad_words)
def test_repeat_to_single(self):
bad_words = repeat_to_single('hhhi')
self.assertIn('hhhi', bad_words)
self.assertIn('hhi', bad_words)
self.assertIn('hi', bad_words)
def test_multiple_repeats_to_single(self):
bad_words = repeat_to_single('hhff')
self.assertIn('hhff', bad_words)
self.assertIn('hff', bad_words)
self.assertIn('hhf', bad_words)
if __name__ == '__main__':
unittest.main()
|
Add tests for transformation functions
|
Add tests for transformation functions
|
Python
|
apache-2.0
|
JeffSpies/nonwordlist,CenterForOpenScience/guid-filter
|
Add tests for transformation functions
|
import unittest
from transformations import drop_vowel, words_with_ck, repeat_to_single, vowel_expand, get_matched_letters_indices, l33t
class MyTestCase(unittest.TestCase):
def test_drop_one_vowel(self):
bad_words = drop_vowel('duck')
expected = ['duck', 'dck']
self.assertEquals(expected, bad_words)
def test_drop_multiple_vowels(self):
bad_words = drop_vowel('saman')
expected = ['saman', 'samn', 'sman', 'smn']
self.assertEquals(expected, bad_words)
def test_drop_vowel_minimum(self):
bad_words = drop_vowel('bad')
expected = ['bad']
self.assertEquals(expected, bad_words)
def test_words_with_ck(self):
bad_words = words_with_ck('duck')
expected = ['duck', 'duc', 'ducc', 'duk', 'dukk']
self.assertEquals(expected, bad_words)
def test_l33t(self):
bad_words = l33t('bad')
expected = ['bad', 'b4d']
self.assertEquals(expected, bad_words)
def test_get_matched_letter_indices(self):
indices = get_matched_letters_indices('saman', 'aeiou')
expected = [(1, 'a'), (3, 'a')]
self.assertEquals(indices, expected)
def test_double_to_single(self):
bad_words = repeat_to_single('coffee')
self.assertIn('coffee', bad_words)
self.assertIn('coffe', bad_words)
self.assertIn('cofee', bad_words)
self.assertIn('cofe', bad_words)
def test_repeat_to_single(self):
bad_words = repeat_to_single('hhhi')
self.assertIn('hhhi', bad_words)
self.assertIn('hhi', bad_words)
self.assertIn('hi', bad_words)
def test_multiple_repeats_to_single(self):
bad_words = repeat_to_single('hhff')
self.assertIn('hhff', bad_words)
self.assertIn('hff', bad_words)
self.assertIn('hhf', bad_words)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for transformation functions<commit_after>
|
import unittest
from transformations import drop_vowel, words_with_ck, repeat_to_single, vowel_expand, get_matched_letters_indices, l33t
class MyTestCase(unittest.TestCase):
def test_drop_one_vowel(self):
bad_words = drop_vowel('duck')
expected = ['duck', 'dck']
self.assertEquals(expected, bad_words)
def test_drop_multiple_vowels(self):
bad_words = drop_vowel('saman')
expected = ['saman', 'samn', 'sman', 'smn']
self.assertEquals(expected, bad_words)
def test_drop_vowel_minimum(self):
bad_words = drop_vowel('bad')
expected = ['bad']
self.assertEquals(expected, bad_words)
def test_words_with_ck(self):
bad_words = words_with_ck('duck')
expected = ['duck', 'duc', 'ducc', 'duk', 'dukk']
self.assertEquals(expected, bad_words)
def test_l33t(self):
bad_words = l33t('bad')
expected = ['bad', 'b4d']
self.assertEquals(expected, bad_words)
def test_get_matched_letter_indices(self):
indices = get_matched_letters_indices('saman', 'aeiou')
expected = [(1, 'a'), (3, 'a')]
self.assertEquals(indices, expected)
def test_double_to_single(self):
bad_words = repeat_to_single('coffee')
self.assertIn('coffee', bad_words)
self.assertIn('coffe', bad_words)
self.assertIn('cofee', bad_words)
self.assertIn('cofe', bad_words)
def test_repeat_to_single(self):
bad_words = repeat_to_single('hhhi')
self.assertIn('hhhi', bad_words)
self.assertIn('hhi', bad_words)
self.assertIn('hi', bad_words)
def test_multiple_repeats_to_single(self):
bad_words = repeat_to_single('hhff')
self.assertIn('hhff', bad_words)
self.assertIn('hff', bad_words)
self.assertIn('hhf', bad_words)
if __name__ == '__main__':
unittest.main()
|
Add tests for transformation functionsimport unittest
from transformations import drop_vowel, words_with_ck, repeat_to_single, vowel_expand, get_matched_letters_indices, l33t
class MyTestCase(unittest.TestCase):
def test_drop_one_vowel(self):
bad_words = drop_vowel('duck')
expected = ['duck', 'dck']
self.assertEquals(expected, bad_words)
def test_drop_multiple_vowels(self):
bad_words = drop_vowel('saman')
expected = ['saman', 'samn', 'sman', 'smn']
self.assertEquals(expected, bad_words)
def test_drop_vowel_minimum(self):
bad_words = drop_vowel('bad')
expected = ['bad']
self.assertEquals(expected, bad_words)
def test_words_with_ck(self):
bad_words = words_with_ck('duck')
expected = ['duck', 'duc', 'ducc', 'duk', 'dukk']
self.assertEquals(expected, bad_words)
def test_l33t(self):
bad_words = l33t('bad')
expected = ['bad', 'b4d']
self.assertEquals(expected, bad_words)
def test_get_matched_letter_indices(self):
indices = get_matched_letters_indices('saman', 'aeiou')
expected = [(1, 'a'), (3, 'a')]
self.assertEquals(indices, expected)
def test_double_to_single(self):
bad_words = repeat_to_single('coffee')
self.assertIn('coffee', bad_words)
self.assertIn('coffe', bad_words)
self.assertIn('cofee', bad_words)
self.assertIn('cofe', bad_words)
def test_repeat_to_single(self):
bad_words = repeat_to_single('hhhi')
self.assertIn('hhhi', bad_words)
self.assertIn('hhi', bad_words)
self.assertIn('hi', bad_words)
def test_multiple_repeats_to_single(self):
bad_words = repeat_to_single('hhff')
self.assertIn('hhff', bad_words)
self.assertIn('hff', bad_words)
self.assertIn('hhf', bad_words)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for transformation functions<commit_after>import unittest
from transformations import drop_vowel, words_with_ck, repeat_to_single, vowel_expand, get_matched_letters_indices, l33t
class MyTestCase(unittest.TestCase):
def test_drop_one_vowel(self):
bad_words = drop_vowel('duck')
expected = ['duck', 'dck']
self.assertEquals(expected, bad_words)
def test_drop_multiple_vowels(self):
bad_words = drop_vowel('saman')
expected = ['saman', 'samn', 'sman', 'smn']
self.assertEquals(expected, bad_words)
def test_drop_vowel_minimum(self):
bad_words = drop_vowel('bad')
expected = ['bad']
self.assertEquals(expected, bad_words)
def test_words_with_ck(self):
bad_words = words_with_ck('duck')
expected = ['duck', 'duc', 'ducc', 'duk', 'dukk']
self.assertEquals(expected, bad_words)
def test_l33t(self):
bad_words = l33t('bad')
expected = ['bad', 'b4d']
self.assertEquals(expected, bad_words)
def test_get_matched_letter_indices(self):
indices = get_matched_letters_indices('saman', 'aeiou')
expected = [(1, 'a'), (3, 'a')]
self.assertEquals(indices, expected)
def test_double_to_single(self):
bad_words = repeat_to_single('coffee')
self.assertIn('coffee', bad_words)
self.assertIn('coffe', bad_words)
self.assertIn('cofee', bad_words)
self.assertIn('cofe', bad_words)
def test_repeat_to_single(self):
bad_words = repeat_to_single('hhhi')
self.assertIn('hhhi', bad_words)
self.assertIn('hhi', bad_words)
self.assertIn('hi', bad_words)
def test_multiple_repeats_to_single(self):
bad_words = repeat_to_single('hhff')
self.assertIn('hhff', bad_words)
self.assertIn('hff', bad_words)
self.assertIn('hhf', bad_words)
if __name__ == '__main__':
unittest.main()
|
|
7a9772952b5a6b39986b9a705ac7bbaf2810de91
|
python/prm_fhir/extractors.py
|
python/prm_fhir/extractors.py
|
"""
### CODE OWNERS: Shea Parkes
### OBJECTIVE:
Extraction methods for relevant items.
### DEVELOPER NOTES:
<none>
"""
import typing
from collections import OrderedDict
from fhirclient.client import FHIRClient
# =============================================================================
# LIBRARIES, LOCATIONS, LITERALS, ETC. GO ABOVE HERE
# =============================================================================
def _create_fhir_client(
url_fhir: str,
*,
app_id: str='prm_analytics'
) -> FHIRClient:
"""Instantiate a FHIRClient"""
return FHIRClient(settings={
'app_id': app_id,
'api_base': url_fhir,
})
def extract_patients(
url_fhir: str,
search_struct: dict,
) -> typing.Generator[OrderedDict]:
"""Generate patient records from a FHIR endpoint."""
_client = _create_fhir_client(url_fhir)
#TODO: Search shit
yield OrderedDict([
('name', patientname),
('dob', isodate),
('address', blah),
])
|
Put up a structure to extraction library.
|
Put up a structure to extraction library.
Do this so we have a skeleton to fill in.
|
Python
|
mit
|
IndyActuaries/epic-fhir,IndyActuaries/epic-fhir
|
Put up a structure to extraction library.
Do this so we have a skeleton to fill in.
|
"""
### CODE OWNERS: Shea Parkes
### OBJECTIVE:
Extraction methods for relevant items.
### DEVELOPER NOTES:
<none>
"""
import typing
from collections import OrderedDict
from fhirclient.client import FHIRClient
# =============================================================================
# LIBRARIES, LOCATIONS, LITERALS, ETC. GO ABOVE HERE
# =============================================================================
def _create_fhir_client(
url_fhir: str,
*,
app_id: str='prm_analytics'
) -> FHIRClient:
"""Instantiate a FHIRClient"""
return FHIRClient(settings={
'app_id': app_id,
'api_base': url_fhir,
})
def extract_patients(
url_fhir: str,
search_struct: dict,
) -> typing.Generator[OrderedDict]:
"""Generate patient records from a FHIR endpoint."""
_client = _create_fhir_client(url_fhir)
#TODO: Search shit
yield OrderedDict([
('name', patientname),
('dob', isodate),
('address', blah),
])
|
<commit_before><commit_msg>Put up a structure to extraction library.
Do this so we have a skeleton to fill in.<commit_after>
|
"""
### CODE OWNERS: Shea Parkes
### OBJECTIVE:
Extraction methods for relevant items.
### DEVELOPER NOTES:
<none>
"""
import typing
from collections import OrderedDict
from fhirclient.client import FHIRClient
# =============================================================================
# LIBRARIES, LOCATIONS, LITERALS, ETC. GO ABOVE HERE
# =============================================================================
def _create_fhir_client(
url_fhir: str,
*,
app_id: str='prm_analytics'
) -> FHIRClient:
"""Instantiate a FHIRClient"""
return FHIRClient(settings={
'app_id': app_id,
'api_base': url_fhir,
})
def extract_patients(
url_fhir: str,
search_struct: dict,
) -> typing.Generator[OrderedDict]:
"""Generate patient records from a FHIR endpoint."""
_client = _create_fhir_client(url_fhir)
#TODO: Search shit
yield OrderedDict([
('name', patientname),
('dob', isodate),
('address', blah),
])
|
Put up a structure to extraction library.
Do this so we have a skeleton to fill in."""
### CODE OWNERS: Shea Parkes
### OBJECTIVE:
Extraction methods for relevant items.
### DEVELOPER NOTES:
<none>
"""
import typing
from collections import OrderedDict
from fhirclient.client import FHIRClient
# =============================================================================
# LIBRARIES, LOCATIONS, LITERALS, ETC. GO ABOVE HERE
# =============================================================================
def _create_fhir_client(
url_fhir: str,
*,
app_id: str='prm_analytics'
) -> FHIRClient:
"""Instantiate a FHIRClient"""
return FHIRClient(settings={
'app_id': app_id,
'api_base': url_fhir,
})
def extract_patients(
url_fhir: str,
search_struct: dict,
) -> typing.Generator[OrderedDict]:
"""Generate patient records from a FHIR endpoint."""
_client = _create_fhir_client(url_fhir)
#TODO: Search shit
yield OrderedDict([
('name', patientname),
('dob', isodate),
('address', blah),
])
|
<commit_before><commit_msg>Put up a structure to extraction library.
Do this so we have a skeleton to fill in.<commit_after>"""
### CODE OWNERS: Shea Parkes
### OBJECTIVE:
Extraction methods for relevant items.
### DEVELOPER NOTES:
<none>
"""
import typing
from collections import OrderedDict
from fhirclient.client import FHIRClient
# =============================================================================
# LIBRARIES, LOCATIONS, LITERALS, ETC. GO ABOVE HERE
# =============================================================================
def _create_fhir_client(
url_fhir: str,
*,
app_id: str='prm_analytics'
) -> FHIRClient:
"""Instantiate a FHIRClient"""
return FHIRClient(settings={
'app_id': app_id,
'api_base': url_fhir,
})
def extract_patients(
url_fhir: str,
search_struct: dict,
) -> typing.Generator[OrderedDict]:
"""Generate patient records from a FHIR endpoint."""
_client = _create_fhir_client(url_fhir)
#TODO: Search shit
yield OrderedDict([
('name', patientname),
('dob', isodate),
('address', blah),
])
|
|
847f38e75e4ec79dbdd10a9627ec6b5a15ba2e41
|
tests/seat_map_test.py
|
tests/seat_map_test.py
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicSeatMap(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
smap = self.sds.seat_map('JFK', 'LAX', tomorrow, 'AA', 1)
f = open('smap-args.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
def test_basic_request(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
tomorrow_str = tomorrow.strftime('%Y-%m-%d')
options = {
"EnhancedSeatMapRQ": {
"SeatMapQueryEnhanced": {
"RequestType": "Payload",
"Flight": {
"destination": "EZE",
"origin": "DFW",
"DepartureDate": {
"content": "2016-03-20"
},
"Marketing": [{
"carrier": "AA",
"content": "997"
}]
}
}
}
}
smap = self.sds.seat_map_opts(options)
f = open('smap-out.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
if __name__ == '__main__':
unittest.main()
|
Add tests for seat map
|
Add tests for seat map
|
Python
|
mit
|
Jamil/sabre_dev_studio
|
Add tests for seat map
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicSeatMap(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
smap = self.sds.seat_map('JFK', 'LAX', tomorrow, 'AA', 1)
f = open('smap-args.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
def test_basic_request(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
tomorrow_str = tomorrow.strftime('%Y-%m-%d')
options = {
"EnhancedSeatMapRQ": {
"SeatMapQueryEnhanced": {
"RequestType": "Payload",
"Flight": {
"destination": "EZE",
"origin": "DFW",
"DepartureDate": {
"content": "2016-03-20"
},
"Marketing": [{
"carrier": "AA",
"content": "997"
}]
}
}
}
}
smap = self.sds.seat_map_opts(options)
f = open('smap-out.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for seat map<commit_after>
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicSeatMap(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
smap = self.sds.seat_map('JFK', 'LAX', tomorrow, 'AA', 1)
f = open('smap-args.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
def test_basic_request(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
tomorrow_str = tomorrow.strftime('%Y-%m-%d')
options = {
"EnhancedSeatMapRQ": {
"SeatMapQueryEnhanced": {
"RequestType": "Payload",
"Flight": {
"destination": "EZE",
"origin": "DFW",
"DepartureDate": {
"content": "2016-03-20"
},
"Marketing": [{
"carrier": "AA",
"content": "997"
}]
}
}
}
}
smap = self.sds.seat_map_opts(options)
f = open('smap-out.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
if __name__ == '__main__':
unittest.main()
|
Add tests for seat mapimport unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicSeatMap(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
smap = self.sds.seat_map('JFK', 'LAX', tomorrow, 'AA', 1)
f = open('smap-args.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
def test_basic_request(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
tomorrow_str = tomorrow.strftime('%Y-%m-%d')
options = {
"EnhancedSeatMapRQ": {
"SeatMapQueryEnhanced": {
"RequestType": "Payload",
"Flight": {
"destination": "EZE",
"origin": "DFW",
"DepartureDate": {
"content": "2016-03-20"
},
"Marketing": [{
"carrier": "AA",
"content": "997"
}]
}
}
}
}
smap = self.sds.seat_map_opts(options)
f = open('smap-out.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for seat map<commit_after>import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicSeatMap(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
smap = self.sds.seat_map('JFK', 'LAX', tomorrow, 'AA', 1)
f = open('smap-args.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
def test_basic_request(self):
now = datetime.datetime.now()
tomorrow = now + datetime.timedelta(days=1)
tomorrow_str = tomorrow.strftime('%Y-%m-%d')
options = {
"EnhancedSeatMapRQ": {
"SeatMapQueryEnhanced": {
"RequestType": "Payload",
"Flight": {
"destination": "EZE",
"origin": "DFW",
"DepartureDate": {
"content": "2016-03-20"
},
"Marketing": [{
"carrier": "AA",
"content": "997"
}]
}
}
}
}
smap = self.sds.seat_map_opts(options)
f = open('smap-out.txt', 'w')
f.write(json.dumps(smap._asdict()))
f.close()
self.assertIsNotNone(smap)
if __name__ == '__main__':
unittest.main()
|
|
1455d5109efb64aa1c2579a97c0d84a60da22708
|
EDMScripts/test_rotator.py
|
EDMScripts/test_rotator.py
|
# Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
r = Random()
def EDMGo():
# loop and take data
blockIndex = 0
maxBlockIndex = 100000
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# randomise polarization
polAngle = 360.0 * r.NextDouble()
hc.SetPolarizerAngle(polAngle)
blockIndex = blockIndex + 1
def run_script():
EDMGo()
|
Add a script to test the Thorlabs polarization rotator.
|
Add a script to test the Thorlabs polarization rotator.
|
Python
|
mit
|
ColdMatter/EDMSuite,jstammers/EDMSuite,jstammers/EDMSuite,jstammers/EDMSuite,ColdMatter/EDMSuite,ColdMatter/EDMSuite,Stok/EDMSuite,Stok/EDMSuite,jstammers/EDMSuite,jstammers/EDMSuite,ColdMatter/EDMSuite
|
Add a script to test the Thorlabs polarization rotator.
|
# Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
r = Random()
def EDMGo():
# loop and take data
blockIndex = 0
maxBlockIndex = 100000
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# randomise polarization
polAngle = 360.0 * r.NextDouble()
hc.SetPolarizerAngle(polAngle)
blockIndex = blockIndex + 1
def run_script():
EDMGo()
|
<commit_before><commit_msg>Add a script to test the Thorlabs polarization rotator.<commit_after>
|
# Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
r = Random()
def EDMGo():
# loop and take data
blockIndex = 0
maxBlockIndex = 100000
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# randomise polarization
polAngle = 360.0 * r.NextDouble()
hc.SetPolarizerAngle(polAngle)
blockIndex = blockIndex + 1
def run_script():
EDMGo()
|
Add a script to test the Thorlabs polarization rotator.# Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
r = Random()
def EDMGo():
# loop and take data
blockIndex = 0
maxBlockIndex = 100000
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# randomise polarization
polAngle = 360.0 * r.NextDouble()
hc.SetPolarizerAngle(polAngle)
blockIndex = blockIndex + 1
def run_script():
EDMGo()
|
<commit_before><commit_msg>Add a script to test the Thorlabs polarization rotator.<commit_after># Import a whole load of stuff
from System.IO import *
from System.Drawing import *
from System.Runtime.Remoting import *
from System.Threading import *
from System.Windows.Forms import *
from System.Xml.Serialization import *
from System import *
from Analysis.EDM import *
from DAQ.Environment import *
from EDMConfig import *
r = Random()
def EDMGo():
# loop and take data
blockIndex = 0
maxBlockIndex = 100000
while blockIndex < maxBlockIndex:
print("Acquiring block " + str(blockIndex) + " ...")
# randomise polarization
polAngle = 360.0 * r.NextDouble()
hc.SetPolarizerAngle(polAngle)
blockIndex = blockIndex + 1
def run_script():
EDMGo()
|
|
8b4e49f5aae691a5b3f3b77190356f70f9f23bb8
|
src/project/word2vec_corpus.py
|
src/project/word2vec_corpus.py
|
import sys
from os.path import isdir, isfile
from corpus import Corpus
class W2VCorpus(Corpus):
def __init__(self, dict_loc, vec_loc):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
# Set up for Word-to-Vec
return
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = W2VCorpus(sys.argv[2], sys.argv[3])
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
Add basic framework for W2V
|
Add basic framework for W2V
|
Python
|
mit
|
PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project
|
Add basic framework for W2V
|
import sys
from os.path import isdir, isfile
from corpus import Corpus
class W2VCorpus(Corpus):
def __init__(self, dict_loc, vec_loc):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
# Set up for Word-to-Vec
return
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = W2VCorpus(sys.argv[2], sys.argv[3])
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add basic framework for W2V<commit_after>
|
import sys
from os.path import isdir, isfile
from corpus import Corpus
class W2VCorpus(Corpus):
def __init__(self, dict_loc, vec_loc):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
# Set up for Word-to-Vec
return
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = W2VCorpus(sys.argv[2], sys.argv[3])
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
Add basic framework for W2Vimport sys
from os.path import isdir, isfile
from corpus import Corpus
class W2VCorpus(Corpus):
def __init__(self, dict_loc, vec_loc):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
# Set up for Word-to-Vec
return
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = W2VCorpus(sys.argv[2], sys.argv[3])
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add basic framework for W2V<commit_after>import sys
from os.path import isdir, isfile
from corpus import Corpus
class W2VCorpus(Corpus):
def __init__(self, dict_loc, vec_loc):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
# Set up for Word-to-Vec
return
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = W2VCorpus(sys.argv[2], sys.argv[3])
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
|
8a3b9c2b3a25bda85cf3d961758a986dbdc19084
|
tests/test_advection.py
|
tests/test_advection.py
|
from parcels import Grid, Particle, JITParticle, AdvectionRK4, Geographic, GeographicPolar
import numpy as np
import pytest
from datetime import timedelta as delta
ptype = {'scipy': Particle, 'jit': JITParticle}
@pytest.fixture
def lon(xdim=200):
return np.linspace(-170, 170, xdim, dtype=np.float32)
@pytest.fixture
def lat(ydim=100):
return np.linspace(-80, 80, ydim, dtype=np.float32)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_zonal(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.ones((lon.size, lat.size), dtype=np.float32)
V = np.zeros((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.zeros(npart, dtype=np.float32) + 20.,
lat=np.linspace(0, 80, npart, dtype=np.float32))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert (np.diff(np.array([p.lon for p in pset])) > 1.e-4).all()
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_meridional(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.zeros((lon.size, lat.size), dtype=np.float32)
V = np.ones((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(-60, 60, npart, dtype=np.float32),
lat=np.linspace(0, 30, npart, dtype=np.float32))
delta_lat = np.diff(np.array([p.lat for p in pset]))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert np.allclose(np.diff(np.array([p.lat for p in pset])), delta_lat, rtol=1.e-4)
|
Add a set of advection tests for meridional and zonal advection
|
Tests: Add a set of advection tests for meridional and zonal advection
|
Python
|
mit
|
OceanPARCELS/parcels,OceanPARCELS/parcels
|
Tests: Add a set of advection tests for meridional and zonal advection
|
from parcels import Grid, Particle, JITParticle, AdvectionRK4, Geographic, GeographicPolar
import numpy as np
import pytest
from datetime import timedelta as delta
ptype = {'scipy': Particle, 'jit': JITParticle}
@pytest.fixture
def lon(xdim=200):
return np.linspace(-170, 170, xdim, dtype=np.float32)
@pytest.fixture
def lat(ydim=100):
return np.linspace(-80, 80, ydim, dtype=np.float32)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_zonal(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.ones((lon.size, lat.size), dtype=np.float32)
V = np.zeros((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.zeros(npart, dtype=np.float32) + 20.,
lat=np.linspace(0, 80, npart, dtype=np.float32))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert (np.diff(np.array([p.lon for p in pset])) > 1.e-4).all()
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_meridional(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.zeros((lon.size, lat.size), dtype=np.float32)
V = np.ones((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(-60, 60, npart, dtype=np.float32),
lat=np.linspace(0, 30, npart, dtype=np.float32))
delta_lat = np.diff(np.array([p.lat for p in pset]))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert np.allclose(np.diff(np.array([p.lat for p in pset])), delta_lat, rtol=1.e-4)
|
<commit_before><commit_msg>Tests: Add a set of advection tests for meridional and zonal advection<commit_after>
|
from parcels import Grid, Particle, JITParticle, AdvectionRK4, Geographic, GeographicPolar
import numpy as np
import pytest
from datetime import timedelta as delta
ptype = {'scipy': Particle, 'jit': JITParticle}
@pytest.fixture
def lon(xdim=200):
return np.linspace(-170, 170, xdim, dtype=np.float32)
@pytest.fixture
def lat(ydim=100):
return np.linspace(-80, 80, ydim, dtype=np.float32)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_zonal(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.ones((lon.size, lat.size), dtype=np.float32)
V = np.zeros((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.zeros(npart, dtype=np.float32) + 20.,
lat=np.linspace(0, 80, npart, dtype=np.float32))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert (np.diff(np.array([p.lon for p in pset])) > 1.e-4).all()
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_meridional(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.zeros((lon.size, lat.size), dtype=np.float32)
V = np.ones((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(-60, 60, npart, dtype=np.float32),
lat=np.linspace(0, 30, npart, dtype=np.float32))
delta_lat = np.diff(np.array([p.lat for p in pset]))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert np.allclose(np.diff(np.array([p.lat for p in pset])), delta_lat, rtol=1.e-4)
|
Tests: Add a set of advection tests for meridional and zonal advectionfrom parcels import Grid, Particle, JITParticle, AdvectionRK4, Geographic, GeographicPolar
import numpy as np
import pytest
from datetime import timedelta as delta
ptype = {'scipy': Particle, 'jit': JITParticle}
@pytest.fixture
def lon(xdim=200):
return np.linspace(-170, 170, xdim, dtype=np.float32)
@pytest.fixture
def lat(ydim=100):
return np.linspace(-80, 80, ydim, dtype=np.float32)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_zonal(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.ones((lon.size, lat.size), dtype=np.float32)
V = np.zeros((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.zeros(npart, dtype=np.float32) + 20.,
lat=np.linspace(0, 80, npart, dtype=np.float32))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert (np.diff(np.array([p.lon for p in pset])) > 1.e-4).all()
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_meridional(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.zeros((lon.size, lat.size), dtype=np.float32)
V = np.ones((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(-60, 60, npart, dtype=np.float32),
lat=np.linspace(0, 30, npart, dtype=np.float32))
delta_lat = np.diff(np.array([p.lat for p in pset]))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert np.allclose(np.diff(np.array([p.lat for p in pset])), delta_lat, rtol=1.e-4)
|
<commit_before><commit_msg>Tests: Add a set of advection tests for meridional and zonal advection<commit_after>from parcels import Grid, Particle, JITParticle, AdvectionRK4, Geographic, GeographicPolar
import numpy as np
import pytest
from datetime import timedelta as delta
ptype = {'scipy': Particle, 'jit': JITParticle}
@pytest.fixture
def lon(xdim=200):
return np.linspace(-170, 170, xdim, dtype=np.float32)
@pytest.fixture
def lat(ydim=100):
return np.linspace(-80, 80, ydim, dtype=np.float32)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_zonal(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.ones((lon.size, lat.size), dtype=np.float32)
V = np.zeros((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.zeros(npart, dtype=np.float32) + 20.,
lat=np.linspace(0, 80, npart, dtype=np.float32))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert (np.diff(np.array([p.lon for p in pset])) > 1.e-4).all()
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_advection_meridional(lon, lat, mode, npart=10):
""" Particles at high latitude move geographically faster due to
the pole correction in `GeographicPolar`.
"""
U = np.zeros((lon.size, lat.size), dtype=np.float32)
V = np.ones((lon.size, lat.size), dtype=np.float32)
grid = Grid.from_data(U, lon, lat, V, lon, lat,
u_units=GeographicPolar(), v_units=Geographic())
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(-60, 60, npart, dtype=np.float32),
lat=np.linspace(0, 30, npart, dtype=np.float32))
delta_lat = np.diff(np.array([p.lat for p in pset]))
pset.execute(AdvectionRK4, endtime=delta(hours=2), dt=delta(seconds=30))
assert np.allclose(np.diff(np.array([p.lat for p in pset])), delta_lat, rtol=1.e-4)
|
|
ff0fa3d3aaa7de147571330a16895befb272440a
|
mongoshell.py
|
mongoshell.py
|
#! /usr/bin/env python
from os import environ
from subprocess import check_call
from urlparse import urlparse
if 'MONGOLAB_URI' in environ:
print 'Using', environ['MONGOLAB_URI']
url = urlparse(environ['MONGOLAB_URI'])
cmd = 'mongo -u %s -p %s %s:%d/%s' % (url.username,
url.password,
url.hostname,
url.port,
url.path[1:])
else:
cmd = 'mongo TaarifaAPI'
check_call(cmd, shell=True)
|
Add script to run a mongo shell in the MongoLab environment
|
Add script to run a mongo shell in the MongoLab environment
|
Python
|
bsd-3-clause
|
taarifa/taarifa_backend,taarifa/taarifa_backend,taarifa/taarifa_backend,taarifa/taarifa_backend
|
Add script to run a mongo shell in the MongoLab environment
|
#! /usr/bin/env python
from os import environ
from subprocess import check_call
from urlparse import urlparse
if 'MONGOLAB_URI' in environ:
print 'Using', environ['MONGOLAB_URI']
url = urlparse(environ['MONGOLAB_URI'])
cmd = 'mongo -u %s -p %s %s:%d/%s' % (url.username,
url.password,
url.hostname,
url.port,
url.path[1:])
else:
cmd = 'mongo TaarifaAPI'
check_call(cmd, shell=True)
|
<commit_before><commit_msg>Add script to run a mongo shell in the MongoLab environment<commit_after>
|
#! /usr/bin/env python
from os import environ
from subprocess import check_call
from urlparse import urlparse
if 'MONGOLAB_URI' in environ:
print 'Using', environ['MONGOLAB_URI']
url = urlparse(environ['MONGOLAB_URI'])
cmd = 'mongo -u %s -p %s %s:%d/%s' % (url.username,
url.password,
url.hostname,
url.port,
url.path[1:])
else:
cmd = 'mongo TaarifaAPI'
check_call(cmd, shell=True)
|
Add script to run a mongo shell in the MongoLab environment#! /usr/bin/env python
from os import environ
from subprocess import check_call
from urlparse import urlparse
if 'MONGOLAB_URI' in environ:
print 'Using', environ['MONGOLAB_URI']
url = urlparse(environ['MONGOLAB_URI'])
cmd = 'mongo -u %s -p %s %s:%d/%s' % (url.username,
url.password,
url.hostname,
url.port,
url.path[1:])
else:
cmd = 'mongo TaarifaAPI'
check_call(cmd, shell=True)
|
<commit_before><commit_msg>Add script to run a mongo shell in the MongoLab environment<commit_after>#! /usr/bin/env python
from os import environ
from subprocess import check_call
from urlparse import urlparse
if 'MONGOLAB_URI' in environ:
print 'Using', environ['MONGOLAB_URI']
url = urlparse(environ['MONGOLAB_URI'])
cmd = 'mongo -u %s -p %s %s:%d/%s' % (url.username,
url.password,
url.hostname,
url.port,
url.path[1:])
else:
cmd = 'mongo TaarifaAPI'
check_call(cmd, shell=True)
|
|
c8f3e1149d8fa7ed4e402fc655cb13758f7f28c7
|
services/comprehension/main-api/comprehension/management/commands/pre_filter_responses.py
|
services/comprehension/main-api/comprehension/management/commands/pre_filter_responses.py
|
import csv
from django.core.management.base import BaseCommand
from ...views.plagiarism import PlagiarismFeedbackView
class Command(BaseCommand):
help = 'Parses a CSV for feedback records'
def add_arguments(self, parser):
parser.add_argument('passage_source', metavar='PASSAGE_SOURCE',
help='The path to the file with the passage')
parser.add_argument('csv_input', metavar='CSV_PATH',
help='The path to the input CSV file')
def handle(self, *args, **kwargs):
passage_source = kwargs['passage_source']
csv_input = kwargs['csv_input']
passage_text = self._retrieve_passage(passage_source)
with open(csv_input, 'r') as csv_in,\
open(f'filtered_{csv_input}', 'w') as csv_out:
reader = csv.reader(csv_in)
writer = csv.writer(csv_out)
for row in reader:
entry = row[0]
if PlagiarismFeedbackView._check_is_plagiarism(
passage_text,
entry
):
continue
writer.writerow(row)
def _retrieve_passage(self, passage_path):
with open(passage_path, 'r') as f:
return f.read()
|
Add a command to filter responses for plagiarism
|
Add a command to filter responses for plagiarism
|
Python
|
agpl-3.0
|
empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core,empirical-org/Empirical-Core
|
Add a command to filter responses for plagiarism
|
import csv
from django.core.management.base import BaseCommand
from ...views.plagiarism import PlagiarismFeedbackView
class Command(BaseCommand):
help = 'Parses a CSV for feedback records'
def add_arguments(self, parser):
parser.add_argument('passage_source', metavar='PASSAGE_SOURCE',
help='The path to the file with the passage')
parser.add_argument('csv_input', metavar='CSV_PATH',
help='The path to the input CSV file')
def handle(self, *args, **kwargs):
passage_source = kwargs['passage_source']
csv_input = kwargs['csv_input']
passage_text = self._retrieve_passage(passage_source)
with open(csv_input, 'r') as csv_in,\
open(f'filtered_{csv_input}', 'w') as csv_out:
reader = csv.reader(csv_in)
writer = csv.writer(csv_out)
for row in reader:
entry = row[0]
if PlagiarismFeedbackView._check_is_plagiarism(
passage_text,
entry
):
continue
writer.writerow(row)
def _retrieve_passage(self, passage_path):
with open(passage_path, 'r') as f:
return f.read()
|
<commit_before><commit_msg>Add a command to filter responses for plagiarism<commit_after>
|
import csv
from django.core.management.base import BaseCommand
from ...views.plagiarism import PlagiarismFeedbackView
class Command(BaseCommand):
help = 'Parses a CSV for feedback records'
def add_arguments(self, parser):
parser.add_argument('passage_source', metavar='PASSAGE_SOURCE',
help='The path to the file with the passage')
parser.add_argument('csv_input', metavar='CSV_PATH',
help='The path to the input CSV file')
def handle(self, *args, **kwargs):
passage_source = kwargs['passage_source']
csv_input = kwargs['csv_input']
passage_text = self._retrieve_passage(passage_source)
with open(csv_input, 'r') as csv_in,\
open(f'filtered_{csv_input}', 'w') as csv_out:
reader = csv.reader(csv_in)
writer = csv.writer(csv_out)
for row in reader:
entry = row[0]
if PlagiarismFeedbackView._check_is_plagiarism(
passage_text,
entry
):
continue
writer.writerow(row)
def _retrieve_passage(self, passage_path):
with open(passage_path, 'r') as f:
return f.read()
|
Add a command to filter responses for plagiarismimport csv
from django.core.management.base import BaseCommand
from ...views.plagiarism import PlagiarismFeedbackView
class Command(BaseCommand):
help = 'Parses a CSV for feedback records'
def add_arguments(self, parser):
parser.add_argument('passage_source', metavar='PASSAGE_SOURCE',
help='The path to the file with the passage')
parser.add_argument('csv_input', metavar='CSV_PATH',
help='The path to the input CSV file')
def handle(self, *args, **kwargs):
passage_source = kwargs['passage_source']
csv_input = kwargs['csv_input']
passage_text = self._retrieve_passage(passage_source)
with open(csv_input, 'r') as csv_in,\
open(f'filtered_{csv_input}', 'w') as csv_out:
reader = csv.reader(csv_in)
writer = csv.writer(csv_out)
for row in reader:
entry = row[0]
if PlagiarismFeedbackView._check_is_plagiarism(
passage_text,
entry
):
continue
writer.writerow(row)
def _retrieve_passage(self, passage_path):
with open(passage_path, 'r') as f:
return f.read()
|
<commit_before><commit_msg>Add a command to filter responses for plagiarism<commit_after>import csv
from django.core.management.base import BaseCommand
from ...views.plagiarism import PlagiarismFeedbackView
class Command(BaseCommand):
help = 'Parses a CSV for feedback records'
def add_arguments(self, parser):
parser.add_argument('passage_source', metavar='PASSAGE_SOURCE',
help='The path to the file with the passage')
parser.add_argument('csv_input', metavar='CSV_PATH',
help='The path to the input CSV file')
def handle(self, *args, **kwargs):
passage_source = kwargs['passage_source']
csv_input = kwargs['csv_input']
passage_text = self._retrieve_passage(passage_source)
with open(csv_input, 'r') as csv_in,\
open(f'filtered_{csv_input}', 'w') as csv_out:
reader = csv.reader(csv_in)
writer = csv.writer(csv_out)
for row in reader:
entry = row[0]
if PlagiarismFeedbackView._check_is_plagiarism(
passage_text,
entry
):
continue
writer.writerow(row)
def _retrieve_passage(self, passage_path):
with open(passage_path, 'r') as f:
return f.read()
|
|
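Since the file sits under management/commands, the command would presumably be run through manage.py. The output name is built by prefixing the whole input path with 'filtered_', which only behaves nicely for bare filenames; a hypothetical invocation and a path-safe variant of that one line:

# Hypothetical invocation (file names are placeholders):
#   python manage.py pre_filter_responses passage.txt responses.csv
# -> writes the surviving rows to filtered_responses.csv
import os

def filtered_path(csv_input):
    # keep the directory, prefix only the file name
    head, tail = os.path.split(csv_input)
    return os.path.join(head, 'filtered_' + tail)

assert filtered_path('data/responses.csv') == os.path.join('data', 'filtered_responses.csv')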
60e31ba20a596346031396fcd34796dc96f9ffdf
|
kolibri/content/migrations/0009_auto_20180410_1139.py
|
kolibri/content/migrations/0009_auto_20180410_1139.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-10 18:39
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('content', '0008_contentnode_coach_content'),
]
operations = [
migrations.AlterField(
model_name='file',
name='preset',
field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150),
),
migrations.AlterField(
model_name='localfile',
name='extension',
field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40),
),
]
|
Add epub to file preset and localfile extension
|
Add epub to file preset and localfile extension
|
Python
|
mit
|
lyw07/kolibri,DXCanas/kolibri,mrpau/kolibri,benjaoming/kolibri,learningequality/kolibri,indirectlylit/kolibri,benjaoming/kolibri,learningequality/kolibri,jonboiser/kolibri,learningequality/kolibri,indirectlylit/kolibri,jonboiser/kolibri,indirectlylit/kolibri,mrpau/kolibri,DXCanas/kolibri,indirectlylit/kolibri,jonboiser/kolibri,mrpau/kolibri,jonboiser/kolibri,benjaoming/kolibri,mrpau/kolibri,lyw07/kolibri,lyw07/kolibri,benjaoming/kolibri,lyw07/kolibri,DXCanas/kolibri,DXCanas/kolibri,learningequality/kolibri
|
Add epub to file preset and localfile extension
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-10 18:39
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('content', '0008_contentnode_coach_content'),
]
operations = [
migrations.AlterField(
model_name='file',
name='preset',
field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150),
),
migrations.AlterField(
model_name='localfile',
name='extension',
field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40),
),
]
|
<commit_before><commit_msg>Add epub to file preset and localfile extension<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-10 18:39
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('content', '0008_contentnode_coach_content'),
]
operations = [
migrations.AlterField(
model_name='file',
name='preset',
field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150),
),
migrations.AlterField(
model_name='localfile',
name='extension',
field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40),
),
]
|
Add epub to file preset and localfile extension# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-10 18:39
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('content', '0008_contentnode_coach_content'),
]
operations = [
migrations.AlterField(
model_name='file',
name='preset',
field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150),
),
migrations.AlterField(
model_name='localfile',
name='extension',
field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40),
),
]
|
<commit_before><commit_msg>Add epub to file preset and localfile extension<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-10 18:39
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('content', '0008_contentnode_coach_content'),
]
operations = [
migrations.AlterField(
model_name='file',
name='preset',
field=models.CharField(blank=True, choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('epub', 'ePub Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150),
),
migrations.AlterField(
model_name='localfile',
name='extension',
field=models.CharField(blank=True, choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), ('png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip'), ('epub', 'ePub Document')], max_length=40),
),
]
|
|
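A side note for readers less familiar with Django: choices on a CharField are enforced in Python (forms and full_clean), not by the database, so an AlterField like this records the new model state without emitting schema SQL. A generic, purely illustrative sketch of the idea:

# Sketch only -- these names are not Kolibri's real constants.
EPUB = 'epub'
EXTENSION_CHOICES = [
    ('pdf', 'PDF Document'),
    (EPUB, 'ePub Document'),  # the value this migration adds
]
# Validation is essentially a lookup over the declared choices:
assert EPUB in dict(EXTENSION_CHOICES)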
0add6b09c268b87a9f7007f80934418cbdee6d2c
|
ci/generate_universe_resource.py
|
ci/generate_universe_resource.py
|
#!/usr/bin/env python3
# Usage example:
# ./generate_universe_resource.py dcos-core-cli 1.12-patch.2
import json
import sys
import hashlib as hash
import requests
plugin_name = sys.argv[1]
plugin_version = sys.argv[2]
resource = {
"cli": {
"binaries": {}
}
}
for platform in ['linux', 'darwin', 'windows']:
url = "https://downloads.dcos.io/cli/releases/plugins/{}/{}/x86-64/{}-{}.zip".format(
plugin_name, platform, plugin_name, plugin_version)
sha = hash.sha256()
r = requests.get(url, stream=True)
for chunk in r.iter_content(1024):
sha.update(chunk)
resource['cli']['binaries'][platform] = {
'x86-64': {
'kind': 'zip',
'url': url,
'contentHash': [
{
'algo': 'sha256',
'value': sha.hexdigest()
}
]
}
}
json.dump(resource, sys.stdout, indent=4)
|
Add a script to generate a universe resource
|
Add a script to generate a universe resource
The generate_universe_resource.py script can be used to generate
resource files for universe. It takes as arguments a plugin name and a

version and uses them to download the plugins from their canonical URLs
and generate sha256 checksums accordingly.
https://jira.mesosphere.com/browse/DCOS_OSS-1797
|
Python
|
apache-2.0
|
kensipe/dcos-cli,kensipe/dcos-cli,dcos/dcos-cli,dcos/dcos-cli,dcos/dcos-cli,dcos/dcos-cli,dcos/dcos-cli,kensipe/dcos-cli,kensipe/dcos-cli,kensipe/dcos-cli
|
Add a script to generate a universe resource
The generate_universe_resource.py script can be used to generate
resource files for universe. It takes as arguments a plugin name and a
version and uses them to download the plugins from their canonical URLs
and generate sha256 checksums accordingly.
https://jira.mesosphere.com/browse/DCOS_OSS-1797
|
#!/usr/bin/env python3
# Usage example:
# ./generate_universe_resource.py dcos-core-cli 1.12-patch.2
import json
import sys
import hashlib as hash
import requests
plugin_name = sys.argv[1]
plugin_version = sys.argv[2]
resource = {
"cli": {
"binaries": {}
}
}
for platform in ['linux', 'darwin', 'windows']:
url = "https://downloads.dcos.io/cli/releases/plugins/{}/{}/x86-64/{}-{}.zip".format(
plugin_name, platform, plugin_name, plugin_version)
sha = hash.sha256()
r = requests.get(url, stream=True)
for chunk in r.iter_content(1024):
sha.update(chunk)
resource['cli']['binaries'][platform] = {
'x86-64': {
'kind': 'zip',
'url': url,
'contentHash': [
{
'algo': 'sha256',
'value': sha.hexdigest()
}
]
}
}
json.dump(resource, sys.stdout, indent=4)
|
<commit_before><commit_msg>Add a script to generate a universe resource
The generate_universe_resource.py script can be used to generate
resource files for universe. It takes as arguments a plugin name and a
version and uses them to download the plugins from their canonical URLs
and generate sha256 checksums accordingly.
https://jira.mesosphere.com/browse/DCOS_OSS-1797<commit_after>
|
#!/usr/bin/env python3
# Usage example:
# ./generate_universe_resource.py dcos-core-cli 1.12-patch.2
import json
import sys
import hashlib as hash
import requests
plugin_name = sys.argv[1]
plugin_version = sys.argv[2]
resource = {
"cli": {
"binaries": {}
}
}
for platform in ['linux', 'darwin', 'windows']:
url = "https://downloads.dcos.io/cli/releases/plugins/{}/{}/x86-64/{}-{}.zip".format(
plugin_name, platform, plugin_name, plugin_version)
sha = hash.sha256()
r = requests.get(url, stream=True)
for chunk in r.iter_content(1024):
sha.update(chunk)
resource['cli']['binaries'][platform] = {
'x86-64': {
'kind': 'zip',
'url': url,
'contentHash': [
{
'algo': 'sha256',
'value': sha.hexdigest()
}
]
}
}
json.dump(resource, sys.stdout, indent=4)
|
Add a script to generate a universe resource
The generate_universe_resource.py script can be used to generate
resource files for universe. It takes as arguments a plugin name and a
version and uses them to download the plugins from their canonical URLs
and generate sha256 checksums accordingly.
https://jira.mesosphere.com/browse/DCOS_OSS-1797#!/usr/bin/env python3
# Usage example:
# ./generate_universe_resource.py dcos-core-cli 1.12-patch.2
import json
import sys
import hashlib as hash
import requests
plugin_name = sys.argv[1]
plugin_version = sys.argv[2]
resource = {
"cli": {
"binaries": {}
}
}
for platform in ['linux', 'darwin', 'windows']:
url = "https://downloads.dcos.io/cli/releases/plugins/{}/{}/x86-64/{}-{}.zip".format(
plugin_name, platform, plugin_name, plugin_version)
sha = hash.sha256()
r = requests.get(url, stream=True)
for chunk in r.iter_content(1024):
sha.update(chunk)
resource['cli']['binaries'][platform] = {
'x86-64': {
'kind': 'zip',
'url': url,
'contentHash': [
{
'algo': 'sha256',
'value': sha.hexdigest()
}
]
}
}
json.dump(resource, sys.stdout, indent=4)
|
<commit_before><commit_msg>Add a script to generate a universe resource
The generate_universe_resource.py script can be used to generate
resource files for universe. It takes as arguments a plugin name and a
version and uses them to download the plugins from their canonical URLs
and generate sha256 checksums accordingly.
https://jira.mesosphere.com/browse/DCOS_OSS-1797<commit_after>#!/usr/bin/env python3
# Usage example:
# ./generate_universe_resource.py dcos-core-cli 1.12-patch.2
import json
import sys
import hashlib as hash
import requests
plugin_name = sys.argv[1]
plugin_version = sys.argv[2]
resource = {
"cli": {
"binaries": {}
}
}
for platform in ['linux', 'darwin', 'windows']:
url = "https://downloads.dcos.io/cli/releases/plugins/{}/{}/x86-64/{}-{}.zip".format(
plugin_name, platform, plugin_name, plugin_version)
sha = hash.sha256()
r = requests.get(url, stream=True)
for chunk in r.iter_content(1024):
sha.update(chunk)
resource['cli']['binaries'][platform] = {
'x86-64': {
'kind': 'zip',
'url': url,
'contentHash': [
{
'algo': 'sha256',
'value': sha.hexdigest()
}
]
}
}
json.dump(resource, sys.stdout, indent=4)
|
|
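For context, a hypothetical run and the shape of the document the script prints on stdout (hash values are placeholders, not real checksums):

# ./generate_universe_resource.py dcos-core-cli 1.12-patch.2 > resource.json
#
# The output mirrors the `resource` dict built above:
# {
#     "cli": {
#         "binaries": {
#             "linux":   {"x86-64": {"kind": "zip",
#                                    "url": "https://downloads.dcos.io/cli/releases/plugins/dcos-core-cli/linux/x86-64/dcos-core-cli-1.12-patch.2.zip",
#                                    "contentHash": [{"algo": "sha256", "value": "<sha256 of the zip>"}]}},
#             "darwin":  {"x86-64": {...}},
#             "windows": {"x86-64": {...}}
#         }
#     }
# }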
5b1a84a73abc6fc95d453cfbb78cf58bfc9c8310
|
setup.py
|
setup.py
|
from __future__ import print_function
from os import sys, path
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-texturefeatures',
version='1.0.0',
author='Insight Software Consortium',
author_email='community@itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTextureFeatures',
description=r'An ITK module to compute N-dimension grayscale texture feature images',
long_description='ITK is an open-source, cross-platform library that '
'provides developers with an extensive suite of software '
'tools for image analysis. Developed through extreme '
'programming methodologies, ITK employs leading-edge '
'algorithms for registering and segmenting '
'multidimensional scientific images.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit glcm texture features image imaging',
url=r'https://itk.org/',
install_requires=[
r'itk'
]
)
|
Add initial Python package configuration
|
ENH: Add initial Python package configuration
|
Python
|
apache-2.0
|
InsightSoftwareConsortium/ITKTextureFeatures,InsightSoftwareConsortium/ITKTextureFeatures
|
ENH: Add initial Python package configuration
|
from __future__ import print_function
from os import sys, path
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-texturefeatures',
version='1.0.0',
author='Insight Software Consortium',
author_email='community@itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTextureFeatures',
description=r'An ITK module to compute N-dimension grayscale texture feature images',
long_description='ITK is an open-source, cross-platform library that '
'provides developers with an extensive suite of software '
'tools for image analysis. Developed through extreme '
'programming methodologies, ITK employs leading-edge '
'algorithms for registering and segmenting '
'multidimensional scientific images.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit glcm texture features image imaging',
url=r'https://itk.org/',
install_requires=[
r'itk'
]
)
|
<commit_before><commit_msg>ENH: Add initial Python package configuration<commit_after>
|
from __future__ import print_function
from os import sys, path
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-texturefeatures',
version='1.0.0',
author='Insight Software Consortium',
author_email='community@itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTextureFeatures',
description=r'An ITK module to compute N-dimension grayscale texture feature images',
long_description='ITK is an open-source, cross-platform library that '
'provides developers with an extensive suite of software '
'tools for image analysis. Developed through extreme '
'programming methodologies, ITK employs leading-edge '
'algorithms for registering and segmenting '
'multidimensional scientific images.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit glcm texture features image imaging',
url=r'https://itk.org/',
install_requires=[
r'itk'
]
)
|
ENH: Add initial Python package configurationfrom __future__ import print_function
from os import sys, path
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-texturefeatures',
version='1.0.0',
author='Insight Software Consortium',
author_email='community@itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTextureFeatures',
description=r'An ITK module to compute N-dimension grayscale texture feature images',
long_description='ITK is an open-source, cross-platform library that '
'provides developers with an extensive suite of software '
'tools for image analysis. Developed through extreme '
'programming methodologies, ITK employs leading-edge '
'algorithms for registering and segmenting '
'multidimensional scientific images.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit glcm texture features image imaging',
url=r'https://itk.org/',
install_requires=[
r'itk'
]
)
|
<commit_before><commit_msg>ENH: Add initial Python package configuration<commit_after>from __future__ import print_function
from os import sys, path
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-texturefeatures',
version='1.0.0',
author='Insight Software Consortium',
author_email='community@itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTextureFeatures',
description=r'An ITK module to compute N-dimension grayscale texture feature images',
long_description='ITK is an open-source, cross-platform library that '
'provides developers with an extensive suite of software '
'tools for image analysis. Developed through extreme '
'programming methodologies, ITK employs leading-edge '
'algorithms for registering and segmenting '
'multidimensional scientific images.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit glcm texture features image imaging',
url=r'https://itk.org/',
install_requires=[
r'itk'
]
)
|
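One small observation on the file above: path is imported from os but never used. The usual reason for that import in a setup.py is reading the long description from a README next to the file; a hedged sketch of that pattern (the README filename is an assumption):

from os import path

here = path.abspath(path.dirname(__file__))
readme = path.join(here, 'README.rst')  # assumed filename
if path.exists(readme):
    with open(readme) as f:
        long_description = f.read()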
|
3bf5f09f61cfe3e1bd4d8c736a014f05c1bd940e
|
tests/test_sqlcache.py
|
tests/test_sqlcache.py
|
from botbot import sqlcache
import os
from itertools import combinations
from string import ascii_letters
def get_dbpath():
return os.path.join('.', 'test.db')
def test_FileDatabase_constructor(tmpdir):
prev = tmpdir.chdir()
f = sqlcache.FileDatabase(get_dbpath())
assert f
prev.chdir()
def test_bidirectional_problem_serialization():
for probs in combinations(ascii_letters, 3):
fi = {'problems': set(probs)}
sqlcache.serialize_problems(fi)
sqlcache.decode_problems(fi)
assert fi['problems'] == set(probs)
def test_db_finders(tmpdir):
prev = tmpdir.chdir()
tmp = sqlcache.get_dbpath
sqlcache.get_dbpath = get_dbpath
assert not sqlcache.db_exists()
tmpdir.join(get_dbpath()).ensure(file=True)
assert sqlcache.db_exists()
sqlcache.get_dbpath = tmp
prev.chdir()
|
Add new tests for sqlcache.py
|
Add new tests for sqlcache.py
|
Python
|
mit
|
jackstanek/BotBot,jackstanek/BotBot
|
Add new tests for sqlcache.py
|
from botbot import sqlcache
import os
from itertools import combinations
from string import ascii_letters
def get_dbpath():
return os.path.join('.', 'test.db')
def test_FileDatabase_constructor(tmpdir):
prev = tmpdir.chdir()
f = sqlcache.FileDatabase(get_dbpath())
assert f
prev.chdir()
def test_bidirectional_problem_serialization():
for probs in combinations(ascii_letters, 3):
fi = {'problems': set(probs)}
sqlcache.serialize_problems(fi)
sqlcache.decode_problems(fi)
assert fi['problems'] == set(probs)
def test_db_finders(tmpdir):
prev = tmpdir.chdir()
tmp = sqlcache.get_dbpath
sqlcache.get_dbpath = get_dbpath
assert not sqlcache.db_exists()
tmpdir.join(get_dbpath()).ensure(file=True)
assert sqlcache.db_exists()
sqlcache.get_dbpath = tmp
prev.chdir()
|
<commit_before><commit_msg>Add new tests for sqlcache.py<commit_after>
|
from botbot import sqlcache
import os
from itertools import combinations
from string import ascii_letters
def get_dbpath():
return os.path.join('.', 'test.db')
def test_FileDatabase_constructor(tmpdir):
prev = tmpdir.chdir()
f = sqlcache.FileDatabase(get_dbpath())
assert f
prev.chdir()
def test_bidirectional_problem_serialization():
for probs in combinations(ascii_letters, 3):
fi = {'problems': set(probs)}
sqlcache.serialize_problems(fi)
sqlcache.decode_problems(fi)
assert fi['problems'] == set(probs)
def test_db_finders(tmpdir):
prev = tmpdir.chdir()
tmp = sqlcache.get_dbpath
sqlcache.get_dbpath = get_dbpath
assert not sqlcache.db_exists()
tmpdir.join(get_dbpath()).ensure(file=True)
assert sqlcache.db_exists()
sqlcache.get_dbpath = tmp
prev.chdir()
|
Add new tests for sqlcache.pyfrom botbot import sqlcache
import os
from itertools import combinations
from string import ascii_letters
def get_dbpath():
return os.path.join('.', 'test.db')
def test_FileDatabase_constructor(tmpdir):
prev = tmpdir.chdir()
f = sqlcache.FileDatabase(get_dbpath())
assert f
prev.chdir()
def test_bidirectional_problem_serialization():
for probs in combinations(ascii_letters, 3):
fi = {'problems': set(probs)}
sqlcache.serialize_problems(fi)
sqlcache.decode_problems(fi)
assert fi['problems'] == set(probs)
def test_db_finders(tmpdir):
prev = tmpdir.chdir()
tmp = sqlcache.get_dbpath
sqlcache.get_dbpath = get_dbpath
assert not sqlcache.db_exists()
tmpdir.join(get_dbpath()).ensure(file=True)
assert sqlcache.db_exists()
sqlcache.get_dbpath = tmp
prev.chdir()
|
<commit_before><commit_msg>Add new tests for sqlcache.py<commit_after>from botbot import sqlcache
import os
from itertools import combinations
from string import ascii_letters
def get_dbpath():
return os.path.join('.', 'test.db')
def test_FileDatabase_constructor(tmpdir):
prev = tmpdir.chdir()
f = sqlcache.FileDatabase(get_dbpath())
assert f
prev.chdir()
def test_bidirectional_problem_serialization():
for probs in combinations(ascii_letters, 3):
fi = {'problems': set(probs)}
sqlcache.serialize_problems(fi)
sqlcache.decode_problems(fi)
assert fi['problems'] == set(probs)
def test_db_finders(tmpdir):
prev = tmpdir.chdir()
tmp = sqlcache.get_dbpath
sqlcache.get_dbpath = get_dbpath
assert not sqlcache.db_exists()
tmpdir.join(get_dbpath()).ensure(file=True)
assert sqlcache.db_exists()
sqlcache.get_dbpath = tmp
prev.chdir()
|
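test_db_finders patches sqlcache.get_dbpath by hand and restores it afterwards; pytest's monkeypatch fixture does the same with automatic cleanup even if an assertion fails first. A sketch assuming the same module and helpers as the file above:

def test_db_finders_with_monkeypatch(tmpdir, monkeypatch):
    prev = tmpdir.chdir()
    # undone automatically when the test finishes, pass or fail
    monkeypatch.setattr(sqlcache, 'get_dbpath', get_dbpath)
    assert not sqlcache.db_exists()
    tmpdir.join(get_dbpath()).ensure(file=True)
    assert sqlcache.db_exists()
    prev.chdir()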
|
269a40987bcda02109ea9b53157056c43ff6ab58
|
demo_zinnia_ckeditor/urls.py
|
demo_zinnia_ckeditor/urls.py
|
"""Urls for the zinnia-ckeditor demo"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from django.conf.urls import patterns
from django.views.generic.base import RedirectView
from zinnia.sitemaps import TagSitemap
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', RedirectView.as_view(url='/blog/')),
url(r'^blog/', include('zinnia.urls', namespace='zinnia')),
url(r'^comments/', include('django_comments.urls')),
url(r'^ckeditor/', include('ckeditor.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
)
sitemaps = {
'tags': TagSitemap,
'blog': EntrySitemap,
'authors': AuthorSitemap,
'categories': CategorySitemap
}
urlpatterns += patterns(
'django.contrib.sitemaps.views',
url(r'^sitemap.xml$', 'index',
{'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', 'sitemap',
{'sitemaps': sitemaps}),
)
urlpatterns += patterns(
'',
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT})
)
|
Add URLs for demo project
|
Add URLs for demo project
|
Python
|
bsd-3-clause
|
django-blog-zinnia/zinnia-wysiwyg-ckeditor
|
Add URLs for demo project
|
"""Urls for the zinnia-ckeditor demo"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from django.conf.urls import patterns
from django.views.generic.base import RedirectView
from zinnia.sitemaps import TagSitemap
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', RedirectView.as_view(url='/blog/')),
url(r'^blog/', include('zinnia.urls', namespace='zinnia')),
url(r'^comments/', include('django_comments.urls')),
url(r'^ckeditor/', include('ckeditor.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
)
sitemaps = {
'tags': TagSitemap,
'blog': EntrySitemap,
'authors': AuthorSitemap,
'categories': CategorySitemap
}
urlpatterns += patterns(
'django.contrib.sitemaps.views',
url(r'^sitemap.xml$', 'index',
{'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', 'sitemap',
{'sitemaps': sitemaps}),
)
urlpatterns += patterns(
'',
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT})
)
|
<commit_before><commit_msg>Add URLs for demo project<commit_after>
|
"""Urls for the zinnia-ckeditor demo"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from django.conf.urls import patterns
from django.views.generic.base import RedirectView
from zinnia.sitemaps import TagSitemap
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', RedirectView.as_view(url='/blog/')),
url(r'^blog/', include('zinnia.urls', namespace='zinnia')),
url(r'^comments/', include('django_comments.urls')),
url(r'^ckeditor/', include('ckeditor.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
)
sitemaps = {
'tags': TagSitemap,
'blog': EntrySitemap,
'authors': AuthorSitemap,
'categories': CategorySitemap
}
urlpatterns += patterns(
'django.contrib.sitemaps.views',
url(r'^sitemap.xml$', 'index',
{'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', 'sitemap',
{'sitemaps': sitemaps}),
)
urlpatterns += patterns(
'',
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT})
)
|
Add URLs for demo project"""Urls for the zinnia-ckeditor demo"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from django.conf.urls import patterns
from django.views.generic.base import RedirectView
from zinnia.sitemaps import TagSitemap
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', RedirectView.as_view(url='/blog/')),
url(r'^blog/', include('zinnia.urls', namespace='zinnia')),
url(r'^comments/', include('django_comments.urls')),
url(r'^ckeditor/', include('ckeditor.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
)
sitemaps = {
'tags': TagSitemap,
'blog': EntrySitemap,
'authors': AuthorSitemap,
'categories': CategorySitemap
}
urlpatterns += patterns(
'django.contrib.sitemaps.views',
url(r'^sitemap.xml$', 'index',
{'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', 'sitemap',
{'sitemaps': sitemaps}),
)
urlpatterns += patterns(
'',
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT})
)
|
<commit_before><commit_msg>Add URLs for demo project<commit_after>"""Urls for the zinnia-ckeditor demo"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from django.conf.urls import patterns
from django.views.generic.base import RedirectView
from zinnia.sitemaps import TagSitemap
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', RedirectView.as_view(url='/blog/')),
url(r'^blog/', include('zinnia.urls', namespace='zinnia')),
url(r'^comments/', include('django_comments.urls')),
url(r'^ckeditor/', include('ckeditor.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
)
sitemaps = {
'tags': TagSitemap,
'blog': EntrySitemap,
'authors': AuthorSitemap,
'categories': CategorySitemap
}
urlpatterns += patterns(
'django.contrib.sitemaps.views',
url(r'^sitemap.xml$', 'index',
{'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', 'sitemap',
{'sitemaps': sitemaps}),
)
urlpatterns += patterns(
'',
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT})
)
|
|
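The patterns() helper and string view references used here were deprecated in Django 1.8 and removed in 1.10, so on newer Django the same wiring becomes plain lists of url() entries with imported callables. A partial sketch for the sitemap block only, reusing the url and sitemaps names from the file above:

from django.contrib.sitemaps import views as sitemaps_views

urlpatterns += [
    url(r'^sitemap.xml$', sitemaps_views.index, {'sitemaps': sitemaps}),
    url(r'^sitemap-(?P<section>.+)\.xml$', sitemaps_views.sitemap,
        {'sitemaps': sitemaps}),
]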
7ee31b444556a70fb1d6fca27545ad1fbc317347
|
paasta_tools/contrib/add_to_deploy_queue.py
|
paasta_tools/contrib/add_to_deploy_queue.py
|
#!/usr/bin/env python
# Copyright 2015-2020 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from kazoo.client import KazooClient
from service_configuration_lib import DEFAULT_SOA_DIR
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import validate_service_instance
def parse_args(default_bounce_by_delay_secs):
parser = argparse.ArgumentParser(
description="Add a service instance to the deploy queue",
)
parser.add_argument(
"--bounce-by-delay-secs",
help="Number of seconds to wait before considering this entry late. Default: %(default)s",
dest="bounce_by_delay_secs",
type=float,
default=default_bounce_by_delay_secs,
)
parser.add_argument(
"service_instance",
help="The service.instance to add to the deploy queue",
type=str,
)
return parser.parse_args()
def main():
system_paasta_config = load_system_paasta_config()
args = parse_args(system_paasta_config.get_deployd_startup_bounce_deadline())
service, instance = args.service_instance.split(".", 1)
try:
validate_service_instance(
service,
instance,
cluster=system_paasta_config.get_cluster(),
soa_dir=DEFAULT_SOA_DIR,
)
except NoConfigurationForServiceError as e:
paasta_print(PaastaColors.red(str(e)))
sys.exit(1)
service_instance = ServiceInstance(
service=service,
instance=instance,
bounce_by=time.time() + args.bounce_by_delay_secs,
wait_until=time.time(),
watcher="manually_added",
failures=0,
enqueue_time=time.time(),
bounce_start_time=time.time(),
)
zk_client = KazooClient(hosts=system_paasta_config.get_zk_hosts())
zk_client.start()
queue = ZKDelayDeadlineQueue(client=zk_client)
queue.put(service_instance)
if __name__ == "__main__":
sys.exit(main())
|
Add script to add an arbitrary service instance to the deploy_queue
|
Add script to add an arbitrary service instance to the deploy_queue
|
Python
|
apache-2.0
|
Yelp/paasta,Yelp/paasta
|
Add script to add an arbitrary service instance to the deploy_queue
|
#!/usr/bin/env python
# Copyright 2015-2020 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from kazoo.client import KazooClient
from service_configuration_lib import DEFAULT_SOA_DIR
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import validate_service_instance
def parse_args(default_bounce_by_delay_secs):
parser = argparse.ArgumentParser(
description="Add a service instance to the deploy queue",
)
parser.add_argument(
"--bounce-by-delay-secs",
help="Number of seconds to wait before considering this entry late. Default: %(default)s",
dest="bounce_by_delay_secs",
type=float,
default=default_bounce_by_delay_secs,
)
parser.add_argument(
"service_instance",
help="The service.instance to add to the deploy queue",
type=str,
)
return parser.parse_args()
def main():
system_paasta_config = load_system_paasta_config()
args = parse_args(system_paasta_config.get_deployd_startup_bounce_deadline())
service, instance = args.service_instance.split(".", 1)
try:
validate_service_instance(
service,
instance,
cluster=system_paasta_config.get_cluster(),
soa_dir=DEFAULT_SOA_DIR,
)
except NoConfigurationForServiceError as e:
paasta_print(PaastaColors.red(str(e)))
sys.exit(1)
service_instance = ServiceInstance(
service=service,
instance=instance,
bounce_by=time.time() + args.bounce_by_delay_secs,
wait_until=time.time(),
watcher="manually_added",
failures=0,
enqueue_time=time.time(),
bounce_start_time=time.time(),
)
zk_client = KazooClient(hosts=system_paasta_config.get_zk_hosts())
zk_client.start()
queue = ZKDelayDeadlineQueue(client=zk_client)
queue.put(service_instance)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to add an arbitrary service instance to the deploy_queue<commit_after>
|
#!/usr/bin/env python
# Copyright 2015-2020 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from kazoo.client import KazooClient
from service_configuration_lib import DEFAULT_SOA_DIR
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import validate_service_instance
def parse_args(default_bounce_by_delay_secs):
parser = argparse.ArgumentParser(
description="Add a service instance to the deploy queue",
)
parser.add_argument(
"--bounce-by-delay-secs",
help="Number of seconds to wait before considering this entry late. Default: %(default)s",
dest="bounce_by_delay_secs",
type=float,
default=default_bounce_by_delay_secs,
)
parser.add_argument(
"service_instance",
help="The service.instance to add to the deploy queue",
type=str,
)
return parser.parse_args()
def main():
system_paasta_config = load_system_paasta_config()
args = parse_args(system_paasta_config.get_deployd_startup_bounce_deadline())
service, instance = args.service_instance.split(".", 1)
try:
validate_service_instance(
service,
instance,
cluster=system_paasta_config.get_cluster(),
soa_dir=DEFAULT_SOA_DIR,
)
except NoConfigurationForServiceError as e:
paasta_print(PaastaColors.red(str(e)))
sys.exit(1)
service_instance = ServiceInstance(
service=service,
instance=instance,
bounce_by=time.time() + args.bounce_by_delay_secs,
wait_until=time.time(),
watcher="manually_added",
failures=0,
enqueue_time=time.time(),
bounce_start_time=time.time(),
)
zk_client = KazooClient(hosts=system_paasta_config.get_zk_hosts())
zk_client.start()
queue = ZKDelayDeadlineQueue(client=zk_client)
queue.put(service_instance)
if __name__ == "__main__":
sys.exit(main())
|
Add script to add an arbitrary service instance to the deploy_queue#!/usr/bin/env python
# Copyright 2015-2020 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from kazoo.client import KazooClient
from service_configuration_lib import DEFAULT_SOA_DIR
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import validate_service_instance
def parse_args(default_bounce_by_delay_secs):
parser = argparse.ArgumentParser(
description="Add a service instance to the deploy queue",
)
parser.add_argument(
"--bounce-by-delay-secs",
help="Number of seconds to wait before considering this entry late. Default: %(default)s",
dest="bounce_by_delay_secs",
type=float,
default=default_bounce_by_delay_secs,
)
parser.add_argument(
"service_instance",
help="The service.instance to add to the deploy queue",
type=str,
)
return parser.parse_args()
def main():
system_paasta_config = load_system_paasta_config()
args = parse_args(system_paasta_config.get_deployd_startup_bounce_deadline())
service, instance = args.service_instance.split(".", 1)
try:
validate_service_instance(
service,
instance,
cluster=system_paasta_config.get_cluster(),
soa_dir=DEFAULT_SOA_DIR,
)
except NoConfigurationForServiceError as e:
paasta_print(PaastaColors.red(str(e)))
sys.exit(1)
service_instance = ServiceInstance(
service=service,
instance=instance,
bounce_by=time.time() + args.bounce_by_delay_secs,
wait_until=time.time(),
watcher="manually_added",
failures=0,
enqueue_time=time.time(),
bounce_start_time=time.time(),
)
zk_client = KazooClient(hosts=system_paasta_config.get_zk_hosts())
zk_client.start()
queue = ZKDelayDeadlineQueue(client=zk_client)
queue.put(service_instance)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to add an arbitrary service instance to the deploy_queue<commit_after>#!/usr/bin/env python
# Copyright 2015-2020 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from kazoo.client import KazooClient
from service_configuration_lib import DEFAULT_SOA_DIR
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import validate_service_instance
def parse_args(default_bounce_by_delay_secs):
parser = argparse.ArgumentParser(
description="Add a service instance to the deploy queue",
)
parser.add_argument(
"--bounce-by-delay-secs",
help="Number of seconds to wait before considering this entry late. Default: %(default)s",
dest="bounce_by_delay_secs",
type=float,
default=default_bounce_by_delay_secs,
)
parser.add_argument(
"service_instance",
help="The service.instance to add to the deploy queue",
type=str,
)
return parser.parse_args()
def main():
system_paasta_config = load_system_paasta_config()
args = parse_args(system_paasta_config.get_deployd_startup_bounce_deadline())
service, instance = args.service_instance.split(".", 1)
try:
validate_service_instance(
service,
instance,
cluster=system_paasta_config.get_cluster(),
soa_dir=DEFAULT_SOA_DIR,
)
except NoConfigurationForServiceError as e:
paasta_print(PaastaColors.red(str(e)))
sys.exit(1)
service_instance = ServiceInstance(
service=service,
instance=instance,
bounce_by=time.time() + args.bounce_by_delay_secs,
wait_until=time.time(),
watcher="manually_added",
failures=0,
enqueue_time=time.time(),
bounce_start_time=time.time(),
)
zk_client = KazooClient(hosts=system_paasta_config.get_zk_hosts())
zk_client.start()
queue = ZKDelayDeadlineQueue(client=zk_client)
queue.put(service_instance)
if __name__ == "__main__":
sys.exit(main())
|
|
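A hypothetical invocation, with the service and instance names invented for illustration; the positional argument is split on the first dot and validated against the cluster's soa-configs before anything is queued:

# ./add_to_deploy_queue.py --bounce-by-delay-secs 120 example_service.canary
#
# The split behaviour used by main():
service, instance = 'example_service.canary'.split('.', 1)
assert (service, instance) == ('example_service', 'canary')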
d2ff3f0905ded1dcbffd7ebd9cada423d44767d6
|
py/4sum-ii.py
|
py/4sum-ii.py
|
from collections import Counter
class Solution(object):
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count1 = Counter()
for a in A:
for b in B:
count1[a + b] += 1
ans = 0
for c in C:
for d in D:
ans += count1[-(c + d)]
return ans
|
Add py solution for 454. 4Sum II
|
Add py solution for 454. 4Sum II
454. 4Sum II: https://leetcode.com/problems/4sum-ii/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 454. 4Sum II
454. 4Sum II: https://leetcode.com/problems/4sum-ii/
|
from collections import Counter
class Solution(object):
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count1 = Counter()
for a in A:
for b in B:
count1[a + b] += 1
ans = 0
for c in C:
for d in D:
ans += count1[-(c + d)]
return ans
|
<commit_before><commit_msg>Add py solution for 454. 4Sum II
454. 4Sum II: https://leetcode.com/problems/4sum-ii/<commit_after>
|
from collections import Counter
class Solution(object):
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count1 = Counter()
for a in A:
for b in B:
count1[a + b] += 1
ans = 0
for c in C:
for d in D:
ans += count1[-(c + d)]
return ans
|
Add py solution for 454. 4Sum II
454. 4Sum II: https://leetcode.com/problems/4sum-ii/from collections import Counter
class Solution(object):
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count1 = Counter()
for a in A:
for b in B:
count1[a + b] += 1
ans = 0
for c in C:
for d in D:
ans += count1[-(c + d)]
return ans
|
<commit_before><commit_msg>Add py solution for 454. 4Sum II
454. 4Sum II: https://leetcode.com/problems/4sum-ii/<commit_after>from collections import Counter
class Solution(object):
def fourSumCount(self, A, B, C, D):
"""
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
count1 = Counter()
for a in A:
for b in B:
count1[a + b] += 1
ans = 0
for c in C:
for d in D:
ans += count1[-(c + d)]
return ans
|
|
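The Counter over A+B sums is the usual meet-in-the-middle trick: O(n^2) time and space instead of the naive O(n^4) enumeration. Worked on the standard LeetCode example:

s = Solution()
# A+B sums give Counter({0: 2, -1: 1, 1: 1}); the C+D sums -1 and 1
# each find exactly one complementary A+B pair, so the count is 2.
print(s.fourSumCount([1, 2], [-2, -1], [-1, 2], [0, 2]))  # -> 2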
a807dcac9c69ec6769f233f9ea8be3dfd06f43c4
|
elections/kenya/data/update_csv.py
|
elections/kenya/data/update_csv.py
|
#!/usr/bin/env python
import requests
URLS = (
('2017_candidates_presidency.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/10RBG4fIluYn2jBgCRBBQ--6yHTtppYrB2ef-zpmVxhE/export?format=csv'),
('2017_candidates_senate.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1x3_otOE376QFwfGO9vMC5qZxeIvGR3MR_UeflgLVLj8/export?format=csv'),
('2017_candidates_county_assemblies.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1ZWRN6XeN6dVhWqvdikDDeMp6aFM3zJVv9cmf80NZebY/export?format=csv'),
('2017_candidates_governors.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1RxXOwbHly8nv5-wVwnRSvNx5zYEs1Xvl0bPF1hfn_NI/export?format=csv'),
('2017_candidates_assembly.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1Ccj-yg_B92j5H9mUUCo6vaw1Zgjra0KoX9s5fzMDzJA/export?format=csv'),
('2017_candidates_wr.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1SPkbrnUbstmHWeIU0W3yxvOehhnj7GLHzcwWp2pGeXc/export?format=csv'),
)
for filename, url in URLS:
with open(filename, 'wb') as f:
f.write(requests.get(url).content)
|
Add a script for updating the CSV files
|
Add a script for updating the CSV files
|
Python
|
agpl-3.0
|
mysociety/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextrepresentative
|
Add a script for updating the CSV files
|
#!/usr/bin/env python
import requests
URLS = (
('2017_candidates_presidency.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/10RBG4fIluYn2jBgCRBBQ--6yHTtppYrB2ef-zpmVxhE/export?format=csv'),
('2017_candidates_senate.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1x3_otOE376QFwfGO9vMC5qZxeIvGR3MR_UeflgLVLj8/export?format=csv'),
('2017_candidates_county_assemblies.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1ZWRN6XeN6dVhWqvdikDDeMp6aFM3zJVv9cmf80NZebY/export?format=csv'),
('2017_candidates_governors.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1RxXOwbHly8nv5-wVwnRSvNx5zYEs1Xvl0bPF1hfn_NI/export?format=csv'),
('2017_candidates_assembly.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1Ccj-yg_B92j5H9mUUCo6vaw1Zgjra0KoX9s5fzMDzJA/export?format=csv'),
('2017_candidates_wr.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1SPkbrnUbstmHWeIU0W3yxvOehhnj7GLHzcwWp2pGeXc/export?format=csv'),
)
for filename, url in URLS:
with open(filename, 'wb') as f:
f.write(requests.get(url).content)
|
<commit_before><commit_msg>Add a script for updating the CSV files<commit_after>
|
#!/usr/bin/env python
import requests
URLS = (
('2017_candidates_presidency.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/10RBG4fIluYn2jBgCRBBQ--6yHTtppYrB2ef-zpmVxhE/export?format=csv'),
('2017_candidates_senate.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1x3_otOE376QFwfGO9vMC5qZxeIvGR3MR_UeflgLVLj8/export?format=csv'),
('2017_candidates_county_assemblies.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1ZWRN6XeN6dVhWqvdikDDeMp6aFM3zJVv9cmf80NZebY/export?format=csv'),
('2017_candidates_governors.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1RxXOwbHly8nv5-wVwnRSvNx5zYEs1Xvl0bPF1hfn_NI/export?format=csv'),
('2017_candidates_assembly.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1Ccj-yg_B92j5H9mUUCo6vaw1Zgjra0KoX9s5fzMDzJA/export?format=csv'),
('2017_candidates_wr.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1SPkbrnUbstmHWeIU0W3yxvOehhnj7GLHzcwWp2pGeXc/export?format=csv'),
)
for filename, url in URLS:
with open(filename, 'wb') as f:
f.write(requests.get(url).content)
|
Add a script for updating the CSV files#!/usr/bin/env python
import requests
URLS = (
('2017_candidates_presidency.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/10RBG4fIluYn2jBgCRBBQ--6yHTtppYrB2ef-zpmVxhE/export?format=csv'),
('2017_candidates_senate.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1x3_otOE376QFwfGO9vMC5qZxeIvGR3MR_UeflgLVLj8/export?format=csv'),
('2017_candidates_county_assemblies.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1ZWRN6XeN6dVhWqvdikDDeMp6aFM3zJVv9cmf80NZebY/export?format=csv'),
('2017_candidates_governors.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1RxXOwbHly8nv5-wVwnRSvNx5zYEs1Xvl0bPF1hfn_NI/export?format=csv'),
('2017_candidates_assembly.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1Ccj-yg_B92j5H9mUUCo6vaw1Zgjra0KoX9s5fzMDzJA/export?format=csv'),
('2017_candidates_wr.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1SPkbrnUbstmHWeIU0W3yxvOehhnj7GLHzcwWp2pGeXc/export?format=csv'),
)
for filename, url in URLS:
with open(filename, 'wb') as f:
f.write(requests.get(url).content)
|
<commit_before><commit_msg>Add a script for updating the CSV files<commit_after>#!/usr/bin/env python
import requests
URLS = (
('2017_candidates_presidency.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/10RBG4fIluYn2jBgCRBBQ--6yHTtppYrB2ef-zpmVxhE/export?format=csv'),
('2017_candidates_senate.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1x3_otOE376QFwfGO9vMC5qZxeIvGR3MR_UeflgLVLj8/export?format=csv'),
('2017_candidates_county_assemblies.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1ZWRN6XeN6dVhWqvdikDDeMp6aFM3zJVv9cmf80NZebY/export?format=csv'),
('2017_candidates_governors.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1RxXOwbHly8nv5-wVwnRSvNx5zYEs1Xvl0bPF1hfn_NI/export?format=csv'),
('2017_candidates_assembly.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1Ccj-yg_B92j5H9mUUCo6vaw1Zgjra0KoX9s5fzMDzJA/export?format=csv'),
('2017_candidates_wr.csv',
'https://docs.google.com/a/mysociety.org/spreadsheets/d/1SPkbrnUbstmHWeIU0W3yxvOehhnj7GLHzcwWp2pGeXc/export?format=csv'),
)
for filename, url in URLS:
with open(filename, 'wb') as f:
f.write(requests.get(url).content)
|
|
2d1dcb334a8fec45f3abe11c13d1649a18e3033d
|
shuup/core/migrations/0087_fix_attribute_migration.py
|
shuup/core/migrations/0087_fix_attribute_migration.py
|
# Generated by Django 2.2.19 on 2021-05-03 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shuup', '0086_attribute_choices'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='max_choices',
            field=models.PositiveIntegerField(default=1, help_text='Maximum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Maximum amount of choices'),
),
migrations.AlterField(
model_name='attribute',
name='min_choices',
            field=models.PositiveIntegerField(default=0, help_text='Minimum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Minimum amount of choices'),
),
]
|
Make sure there is no pending migrations
|
Make sure there is no pending migrations
|
Python
|
agpl-3.0
|
shoopio/shoop,shoopio/shoop,shoopio/shoop
|
Make sure there is no pending migrations
|
# Generated by Django 2.2.19 on 2021-05-03 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shuup', '0086_attribute_choices'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='max_choices',
            field=models.PositiveIntegerField(default=1, help_text='Maximum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Maximum amount of choices'),
),
migrations.AlterField(
model_name='attribute',
name='min_choices',
            field=models.PositiveIntegerField(default=0, help_text='Minimum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Minimum amount of choices'),
),
]
|
<commit_before><commit_msg>Make sure there is no pending migrations<commit_after>
|
# Generated by Django 2.2.19 on 2021-05-03 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shuup', '0086_attribute_choices'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='max_choices',
            field=models.PositiveIntegerField(default=1, help_text='Maximum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Maximum amount of choices'),
),
migrations.AlterField(
model_name='attribute',
name='min_choices',
            field=models.PositiveIntegerField(default=0, help_text='Minimum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Minimum amount of choices'),
),
]
|
Make sure there is no pending migrations# Generated by Django 2.2.19 on 2021-05-03 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shuup', '0086_attribute_choices'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='max_choices',
            field=models.PositiveIntegerField(default=1, help_text='Maximum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Maximum amount of choices'),
),
migrations.AlterField(
model_name='attribute',
name='min_choices',
            field=models.PositiveIntegerField(default=0, help_text='Minimum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Minimum amount of choices'),
),
]
|
<commit_before><commit_msg>Make sure there is no pending migrations<commit_after># Generated by Django 2.2.19 on 2021-05-03 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shuup', '0086_attribute_choices'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='max_choices',
            field=models.PositiveIntegerField(default=1, help_text='Maximum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Maximum amount of choices'),
),
migrations.AlterField(
model_name='attribute',
name='min_choices',
            field=models.PositiveIntegerField(default=0, help_text='Minimum amount of choices that user can choose from existing options. This field has effect only for choices type.', verbose_name='Minimum amount of choices'),
),
]
|
|
ee0cda1dea775e58e5416b57fc0d96114f075128
|
tests/unit/test_repr.py
|
tests/unit/test_repr.py
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
Test the repr() of object
|
Test the repr() of object
|
Python
|
bsd-3-clause
|
wdv4758h/butter,dasSOZO/python-butter
|
Test the repr() of object
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
<commit_before><commit_msg>Test the repr() of object<commit_after>
|
from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
Test the repr() of objectfrom butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
<commit_before><commit_msg>Test the repr() of object<commit_after>from butter.eventfd import Eventfd
from butter.fanotify import Fanotify
from butter.inotify import Inotify
from butter.signalfd import Signalfd
from butter.timerfd import Timerfd
import pytest
@pytest.fixture(params=[Eventfd, Fanotify, Inotify, Signalfd, Timerfd])
def obj(request):
Obj = request.param
o = Obj.__new__(Obj)
return o
@pytest.mark.repr
@pytest.mark.unit
def test_repr_name(obj):
obj._fd = 1
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd(obj):
obj._fd = 1
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.repr
@pytest.mark.unit
def test_repr_fd_closed(obj):
obj._fd = None
assert 'fd=closed' in repr(obj), "Instance does not indicate it is closed"
|
|
28af24002e410c27d5d09c301bd8b8b6bf4683dd
|
meanrecipes/sources.py
|
meanrecipes/sources.py
|
#!/usr/bin/env python3
import random
class Recipe:
'''
Represents an individual recipe, made up of the following attributes:
* title
A string containing a human-readable title or name for the recipe.
* ingredients
An iterable of tuples (quantity, unit, name) describing the ingredients
used in the recipe. Here, quantity is a float representing the amount
of the ingredient used when the recipe is adjusted to serve one person.
The values 'unit' and 'name' are strings.
* method
A list of strings describing the steps in the recipe.
'''
def __init__(self, title, ingredients, method):
self.title = title
self.ingredients = ingredients
self.method = method
def __repr__(self):
return '<Recipe "%s">' % self.title
class RecipeSource:
'''
Represents a source of raw recipes, e.g. a particular recipe website.
'''
def search(self, term):
'''
Search the given source for recipes with the given keyword, returning
an iterator over resulting Recipe objects.
'''
        raise NotImplementedError()
class RandomRecipeSource(RecipeSource):
'''
A recipe source which creates random gibberish recipes, for testing
purposes.
'''
maximum_recipes = 10
def search(self, term):
# First, gather a set of random words
chance = 0.001
with open('/usr/share/dict/words') as wordlist:
words = [line.strip() for line in wordlist if random.uniform(0, 1) <= chance]
pick = lambda n: ' '.join(random.choice(words) for i in range(n))
# Then put them together to make a recipe
for i in range(random.randrange(self.maximum_recipes)):
title = pick(4)
n = random.randint(1, 10)
ingredients = [(random.uniform(0.1, 1000.0), 'g', pick(1)) for _ in range(n)]
method = [pick(30) for _ in range(n)]
yield Recipe(title, ingredients, method)
|
Add recipe & source base classes and fake recipe generator
|
Add recipe & source base classes and fake recipe generator
|
Python
|
bsd-2-clause
|
kkelk/MeanRecipes,kkelk/MeanRecipes,kkelk/MeanRecipes,kkelk/MeanRecipes
|
Add recipe & source base classes and fake recipe generator
|
#!/usr/bin/env python3
import random
class Recipe:
'''
Represents an individual recipe, made up of the following attributes:
* title
A string containing a human-readable title or name for the recipe.
* ingredients
An iterable of tuples (quantity, unit, name) describing the ingredients
used in the recipe. Here, quantity is a float representing the amount
of the ingredient used when the recipe is adjusted to serve one person.
The values 'unit' and 'name' are strings.
* method
A list of strings describing the steps in the recipe.
'''
def __init__(self, title, ingredients, method):
self.title = title
self.ingredients = ingredients
self.method = method
def __repr__(self):
return '<Recipe "%s">' % self.title
class RecipeSource:
'''
Represents a source of raw recipes, e.g. a particular recipe website.
'''
def search(self, term):
'''
Search the given source for recipes with the given keyword, returning
an iterator over resulting Recipe objects.
'''
        raise NotImplementedError()
class RandomRecipeSource(RecipeSource):
'''
A recipe source which creates random gibberish recipes, for testing
purposes.
'''
maximum_recipes = 10
def search(self, term):
# First, gather a set of random words
chance = 0.001
with open('/usr/share/dict/words') as wordlist:
words = [line.strip() for line in wordlist if random.uniform(0, 1) <= chance]
pick = lambda n: ' '.join(random.choice(words) for i in range(n))
# Then put them together to make a recipe
for i in range(random.randrange(self.maximum_recipes)):
title = pick(4)
n = random.randint(1, 10)
ingredients = [(random.uniform(0.1, 1000.0), 'g', pick(1)) for _ in range(n)]
method = [pick(30) for _ in range(n)]
yield Recipe(title, ingredients, method)
|
<commit_before><commit_msg>Add recipe & source base classes and fake recipe generator<commit_after>
|
#!/usr/bin/env python3
import random
class Recipe:
'''
Represents an individual recipe, made up of the following attributes:
* title
A string containing a human-readable title or name for the recipe.
* ingredients
An iterable of tuples (quantity, unit, name) describing the ingredients
used in the recipe. Here, quantity is a float representing the amount
of the ingredient used when the recipe is adjusted to serve one person.
The values 'unit' and 'name' are strings.
* method
A list of strings describing the steps in the recipe.
'''
def __init__(self, title, ingredients, method):
self.title = title
self.ingredients = ingredients
self.method = method
def __repr__(self):
return '<Recipe "%s">' % self.title
class RecipeSource:
'''
Represents a source of raw recipes, e.g. a particular recipe website.
'''
def search(self, term):
'''
Search the given source for recipes with the given keyword, returning
an iterator over resulting Recipe objects.
'''
        raise NotImplementedError()
class RandomRecipeSource(RecipeSource):
'''
A recipe source which creates random gibberish recipes, for testing
purposes.
'''
maximum_recipes = 10
def search(self, term):
# First, gather a set of random words
chance = 0.001
with open('/usr/share/dict/words') as wordlist:
words = [line.strip() for line in wordlist if random.uniform(0, 1) <= chance]
pick = lambda n: ' '.join(random.choice(words) for i in range(n))
# Then put them together to make a recipe
for i in range(random.randrange(self.maximum_recipes)):
title = pick(4)
n = random.randint(1, 10)
ingredients = [(random.uniform(0.1, 1000.0), 'g', pick(1)) for _ in range(n)]
method = [pick(30) for _ in range(n)]
yield Recipe(title, ingredients, method)
|
Add recipe & source base classes and fake recipe generator#!/usr/bin/env python3
import random
class Recipe:
'''
Represents an individual recipe, made up of the following attributes:
* title
A string containing a human-readable title or name for the recipe.
* ingredients
An iterable of tuples (quantity, unit, name) describing the ingredients
used in the recipe. Here, quantity is a float representing the amount
of the ingredient used when the recipe is adjusted to serve one person.
The values 'unit' and 'name' are strings.
* method
A list of strings describing the steps in the recipe.
'''
def __init__(self, title, ingredients, method):
self.title = title
self.ingredients = ingredients
self.method = method
def __repr__(self):
return '<Recipe "%s">' % self.title
class RecipeSource:
'''
Represents a source of raw recipes, e.g. a particular recipe website.
'''
def search(self, term):
'''
Search the given source for recipes with the given keyword, returning
an iterator over resulting Recipe objects.
'''
        raise NotImplementedError()
class RandomRecipeSource(RecipeSource):
'''
A recipe source which creates random gibberish recipes, for testing
purposes.
'''
maximum_recipes = 10
def search(self, term):
# First, gather a set of random words
chance = 0.001
with open('/usr/share/dict/words') as wordlist:
words = [line.strip() for line in wordlist if random.uniform(0, 1) <= chance]
pick = lambda n: ' '.join(random.choice(words) for i in range(n))
# Then put them together to make a recipe
for i in range(random.randrange(self.maximum_recipes)):
title = pick(4)
n = random.randint(1, 10)
ingredients = [(random.uniform(0.1, 1000.0), 'g', pick(1)) for _ in range(n)]
method = [pick(30) for _ in range(n)]
yield Recipe(title, ingredients, method)
|
<commit_before><commit_msg>Add recipe & source base classes and fake recipe generator<commit_after>#!/usr/bin/env python3
import random
class Recipe:
'''
Represents an individual recipe, made up of the following attributes:
* title
A string containing a human-readable title or name for the recipe.
* ingredients
An iterable of tuples (quantity, unit, name) describing the ingredients
used in the recipe. Here, quantity is a float representing the amount
of the ingredient used when the recipe is adjusted to serve one person.
The values 'unit' and 'name' are strings.
* method
A list of strings describing the steps in the recipe.
'''
def __init__(self, title, ingredients, method):
self.title = title
self.ingredients = ingredients
self.method = method
def __repr__(self):
return '<Recipe "%s">' % self.title
class RecipeSource:
'''
Represents a source of raw recipes, e.g. a particular recipe website.
'''
def search(self, term):
'''
Search the given source for recipes with the given keyword, returning
an iterator over resulting Recipe objects.
'''
        raise NotImplementedError()
class RandomRecipeSource(RecipeSource):
'''
A recipe source which creates random gibberish recipes, for testing
purposes.
'''
maximum_recipes = 10
def search(self, term):
# First, gather a set of random words
chance = 0.001
with open('/usr/share/dict/words') as wordlist:
words = [line.strip() for line in wordlist if random.uniform(0, 1) <= chance]
pick = lambda n: ' '.join(random.choice(words) for i in range(n))
# Then put them together to make a recipe
for i in range(random.randrange(self.maximum_recipes)):
title = pick(4)
n = random.randint(1, 10)
ingredients = [(random.uniform(0.1, 1000.0), 'g', pick(1)) for _ in range(n)]
method = [pick(30) for _ in range(n)]
yield Recipe(title, ingredients, method)
|
|
92171a1bbccdfac0fd962f046f72faa681244c98
|
src/txkube/test/_compat.py
|
src/txkube/test/_compat.py
|
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
Helpers for Python 2/3 compatibility.
"""
from twisted.python.compat import _PY3
def encode_environ(env):
"""
Convert a ``dict`` of ``unicode`` keys and values to ``bytes`` on Python 2,
but return the ``dict`` unmodified on Python 3.
"""
if _PY3:
return env
else:
bytes_env = {}
for key in env:
bytes_env[key.encode("ascii")] = env[key].encode("ascii")
return bytes_env
|
Add helper for Python 2/3 compatibility.
|
Add helper for Python 2/3 compatibility.
|
Python
|
mit
|
LeastAuthority/txkube
|
Add helper for Python 2/3 compatibility.
|
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
Helpers for Python 2/3 compatibility.
"""
from twisted.python.compat import _PY3
def encode_environ(env):
"""
Convert a ``dict`` of ``unicode`` keys and values to ``bytes`` on Python 2,
but return the ``dict`` unmodified on Python 3.
"""
if _PY3:
return env
else:
bytes_env = {}
for key in env:
bytes_env[key.encode("ascii")] = env[key].encode("ascii")
return bytes_env
|
<commit_before><commit_msg>Add helper for Python 2/3 compatibility.<commit_after>
|
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
Helpers for Python 2/3 compatibility.
"""
from twisted.python.compat import _PY3
def encode_environ(env):
"""
Convert a ``dict`` of ``unicode`` keys and values to ``bytes`` on Python 2,
but return the ``dict`` unmodified on Python 3.
"""
if _PY3:
return env
else:
bytes_env = {}
for key in env:
bytes_env[key.encode("ascii")] = env[key].encode("ascii")
return bytes_env
|
Add helper for Python 2/3 compatibility.# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
Helpers for Python 2/3 compatibility.
"""
from twisted.python.compat import _PY3
def encode_environ(env):
"""
Convert a ``dict`` of ``unicode`` keys and values to ``bytes`` on Python 2,
but return the ``dict`` unmodified on Python 3.
"""
if _PY3:
return env
else:
bytes_env = {}
for key in env:
bytes_env[key.encode("ascii")] = env[key].encode("ascii")
return bytes_env
|
<commit_before><commit_msg>Add helper for Python 2/3 compatibility.<commit_after># Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
Helpers for Python 2/3 compatibility.
"""
from twisted.python.compat import _PY3
def encode_environ(env):
"""
Convert a ``dict`` of ``unicode`` keys and values to ``bytes`` on Python 2,
but return the ``dict`` unmodified on Python 3.
"""
if _PY3:
return env
else:
bytes_env = {}
for key in env:
bytes_env[key.encode("ascii")] = env[key].encode("ascii")
return bytes_env
|
|
70e55da36982217e5bcd983128a984be1aae84ab
|
util/timeline_adjust.py
|
util/timeline_adjust.py
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import re
time_re = re.compile(r"^\s*#?\s*([0-9]+(?:\.[0-9]+)?)\s+\"")
first_num_re = re.compile(r"([0-9]+(?:\.[0-9]+)?)")
def adjust_lines(lines, adjust):
for line in lines:
match = re.match(time_re, line)
if match:
time = float(match.group(1)) + adjust
print(re.sub(first_num_re, str(time), line, 1), end='')
else:
print(line, end='')
def main():
parser = argparse.ArgumentParser(
description="A utility to uniformly adjust times in an act timeline file")
parser.add_argument('--file', required=True, type=argparse.FileType('r', 0),
help="The timeline file to adjust times in")
parser.add_argument('--adjust', required=True, type=float,
help="The amount of time to adjust each entry by")
args = parser.parse_args()
adjust_lines(args.file, args.adjust)
if __name__ == "__main__":
main()
|
Add python utility to adjust timelines
|
Add python utility to adjust timelines
It's kind of a pain to adjust all the times in a file or a section by a
particular amount, so this python file will do it for you.
|
Python
|
apache-2.0
|
quisquous/cactbot,quisquous/cactbot,sqt/cactbot,sqt/cactbot,quisquous/cactbot,sqt/cactbot,quisquous/cactbot,sqt/cactbot,quisquous/cactbot,quisquous/cactbot,sqt/cactbot
|
Add python utility to adjust timelines
It's kind of a pain to adjust all the times in a file or a section by a
particular amount, so this python file will do it for you.
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import re
time_re = re.compile(r"^\s*#?\s*([0-9]+(?:\.[0-9]+)?)\s+\"")
first_num_re = re.compile(r"([0-9]+(?:\.[0-9]+)?)")
def adjust_lines(lines, adjust):
for line in lines:
match = re.match(time_re, line)
if match:
time = float(match.group(1)) + adjust
print(re.sub(first_num_re, str(time), line, 1), end='')
else:
print(line, end='')
def main():
parser = argparse.ArgumentParser(
description="A utility to uniformly adjust times in an act timeline file")
parser.add_argument('--file', required=True, type=argparse.FileType('r', 0),
help="The timeline file to adjust times in")
parser.add_argument('--adjust', required=True, type=float,
help="The amount of time to adjust each entry by")
args = parser.parse_args()
adjust_lines(args.file, args.adjust)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add python utility to adjust timelines
It's kind of a pain to adjust all the times in a file or a section by a
particular amount, so this python file will do it for you.<commit_after>
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import re
time_re = re.compile(r"^\s*#?\s*([0-9]+(?:\.[0-9]+)?)\s+\"")
first_num_re = re.compile(r"([0-9]+(?:\.[0-9]+)?)")
def adjust_lines(lines, adjust):
for line in lines:
match = re.match(time_re, line)
if match:
time = float(match.group(1)) + adjust
print(re.sub(first_num_re, str(time), line, 1), end='')
else:
print(line, end='')
def main():
parser = argparse.ArgumentParser(
description="A utility to uniformly adjust times in an act timeline file")
parser.add_argument('--file', required=True, type=argparse.FileType('r', 0),
help="The timeline file to adjust times in")
parser.add_argument('--adjust', required=True, type=float,
help="The amount of time to adjust each entry by")
args = parser.parse_args()
adjust_lines(args.file, args.adjust)
if __name__ == "__main__":
main()
|
Add python utility to adjust timelines
It's kind of a pain to adjust all the times in a file or a section by a
particular amount, so this python file will do it for you.#!/usr/bin/python
from __future__ import print_function
import argparse
import re
time_re = re.compile(r"^\s*#?\s*([0-9]+(?:\.[0-9]+)?)\s+\"")
first_num_re = re.compile(r"([0-9]+(?:\.[0-9]+)?)")
def adjust_lines(lines, adjust):
for line in lines:
match = re.match(time_re, line)
if match:
time = float(match.group(1)) + adjust
print(re.sub(first_num_re, str(time), line, 1), end='')
else:
print(line, end='')
def main():
parser = argparse.ArgumentParser(
description="A utility to uniformly adjust times in an act timeline file")
parser.add_argument('--file', required=True, type=argparse.FileType('r', 0),
help="The timeline file to adjust times in")
parser.add_argument('--adjust', required=True, type=float,
help="The amount of time to adjust each entry by")
args = parser.parse_args()
adjust_lines(args.file, args.adjust)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add python utility to adjust timelines
It's kind of a pain to adjust all the times in a file or a section by a
particular amount, so this python file will do it for you.<commit_after>#!/usr/bin/python
from __future__ import print_function
import argparse
import re
time_re = re.compile(r"^\s*#?\s*([0-9]+(?:\.[0-9]+)?)\s+\"")
first_num_re = re.compile(r"([0-9]+(?:\.[0-9]+)?)")
def adjust_lines(lines, adjust):
for line in lines:
match = re.match(time_re, line)
if match:
time = float(match.group(1)) + adjust
print(re.sub(first_num_re, str(time), line, 1), end='')
else:
print(line, end='')
def main():
parser = argparse.ArgumentParser(
description="A utility to uniformly adjust times in an act timeline file")
parser.add_argument('--file', required=True, type=argparse.FileType('r', 0),
help="The timeline file to adjust times in")
parser.add_argument('--adjust', required=True, type=float,
help="The amount of time to adjust each entry by")
args = parser.parse_args()
adjust_lines(args.file, args.adjust)
if __name__ == "__main__":
main()
|
|
804086ef7fe50d8b710406dd0efb614733779912
|
iati/core/tests/test_validate.py
|
iati/core/tests/test_validate.py
|
"""A module containing tests for the library representation of validation."""
valid_xml = """
<?xml version="1.0"?>
<iati-activities version="2.02">
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
invalid_xml = """
<?xml version="1.0"?>
<iati-activities version="200.02"><!-- Invalid Version -->
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
|
Add valid and invalid xml. Invalid XML is currently only invalid due to an incorrect version number.
|
Add valid and invalid xml
Invalid XML is currently only invalid due to an incorrect version
number.
|
Python
|
mit
|
IATI/iati.core,IATI/iati.core
|
Add valid and invalid xml
Invalid XML is currently only invalid due to an incorrect version
number.
|
"""A module containing tests for the library representation of validation."""
valid_xml = """
<?xml version="1.0"?>
<iati-activities version="2.02">
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
invalid_xml = """
<?xml version="1.0"?>
<iati-activities version="200.02"><!-- Invalid Version -->
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
|
<commit_before><commit_msg>Add valid and invalid xml
Invalid XML is currently only invalid due to an incorrect version
number.<commit_after>
|
"""A module containing tests for the library representation of validation."""
valid_xml = """
<?xml version="1.0"?>
<iati-activities version="2.02">
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
invalid_xml = """
<?xml version="1.0"?>
<iati-activities version="200.02"><!-- Invalid Version -->
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
|
Add valid and invalid xml
Invalid XML is currently only invalid due to an incorrect version
number."""A module containing tests for the library representation of validation."""
valid_xml = """
<?xml version="1.0"?>
<iati-activities version="2.02">
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
invalid_xml = """
<?xml version="1.0"?>
<iati-activities version="200.02"><!-- Invalid Version -->
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
|
<commit_before><commit_msg>Add valid and invalid xml
Invalid XML is currently only invalid due to an incorrect version
number.<commit_after>"""A module containing tests for the library representation of validation."""
valid_xml = """
<?xml version="1.0"?>
<iati-activities version="2.02">
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
invalid_xml = """
<?xml version="1.0"?>
<iati-activities version="200.02"><!-- Invalid Version -->
<iati-activity>
<iati-identifier></iati-identifier>
<reporting-org type="40" ref="AA-AAA-123456789">
<narrative>Organisation name</narrative>
</reporting-org>
<title>
<narrative>Xxxxxxx</narrative>
</title>
<description>
<narrative>Xxxxxxx</narrative>
</description>
<participating-org role="2"></participating-org>
<activity-status code="2"/>
<activity-date type="1" iso-date="2023-11-27"/>
</iati-activity>
</iati-activities>
"""
|
|
979347a3af800229701d3a48ea2ca5e9aec709df
|
faq/migrations/0001_initial.py
|
faq/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Questions',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('question', models.CharField(max_length=200)),
('answer', models.TextField()),
],
options={
'verbose_name': 'question',
'verbose_name_plural': 'questions',
},
),
migrations.CreateModel(
name='Topics',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('topic', models.CharField(max_length=100)),
],
options={
'verbose_name': 'topic',
'verbose_name_plural': 'topics',
},
),
migrations.AddField(
model_name='questions',
name='topic',
field=models.ForeignKey(to='faq.Topics'),
),
]
|
Create migration for the models.
|
Create migration for the models.
|
Python
|
bsd-3-clause
|
donnywdavis/Django-faq-views,donnywdavis/Django-faq-views
|
Create migration for the models.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Questions',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('question', models.CharField(max_length=200)),
('answer', models.TextField()),
],
options={
'verbose_name': 'question',
'verbose_name_plural': 'questions',
},
),
migrations.CreateModel(
name='Topics',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('topic', models.CharField(max_length=100)),
],
options={
'verbose_name': 'topic',
'verbose_name_plural': 'topics',
},
),
migrations.AddField(
model_name='questions',
name='topic',
field=models.ForeignKey(to='faq.Topics'),
),
]
|
<commit_before><commit_msg>Create migration for the models.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Questions',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('question', models.CharField(max_length=200)),
('answer', models.TextField()),
],
options={
'verbose_name': 'question',
'verbose_name_plural': 'questions',
},
),
migrations.CreateModel(
name='Topics',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('topic', models.CharField(max_length=100)),
],
options={
'verbose_name': 'topic',
'verbose_name_plural': 'topics',
},
),
migrations.AddField(
model_name='questions',
name='topic',
field=models.ForeignKey(to='faq.Topics'),
),
]
|
Create migration for the models.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Questions',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('question', models.CharField(max_length=200)),
('answer', models.TextField()),
],
options={
'verbose_name': 'question',
'verbose_name_plural': 'questions',
},
),
migrations.CreateModel(
name='Topics',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('topic', models.CharField(max_length=100)),
],
options={
'verbose_name': 'topic',
'verbose_name_plural': 'topics',
},
),
migrations.AddField(
model_name='questions',
name='topic',
field=models.ForeignKey(to='faq.Topics'),
),
]
|
<commit_before><commit_msg>Create migration for the models.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Questions',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('question', models.CharField(max_length=200)),
('answer', models.TextField()),
],
options={
'verbose_name': 'question',
'verbose_name_plural': 'questions',
},
),
migrations.CreateModel(
name='Topics',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),
('topic', models.CharField(max_length=100)),
],
options={
'verbose_name': 'topic',
'verbose_name_plural': 'topics',
},
),
migrations.AddField(
model_name='questions',
name='topic',
field=models.ForeignKey(to='faq.Topics'),
),
]
|
|
894cbd1f7439fd6aca5a2f54a8111d22143c10f8
|
package/segmenter.py
|
package/segmenter.py
|
import copy
import package.app as app
import cv2
import numpy as np
from package.image import Image, CS_BGR
__author__ = 'luigolas'
class Segmenter():
compatible_color_spaces = []
def segment(self, image):
"""
:param image:
:raise NotImplementedError:
"""
raise NotImplementedError("Please Implement segment method")
class Grabcut(Segmenter):
compatible_color_spaces = [CS_BGR]
def __init__(self, mask_source, iterCount, color_space=CS_BGR):
self._mask_name = mask_source.split("/")[-1].split(".")[0]
self._mask = np.loadtxt(mask_source, np.uint8)
self._iterCount = iterCount
if color_space not in Grabcut.compatible_color_spaces:
raise AttributeError("Grabcut can't work with colorspace " + str(color_space))
self._colorspace = color_space
self.name = type(self).__name__ + str(self._iterCount) + self._mask_name
self.dict_name = {"Segmenter": str(type(self).__name__), "SegIter": self._iterCount,
"Mask": self._mask_name}
def segment(self, image):
"""
:param image:
:return: :raise TypeError:
"""
if not isinstance(image, Image):
raise TypeError("Must be a valid Image (package.image) object")
if image.colorspace != self._colorspace:
raise AttributeError("Image must be in BGR color space")
if app.DB:
try:
mask = app.DB[self.dbname(image.imgname)]
# print("returning mask for " + imgname + " [0][0:5]: " + str(mask[4][10:25]))
return mask
except FileNotFoundError:
# Not in DataBase, continue calculating
pass
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
# mask = self._mask.copy()
mask = copy.copy(self._mask)
cv2.grabCut(image, mask, None, bgdmodel, fgdmodel, self._iterCount, cv2.GC_INIT_WITH_MASK)
mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
if app.DB:
app.DB[self.dbname(image.imgname)] = mask
return mask
def dbname(self, imgname):
classname = type(self).__name__
foldername = imgname.split("/")[-2]
imgname = imgname.split("/")[-1]
imgname = imgname.split(".")[0] # Take out file extension
keys = ["masks", classname, "iter" + str(self._iterCount), self._mask_name, foldername, imgname]
return keys
|
Add Segmenter. Not working yet
|
Add Segmenter. Not working yet
|
Python
|
mit
|
Luigolas/PyReID
|
Add Segmenter. Not working yet
|
import copy
import package.app as app
import cv2
import numpy as np
from package.image import Image, CS_BGR
__author__ = 'luigolas'
class Segmenter():
compatible_color_spaces = []
def segment(self, image):
"""
:param image:
:raise NotImplementedError:
"""
raise NotImplementedError("Please Implement segment method")
class Grabcut(Segmenter):
compatible_color_spaces = [CS_BGR]
def __init__(self, mask_source, iterCount, color_space=CS_BGR):
self._mask_name = mask_source.split("/")[-1].split(".")[0]
self._mask = np.loadtxt(mask_source, np.uint8)
self._iterCount = iterCount
if color_space not in Grabcut.compatible_color_spaces:
raise AttributeError("Grabcut can't work with colorspace " + str(color_space))
self._colorspace = color_space
self.name = type(self).__name__ + str(self._iterCount) + self._mask_name
self.dict_name = {"Segmenter": str(type(self).__name__), "SegIter": self._iterCount,
"Mask": self._mask_name}
def segment(self, image):
"""
:param image:
:return: :raise TypeError:
"""
if not isinstance(image, Image):
raise TypeError("Must be a valid Image (package.image) object")
if image.colorspace != self._colorspace:
raise AttributeError("Image must be in BGR color space")
if app.DB:
try:
mask = app.DB[self.dbname(image.imgname)]
# print("returning mask for " + imgname + " [0][0:5]: " + str(mask[4][10:25]))
return mask
except FileNotFoundError:
# Not in DataBase, continue calculating
pass
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
# mask = self._mask.copy()
mask = copy.copy(self._mask)
cv2.grabCut(image, mask, None, bgdmodel, fgdmodel, self._iterCount, cv2.GC_INIT_WITH_MASK)
mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
if app.DB:
app.DB[self.dbname(image.imgname)] = mask
return mask
def dbname(self, imgname):
classname = type(self).__name__
foldername = imgname.split("/")[-2]
imgname = imgname.split("/")[-1]
imgname = imgname.split(".")[0] # Take out file extension
keys = ["masks", classname, "iter" + str(self._iterCount), self._mask_name, foldername, imgname]
return keys
|
<commit_before><commit_msg>Add Segmenter. Not working yet<commit_after>
|
import copy
import package.app as app
import cv2
import numpy as np
from package.image import Image, CS_BGR
__author__ = 'luigolas'
class Segmenter():
compatible_color_spaces = []
def segment(self, image):
"""
:param image:
:raise NotImplementedError:
"""
raise NotImplementedError("Please Implement segment method")
class Grabcut(Segmenter):
compatible_color_spaces = [CS_BGR]
def __init__(self, mask_source, iterCount, color_space=CS_BGR):
self._mask_name = mask_source.split("/")[-1].split(".")[0]
self._mask = np.loadtxt(mask_source, np.uint8)
self._iterCount = iterCount
if color_space not in Grabcut.compatible_color_spaces:
raise AttributeError("Grabcut can't work with colorspace " + str(color_space))
self._colorspace = color_space
self.name = type(self).__name__ + str(self._iterCount) + self._mask_name
self.dict_name = {"Segmenter": str(type(self).__name__), "SegIter": self._iterCount,
"Mask": self._mask_name}
def segment(self, image):
"""
:param image:
:return: :raise TypeError:
"""
if not isinstance(image, Image):
raise TypeError("Must be a valid Image (package.image) object")
if image.colorspace != self._colorspace:
raise AttributeError("Image must be in BGR color space")
if app.DB:
try:
mask = app.DB[self.dbname(image.imgname)]
# print("returning mask for " + imgname + " [0][0:5]: " + str(mask[4][10:25]))
return mask
except FileNotFoundError:
# Not in DataBase, continue calculating
pass
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
# mask = self._mask.copy()
mask = copy.copy(self._mask)
cv2.grabCut(image, mask, None, bgdmodel, fgdmodel, self._iterCount, cv2.GC_INIT_WITH_MASK)
mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
if app.DB:
app.DB[self.dbname(image.imgname)] = mask
return mask
def dbname(self, imgname):
classname = type(self).__name__
foldername = imgname.split("/")[-2]
imgname = imgname.split("/")[-1]
imgname = imgname.split(".")[0] # Take out file extension
keys = ["masks", classname, "iter" + str(self._iterCount), self._mask_name, foldername, imgname]
return keys
|
Add Segmenter. Not working yetimport copy
import package.app as app
import cv2
import numpy as np
from package.image import Image, CS_BGR
__author__ = 'luigolas'
class Segmenter():
compatible_color_spaces = []
def segment(self, image):
"""
:param image:
:raise NotImplementedError:
"""
raise NotImplementedError("Please Implement segment method")
class Grabcut(Segmenter):
compatible_color_spaces = [CS_BGR]
def __init__(self, mask_source, iterCount, color_space=CS_BGR):
self._mask_name = mask_source.split("/")[-1].split(".")[0]
self._mask = np.loadtxt(mask_source, np.uint8)
self._iterCount = iterCount
if color_space not in Grabcut.compatible_color_spaces:
raise AttributeError("Grabcut can't work with colorspace " + str(color_space))
self._colorspace = color_space
self.name = type(self).__name__ + str(self._iterCount) + self._mask_name
self.dict_name = {"Segmenter": str(type(self).__name__), "SegIter": self._iterCount,
"Mask": self._mask_name}
def segment(self, image):
"""
:param image:
:return: :raise TypeError:
"""
if not isinstance(image, Image):
raise TypeError("Must be a valid Image (package.image) object")
if image.colorspace != self._colorspace:
raise AttributeError("Image must be in BGR color space")
if app.DB:
try:
mask = app.DB[self.dbname(image.imgname)]
# print("returning mask for " + imgname + " [0][0:5]: " + str(mask[4][10:25]))
return mask
except FileNotFoundError:
# Not in DataBase, continue calculating
pass
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
# mask = self._mask.copy()
mask = copy.copy(self._mask)
cv2.grabCut(image, mask, None, bgdmodel, fgdmodel, self._iterCount, cv2.GC_INIT_WITH_MASK)
mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
if app.DB:
app.DB[self.dbname(image.imgname)] = mask
return mask
def dbname(self, imgname):
classname = type(self).__name__
foldername = imgname.split("/")[-2]
imgname = imgname.split("/")[-1]
imgname = imgname.split(".")[0] # Take out file extension
keys = ["masks", classname, "iter" + str(self._iterCount), self._mask_name, foldername, imgname]
return keys
|
<commit_before><commit_msg>Add Segmenter. Not working yet<commit_after>import copy
import package.app as app
import cv2
import numpy as np
from package.image import Image, CS_BGR
__author__ = 'luigolas'
class Segmenter():
compatible_color_spaces = []
def segment(self, image):
"""
:param image:
:raise NotImplementedError:
"""
raise NotImplementedError("Please Implement segment method")
class Grabcut(Segmenter):
compatible_color_spaces = [CS_BGR]
def __init__(self, mask_source, iterCount, color_space=CS_BGR):
self._mask_name = mask_source.split("/")[-1].split(".")[0]
self._mask = np.loadtxt(mask_source, np.uint8)
self._iterCount = iterCount
if color_space not in Grabcut.compatible_color_spaces:
raise AttributeError("Grabcut can't work with colorspace " + str(color_space))
self._colorspace = color_space
self.name = type(self).__name__ + str(self._iterCount) + self._mask_name
self.dict_name = {"Segmenter": str(type(self).__name__), "SegIter": self._iterCount,
"Mask": self._mask_name}
def segment(self, image):
"""
:param image:
:return: :raise TypeError:
"""
if not isinstance(image, Image):
raise TypeError("Must be a valid Image (package.image) object")
if image.colorspace != self._colorspace:
raise AttributeError("Image must be in BGR color space")
if app.DB:
try:
mask = app.DB[self.dbname(image.imgname)]
# print("returning mask for " + imgname + " [0][0:5]: " + str(mask[4][10:25]))
return mask
except FileNotFoundError:
# Not in DataBase, continue calculating
pass
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
# mask = self._mask.copy()
mask = copy.copy(self._mask)
cv2.grabCut(image, mask, None, bgdmodel, fgdmodel, self._iterCount, cv2.GC_INIT_WITH_MASK)
mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
if app.DB:
app.DB[self.dbname(image.imgname)] = mask
return mask
def dbname(self, imgname):
classname = type(self).__name__
foldername = imgname.split("/")[-2]
imgname = imgname.split("/")[-1]
imgname = imgname.split(".")[0] # Take out file extension
keys = ["masks", classname, "iter" + str(self._iterCount), self._mask_name, foldername, imgname]
return keys
|
|
3c898782f8b51c4af9bff2c566adb80c1f740e53
|
lintcode/Medium/120_Word_Ladder.py
|
lintcode/Medium/120_Word_Ladder.py
|
class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return an integer
def ladderLength(self, start, end, dict):
# write your code here
queue = [start]
level = 1
levelIndex = 0
dict.add(end)
while (queue):
currentStr = queue.pop(0)
if (currentStr == end):
return level
i = 0
while (i < len(currentStr)):
for j in range(97, 123):
tmp = currentStr[:i] + chr(j) + currentStr[i+1:]
if (tmp in dict):
queue.append(tmp)
dict.remove(tmp)
i += 1
levelIndex -= 1
if (levelIndex <= 0):
level += 1
levelIndex = len(queue)
return 0
|
Add solution to lintcode question 120
|
Add solution to lintcode question 120
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 120
|
class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return an integer
def ladderLength(self, start, end, dict):
# write your code here
queue = [start]
level = 1
levelIndex = 0
dict.add(end)
while (queue):
currentStr = queue.pop(0)
if (currentStr == end):
return level
i = 0
while (i < len(currentStr)):
for j in range(97, 123):
tmp = currentStr[:i] + chr(j) + currentStr[i+1:]
if (tmp in dict):
queue.append(tmp)
dict.remove(tmp)
i += 1
levelIndex -= 1
if (levelIndex <= 0):
level += 1
levelIndex = len(queue)
return 0
|
<commit_before><commit_msg>Add solution to lintcode question 120<commit_after>
|
class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return an integer
def ladderLength(self, start, end, dict):
# write your code here
queue = [start]
level = 1
levelIndex = 0
dict.add(end)
while (queue):
currentStr = queue.pop(0)
if (currentStr == end):
return level
i = 0
while (i < len(currentStr)):
for j in range(97, 123):
tmp = currentStr[:i] + chr(j) + currentStr[i+1:]
if (tmp in dict):
queue.append(tmp)
dict.remove(tmp)
i += 1
levelIndex -= 1
if (levelIndex <= 0):
level += 1
levelIndex = len(queue)
return 0
|
Add solution to lintcode question 120class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return an integer
def ladderLength(self, start, end, dict):
# write your code here
queue = [start]
level = 1
levelIndex = 0
dict.add(end)
while (queue):
currentStr = queue.pop(0)
if (currentStr == end):
return level
i = 0
while (i < len(currentStr)):
for j in range(97, 123):
tmp = currentStr[:i] + chr(j) + currentStr[i+1:]
if (tmp in dict):
queue.append(tmp)
dict.remove(tmp)
i += 1
levelIndex -= 1
if (levelIndex <= 0):
level += 1
levelIndex = len(queue)
return 0
|
<commit_before><commit_msg>Add solution to lintcode question 120<commit_after>class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return an integer
def ladderLength(self, start, end, dict):
# write your code here
queue = [start]
level = 1
levelIndex = 0
dict.add(end)
while (queue):
currentStr = queue.pop(0)
if (currentStr == end):
return level
i = 0
while (i < len(currentStr)):
for j in range(97, 123):
tmp = currentStr[:i] + chr(j) + currentStr[i+1:]
if (tmp in dict):
queue.append(tmp)
dict.remove(tmp)
i += 1
levelIndex -= 1
if (levelIndex <= 0):
level += 1
levelIndex = len(queue)
return 0
|
|
c891fd7fb399c5b1730eef33ccea12770c28b2fd
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
Add simple distutils script for modules
|
Add simple distutils script for modules
|
Python
|
mit
|
epierson9/multiphenotype_methods
|
Add simple distutils script for modules
|
from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
<commit_before><commit_msg>Add simple distutils script for modules<commit_after>
|
from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
Add simple distutils script for modulesfrom distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
<commit_before><commit_msg>Add simple distutils script for modules<commit_after>from distutils.core import setup
setup(name='dimreducer',
version='1.0',
description='Dimension reduction methods',
py_modules=['dimreducer'],
)
setup(name='multiphenotype_utils',
version='1.0',
description='Utility functions for all methods',
py_modules=['multiphenotype_utils'],
)
setup(name='general_autoencoder',
version='1.0',
description='Autoencoder base class',
py_modules=['general_autoencoder'],
)
setup(name='standard_autoencoder',
version='1.0',
description='Standard autoencoder',
py_modules=['standard_autoencoder'],
)
setup(name='variational_autoencoder',
version='1.0',
description='VAE',
py_modules=['variational_autoencoder'],
)
setup(name='variational_age_autoencoder',
version='1.0',
description='VAE with age',
py_modules=['variational_age_autoencoder'],
)
|
|
8fa6beb9bd3fe866be11c97bed4fbe2532198dfc
|
ingestor/ingest_from_yaml.py
|
ingestor/ingest_from_yaml.py
|
import click
import os
import os.path
import yaml
from create_tiles import calc_target_names, create_tiles
from geotiff_to_netcdf import create_or_replace
from pprint import pprint
def read_yaml(filename):
with open(filename) as f:
data = yaml.load(f)
return data
def get_input_files(input_file, data):
bands = sorted([band for band_num, band in data['image']['bands'].items()], key=lambda band: band['number'])
input_files = [band['path'] for band in bands]
base_input_directory = os.path.dirname(input_file)
input_files = [os.path.join(base_input_directory, filename) for filename in input_files]
return input_files
@click.command()
@click.option('--output-dir', default='.')
@click.argument('yaml_file', type=click.Path(exists=True))
def main(yaml_file, output_dir):
os.chdir(output_dir)
data = read_yaml(yaml_file)
pprint(data, indent=2)
input_files = get_input_files(yaml_file, data)
basename = data['ga_label']
filename_format = 'combined_{x}_{y}.nc'
tile_options = {
'output_format': 'GTiff',
'create_options': ['COMPRESS=DEFLATE', 'ZLEVEL=1']
}
# Create Tiles
create_tiles(input_files, output_dir, basename, tile_options)
renames = calc_target_names('test.csv', filename_format, data)
# Import into proper NetCDF files
for geotiff, netcdf in renames:
create_or_replace(geotiff, netcdf)
if __name__ == '__main__':
try:
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
main()
except ImportError:
main()
|
Add new command line tool for ingesting from YAML description to NetCDF
|
Add new command line tool for ingesting from YAML description to NetCDF
|
Python
|
bsd-3-clause
|
omad/datacube-experiments
|
Add new command line tool for ingesting from YAML description to NetCDF
|
import click
import os
import os.path
import yaml
from create_tiles import calc_target_names, create_tiles
from geotiff_to_netcdf import create_or_replace
from pprint import pprint
def read_yaml(filename):
with open(filename) as f:
data = yaml.load(f)
return data
def get_input_files(input_file, data):
bands = sorted([band for band_num, band in data['image']['bands'].items()], key=lambda band: band['number'])
input_files = [band['path'] for band in bands]
base_input_directory = os.path.dirname(input_file)
input_files = [os.path.join(base_input_directory, filename) for filename in input_files]
return input_files
@click.command()
@click.option('--output-dir', default='.')
@click.argument('yaml_file', type=click.Path(exists=True))
def main(yaml_file, output_dir):
os.chdir(output_dir)
data = read_yaml(yaml_file)
pprint(data, indent=2)
input_files = get_input_files(yaml_file, data)
basename = data['ga_label']
filename_format = 'combined_{x}_{y}.nc'
tile_options = {
'output_format': 'GTiff',
'create_options': ['COMPRESS=DEFLATE', 'ZLEVEL=1']
}
# Create Tiles
create_tiles(input_files, output_dir, basename, tile_options)
renames = calc_target_names('test.csv', filename_format, data)
# Import into proper NetCDF files
for geotiff, netcdf in renames:
create_or_replace(geotiff, netcdf)
if __name__ == '__main__':
try:
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
main()
except ImportError:
main()
|
<commit_before><commit_msg>Add new command line tool for ingesting from YAML description to NetCDF<commit_after>
|
import click
import os
import os.path
import yaml
from create_tiles import calc_target_names, create_tiles
from geotiff_to_netcdf import create_or_replace
from pprint import pprint
def read_yaml(filename):
with open(filename) as f:
data = yaml.load(f)
return data
def get_input_files(input_file, data):
bands = sorted([band for band_num, band in data['image']['bands'].items()], key=lambda band: band['number'])
input_files = [band['path'] for band in bands]
base_input_directory = os.path.dirname(input_file)
input_files = [os.path.join(base_input_directory, filename) for filename in input_files]
return input_files
@click.command()
@click.option('--output-dir', default='.')
@click.argument('yaml_file', type=click.Path(exists=True))
def main(yaml_file, output_dir):
os.chdir(output_dir)
data = read_yaml(yaml_file)
pprint(data, indent=2)
input_files = get_input_files(yaml_file, data)
basename = data['ga_label']
filename_format = 'combined_{x}_{y}.nc'
tile_options = {
'output_format': 'GTiff',
'create_options': ['COMPRESS=DEFLATE', 'ZLEVEL=1']
}
# Create Tiles
create_tiles(input_files, output_dir, basename, tile_options)
renames = calc_target_names('test.csv', filename_format, data)
# Import into proper NetCDF files
for geotiff, netcdf in renames:
create_or_replace(geotiff, netcdf)
if __name__ == '__main__':
try:
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
main()
except ImportError:
main()
|
Add new command line tool for ingesting from YAML description to NetCDF
import click
import os
import os.path
import yaml
from create_tiles import calc_target_names, create_tiles
from geotiff_to_netcdf import create_or_replace
from pprint import pprint
def read_yaml(filename):
with open(filename) as f:
data = yaml.load(f)
return data
def get_input_files(input_file, data):
bands = sorted([band for band_num, band in data['image']['bands'].items()], key=lambda band: band['number'])
input_files = [band['path'] for band in bands]
base_input_directory = os.path.dirname(input_file)
input_files = [os.path.join(base_input_directory, filename) for filename in input_files]
return input_files
@click.command()
@click.option('--output-dir', default='.')
@click.argument('yaml_file', type=click.Path(exists=True))
def main(yaml_file, output_dir):
os.chdir(output_dir)
data = read_yaml(yaml_file)
pprint(data, indent=2)
input_files = get_input_files(yaml_file, data)
basename = data['ga_label']
filename_format = 'combined_{x}_{y}.nc'
tile_options = {
'output_format': 'GTiff',
'create_options': ['COMPRESS=DEFLATE', 'ZLEVEL=1']
}
# Create Tiles
create_tiles(input_files, output_dir, basename, tile_options)
renames = calc_target_names('test.csv', filename_format, data)
# Import into proper NetCDF files
for geotiff, netcdf in renames:
create_or_replace(geotiff, netcdf)
if __name__ == '__main__':
try:
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
main()
except ImportError:
main()
|
<commit_before><commit_msg>Add new command line tool for ingesting from YAML description to NetCDF<commit_after>
import click
import os
import os.path
import yaml
from create_tiles import calc_target_names, create_tiles
from geotiff_to_netcdf import create_or_replace
from pprint import pprint
def read_yaml(filename):
with open(filename) as f:
data = yaml.load(f)
return data
def get_input_files(input_file, data):
bands = sorted([band for band_num, band in data['image']['bands'].items()], key=lambda band: band['number'])
input_files = [band['path'] for band in bands]
base_input_directory = os.path.dirname(input_file)
input_files = [os.path.join(base_input_directory, filename) for filename in input_files]
return input_files
@click.command()
@click.option('--output-dir', default='.')
@click.argument('yaml_file', type=click.Path(exists=True))
def main(yaml_file, output_dir):
os.chdir(output_dir)
data = read_yaml(yaml_file)
pprint(data, indent=2)
input_files = get_input_files(yaml_file, data)
basename = data['ga_label']
filename_format = 'combined_{x}_{y}.nc'
tile_options = {
'output_format': 'GTiff',
'create_options': ['COMPRESS=DEFLATE', 'ZLEVEL=1']
}
# Create Tiles
create_tiles(input_files, output_dir, basename, tile_options)
renames = calc_target_names('test.csv', filename_format, data)
# Import into proper NetCDF files
for geotiff, netcdf in renames:
create_or_replace(geotiff, netcdf)
if __name__ == '__main__':
try:
from ipdb import launch_ipdb_on_exception
with launch_ipdb_on_exception():
main()
except ImportError:
main()
|
|
ae15b8db30d726990b3ba2ee1885e093b0b933bf
|
tests/test_core/test_compoundgenerator_performance.py
|
tests/test_core/test_compoundgenerator_performance.py
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import unittest
import time
from test_util import ScanPointGeneratorTest
from scanpointgenerator import CompoundGenerator
from scanpointgenerator import LineGenerator
from scanpointgenerator import SpiralGenerator
from scanpointgenerator import Excluder
from scanpointgenerator.rois import CircularROI
from scanpointgenerator.mutators import FixedDurationMutator, RandomOffsetMutator
class CompoundGeneratorPerformanceTest(ScanPointGeneratorTest):
def test_200_million_time_constraint(self):
start_time = time.time()
s = SpiralGenerator(
["x", "y"], "mm", [0, 0], 6, 0.02,
alternate_direction=True) # ~2e5 points
z = LineGenerator("z", "mm", 0, 1, 100) #1e2 points
w = LineGenerator("w", "mm", 0, 1, 10) #1e1 points
r1 = CircularROI([-0.7, 4], 0.5)
r2 = CircularROI([0.5, 0.5], 0.3)
r3 = CircularROI([0.2, 4], 0.5)
e1 = Excluder(r1, ["x", "y"])
e2 = Excluder(r2, ["w", "z"])
e3 = Excluder(r3, ["z", "y"])
fm = FixedDurationMutator(0.1)
om = RandomOffsetMutator(0, ["x", "y"], {"x":0.2, "y":0.2})
g = CompoundGenerator([w, z, s], [e1, e3, e2], [fm, om])
g.prepare() # g.num ~3e5
end_time = time.time()
#self.assertLess(end_time - start_time, 5)
# TravisCI VMs are sometimes pretty weak
# if this test becomes problematic then we'll just have to remove it
self.assertLess(end_time - start_time, 12)
# we dont care about this right now
#start_time = time.time()
#for p in g.iterator():
# pass
#end_time = time.time()
## point objects are quite expensive to create
#self.assertLess(end_time - start_time, 20)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add time-sensitive test for compound generator for a large scan.
|
Add time-sensitive test for compound generator for a large scan.
Tests that point preparation for ~100 million points (before region
filtering) happens within a few seconds.
|
Python
|
apache-2.0
|
dls-controls/scanpointgenerator
|
Add time-sensitive test for compound generator for a large scan.
Tests that point preparation for ~100 million points (before region
filtering) happens within a few seconds.
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import unittest
import time
from test_util import ScanPointGeneratorTest
from scanpointgenerator import CompoundGenerator
from scanpointgenerator import LineGenerator
from scanpointgenerator import SpiralGenerator
from scanpointgenerator import Excluder
from scanpointgenerator.rois import CircularROI
from scanpointgenerator.mutators import FixedDurationMutator, RandomOffsetMutator
class CompoundGeneratorPerformanceTest(ScanPointGeneratorTest):
def test_200_million_time_constraint(self):
start_time = time.time()
s = SpiralGenerator(
["x", "y"], "mm", [0, 0], 6, 0.02,
alternate_direction=True) # ~2e5 points
z = LineGenerator("z", "mm", 0, 1, 100) #1e2 points
w = LineGenerator("w", "mm", 0, 1, 10) #1e1 points
r1 = CircularROI([-0.7, 4], 0.5)
r2 = CircularROI([0.5, 0.5], 0.3)
r3 = CircularROI([0.2, 4], 0.5)
e1 = Excluder(r1, ["x", "y"])
e2 = Excluder(r2, ["w", "z"])
e3 = Excluder(r3, ["z", "y"])
fm = FixedDurationMutator(0.1)
om = RandomOffsetMutator(0, ["x", "y"], {"x":0.2, "y":0.2})
g = CompoundGenerator([w, z, s], [e1, e3, e2], [fm, om])
g.prepare() # g.num ~3e5
end_time = time.time()
#self.assertLess(end_time - start_time, 5)
# TravisCI VMs are sometimes pretty weak
# if this test becomes problematic then we'll just have to remove it
self.assertLess(end_time - start_time, 12)
# we dont care about this right now
#start_time = time.time()
#for p in g.iterator():
# pass
#end_time = time.time()
## point objects are quite expensive to create
#self.assertLess(end_time - start_time, 20)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add time-sensitive test for compound generator for a large scan.
Tests that point preparation for ~100 million points (before region
filtering) happens within a few seconds.<commit_after>
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import unittest
import time
from test_util import ScanPointGeneratorTest
from scanpointgenerator import CompoundGenerator
from scanpointgenerator import LineGenerator
from scanpointgenerator import SpiralGenerator
from scanpointgenerator import Excluder
from scanpointgenerator.rois import CircularROI
from scanpointgenerator.mutators import FixedDurationMutator, RandomOffsetMutator
class CompoundGeneratorPerformanceTest(ScanPointGeneratorTest):
def test_200_million_time_constraint(self):
start_time = time.time()
s = SpiralGenerator(
["x", "y"], "mm", [0, 0], 6, 0.02,
alternate_direction=True) # ~2e5 points
z = LineGenerator("z", "mm", 0, 1, 100) #1e2 points
w = LineGenerator("w", "mm", 0, 1, 10) #1e1 points
r1 = CircularROI([-0.7, 4], 0.5)
r2 = CircularROI([0.5, 0.5], 0.3)
r3 = CircularROI([0.2, 4], 0.5)
e1 = Excluder(r1, ["x", "y"])
e2 = Excluder(r2, ["w", "z"])
e3 = Excluder(r3, ["z", "y"])
fm = FixedDurationMutator(0.1)
om = RandomOffsetMutator(0, ["x", "y"], {"x":0.2, "y":0.2})
g = CompoundGenerator([w, z, s], [e1, e3, e2], [fm, om])
g.prepare() # g.num ~3e5
end_time = time.time()
#self.assertLess(end_time - start_time, 5)
# TravisCI VMs are sometimes pretty weak
# if this test becomes problematic then we'll just have to remove it
self.assertLess(end_time - start_time, 12)
# we dont care about this right now
#start_time = time.time()
#for p in g.iterator():
# pass
#end_time = time.time()
## point objects are quite expensive to create
#self.assertLess(end_time - start_time, 20)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add time-sensitive test for compound generator for a large scan.
Tests that point preparation for ~100 million points (before region
filtering) happens within a few seconds.import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import unittest
import time
from test_util import ScanPointGeneratorTest
from scanpointgenerator import CompoundGenerator
from scanpointgenerator import LineGenerator
from scanpointgenerator import SpiralGenerator
from scanpointgenerator import Excluder
from scanpointgenerator.rois import CircularROI
from scanpointgenerator.mutators import FixedDurationMutator, RandomOffsetMutator
class CompoundGeneratorPerformanceTest(ScanPointGeneratorTest):
def test_200_million_time_constraint(self):
start_time = time.time()
s = SpiralGenerator(
["x", "y"], "mm", [0, 0], 6, 0.02,
alternate_direction=True) # ~2e5 points
z = LineGenerator("z", "mm", 0, 1, 100) #1e2 points
w = LineGenerator("w", "mm", 0, 1, 10) #1e1 points
r1 = CircularROI([-0.7, 4], 0.5)
r2 = CircularROI([0.5, 0.5], 0.3)
r3 = CircularROI([0.2, 4], 0.5)
e1 = Excluder(r1, ["x", "y"])
e2 = Excluder(r2, ["w", "z"])
e3 = Excluder(r3, ["z", "y"])
fm = FixedDurationMutator(0.1)
om = RandomOffsetMutator(0, ["x", "y"], {"x":0.2, "y":0.2})
g = CompoundGenerator([w, z, s], [e1, e3, e2], [fm, om])
g.prepare() # g.num ~3e5
end_time = time.time()
#self.assertLess(end_time - start_time, 5)
# TravisCI VMs are sometimes pretty weak
# if this test becomes problematic then we'll just have to remove it
self.assertLess(end_time - start_time, 12)
# we dont care about this right now
#start_time = time.time()
#for p in g.iterator():
# pass
#end_time = time.time()
## point objects are quite expensive to create
#self.assertLess(end_time - start_time, 20)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add time-sensitive test for compound generator for a large scan.
Tests that point preparation for ~100 million points (before region
filtering) happens within a few seconds.<commit_after>import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import unittest
import time
from test_util import ScanPointGeneratorTest
from scanpointgenerator import CompoundGenerator
from scanpointgenerator import LineGenerator
from scanpointgenerator import SpiralGenerator
from scanpointgenerator import Excluder
from scanpointgenerator.rois import CircularROI
from scanpointgenerator.mutators import FixedDurationMutator, RandomOffsetMutator
class CompoundGeneratorPerformanceTest(ScanPointGeneratorTest):
def test_200_million_time_constraint(self):
start_time = time.time()
s = SpiralGenerator(
["x", "y"], "mm", [0, 0], 6, 0.02,
alternate_direction=True) # ~2e5 points
z = LineGenerator("z", "mm", 0, 1, 100) #1e2 points
w = LineGenerator("w", "mm", 0, 1, 10) #1e1 points
r1 = CircularROI([-0.7, 4], 0.5)
r2 = CircularROI([0.5, 0.5], 0.3)
r3 = CircularROI([0.2, 4], 0.5)
e1 = Excluder(r1, ["x", "y"])
e2 = Excluder(r2, ["w", "z"])
e3 = Excluder(r3, ["z", "y"])
fm = FixedDurationMutator(0.1)
om = RandomOffsetMutator(0, ["x", "y"], {"x":0.2, "y":0.2})
g = CompoundGenerator([w, z, s], [e1, e3, e2], [fm, om])
g.prepare() # g.num ~3e5
end_time = time.time()
#self.assertLess(end_time - start_time, 5)
# TravisCI VMs are sometimes pretty weak
# if this test becomes problematic then we'll just have to remove it
self.assertLess(end_time - start_time, 12)
# we dont care about this right now
#start_time = time.time()
#for p in g.iterator():
# pass
#end_time = time.time()
## point objects are quite expensive to create
#self.assertLess(end_time - start_time, 20)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
5898822218500487419256676ac57c70a7d40367
|
py/2-keys-keyboard.py
|
py/2-keys-keyboard.py
|
class Solution(object):
def minSteps(self, n):
"""
:type n: int
:rtype: int
"""
ans = [n] * (n + 1)
ans[1] = 0
for i in xrange(1, n + 1):
for j in xrange(2, n / i + 1):
ans[j * i] = min(ans[i] + j, ans[j * i])
return ans[n]
|
Add py solution for 650. 2 Keys Keyboard
|
Add py solution for 650. 2 Keys Keyboard
650. 2 Keys Keyboard: https://leetcode.com/problems/2-keys-keyboard/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 650. 2 Keys Keyboard
650. 2 Keys Keyboard: https://leetcode.com/problems/2-keys-keyboard/
|
class Solution(object):
def minSteps(self, n):
"""
:type n: int
:rtype: int
"""
ans = [n] * (n + 1)
ans[1] = 0
for i in xrange(1, n + 1):
for j in xrange(2, n / i + 1):
ans[j * i] = min(ans[i] + j, ans[j * i])
return ans[n]
|
<commit_before><commit_msg>Add py solution for 650. 2 Keys Keyboard
650. 2 Keys Keyboard: https://leetcode.com/problems/2-keys-keyboard/<commit_after>
|
class Solution(object):
def minSteps(self, n):
"""
:type n: int
:rtype: int
"""
ans = [n] * (n + 1)
ans[1] = 0
for i in xrange(1, n + 1):
for j in xrange(2, n / i + 1):
ans[j * i] = min(ans[i] + j, ans[j * i])
return ans[n]
|
Add py solution for 650. 2 Keys Keyboard
650. 2 Keys Keyboard: https://leetcode.com/problems/2-keys-keyboard/class Solution(object):
def minSteps(self, n):
"""
:type n: int
:rtype: int
"""
ans = [n] * (n + 1)
ans[1] = 0
for i in xrange(1, n + 1):
for j in xrange(2, n / i + 1):
ans[j * i] = min(ans[i] + j, ans[j * i])
return ans[n]
|
<commit_before><commit_msg>Add py solution for 650. 2 Keys Keyboard
650. 2 Keys Keyboard: https://leetcode.com/problems/2-keys-keyboard/<commit_after>class Solution(object):
def minSteps(self, n):
"""
:type n: int
:rtype: int
"""
ans = [n] * (n + 1)
ans[1] = 0
for i in xrange(1, n + 1):
for j in xrange(2, n / i + 1):
ans[j * i] = min(ans[i] + j, ans[j * i])
return ans[n]
|
|
44971ec8f96302a04dbb9d4e66299462f245fb1d
|
bin/aggregate_metrics.py
|
bin/aggregate_metrics.py
|
import sys
import csv
import os
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator
def check_arguments():
if len(sys.argv) != 2:
print 'usage: aggregate_metrics.py <data directory>\n'
print ' data directory: this should point to a directory'
print ' containing chunks of metric data.'
print ' metric data should be in json and'
print ' ordered by dataset key.'
sys.exit(-1)
data_dir = sys.argv[1]
return data_dir
def aggregate_metrics(data_dir):
agg = ReportAggregator()
data = agg.aggregate(data_dir)
return data
def write_data(data):
writer = csv.writer(sys.stdout, delimiter=',')
basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'MATERIAL_SAMPLE', 'LITERATURE', 'UNKNOWN']
header = ['dataset_key', 'bor_preserved_specimen', 'fossil_specimen', 'bor_living_specimen', 'bor_observation', 'bor_human_observation', 'bor_machine_observation', 'bor_material_sample', 'bor_literature', 'bor_unknown']
writer.writerow(header)
for dataset in data:
row = [dataset]
basis_of_records = data[dataset]['basisofRecords']
for metric_name in basis_of_records_metrics:
if metric_name in basis_of_records:
row.append(basis_of_records[metric_name])
else:
row.append(0)
writer.writerow(row)
def main():
data_dir = check_arguments()
data = aggregate_metrics(data_dir)
write_data(data)
main()
|
Add script to aggregate metrics
|
Add script to aggregate metrics
|
Python
|
mit
|
Datafable/gbif-dataset-metrics,Datafable/gbif-dataset-metrics,Datafable/gbif-dataset-metrics
|
Add script to aggregate metrics
|
import sys
import csv
import os
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator
def check_arguments():
if len(sys.argv) != 2:
print 'usage: aggregate_metrics.py <data directory>\n'
print ' data directory: this should point to a directory'
print ' containing chunks of metric data.'
print ' metric data should be in json and'
print ' ordered by dataset key.'
sys.exit(-1)
data_dir = sys.argv[1]
return data_dir
def aggregate_metrics(data_dir):
agg = ReportAggregator()
data = agg.aggregate(data_dir)
return data
def write_data(data):
writer = csv.writer(sys.stdout, delimiter=',')
basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'MATERIAL_SAMPLE', 'LITERATURE', 'UNKNOWN']
header = ['dataset_key', 'bor_preserved_specimen', 'fossil_specimen', 'bor_living_specimen', 'bor_observation', 'bor_human_observation', 'bor_machine_observation', 'bor_material_sample', 'bor_literature', 'bor_unknown']
writer.writerow(header)
for dataset in data:
row = [dataset]
basis_of_records = data[dataset]['basisofRecords']
for metric_name in basis_of_records_metrics:
if metric_name in basis_of_records:
row.append(basis_of_records[metric_name])
else:
row.append(0)
writer.writerow(row)
def main():
data_dir = check_arguments()
data = aggregate_metrics(data_dir)
write_data(data)
main()
|
<commit_before><commit_msg>Add script to aggregate metrics<commit_after>
|
import sys
import csv
import os
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator
def check_arguments():
if len(sys.argv) != 2:
print 'usage: aggregate_metrics.py <data directory>\n'
print ' data directory: this should point to a directory'
print ' containing chunks of metric data.'
print ' metric data should be in json and'
print ' ordered by dataset key.'
sys.exit(-1)
data_dir = sys.argv[1]
return data_dir
def aggregate_metrics(data_dir):
agg = ReportAggregator()
data = agg.aggregate(data_dir)
return data
def write_data(data):
writer = csv.writer(sys.stdout, delimiter=',')
basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'MATERIAL_SAMPLE', 'LITERATURE', 'UNKNOWN']
header = ['dataset_key', 'bor_preserved_specimen', 'fossil_specimen', 'bor_living_specimen', 'bor_observation', 'bor_human_observation', 'bor_machine_observation', 'bor_material_sample', 'bor_literature', 'bor_unknown']
writer.writerow(header)
for dataset in data:
row = [dataset]
basis_of_records = data[dataset]['basisofRecords']
for metric_name in basis_of_records_metrics:
if metric_name in basis_of_records:
row.append(basis_of_records[metric_name])
else:
row.append(0)
writer.writerow(row)
def main():
data_dir = check_arguments()
data = aggregate_metrics(data_dir)
write_data(data)
main()
|
Add script to aggregate metricsimport sys
import csv
import os
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator
def check_arguments():
if len(sys.argv) != 2:
print 'usage: aggregate_metrics.py <data directory>\n'
print ' data directory: this should point to a directory'
print ' containing chunks of metric data.'
print ' metric data should be in json and'
print ' ordered by dataset key.'
sys.exit(-1)
data_dir = sys.argv[1]
return data_dir
def aggregate_metrics(data_dir):
agg = ReportAggregator()
data = agg.aggregate(data_dir)
return data
def write_data(data):
writer = csv.writer(sys.stdout, delimiter=',')
basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'MATERIAL_SAMPLE', 'LITERATURE', 'UNKNOWN']
header = ['dataset_key', 'bor_preserved_specimen', 'fossil_specimen', 'bor_living_specimen', 'bor_observation', 'bor_human_observation', 'bor_machine_observation', 'bor_material_sample', 'bor_literature', 'bor_unknown']
writer.writerow(header)
for dataset in data:
row = [dataset]
basis_of_records = data[dataset]['basisofRecords']
for metric_name in basis_of_records_metrics:
if metric_name in basis_of_records:
row.append(basis_of_records[metric_name])
else:
row.append(0)
writer.writerow(row)
def main():
data_dir = check_arguments()
data = aggregate_metrics(data_dir)
write_data(data)
main()
|
<commit_before><commit_msg>Add script to aggregate metrics<commit_after>import sys
import csv
import os
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator
def check_arguments():
if len(sys.argv) != 2:
print 'usage: aggregate_metrics.py <data directory>\n'
print ' data directory: this should point to a directory'
print ' containing chunks of metric data.'
print ' metric data should be in json and'
print ' ordered by dataset key.'
sys.exit(-1)
data_dir = sys.argv[1]
return data_dir
def aggregate_metrics(data_dir):
agg = ReportAggregator()
data = agg.aggregate(data_dir)
return data
def write_data(data):
writer = csv.writer(sys.stdout, delimiter=',')
basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'MATERIAL_SAMPLE', 'LITERATURE', 'UNKNOWN']
header = ['dataset_key', 'bor_preserved_specimen', 'fossil_specimen', 'bor_living_specimen', 'bor_observation', 'bor_human_observation', 'bor_machine_observation', 'bor_material_sample', 'bor_literature', 'bor_unknown']
writer.writerow(header)
for dataset in data:
row = [dataset]
basis_of_records = data[dataset]['basisofRecords']
for metric_name in basis_of_records_metrics:
if metric_name in basis_of_records:
row.append(basis_of_records[metric_name])
else:
row.append(0)
writer.writerow(row)
def main():
data_dir = check_arguments()
data = aggregate_metrics(data_dir)
write_data(data)
main()
|
|
800e5514d0e641a09a3539c6425309e1f149621b
|
scripts/validate_pccora_filename_dates.py
|
scripts/validate_pccora_filename_dates.py
|
#!/usr/bin/env python3
import glob
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj):
i = 0
for container in obj:
print("### Item %d" % (i+1))
for key in container:
print("%s -> %s" % (key, container[key]))
i = i + 1
def main():
directory = '/home/kinow/Downloads/1998/'
for filename in glob.glob('/'.join([directory, '**/*.*'])):
# first we check consistency between parent directory and filename
date1 = filename.split('/')[-2]
date2, extension = filename.split('/')[-1].split('.')
# ignore txt files that contain only info about the file
if 'txt' == extension:
continue
if not date2.startswith(date1):
print("Parent directory does not match with file name: {} - {} and {}".format(filename, date1, date2))
# now compare the date data from the identification section
pccora_parser = PCCORAParser()
pccora_parser.parse_file(filename)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
date_within_file = ''.join([
str(ident['message_year']), str(ident['message_month']), str(ident['message_day']),
str(ident['message_hour'])
])
if date_within_file != date2:
print("Date in identification section does not match with file name: {} - {} and {}".format(filename, date_within_file, date2))
if __name__ == '__main__':
main()
|
Add script to validate identification section date and filename date
|
Add script to validate identification section date and filename date
|
Python
|
mit
|
kinow/pccora
|
Add script to validate identification section date and filename date
|
#!/usr/bin/env python3
import glob
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj):
i = 0
for container in obj:
print("### Item %d" % (i+1))
for key in container:
print("%s -> %s" % (key, container[key]))
i = i + 1
def main():
directory = '/home/kinow/Downloads/1998/'
for filename in glob.glob('/'.join([directory, '**/*.*'])):
# first we check consistency between parent directory and filename
date1 = filename.split('/')[-2]
date2, extension = filename.split('/')[-1].split('.')
# ignore txt files that contain only info about the file
if 'txt' == extension:
continue
if not date2.startswith(date1):
print("Parent directory does not match with file name: {} - {} and {}".format(filename, date1, date2))
# now compare the date data from the identification section
pccora_parser = PCCORAParser()
pccora_parser.parse_file(filename)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
date_within_file = ''.join([
str(ident['message_year']), str(ident['message_month']), str(ident['message_day']),
str(ident['message_hour'])
])
if date_within_file != date2:
print("Date in identification section does not match with file name: {} - {} and {}".format(filename, date_within_file, date2))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to validate identification section date and filename date<commit_after>
|
#!/usr/bin/env python3
import glob
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj):
i = 0
for container in obj:
print("### Item %d" % (i+1))
for key in container:
print("%s -> %s" % (key, container[key]))
i = i + 1
def main():
directory = '/home/kinow/Downloads/1998/'
for filename in glob.glob('/'.join([directory, '**/*.*'])):
# first we check consistency between parent directory and filename
date1 = filename.split('/')[-2]
date2, extension = filename.split('/')[-1].split('.')
# ignore txt files that contain only info about the file
if 'txt' == extension:
continue
if not date2.startswith(date1):
print("Parent directory does not match with file name: {} - {} and {}".format(filename, date1, date2))
# now compare the date data from the identification section
pccora_parser = PCCORAParser()
pccora_parser.parse_file(filename)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
date_within_file = ''.join([
str(ident['message_year']), str(ident['message_month']), str(ident['message_day']),
str(ident['message_hour'])
])
if date_within_file != date2:
print("Date in identification section does not match with file name: {} - {} and {}".format(filename, date_within_file, date2))
if __name__ == '__main__':
main()
|
Add script to validate identification section date and filename date#!/usr/bin/env python3
import glob
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj):
i = 0
for container in obj:
print("### Item %d" % (i+1))
for key in container:
print("%s -> %s" % (key, container[key]))
i = i + 1
def main():
directory = '/home/kinow/Downloads/1998/'
for filename in glob.glob('/'.join([directory, '**/*.*'])):
# first we check consistency between parent directory and filename
date1 = filename.split('/')[-2]
date2, extension = filename.split('/')[-1].split('.')
# ignore txt files that contain only info about the file
if 'txt' == extension:
continue
if not date2.startswith(date1):
print("Parent directory does not match with file name: {} - {} and {}".format(filename, date1, date2))
# now compare the date data from the identification section
pccora_parser = PCCORAParser()
pccora_parser.parse_file(filename)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
date_within_file = ''.join([
str(ident['message_year']), str(ident['message_month']), str(ident['message_day']),
str(ident['message_hour'])
])
if date_within_file != date2:
print("Date in identification section does not match with file name: {} - {} and {}".format(filename, date_within_file, date2))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to validate identification section date and filename date<commit_after>#!/usr/bin/env python3
import glob
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj):
i = 0
for container in obj:
print("### Item %d" % (i+1))
for key in container:
print("%s -> %s" % (key, container[key]))
i = i + 1
def main():
directory = '/home/kinow/Downloads/1998/'
for filename in glob.glob('/'.join([directory, '**/*.*'])):
# first we check consistency between parent directory and filename
date1 = filename.split('/')[-2]
date2, extension = filename.split('/')[-1].split('.')
# ignore txt files that contain only info about the file
if 'txt' == extension:
continue
if not date2.startswith(date1):
print("Parent directory does not match with file name: {} - {} and {}".format(filename, date1, date2))
# now compare the date data from the identification section
pccora_parser = PCCORAParser()
pccora_parser.parse_file(filename)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
date_within_file = ''.join([
str(ident['message_year']), str(ident['message_month']), str(ident['message_day']),
str(ident['message_hour'])
])
if date_within_file != date2:
print("Date in identification section does not match with file name: {} - {} and {}".format(filename, date_within_file, date2))
if __name__ == '__main__':
main()
|
|
a20f2077ef910a930508a5ed1e5e598b66db3b03
|
ckanext/dataviewanalytics/db.py
|
ckanext/dataviewanalytics/db.py
|
'''Models definition for database tables creation
'''
from ckan.common import config
from sqlalchemy import (Column, Integer, String, ForeignKey, create_engine, types)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from ckan import model
from ckan.model import user_table
Base = declarative_base()
DB_LOCATION = config.get('sqlalchemy.url')
engine = create_engine(DB_LOCATION)
Session = sessionmaker(bind=engine)
def create_tables():
'''Create the tables on the database
'''
Base.metadata.create_all(engine)
class UserAnalytics(Base):
''' Model to create database table for extra user details
'''
__tablename__ = 'user_analytics'
id = Column(Integer, primary_key=True)
user_id = Column(types.UnicodeText, primary_key=False)
country = Column(String(256))
occupation = Column(String(256))
def __repr__(self):
return '<User_ID: {}, Occupation: {}, Country: {}>'.format(
self.user_id, self.occupation, self.country)
class DataAnalytics(Base):
'''Model to create database table for data analytics information
'''
__tablename__ = 'data_analytics'
id = Column(Integer, primary_key=True)
resource_id = Column(types.UnicodeText, primary_key=False)
user_id = Column(types.UnicodeText, primary_key=False)
occupation = Column(String(256))
country = Column(String(256))
def __repr__(self):
return '<Resource_ID: {}, User_ID: {}>'.format(self.resource_id, self.user_id)
|
Write models for analytics tables
|
Write models for analytics tables
|
Python
|
agpl-3.0
|
shemogumbe/ckanext-dataviewanalytics,shemogumbe/ckanext-dataviewanalytics,shemogumbe/ckanext-dataviewanalytics
|
Write models for analytics tables
|
'''Models definition for database tables creation
'''
from ckan.common import config
from sqlalchemy import (Column, Integer, String, ForeignKey, create_engine, types)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from ckan import model
from ckan.model import user_table
Base = declarative_base()
DB_LOCATION = config.get('sqlalchemy.url')
engine = create_engine(DB_LOCATION)
Session = sessionmaker(bind=engine)
def create_tables():
'''Create the tables on the database
'''
Base.metadata.create_all(engine)
class UserAnalytics(Base):
''' Model to create database table for extra user details
'''
__tablename__ = 'user_analytics'
id = Column(Integer, primary_key=True)
user_id = Column(types.UnicodeText, primary_key=False)
country = Column(String(256))
occupation = Column(String(256))
def __repr__(self):
return '<User_ID: {}, Occupation: {}, Country: {}>'.format(
self.user_id, self.occupation, self.country)
class DataAnalytics(Base):
'''Model to create database table for data analytics information
'''
__tablename__ = 'data_analytics'
id = Column(Integer, primary_key=True)
resource_id = Column(types.UnicodeText, primary_key=False)
user_id = Column(types.UnicodeText, primary_key=False)
occupation = Column(String(256))
country = Column(String(256))
def __repr__(self):
return '<Resource_ID: {}, User_ID: {}>'.format(self.resource_id, self.user_id)
|
<commit_before><commit_msg>Write models for analytics tables<commit_after>
|
'''Models definition for database tables creation
'''
from ckan.common import config
from sqlalchemy import (Column, Integer, String, ForeignKey, create_engine, types)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from ckan import model
from ckan.model import user_table
Base = declarative_base()
DB_LOCATION = config.get('sqlalchemy.url')
engine = create_engine(DB_LOCATION)
Session = sessionmaker(bind=engine)
def create_tables():
'''Create the tables on the database
'''
Base.metadata.create_all(engine)
class UserAnalytics(Base):
''' Model to create database table for extra user details
'''
__tablename__ = 'user_analytics'
id = Column(Integer, primary_key=True)
user_id = Column(types.UnicodeText, primary_key=False)
country = Column(String(256))
occupation = Column(String(256))
def __repr__(self):
return '<User_ID: {}, Occupation: {}, Country: {}>'.format(
self.user_id, self.occupation, self.country)
class DataAnalytics(Base):
'''Model to create database table for data analytics information
'''
__tablename__ = 'data_analytics'
id = Column(Integer, primary_key=True)
resource_id = Column(types.UnicodeText, primary_key=False)
user_id = Column(types.UnicodeText, primary_key=False)
occupation = Column(String(256))
country = Column(String(256))
def __repr__(self):
return '<Resource_ID: {}, User_ID: {}>'.format(self.resource_id, self.user_id)
|
Write models for analytics tables'''Models definition for database tables creation
'''
from ckan.common import config
from sqlalchemy import (Column, Integer, String, ForeignKey, create_engine, types)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from ckan import model
from ckan.model import user_table
Base = declarative_base()
DB_LOCATION = config.get('sqlalchemy.url')
engine = create_engine(DB_LOCATION)
Session = sessionmaker(bind=engine)
def create_tables():
'''Create the tables on the database
'''
Base.metadata.create_all(engine)
class UserAnalytics(Base):
''' Model to create database table for extra user details
'''
__tablename__ = 'user_analytics'
id = Column(Integer, primary_key=True)
user_id = Column(types.UnicodeText, primary_key=False)
country = Column(String(256))
occupation = Column(String(256))
def __repr__(self):
return '<User_ID: {}, Occupation: {}, Country: {}>'.format(
self.user_id, self.occupation, self.country)
class DataAnalytics(Base):
'''Model to create database table for data analytics information
'''
__tablename__ = 'data_analytics'
id = Column(Integer, primary_key=True)
resource_id = Column(types.UnicodeText, primary_key=False)
user_id = Column(types.UnicodeText, primary_key=False)
occupation = Column(String(256))
country = Column(String(256))
def __repr__(self):
return '<Resource_ID: {}, User_ID: {}>'.format(self.resource_id, self.user_id)
|
<commit_before><commit_msg>Write models for analytics tables<commit_after>'''Models definition for database tables creation
'''
from ckan.common import config
from sqlalchemy import (Column, Integer, String, ForeignKey, create_engine, types)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from ckan import model
from ckan.model import user_table
Base = declarative_base()
DB_LOCATION = config.get('sqlalchemy.url')
engine = create_engine(DB_LOCATION)
Session = sessionmaker(bind=engine)
def create_tables():
'''Create the tables on the database
'''
Base.metadata.create_all(engine)
class UserAnalytics(Base):
''' Model to create database table for extra user details
'''
__tablename__ = 'user_analytics'
id = Column(Integer, primary_key=True)
user_id = Column(types.UnicodeText, primary_key=False)
country = Column(String(256))
occupation = Column(String(256))
def __repr__(self):
return '<User_ID: {}, Occupation: {}, Country: {}>'.format(
self.user_id, self.occupation, self.country)
class DataAnalytics(Base):
'''Model to create database table for data analytics information
'''
__tablename__ = 'data_analytics'
id = Column(Integer, primary_key=True)
resource_id = Column(types.UnicodeText, primary_key=False)
user_id = Column(types.UnicodeText, primary_key=False)
occupation = Column(String(256))
country = Column(String(256))
def __repr__(self):
return '<Resource_ID: {}, User_ID: {}>'.format(self.resource_id, self.user_id)
|
|
798394af2580cc755dac4d70892c150bc0e69d32
|
config/andrew_list_violation.py
|
config/andrew_list_violation.py
|
from experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.invariant_checker import InvariantChecker
from sts.simulation_state import SimulationConfig
# Use POX as our controller
command_line = "./pox.py --verbose --no-cli openflow.of_01 --address=__address__ --port=__port__ sts.syncproto.pox_syncer samples.topo forwarding.l2_multi messenger.messenger samples.nommessenger"
controllers = [ControllerConfig(command_line, cwd="pox", sync="tcp:localhost:18899")]
topology_class = MeshTopology
topology_params = "num_switches=4"
dataplane_trace = "dataplane_traces/ping_pong_same_subnet_4_switches.trace"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
dataplane_trace=dataplane_trace)
control_flow = Fuzzer(simulation_config, check_interval=1, halt_on_violation=True,
input_logger=InputLogger(),
invariant_check=InvariantChecker.check_liveness)
|
Add configuration file for Andrew to test changes on
|
Add configuration file for Andrew to test changes on
|
Python
|
apache-2.0
|
jmiserez/sts,ucb-sts/sts,ucb-sts/sts,jmiserez/sts
|
Add configuration file for Andrew to test changes on
|
from experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.invariant_checker import InvariantChecker
from sts.simulation_state import SimulationConfig
# Use POX as our controller
command_line = "./pox.py --verbose --no-cli openflow.of_01 --address=__address__ --port=__port__ sts.syncproto.pox_syncer samples.topo forwarding.l2_multi messenger.messenger samples.nommessenger"
controllers = [ControllerConfig(command_line, cwd="pox", sync="tcp:localhost:18899")]
topology_class = MeshTopology
topology_params = "num_switches=4"
dataplane_trace = "dataplane_traces/ping_pong_same_subnet_4_switches.trace"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
dataplane_trace=dataplane_trace)
control_flow = Fuzzer(simulation_config, check_interval=1, halt_on_violation=True,
input_logger=InputLogger(),
invariant_check=InvariantChecker.check_liveness)
|
<commit_before><commit_msg>Add configuration file for Andrew to test changes on<commit_after>
|
from experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.invariant_checker import InvariantChecker
from sts.simulation_state import SimulationConfig
# Use POX as our controller
command_line = "./pox.py --verbose --no-cli openflow.of_01 --address=__address__ --port=__port__ sts.syncproto.pox_syncer samples.topo forwarding.l2_multi messenger.messenger samples.nommessenger"
controllers = [ControllerConfig(command_line, cwd="pox", sync="tcp:localhost:18899")]
topology_class = MeshTopology
topology_params = "num_switches=4"
dataplane_trace = "dataplane_traces/ping_pong_same_subnet_4_switches.trace"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
dataplane_trace=dataplane_trace)
control_flow = Fuzzer(simulation_config, check_interval=1, halt_on_violation=True,
input_logger=InputLogger(),
invariant_check=InvariantChecker.check_liveness)
|
Add configuration file for Andrew to test changes onfrom experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.invariant_checker import InvariantChecker
from sts.simulation_state import SimulationConfig
# Use POX as our controller
command_line = "./pox.py --verbose --no-cli openflow.of_01 --address=__address__ --port=__port__ sts.syncproto.pox_syncer samples.topo forwarding.l2_multi messenger.messenger samples.nommessenger"
controllers = [ControllerConfig(command_line, cwd="pox", sync="tcp:localhost:18899")]
topology_class = MeshTopology
topology_params = "num_switches=4"
dataplane_trace = "dataplane_traces/ping_pong_same_subnet_4_switches.trace"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
dataplane_trace=dataplane_trace)
control_flow = Fuzzer(simulation_config, check_interval=1, halt_on_violation=True,
input_logger=InputLogger(),
invariant_check=InvariantChecker.check_liveness)
|
<commit_before><commit_msg>Add configuration file for Andrew to test changes on<commit_after>from experiment_config_lib import ControllerConfig
from sts.topology import MeshTopology
from sts.control_flow import Fuzzer
from sts.input_traces.input_logger import InputLogger
from sts.invariant_checker import InvariantChecker
from sts.simulation_state import SimulationConfig
# Use POX as our controller
command_line = "./pox.py --verbose --no-cli openflow.of_01 --address=__address__ --port=__port__ sts.syncproto.pox_syncer samples.topo forwarding.l2_multi messenger.messenger samples.nommessenger"
controllers = [ControllerConfig(command_line, cwd="pox", sync="tcp:localhost:18899")]
topology_class = MeshTopology
topology_params = "num_switches=4"
dataplane_trace = "dataplane_traces/ping_pong_same_subnet_4_switches.trace"
simulation_config = SimulationConfig(controller_configs=controllers,
topology_class=topology_class,
topology_params=topology_params,
dataplane_trace=dataplane_trace)
control_flow = Fuzzer(simulation_config, check_interval=1, halt_on_violation=True,
input_logger=InputLogger(),
invariant_check=InvariantChecker.check_liveness)
|
|
0032cfba11665de9200410cc132eb8aa368c7601
|
conveyor/utils.py
|
conveyor/utils.py
|
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
|
Add a utility for diffing dicts
|
Add a utility for diffing dicts
|
Python
|
bsd-2-clause
|
crateio/carrier
|
Add a utility for diffing dicts
|
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
|
<commit_before><commit_msg>Add a utility for diffing dicts<commit_after>
|
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
|
Add a utility for diffing dictsclass DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
|
<commit_before><commit_msg>Add a utility for diffing dicts<commit_after>class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
|
|
f073437f02737568b7d12cba6050afda92933a87
|
docs/source/conf.py
|
docs/source/conf.py
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = 'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
Remove the only remaining reference to `u'` string prefix
|
Remove the only remaining reference to `u'` string prefix
|
Python
|
mit
|
DoctorJellyface/powerline,lukw00/powerline,cyrixhero/powerline,bezhermoso/powerline,QuLogic/powerline,Luffin/powerline,bartvm/powerline,s0undt3ch/powerline,darac/powerline,junix/powerline,seanfisk/powerline,darac/powerline,IvanAli/powerline,DoctorJellyface/powerline,Liangjianghao/powerline,Liangjianghao/powerline,wfscheper/powerline,russellb/powerline,cyrixhero/powerline,wfscheper/powerline,bezhermoso/powerline,xxxhycl2010/powerline,seanfisk/powerline,wfscheper/powerline,firebitsbr/powerline,S0lll0s/powerline,cyrixhero/powerline,xxxhycl2010/powerline,blindFS/powerline,firebitsbr/powerline,xfumihiro/powerline,Luffin/powerline,areteix/powerline,s0undt3ch/powerline,xxxhycl2010/powerline,EricSB/powerline,EricSB/powerline,lukw00/powerline,dragon788/powerline,junix/powerline,blindFS/powerline,lukw00/powerline,prvnkumar/powerline,prvnkumar/powerline,Liangjianghao/powerline,areteix/powerline,xfumihiro/powerline,blindFS/powerline,dragon788/powerline,dragon788/powerline,IvanAli/powerline,s0undt3ch/powerline,xfumihiro/powerline,S0lll0s/powerline,russellb/powerline,IvanAli/powerline,kenrachynski/powerline,firebitsbr/powerline,bartvm/powerline,QuLogic/powerline,kenrachynski/powerline,areteix/powerline,prvnkumar/powerline,QuLogic/powerline,S0lll0s/powerline,junix/powerline,bartvm/powerline,DoctorJellyface/powerline,seanfisk/powerline,darac/powerline,bezhermoso/powerline,EricSB/powerline,russellb/powerline,kenrachynski/powerline,Luffin/powerline
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
Remove the only remaining reference to `u'` string prefix
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = 'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
<commit_before># vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
<commit_msg>Remove the only remaining reference to `u'` string prefix<commit_after>
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = 'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
Remove the only remaining reference to `u'` string prefix# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = 'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
<commit_before># vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = u'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
<commit_msg>Remove the only remaining reference to `u'` string prefix<commit_after># vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd()))))
sys.path.insert(0, os.path.abspath(os.getcwd()))
extensions = ['powerline_autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
source_suffix = '.rst'
master_doc = 'index'
project = 'Powerline'
version = 'beta'
release = 'beta'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
html_show_copyright = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
pass
|
5f15edde5d753423f70bfd7401c215053cbe746c
|
tests/test_http_messages.py
|
tests/test_http_messages.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
"""
Test the Request and Response classes
"""
from fnapy.utils import Response, Request
from lxml import etree
from fnapy.utils import xml2dict, remove_namespace
BATCH_ID = "BFACA5F5-67FD-C037-6209-F287800FBB17"
def elements_are_equal(e1, e2):
if e1.tag != e2.tag: return False
if e1.text != e2.text: return False
if e1.tail != e2.tail: return False
if e1.attrib != e2.attrib: return False
if len(e1) != len(e2): return False
return all(elements_are_equal(c1, c2) for c1, c2 in zip(e1, e2))
def test_request():
xml_request = """<?xml version='1.0' encoding='utf-8'?>
<batch_status xmlns="http://www.fnac.com/schemas/mp-dialog.xsd" partner_id="X" shop_id="X" token="X"><batch_id>{}</batch_id></batch_status>
""".format(BATCH_ID)
xml_request = remove_namespace(xml_request)
request = Request(xml_request)
element = etree.Element('batch_status', partner_id='X', shop_id='X', token='X')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert request.dict == xml2dict(xml_request)
assert request.xml == xml_request.encode('utf-8')
assert request.tag == 'batch_status'
assert elements_are_equal(request.element, element)
def test_response():
xml_response = """<?xml version="1.0" encoding="utf-8"?>
<offers_update_response status="OK" xmlns="http://www.fnac.com/schemas/mp-dialog.xsd"><batch_id>{}</batch_id></offers_update_response>
""".format(BATCH_ID)
xml_response = remove_namespace(xml_response)
response = Response(xml_response)
element = etree.Element('offers_update_response', status='OK')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert response.dict == xml2dict(xml_response)
assert response.xml == xml_response.encode('utf-8')
assert response.tag == 'offers_update_response'
assert elements_are_equal(response.element, element)
|
Test the Request and Response classes
|
Test the Request and Response classes
|
Python
|
mit
|
alexandriagroup/fnapy,alexandriagroup/fnapy
|
Test the Request and Response classes
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
"""
Test the Request and Response classes
"""
from fnapy.utils import Response, Request
from lxml import etree
from fnapy.utils import xml2dict, remove_namespace
BATCH_ID = "BFACA5F5-67FD-C037-6209-F287800FBB17"
def elements_are_equal(e1, e2):
if e1.tag != e2.tag: return False
if e1.text != e2.text: return False
if e1.tail != e2.tail: return False
if e1.attrib != e2.attrib: return False
if len(e1) != len(e2): return False
return all(elements_are_equal(c1, c2) for c1, c2 in zip(e1, e2))
def test_request():
xml_request = """<?xml version='1.0' encoding='utf-8'?>
<batch_status xmlns="http://www.fnac.com/schemas/mp-dialog.xsd" partner_id="X" shop_id="X" token="X"><batch_id>{}</batch_id></batch_status>
""".format(BATCH_ID)
xml_request = remove_namespace(xml_request)
request = Request(xml_request)
element = etree.Element('batch_status', partner_id='X', shop_id='X', token='X')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert request.dict == xml2dict(xml_request)
assert request.xml == xml_request.encode('utf-8')
assert request.tag == 'batch_status'
assert elements_are_equal(request.element, element)
def test_response():
xml_response = """<?xml version="1.0" encoding="utf-8"?>
<offers_update_response status="OK" xmlns="http://www.fnac.com/schemas/mp-dialog.xsd"><batch_id>{}</batch_id></offers_update_response>
""".format(BATCH_ID)
xml_response = remove_namespace(xml_response)
response = Response(xml_response)
element = etree.Element('offers_update_response', status='OK')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert response.dict == xml2dict(xml_response)
assert response.xml == xml_response.encode('utf-8')
assert response.tag == 'offers_update_response'
assert elements_are_equal(response.element, element)
|
<commit_before><commit_msg>Test the Request and Response classes<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
"""
Test the Request and Response classes
"""
from fnapy.utils import Response, Request
from lxml import etree
from fnapy.utils import xml2dict, remove_namespace
BATCH_ID = "BFACA5F5-67FD-C037-6209-F287800FBB17"
def elements_are_equal(e1, e2):
if e1.tag != e2.tag: return False
if e1.text != e2.text: return False
if e1.tail != e2.tail: return False
if e1.attrib != e2.attrib: return False
if len(e1) != len(e2): return False
return all(elements_are_equal(c1, c2) for c1, c2 in zip(e1, e2))
def test_request():
xml_request = """<?xml version='1.0' encoding='utf-8'?>
<batch_status xmlns="http://www.fnac.com/schemas/mp-dialog.xsd" partner_id="X" shop_id="X" token="X"><batch_id>{}</batch_id></batch_status>
""".format(BATCH_ID)
xml_request = remove_namespace(xml_request)
request = Request(xml_request)
element = etree.Element('batch_status', partner_id='X', shop_id='X', token='X')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert request.dict == xml2dict(xml_request)
assert request.xml == xml_request.encode('utf-8')
assert request.tag == 'batch_status'
assert elements_are_equal(request.element, element)
def test_response():
xml_response = """<?xml version="1.0" encoding="utf-8"?>
<offers_update_response status="OK" xmlns="http://www.fnac.com/schemas/mp-dialog.xsd"><batch_id>{}</batch_id></offers_update_response>
""".format(BATCH_ID)
xml_response = remove_namespace(xml_response)
response = Response(xml_response)
element = etree.Element('offers_update_response', status='OK')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert response.dict == xml2dict(xml_response)
assert response.xml == xml_response.encode('utf-8')
assert response.tag == 'offers_update_response'
assert elements_are_equal(response.element, element)
|
Test the Request and Response classes#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
"""
Test the Request and Response classes
"""
from fnapy.utils import Response, Request
from lxml import etree
from fnapy.utils import xml2dict, remove_namespace
BATCH_ID = "BFACA5F5-67FD-C037-6209-F287800FBB17"
def elements_are_equal(e1, e2):
if e1.tag != e2.tag: return False
if e1.text != e2.text: return False
if e1.tail != e2.tail: return False
if e1.attrib != e2.attrib: return False
if len(e1) != len(e2): return False
return all(elements_are_equal(c1, c2) for c1, c2 in zip(e1, e2))
def test_request():
xml_request = """<?xml version='1.0' encoding='utf-8'?>
<batch_status xmlns="http://www.fnac.com/schemas/mp-dialog.xsd" partner_id="X" shop_id="X" token="X"><batch_id>{}</batch_id></batch_status>
""".format(BATCH_ID)
xml_request = remove_namespace(xml_request)
request = Request(xml_request)
element = etree.Element('batch_status', partner_id='X', shop_id='X', token='X')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert request.dict == xml2dict(xml_request)
assert request.xml == xml_request.encode('utf-8')
assert request.tag == 'batch_status'
assert elements_are_equal(request.element, element)
def test_response():
xml_response = """<?xml version="1.0" encoding="utf-8"?>
<offers_update_response status="OK" xmlns="http://www.fnac.com/schemas/mp-dialog.xsd"><batch_id>{}</batch_id></offers_update_response>
""".format(BATCH_ID)
xml_response = remove_namespace(xml_response)
response = Response(xml_response)
element = etree.Element('offers_update_response', status='OK')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert response.dict == xml2dict(xml_response)
assert response.xml == xml_response.encode('utf-8')
assert response.tag == 'offers_update_response'
assert elements_are_equal(response.element, element)
|
<commit_before><commit_msg>Test the Request and Response classes<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
"""
Test the Request and Response classes
"""
from fnapy.utils import Response, Request
from lxml import etree
from fnapy.utils import xml2dict, remove_namespace
BATCH_ID = "BFACA5F5-67FD-C037-6209-F287800FBB17"
def elements_are_equal(e1, e2):
if e1.tag != e2.tag: return False
if e1.text != e2.text: return False
if e1.tail != e2.tail: return False
if e1.attrib != e2.attrib: return False
if len(e1) != len(e2): return False
return all(elements_are_equal(c1, c2) for c1, c2 in zip(e1, e2))
def test_request():
xml_request = """<?xml version='1.0' encoding='utf-8'?>
<batch_status xmlns="http://www.fnac.com/schemas/mp-dialog.xsd" partner_id="X" shop_id="X" token="X"><batch_id>{}</batch_id></batch_status>
""".format(BATCH_ID)
xml_request = remove_namespace(xml_request)
request = Request(xml_request)
element = etree.Element('batch_status', partner_id='X', shop_id='X', token='X')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert request.dict == xml2dict(xml_request)
assert request.xml == xml_request.encode('utf-8')
assert request.tag == 'batch_status'
assert elements_are_equal(request.element, element)
def test_response():
xml_response = """<?xml version="1.0" encoding="utf-8"?>
<offers_update_response status="OK" xmlns="http://www.fnac.com/schemas/mp-dialog.xsd"><batch_id>{}</batch_id></offers_update_response>
""".format(BATCH_ID)
xml_response = remove_namespace(xml_response)
response = Response(xml_response)
element = etree.Element('offers_update_response', status='OK')
etree.SubElement(element, 'batch_id').text = BATCH_ID
assert response.dict == xml2dict(xml_response)
assert response.xml == xml_response.encode('utf-8')
assert response.tag == 'offers_update_response'
assert elements_are_equal(response.element, element)
|
|
c90bde47b1faab13396052fa474bc802ea5b2438
|
testscroll.py
|
testscroll.py
|
from PySide import QtCore, QtGui
import os,sys
from bgsub import SpectrumData
from matplotlib import pyplot as plt
path = "/home/danielle/Documents/LMCE_one"
file_list = [f for f in sorted(os.listdir(path)) if f.endswith(".txt")]
os.chdir(path)
spectra = []
for each in file_list:
spectra.append(SpectrumData.from_file(each))
#import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
qapp = QtGui.QApplication(sys.argv)
qwidget = QtGui.QWidget()
qwidget.setGeometry(QtCore.QRect(0, 0, 500, 500))
qlayout = QtGui.QHBoxLayout(qwidget)
qwidget.setLayout(qlayout)
qscroll = QtGui.QScrollArea(qwidget)
qscroll.setGeometry(QtCore.QRect(0, 0, 500, 500))
qscroll.setFrameStyle(QtGui.QFrame.NoFrame)
qlayout.addWidget(qscroll)
qscrollContents = QtGui.QWidget()
qscrollLayout = QtGui.QHBoxLayout(qscrollContents)
qscrollLayout.setGeometry(QtCore.QRect(0, 0, 1000, 1000))
qscroll.setWidget(qscrollContents)
qscroll.setWidgetResizable(True)
for spectrum in spectra:
qfigWidget = QtGui.QWidget(qscrollContents)
fig = Figure()
canvas = FigureCanvas(fig)
canvas.setParent(qfigWidget)
toolbar = NavigationToolbar(canvas, qfigWidget)
plt = fig.add_subplot(111)
plt.scatter(*zip(*spectrum.info))
# place plot components in a layout
plotLayout = QtGui.QVBoxLayout()
plotLayout.addWidget(canvas)
plotLayout.addWidget(toolbar)
qfigWidget.setLayout(plotLayout)
# prevent the canvas to shrink beyond a point
# original size looks like a good minimum size
canvas.setMinimumSize(canvas.size())
qscrollLayout.addWidget(qfigWidget)
qwidget.show()
exit(qapp.exec_())
|
Test interface for showing plots
|
Test interface for showing plots
|
Python
|
mit
|
danushkana/pyspectrum
|
Test interface for showing plots
|
from PySide import QtCore, QtGui
import os,sys
from bgsub import SpectrumData
from matplotlib import pyplot as plt
path = "/home/danielle/Documents/LMCE_one"
file_list = [f for f in sorted(os.listdir(path)) if f.endswith(".txt")]
os.chdir(path)
spectra = []
for each in file_list:
spectra.append(SpectrumData.from_file(each))
#import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
qapp = QtGui.QApplication(sys.argv)
qwidget = QtGui.QWidget()
qwidget.setGeometry(QtCore.QRect(0, 0, 500, 500))
qlayout = QtGui.QHBoxLayout(qwidget)
qwidget.setLayout(qlayout)
qscroll = QtGui.QScrollArea(qwidget)
qscroll.setGeometry(QtCore.QRect(0, 0, 500, 500))
qscroll.setFrameStyle(QtGui.QFrame.NoFrame)
qlayout.addWidget(qscroll)
qscrollContents = QtGui.QWidget()
qscrollLayout = QtGui.QHBoxLayout(qscrollContents)
qscrollLayout.setGeometry(QtCore.QRect(0, 0, 1000, 1000))
qscroll.setWidget(qscrollContents)
qscroll.setWidgetResizable(True)
for spectrum in spectra:
qfigWidget = QtGui.QWidget(qscrollContents)
fig = Figure()
canvas = FigureCanvas(fig)
canvas.setParent(qfigWidget)
toolbar = NavigationToolbar(canvas, qfigWidget)
plt = fig.add_subplot(111)
plt.scatter(*zip(*spectrum.info))
# place plot components in a layout
plotLayout = QtGui.QVBoxLayout()
plotLayout.addWidget(canvas)
plotLayout.addWidget(toolbar)
qfigWidget.setLayout(plotLayout)
# prevent the canvas to shrink beyond a point
# original size looks like a good minimum size
canvas.setMinimumSize(canvas.size())
qscrollLayout.addWidget(qfigWidget)
qwidget.show()
exit(qapp.exec_())
|
<commit_before><commit_msg>Test interface for showing plots<commit_after>
|
from PySide import QtCore, QtGui
import os,sys
from bgsub import SpectrumData
from matplotlib import pyplot as plt
path = "/home/danielle/Documents/LMCE_one"
file_list = [f for f in sorted(os.listdir(path)) if f.endswith(".txt")]
os.chdir(path)
spectra = []
for each in file_list:
spectra.append(SpectrumData.from_file(each))
#import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
qapp = QtGui.QApplication(sys.argv)
qwidget = QtGui.QWidget()
qwidget.setGeometry(QtCore.QRect(0, 0, 500, 500))
qlayout = QtGui.QHBoxLayout(qwidget)
qwidget.setLayout(qlayout)
qscroll = QtGui.QScrollArea(qwidget)
qscroll.setGeometry(QtCore.QRect(0, 0, 500, 500))
qscroll.setFrameStyle(QtGui.QFrame.NoFrame)
qlayout.addWidget(qscroll)
qscrollContents = QtGui.QWidget()
qscrollLayout = QtGui.QHBoxLayout(qscrollContents)
qscrollLayout.setGeometry(QtCore.QRect(0, 0, 1000, 1000))
qscroll.setWidget(qscrollContents)
qscroll.setWidgetResizable(True)
for spectrum in spectra:
qfigWidget = QtGui.QWidget(qscrollContents)
fig = Figure()
canvas = FigureCanvas(fig)
canvas.setParent(qfigWidget)
toolbar = NavigationToolbar(canvas, qfigWidget)
plt = fig.add_subplot(111)
plt.scatter(*zip(*spectrum.info))
# place plot components in a layout
plotLayout = QtGui.QVBoxLayout()
plotLayout.addWidget(canvas)
plotLayout.addWidget(toolbar)
qfigWidget.setLayout(plotLayout)
# prevent the canvas to shrink beyond a point
# original size looks like a good minimum size
canvas.setMinimumSize(canvas.size())
qscrollLayout.addWidget(qfigWidget)
qwidget.show()
exit(qapp.exec_())
|
Test interface for showing plotsfrom PySide import QtCore, QtGui
import os,sys
from bgsub import SpectrumData
from matplotlib import pyplot as plt
path = "/home/danielle/Documents/LMCE_one"
file_list = [f for f in sorted(os.listdir(path)) if f.endswith(".txt")]
os.chdir(path)
spectra = []
for each in file_list:
spectra.append(SpectrumData.from_file(each))
#import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
qapp = QtGui.QApplication(sys.argv)
qwidget = QtGui.QWidget()
qwidget.setGeometry(QtCore.QRect(0, 0, 500, 500))
qlayout = QtGui.QHBoxLayout(qwidget)
qwidget.setLayout(qlayout)
qscroll = QtGui.QScrollArea(qwidget)
qscroll.setGeometry(QtCore.QRect(0, 0, 500, 500))
qscroll.setFrameStyle(QtGui.QFrame.NoFrame)
qlayout.addWidget(qscroll)
qscrollContents = QtGui.QWidget()
qscrollLayout = QtGui.QHBoxLayout(qscrollContents)
qscrollLayout.setGeometry(QtCore.QRect(0, 0, 1000, 1000))
qscroll.setWidget(qscrollContents)
qscroll.setWidgetResizable(True)
for spectrum in spectra:
qfigWidget = QtGui.QWidget(qscrollContents)
fig = Figure()
canvas = FigureCanvas(fig)
canvas.setParent(qfigWidget)
toolbar = NavigationToolbar(canvas, qfigWidget)
plt = fig.add_subplot(111)
plt.scatter(*zip(*spectrum.info))
# place plot components in a layout
plotLayout = QtGui.QVBoxLayout()
plotLayout.addWidget(canvas)
plotLayout.addWidget(toolbar)
qfigWidget.setLayout(plotLayout)
# prevent the canvas to shrink beyond a point
# original size looks like a good minimum size
canvas.setMinimumSize(canvas.size())
qscrollLayout.addWidget(qfigWidget)
qwidget.show()
exit(qapp.exec_())
|
<commit_before><commit_msg>Test interface for showing plots<commit_after>from PySide import QtCore, QtGui
import os,sys
from bgsub import SpectrumData
from matplotlib import pyplot as plt
path = "/home/danielle/Documents/LMCE_one"
file_list = [f for f in sorted(os.listdir(path)) if f.endswith(".txt")]
os.chdir(path)
spectra = []
for each in file_list:
spectra.append(SpectrumData.from_file(each))
#import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
qapp = QtGui.QApplication(sys.argv)
qwidget = QtGui.QWidget()
qwidget.setGeometry(QtCore.QRect(0, 0, 500, 500))
qlayout = QtGui.QHBoxLayout(qwidget)
qwidget.setLayout(qlayout)
qscroll = QtGui.QScrollArea(qwidget)
qscroll.setGeometry(QtCore.QRect(0, 0, 500, 500))
qscroll.setFrameStyle(QtGui.QFrame.NoFrame)
qlayout.addWidget(qscroll)
qscrollContents = QtGui.QWidget()
qscrollLayout = QtGui.QHBoxLayout(qscrollContents)
qscrollLayout.setGeometry(QtCore.QRect(0, 0, 1000, 1000))
qscroll.setWidget(qscrollContents)
qscroll.setWidgetResizable(True)
for spectrum in spectra:
qfigWidget = QtGui.QWidget(qscrollContents)
fig = Figure()
canvas = FigureCanvas(fig)
canvas.setParent(qfigWidget)
toolbar = NavigationToolbar(canvas, qfigWidget)
plt = fig.add_subplot(111)
plt.scatter(*zip(*spectrum.info))
# place plot components in a layout
plotLayout = QtGui.QVBoxLayout()
plotLayout.addWidget(canvas)
plotLayout.addWidget(toolbar)
qfigWidget.setLayout(plotLayout)
# prevent the canvas to shrink beyond a point
# original size looks like a good minimum size
canvas.setMinimumSize(canvas.size())
qscrollLayout.addWidget(qfigWidget)
qwidget.show()
exit(qapp.exec_())
|
|
2207dd266887e812cae9da67ca00bef80c9985fd
|
thefuck/shells/__init__.py
|
thefuck/shells/__init__.py
|
"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
try:
shell_name = Process(os.getpid()).parent().name()
except TypeError:
shell_name = Process(os.getpid()).parent.name
return shells.get(shell_name, Generic)()
shell = _get_shell()
|
"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
proc = Process(os.getpid())
while (proc is not None):
name = None
try:
name = proc.name()
except TypeError:
name = proc.name
name = os.path.splitext(name)[0]
if name in shells:
return shells[name]()
try:
proc = proc.parent()
except TypeError:
proc = proc.parent
return Generic()
shell = _get_shell()
|
Update _get_shell to work with Windows
|
Update _get_shell to work with Windows
- _get_shell assumed the parent process would always be the shell process, in Powershell the
parent process is Python, with the grandparent being the shell
- Switched to walking the process tree so the same code path can be used in both places
|
Python
|
mit
|
mlk/thefuck,SimenB/thefuck,SimenB/thefuck,mlk/thefuck,nvbn/thefuck,Clpsplug/thefuck,scorphus/thefuck,Clpsplug/thefuck,scorphus/thefuck,nvbn/thefuck
|
"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
try:
shell_name = Process(os.getpid()).parent().name()
except TypeError:
shell_name = Process(os.getpid()).parent.name
return shells.get(shell_name, Generic)()
shell = _get_shell()
Update _get_shell to work with Windows
- _get_shell assumed the parent process would always be the shell process, in Powershell the
parent process is Python, with the grandparent being the shell
- Switched to walking the process tree so the same code path can be used in both places
|
"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
proc = Process(os.getpid())
while (proc is not None):
name = None
try:
name = proc.name()
except TypeError:
name = proc.name
name = os.path.splitext(name)[0]
if name in shells:
return shells[name]()
try:
proc = proc.parent()
except TypeError:
proc = proc.parent
return Generic()
shell = _get_shell()
|
<commit_before>"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
try:
shell_name = Process(os.getpid()).parent().name()
except TypeError:
shell_name = Process(os.getpid()).parent.name
return shells.get(shell_name, Generic)()
shell = _get_shell()
<commit_msg>Update _get_shell to work with Windows
- _get_shell assumed the parent process would always be the shell process, in Powershell the
parent process is Python, with the grandparent being the shell
- Switched to walking the process tree so the same code path can be used in both places<commit_after>
|
"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
proc = Process(os.getpid())
while (proc is not None):
name = None
try:
name = proc.name()
except TypeError:
name = proc.name
name = os.path.splitext(name)[0]
if name in shells:
return shells[name]()
try:
proc = proc.parent()
except TypeError:
proc = proc.parent
return Generic()
shell = _get_shell()
|
"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
try:
shell_name = Process(os.getpid()).parent().name()
except TypeError:
shell_name = Process(os.getpid()).parent.name
return shells.get(shell_name, Generic)()
shell = _get_shell()
Update _get_shell to work with Windows
- _get_shell assumed the parent process would always be the shell process, in Powershell the
parent process is Python, with the grandparent being the shell
- Switched to walking the process tree so the same code path can be used in both places"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
proc = Process(os.getpid())
while (proc is not None):
name = None
try:
name = proc.name()
except TypeError:
name = proc.name
name = os.path.splitext(name)[0]
if name in shells:
return shells[name]()
try:
proc = proc.parent()
except TypeError:
proc = proc.parent
return Generic()
shell = _get_shell()
|
<commit_before>"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
try:
shell_name = Process(os.getpid()).parent().name()
except TypeError:
shell_name = Process(os.getpid()).parent.name
return shells.get(shell_name, Generic)()
shell = _get_shell()
<commit_msg>Update _get_shell to work with Windows
- _get_shell assumed the parent process would always be the shell process, in Powershell the
parent process is Python, with the grandparent being the shell
- Switched to walking the process tree so the same code path can be used in both places<commit_after>"""Package with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and
`get_aliases` methods.
"""
import os
from psutil import Process
from .bash import Bash
from .fish import Fish
from .generic import Generic
from .tcsh import Tcsh
from .zsh import Zsh
shells = {'bash': Bash,
'fish': Fish,
'zsh': Zsh,
'csh': Tcsh,
'tcsh': Tcsh}
def _get_shell():
proc = Process(os.getpid())
while (proc is not None):
name = None
try:
name = proc.name()
except TypeError:
name = proc.name
name = os.path.splitext(name)[0]
if name in shells:
return shells[name]()
try:
proc = proc.parent()
except TypeError:
proc = proc.parent
return Generic()
shell = _get_shell()
|
731b53a21ca5d027384b1db3d5bfdcdd055eff28
|
run_doodle_exp.py
|
run_doodle_exp.py
|
#!/usr/bin/env python3
import subprocess
import os
img_dir = 'samples/'
out_dir = 'outputs/'
ext = '.jpg'
sem_ext = '.png'
c_images = ['arthur', 'matheus', 'sergio', 'morgan']
s_image = 'morgan'
content_weight = 15
style_weight = 10
iterations = 200
### Create outputs' folder if it doesn't exist
if not os.path.exists(out_dir):
os.makedirs(out_dir)
### Name the output files and count iterations
i = 0
### Run until Ctrl+C
while True:
out_core = '{:04d}'.format(i)
output = out_dir + out_core + sem_ext
content = img_dir + c_images[i % 4] + ext
sem_content = img_dir + c_images[i % 4] + '_sem' + sem_ext
style = img_dir + s_image + ext
### It appears to be working properly
### To check if the lines are producing the correct outputs add 'echo' as the
### first term of the list to print it in the command line
subprocess.run(['python', 'doodle.py', '--device=cpu', '--save-every=0',
'--content={}'.format(content),
'--style={}'.format(style),
'--output={}'.format(output),
'--content-weight={}'.format(content_weight),
'--style-weight={}'.format(style_weight),
'--iterations={}'.format(iterations)])
subprocess.run(['cp', output, (img_dir + out_core + ext)])
subprocess.run(['cp', sem_content, '{}_sem{}'.format(img_dir + out_core, sem_ext)])
### We don't want to delete Morgan, but we can delete intermediate semantic maps
### and the outputs at sample folder (the ones at output folder are kept)
if s_image != 'morgan':
subprocess.run(['rm', style])
subprocess.run(['rm', '{}_sem{}'.format(img_dir + s_image, sem_ext)])
### Prepare for next round
s_image = out_core
i += 1
### Expected contents, styles and outputs (all from 'samples/')
# content=arthur.jpg , style=morgan.jpg, output=0000.jpg
# content=matheus.jpg, style=0000.jpg , output=0001.jpg
# content=sergio.jpg , style=0001.jpg , output=0002.jpg
# content=morgan.jpg , style=0002.jpg , output=0003.jpg
# content=arthur.jpg , style=0003.jpg , output=0004.jpg
# content=matheus.jpg, style=0004.jpg , output=0005.jpg
|
Change style for each execution of the experiment
|
Change style for each execution of the experiment
|
Python
|
agpl-3.0
|
msvolenski/transfer-style-deep-investigation
|
Change style for each execution of the experiment
|
#!/usr/bin/env python3
import subprocess
import os
img_dir = 'samples/'
out_dir = 'outputs/'
ext = '.jpg'
sem_ext = '.png'
c_images = ['arthur', 'matheus', 'sergio', 'morgan']
s_image = 'morgan'
content_weight = 15
style_weight = 10
iterations = 200
### Create outputs' folder if it doesn't exist
if not os.path.exists(out_dir):
os.makedirs(out_dir)
### Name the output files and count iterations
i = 0
### Run until Ctrl+C
while True:
out_core = '{:04d}'.format(i)
output = out_dir + out_core + sem_ext
content = img_dir + c_images[i % 4] + ext
sem_content = img_dir + c_images[i % 4] + '_sem' + sem_ext
style = img_dir + s_image + ext
### It appears to be working properly
### To check if the lines are producing the correct outputs add 'echo' as the
### first term of the list to print it in the command line
subprocess.run(['python', 'doodle.py', '--device=cpu', '--save-every=0',
'--content={}'.format(content),
'--style={}'.format(style),
'--output={}'.format(output),
'--content-weight={}'.format(content_weight),
'--style-weight={}'.format(style_weight),
'--iterations={}'.format(iterations)])
subprocess.run(['cp', output, (img_dir + out_core + ext)])
subprocess.run(['cp', sem_content, '{}_sem{}'.format(img_dir + out_core, sem_ext)])
### We don't want to delete Morgan, but we can delete intermediate semantic maps
### and the outputs at sample folder (the ones at output folder are kept)
if s_image != 'morgan':
subprocess.run(['rm', style])
subprocess.run(['rm', '{}_sem{}'.format(img_dir + s_image, sem_ext)])
### Prepare for next round
s_image = out_core
i += 1
### Expected contents, styles and outputs (all from 'samples/')
# content=arthur.jpg , style=morgan.jpg, output=0000.jpg
# content=matheus.jpg, style=0000.jpg , output=0001.jpg
# content=sergio.jpg , style=0001.jpg , output=0002.jpg
# content=morgan.jpg , style=0002.jpg , output=0003.jpg
# content=arthur.jpg , style=0003.jpg , output=0004.jpg
# content=matheus.jpg, style=0004.jpg , output=0005.jpg
|
<commit_before><commit_msg>Change style for each execution of the experiment<commit_after>
|
#!/usr/bin/env python3
import subprocess
import os
img_dir = 'samples/'
out_dir = 'outputs/'
ext = '.jpg'
sem_ext = '.png'
c_images = ['arthur', 'matheus', 'sergio', 'morgan']
s_image = 'morgan'
content_weight = 15
style_weight = 10
iterations = 200
### Create outputs' folder if it doesn't exist
if not os.path.exists(out_dir):
os.makedirs(out_dir)
### Name the output files and count iterations
i = 0
### Run until Ctrl+C
while True:
out_core = '{:04d}'.format(i)
output = out_dir + out_core + sem_ext
content = img_dir + c_images[i % 4] + ext
sem_content = img_dir + c_images[i % 4] + '_sem' + sem_ext
style = img_dir + s_image + ext
### It appears to be working properly
### To check if the lines are producing the correct outputs add 'echo' as the
### first term of the list to print it in the command line
subprocess.run(['python', 'doodle.py', '--device=cpu', '--save-every=0',
'--content={}'.format(content),
'--style={}'.format(style),
'--output={}'.format(output),
'--content-weight={}'.format(content_weight),
'--style-weight={}'.format(style_weight),
'--iterations={}'.format(iterations)])
subprocess.run(['cp', output, (img_dir + out_core + ext)])
subprocess.run(['cp', sem_content, '{}_sem{}'.format(img_dir + out_core, sem_ext)])
### We don't want to delete Morgan, but we can delete intermediate semantic maps
### and the outputs at sample folder (the ones at output folder are kept)
if s_image != 'morgan':
subprocess.run(['rm', style])
subprocess.run(['rm', '{}_sem{}'.format(img_dir + s_image, sem_ext)])
### Prepare for next round
s_image = out_core
i += 1
### Expected contents, styles and outputs (all from 'samples/')
# content=arthur.jpg , style=morgan.jpg, output=0000.jpg
# content=matheus.jpg, style=0000.jpg , output=0001.jpg
# content=sergio.jpg , style=0001.jpg , output=0002.jpg
# content=morgan.jpg , style=0002.jpg , output=0003.jpg
# content=arthur.jpg , style=0003.jpg , output=0004.jpg
# content=matheus.jpg, style=0004.jpg , output=0005.jpg
|
Change style for each execution of the experiment#!/usr/bin/env python3
import subprocess
import os
img_dir = 'samples/'
out_dir = 'outputs/'
ext = '.jpg'
sem_ext = '.png'
c_images = ['arthur', 'matheus', 'sergio', 'morgan']
s_image = 'morgan'
content_weight = 15
style_weight = 10
iterations = 200
### Create outputs' folder if it doesn't exist
if not os.path.exists(out_dir):
os.makedirs(out_dir)
### Name the output files and count iterations
i = 0
### Run until Ctrl+C
while True:
out_core = '{:04d}'.format(i)
output = out_dir + out_core + sem_ext
content = img_dir + c_images[i % 4] + ext
sem_content = img_dir + c_images[i % 4] + '_sem' + sem_ext
style = img_dir + s_image + ext
### It appears to be working properly
### To check if the lines are producing the correct outputs add 'echo' as the
### first term of the list to print it in the command line
subprocess.run(['python', 'doodle.py', '--device=cpu', '--save-every=0',
'--content={}'.format(content),
'--style={}'.format(style),
'--output={}'.format(output),
'--content-weight={}'.format(content_weight),
'--style-weight={}'.format(style_weight),
'--iterations={}'.format(iterations)])
subprocess.run(['cp', output, (img_dir + out_core + ext)])
subprocess.run(['cp', sem_content, '{}_sem{}'.format(img_dir + out_core, sem_ext)])
### We don't want to delete Morgan, but we can delete intermediate semantic maps
### and the outputs at sample folder (the ones at output folder are kept)
if s_image != 'morgan':
subprocess.run(['rm', style])
subprocess.run(['rm', '{}_sem{}'.format(img_dir + s_image, sem_ext)])
### Prepare for next round
s_image = out_core
i += 1
### Expected contents, styles and outputs (all from 'samples/')
# content=arthur.jpg , style=morgan.jpg, output=0000.jpg
# content=matheus.jpg, style=0000.jpg , output=0001.jpg
# content=sergio.jpg , style=0001.jpg , output=0002.jpg
# content=morgan.jpg , style=0002.jpg , output=0003.jpg
# content=arthur.jpg , style=0003.jpg , output=0004.jpg
# content=matheus.jpg, style=0004.jpg , output=0005.jpg
|
<commit_before><commit_msg>Change style for each execution of the experiment<commit_after>#!/usr/bin/env python3
import subprocess
import os
img_dir = 'samples/'
out_dir = 'outputs/'
ext = '.jpg'
sem_ext = '.png'
c_images = ['arthur', 'matheus', 'sergio', 'morgan']
s_image = 'morgan'
content_weight = 15
style_weight = 10
iterations = 200
### Create outputs' folder if it doesn't exist
if not os.path.exists(out_dir):
os.makedirs(out_dir)
### Name the output files and count iterations
i = 0
### Run until Ctrl+C
while True:
out_core = '{:04d}'.format(i)
output = out_dir + out_core + sem_ext
content = img_dir + c_images[i % 4] + ext
sem_content = img_dir + c_images[i % 4] + '_sem' + sem_ext
style = img_dir + s_image + ext
### It appears to be working properly
### To check if the lines are producing the correct outputs add 'echo' as the
### first term of the list to print it in the command line
subprocess.run(['python', 'doodle.py', '--device=cpu', '--save-every=0',
'--content={}'.format(content),
'--style={}'.format(style),
'--output={}'.format(output),
'--content-weight={}'.format(content_weight),
'--style-weight={}'.format(style_weight),
'--iterations={}'.format(iterations)])
subprocess.run(['cp', output, (img_dir + out_core + ext)])
subprocess.run(['cp', sem_content, '{}_sem{}'.format(img_dir + out_core, sem_ext)])
### We don't want to delete Morgan, but we can delete intermediate semantic maps
### and the outputs at sample folder (the ones at output folder are kept)
if s_image != 'morgan':
subprocess.run(['rm', style])
subprocess.run(['rm', '{}_sem{}'.format(img_dir + s_image, sem_ext)])
### Prepare for next round
s_image = out_core
i += 1
### Expected contents, styles and outputs (all from 'samples/')
# content=arthur.jpg , style=morgan.jpg, output=0000.jpg
# content=matheus.jpg, style=0000.jpg , output=0001.jpg
# content=sergio.jpg , style=0001.jpg , output=0002.jpg
# content=morgan.jpg , style=0002.jpg , output=0003.jpg
# content=arthur.jpg , style=0003.jpg , output=0004.jpg
# content=matheus.jpg, style=0004.jpg , output=0005.jpg
|