Dataset columns (name: type, value statistics):

- commit: string, length min 40, max 40
- old_file: string, length min 4, max 118
- new_file: string, length min 4, max 118
- old_contents: string, length min 0, max 2.94k
- new_contents: string, length min 1, max 4.43k
- subject: string, length min 15, max 444
- message: string, length min 16, max 3.45k
- lang: string, 1 distinct value
- license: string, 13 distinct values
- repos: string, length min 5, max 43.2k
- prompt: string, length min 17, max 4.58k
- response: string, length min 1, max 4.43k
- prompt_tagged: string, length min 58, max 4.62k
- response_tagged: string, length min 1, max 4.43k
- text: string, length min 132, max 7.29k
- text_tagged: string, length min 173, max 7.33k
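The rows that follow are raw records from this table. For orientation, the snippet below is a minimal sketch of how records with the columns above could be read and filtered, assuming they have been exported as newline-delimited JSON to a local `commits.jsonl` file (a hypothetical filename; the actual distribution format is not stated here).

```python
import json

# Hypothetical export path; adjust to wherever the records actually live.
DATA_PATH = "commits.jsonl"

def iter_records(path):
    """Yield one dict per line of a newline-delimited JSON export."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line:
                yield json.loads(line)

# Example: collect the commit subjects for a single license class.
mit_subjects = [
    record["subject"]
    for record in iter_records(DATA_PATH)
    if record.get("license") == "mit"
]
print(len(mit_subjects))
```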
| commit | old_file | new_file | old_contents | new_contents | subject | message | lang | license | repos | prompt | response | prompt_tagged | response_tagged | text | text_tagged |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ceca4a068031e049ef906e0ce4f5c03b3db6a48
|
migrations/versions/0201_another_letter_org.py
|
migrations/versions/0201_another_letter_org.py
|
"""empty message
Revision ID: 0201_another_letter_org
Revises: 0200_another_letter_org
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0201_another_letter_org'
down_revision = '0200_another_letter_org'
from alembic import op
NEW_ORGANISATIONS = [
('509', 'Hackney Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter organisation for Hackney Council
|
Add letter organisation for Hackney Council
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add letter organisation for Hackney Council
|
"""empty message
Revision ID: 0201_another_letter_org
Revises: 0200_another_letter_org
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0201_another_letter_org'
down_revision = '0200_another_letter_org'
from alembic import op
NEW_ORGANISATIONS = [
('509', 'Hackney Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter organisation for Hackney Council<commit_after>
|
"""empty message
Revision ID: 0201_another_letter_org
Revises: 0200_another_letter_org
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0201_another_letter_org'
down_revision = '0200_another_letter_org'
from alembic import op
NEW_ORGANISATIONS = [
('509', 'Hackney Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter organisation for Hackney Council"""empty message
Revision ID: 0201_another_letter_org
Revises: 0200_another_letter_org
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0201_another_letter_org'
down_revision = '0200_another_letter_org'
from alembic import op
NEW_ORGANISATIONS = [
('509', 'Hackney Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter organisation for Hackney Council<commit_after>"""empty message
Revision ID: 0201_another_letter_org
Revises: 0200_another_letter_org
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0201_another_letter_org'
down_revision = '0200_another_letter_org'
from alembic import op
NEW_ORGANISATIONS = [
('509', 'Hackney Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
da186d5d36239f052dfb64e30b0e28ec830bf15d
|
scripts/calculate_handlebar_inertia.py
|
scripts/calculate_handlebar_inertia.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script estimates the inertia of the handlebars and upper part of the
steering column (everything above the torque sensor) about the steer axis.
Although we expect this value to be small, it still influences the dynamics of
the system and influences the values we read from the torque sensor.
For more information (and plots) refer to:
https://github.com/oliverlee/phobos/issues/90
"""
import sys
from scipy.signal import savgol_filter as sg
from load_sim import load_messages, get_records_from_messages, get_time_vector
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
KOLLMORGEN_MAX_TORQUE = 10.78 # N-m
ENCODER_COUNT_PER_REV = 152000
def estimate_inertia(records):
"""We assume no stiffness or damping for the equation of motion is:
T = I*alpha
where T is the sum of torques applied
I is the moment of inertia
alpha is the angular acceleration
"""
t = get_time_vector(records)
torque = (((records.sensors.kollmorgen_actual_torque).astype(float) -
2**11)/2**11 * KOLLMORGEN_MAX_TORQUE) # N-m
angle = np.unwrap(((records.sensors.steer_encoder_count).astype(float) *
2*np.pi / ENCODER_COUNT_PER_REV)) # radians
dt = np.diff(t).mean()
angle_d = sg(angle, 11, 3, deriv=1, delta=dt, mode='nearest')
angle_dd = sg(angle, 11, 3, deriv=2, delta=dt, mode='nearest')
color = sns.color_palette('Paired', 10)
fig, ax = plt.subplots()
ax.plot(t, angle, label='angle', color=color[1])
ax.plot(t, angle_d, label='angular rate', color=color[3])
ax.plot(t, angle_dd, label='angular accel', color=color[5])
ax.plot(t, torque, label='torque', color=color[7])
ax.legend()
plt.show()
ret = np.linalg.lstsq(np.reshape(angle_dd, (-1, 1)),
np.reshape(torque, (-1, 1)))
return np.squeeze(ret[0])
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: {} <log_file>\n'.format(__file__))
print(' <log_file>\tFile containing serialized and framed ' +
'protobuf messages')
sys.exit(1)
messages = load_messages(sys.argv[1])
# ignore first sample as it transmitted before the simulation loop
records = get_records_from_messages(messages)[1:]
inertia = estimate_inertia(records)
print('Calculated handlebar inertia is: {} kg-m^2'.format(inertia))
sys.exit(0)
|
Add script to calculate handlebar inertia
|
Add script to calculate handlebar inertia
Script takes a protobuf log as input. The Kollmorgen actual torque is
converted from DAC values to N-m. The steer encoder count is converted
to an angular acceleration using a Savitzky-Golay filter with window
size 11 and polynomial order 3. The handlebar inertia is calculated from
a least-squares fit.
|
Python
|
bsd-2-clause
|
oliverlee/phobos,oliverlee/phobos,oliverlee/phobos,oliverlee/phobos
|
Add script to calculate handlebar inertia
Script takes a protobuf log as input. The Kollmorgen actual torque is
converted from DAC values to N-m. The steer encoder count is converted
to an angular acceleration using a Savitzky-Golay filter with window
size 11 and polynomial order 3. The handlebar inertia is calculated from
a least-squares fit.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script estimates the inertia of the handlebars and upper part of the
steering column (everything above the torque sensor) about the steer axis.
Although we expect this value to be small, it still influences the dynamics of
the system and influences the values we read from the torque sensor.
For more information (and plots) refer to:
https://github.com/oliverlee/phobos/issues/90
"""
import sys
from scipy.signal import savgol_filter as sg
from load_sim import load_messages, get_records_from_messages, get_time_vector
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
KOLLMORGEN_MAX_TORQUE = 10.78 # N-m
ENCODER_COUNT_PER_REV = 152000
def estimate_inertia(records):
"""We assume no stiffness or damping for the equation of motion is:
T = I*alpha
where T is the sum of torques applied
I is the moment of inertia
alpha is the angular acceleration
"""
t = get_time_vector(records)
torque = (((records.sensors.kollmorgen_actual_torque).astype(float) -
2**11)/2**11 * KOLLMORGEN_MAX_TORQUE) # N-m
angle = np.unwrap(((records.sensors.steer_encoder_count).astype(float) *
2*np.pi / ENCODER_COUNT_PER_REV)) # radians
dt = np.diff(t).mean()
angle_d = sg(angle, 11, 3, deriv=1, delta=dt, mode='nearest')
angle_dd = sg(angle, 11, 3, deriv=2, delta=dt, mode='nearest')
color = sns.color_palette('Paired', 10)
fig, ax = plt.subplots()
ax.plot(t, angle, label='angle', color=color[1])
ax.plot(t, angle_d, label='angular rate', color=color[3])
ax.plot(t, angle_dd, label='angular accel', color=color[5])
ax.plot(t, torque, label='torque', color=color[7])
ax.legend()
plt.show()
ret = np.linalg.lstsq(np.reshape(angle_dd, (-1, 1)),
np.reshape(torque, (-1, 1)))
return np.squeeze(ret[0])
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: {} <log_file>\n'.format(__file__))
print(' <log_file>\tFile containing serialized and framed ' +
'protobuf messages')
sys.exit(1)
messages = load_messages(sys.argv[1])
# ignore first sample as it transmitted before the simulation loop
records = get_records_from_messages(messages)[1:]
inertia = estimate_inertia(records)
print('Calculated handlebar inertia is: {} kg-m^2'.format(inertia))
sys.exit(0)
|
<commit_before><commit_msg>Add script to calculate handlebar inertia
Script takes a protobuf log as input. The Kollmorgen actual torque is
converted from DAC values to N-m. The steer encoder count is converted
to an angular acceleration using a Savitzky-Golay filter with window
size 11 and polynomial order 3. The handlebar inertia is calculated from
a least-squares fit.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script estimates the inertia of the handlebars and upper part of the
steering column (everything above the torque sensor) about the steer axis.
Although we expect this value to be small, it still influences the dynamics of
the system and influences the values we read from the torque sensor.
For more information (and plots) refer to:
https://github.com/oliverlee/phobos/issues/90
"""
import sys
from scipy.signal import savgol_filter as sg
from load_sim import load_messages, get_records_from_messages, get_time_vector
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
KOLLMORGEN_MAX_TORQUE = 10.78 # N-m
ENCODER_COUNT_PER_REV = 152000
def estimate_inertia(records):
"""We assume no stiffness or damping for the equation of motion is:
T = I*alpha
where T is the sum of torques applied
I is the moment of inertia
alpha is the angular acceleration
"""
t = get_time_vector(records)
torque = (((records.sensors.kollmorgen_actual_torque).astype(float) -
2**11)/2**11 * KOLLMORGEN_MAX_TORQUE) # N-m
angle = np.unwrap(((records.sensors.steer_encoder_count).astype(float) *
2*np.pi / ENCODER_COUNT_PER_REV)) # radians
dt = np.diff(t).mean()
angle_d = sg(angle, 11, 3, deriv=1, delta=dt, mode='nearest')
angle_dd = sg(angle, 11, 3, deriv=2, delta=dt, mode='nearest')
color = sns.color_palette('Paired', 10)
fig, ax = plt.subplots()
ax.plot(t, angle, label='angle', color=color[1])
ax.plot(t, angle_d, label='angular rate', color=color[3])
ax.plot(t, angle_dd, label='angular accel', color=color[5])
ax.plot(t, torque, label='torque', color=color[7])
ax.legend()
plt.show()
ret = np.linalg.lstsq(np.reshape(angle_dd, (-1, 1)),
np.reshape(torque, (-1, 1)))
return np.squeeze(ret[0])
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: {} <log_file>\n'.format(__file__))
print(' <log_file>\tFile containing serialized and framed ' +
'protobuf messages')
sys.exit(1)
messages = load_messages(sys.argv[1])
# ignore first sample as it transmitted before the simulation loop
records = get_records_from_messages(messages)[1:]
inertia = estimate_inertia(records)
print('Calculated handlebar inertia is: {} kg-m^2'.format(inertia))
sys.exit(0)
|
Add script to calculate handlebar inertia
Script takes a protobuf log as input. The Kollmorgen actual torque is
converted from DAC values to N-m. The steer encoder count is converted
to an angular acceleration using a Savitzky-Golay filter with window
size 11 and polynomial order 3. The handlebar inertia is calculated from
a least-squares fit.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script estimates the inertia of the handlebars and upper part of the
steering column (everything above the torque sensor) about the steer axis.
Although we expect this value to be small, it still influences the dynamics of
the system and influences the values we read from the torque sensor.
For more information (and plots) refer to:
https://github.com/oliverlee/phobos/issues/90
"""
import sys
from scipy.signal import savgol_filter as sg
from load_sim import load_messages, get_records_from_messages, get_time_vector
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
KOLLMORGEN_MAX_TORQUE = 10.78 # N-m
ENCODER_COUNT_PER_REV = 152000
def estimate_inertia(records):
"""We assume no stiffness or damping for the equation of motion is:
T = I*alpha
where T is the sum of torques applied
I is the moment of inertia
alpha is the angular acceleration
"""
t = get_time_vector(records)
torque = (((records.sensors.kollmorgen_actual_torque).astype(float) -
2**11)/2**11 * KOLLMORGEN_MAX_TORQUE) # N-m
angle = np.unwrap(((records.sensors.steer_encoder_count).astype(float) *
2*np.pi / ENCODER_COUNT_PER_REV)) # radians
dt = np.diff(t).mean()
angle_d = sg(angle, 11, 3, deriv=1, delta=dt, mode='nearest')
angle_dd = sg(angle, 11, 3, deriv=2, delta=dt, mode='nearest')
color = sns.color_palette('Paired', 10)
fig, ax = plt.subplots()
ax.plot(t, angle, label='angle', color=color[1])
ax.plot(t, angle_d, label='angular rate', color=color[3])
ax.plot(t, angle_dd, label='angular accel', color=color[5])
ax.plot(t, torque, label='torque', color=color[7])
ax.legend()
plt.show()
ret = np.linalg.lstsq(np.reshape(angle_dd, (-1, 1)),
np.reshape(torque, (-1, 1)))
return np.squeeze(ret[0])
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: {} <log_file>\n'.format(__file__))
print(' <log_file>\tFile containing serialized and framed ' +
'protobuf messages')
sys.exit(1)
messages = load_messages(sys.argv[1])
# ignore first sample as it transmitted before the simulation loop
records = get_records_from_messages(messages)[1:]
inertia = estimate_inertia(records)
print('Calculated handlebar inertia is: {} kg-m^2'.format(inertia))
sys.exit(0)
|
<commit_before><commit_msg>Add script to calculate handlebar inertia
Script takes a protobuf log as input. The Kollmorgen actual torque is
converted from DAC values to N-m. The steer encoder count is converted
to an angular acceleration using a Savitzky-Golay filter with window
size 11 and polynomial order 3. The handlebar inertia is calculated from
a least-squares fit.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This script estimates the inertia of the handlebars and upper part of the
steering column (everything above the torque sensor) about the steer axis.
Although we expect this value to be small, it still influences the dynamics of
the system and influences the values we read from the torque sensor.
For more information (and plots) refer to:
https://github.com/oliverlee/phobos/issues/90
"""
import sys
from scipy.signal import savgol_filter as sg
from load_sim import load_messages, get_records_from_messages, get_time_vector
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
KOLLMORGEN_MAX_TORQUE = 10.78 # N-m
ENCODER_COUNT_PER_REV = 152000
def estimate_inertia(records):
"""We assume no stiffness or damping for the equation of motion is:
T = I*alpha
where T is the sum of torques applied
I is the moment of inertia
alpha is the angular acceleration
"""
t = get_time_vector(records)
torque = (((records.sensors.kollmorgen_actual_torque).astype(float) -
2**11)/2**11 * KOLLMORGEN_MAX_TORQUE) # N-m
angle = np.unwrap(((records.sensors.steer_encoder_count).astype(float) *
2*np.pi / ENCODER_COUNT_PER_REV)) # radians
dt = np.diff(t).mean()
angle_d = sg(angle, 11, 3, deriv=1, delta=dt, mode='nearest')
angle_dd = sg(angle, 11, 3, deriv=2, delta=dt, mode='nearest')
color = sns.color_palette('Paired', 10)
fig, ax = plt.subplots()
ax.plot(t, angle, label='angle', color=color[1])
ax.plot(t, angle_d, label='angular rate', color=color[3])
ax.plot(t, angle_dd, label='angular accel', color=color[5])
ax.plot(t, torque, label='torque', color=color[7])
ax.legend()
plt.show()
ret = np.linalg.lstsq(np.reshape(angle_dd, (-1, 1)),
np.reshape(torque, (-1, 1)))
return np.squeeze(ret[0])
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: {} <log_file>\n'.format(__file__))
print(' <log_file>\tFile containing serialized and framed ' +
'protobuf messages')
sys.exit(1)
messages = load_messages(sys.argv[1])
# ignore first sample as it transmitted before the simulation loop
records = get_records_from_messages(messages)[1:]
inertia = estimate_inertia(records)
print('Calculated handlebar inertia is: {} kg-m^2'.format(inertia))
sys.exit(0)
|
|
2316dddecc7891d1544ef72485afd067ef759c6b
|
examples/xpath_test.py
|
examples/xpath_test.py
|
""" NOTE: Using CSS Selectors is better than using XPath!
XPath Selectors break very easily with website changes. """
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_xpath(self):
self.open("https://xkcd.com/1319/")
self.assert_element('//img')
self.assert_element('/html/body/div[2]/div[2]/img')
self.click("//ul/li[6]/a")
self.assert_text("xkcd.com", "//h2")
|
Add an example test with XPath selectors
|
Add an example test with XPath selectors
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add an example test with XPath selectors
|
""" NOTE: Using CSS Selectors is better than using XPath!
XPath Selectors break very easily with website changes. """
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_xpath(self):
self.open("https://xkcd.com/1319/")
self.assert_element('//img')
self.assert_element('/html/body/div[2]/div[2]/img')
self.click("//ul/li[6]/a")
self.assert_text("xkcd.com", "//h2")
|
<commit_before><commit_msg>Add an example test with XPath selectors<commit_after>
|
""" NOTE: Using CSS Selectors is better than using XPath!
XPath Selectors break very easily with website changes. """
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_xpath(self):
self.open("https://xkcd.com/1319/")
self.assert_element('//img')
self.assert_element('/html/body/div[2]/div[2]/img')
self.click("//ul/li[6]/a")
self.assert_text("xkcd.com", "//h2")
|
Add an example test with XPath selectors""" NOTE: Using CSS Selectors is better than using XPath!
XPath Selectors break very easily with website changes. """
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_xpath(self):
self.open("https://xkcd.com/1319/")
self.assert_element('//img')
self.assert_element('/html/body/div[2]/div[2]/img')
self.click("//ul/li[6]/a")
self.assert_text("xkcd.com", "//h2")
|
<commit_before><commit_msg>Add an example test with XPath selectors<commit_after>""" NOTE: Using CSS Selectors is better than using XPath!
XPath Selectors break very easily with website changes. """
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_xpath(self):
self.open("https://xkcd.com/1319/")
self.assert_element('//img')
self.assert_element('/html/body/div[2]/div[2]/img')
self.click("//ul/li[6]/a")
self.assert_text("xkcd.com", "//h2")
|
|
893a624f8a6646c9211ee6510869ad1b2aae3312
|
incubation/epub_for_kobo_py/hello.py
|
incubation/epub_for_kobo_py/hello.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os
def main():
"""Main function"""
tmp_dir = "/tmp"
epub_file = "foo.epub"
# CHECK IF THE EPUBFILE EXISTS
# TODO
# MAKE A TMP DIR
# CHDIR
# UNZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("unzip " + epub_file)
# UPDATE TOC
# TODO
# ZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("zip -X -Z store " + epub_file + " mimetype")
os.system("zip -r " + epub_file + " META-INF/ OEBPS/")
# CHECK
os.system("epubcheck " + epub_file)
# MOVE EPUBFILE
# REMOVE TMPDIR
if __name__ == '__main__':
main()
|
Add a project in the 'incubator'.
|
Add a project in the 'incubator'.
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a project in the 'incubator'.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os
def main():
"""Main function"""
tmp_dir = "/tmp"
epub_file = "foo.epub"
# CHECK IF THE EPUBFILE EXISTS
# TODO
# MAKE A TMP DIR
# CHDIR
# UNZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("unzip " + epub_file)
# UPDATE TOC
# TODO
# ZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("zip -X -Z store " + epub_file + " mimetype")
os.system("zip -r " + epub_file + " META-INF/ OEBPS/")
# CHECK
os.system("epubcheck " + epub_file)
# MOVE EPUBFILE
# REMOVE TMPDIR
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a project in the 'incubator'.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os
def main():
"""Main function"""
tmp_dir = "/tmp"
epub_file = "foo.epub"
# CHECK IF THE EPUBFILE EXISTS
# TODO
# MAKE A TMP DIR
# CHDIR
# UNZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("unzip " + epub_file)
# UPDATE TOC
# TODO
# ZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("zip -X -Z store " + epub_file + " mimetype")
os.system("zip -r " + epub_file + " META-INF/ OEBPS/")
# CHECK
os.system("epubcheck " + epub_file)
# MOVE EPUBFILE
# REMOVE TMPDIR
if __name__ == '__main__':
main()
|
Add a project in the 'incubator'.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os
def main():
"""Main function"""
tmp_dir = "/tmp"
epub_file = "foo.epub"
# CHECK IF THE EPUBFILE EXISTS
# TODO
# MAKE A TMP DIR
# CHDIR
# UNZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("unzip " + epub_file)
# UPDATE TOC
# TODO
# ZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("zip -X -Z store " + epub_file + " mimetype")
os.system("zip -r " + epub_file + " META-INF/ OEBPS/")
# CHECK
os.system("epubcheck " + epub_file)
# MOVE EPUBFILE
# REMOVE TMPDIR
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a project in the 'incubator'.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os
def main():
"""Main function"""
tmp_dir = "/tmp"
epub_file = "foo.epub"
# CHECK IF THE EPUBFILE EXISTS
# TODO
# MAKE A TMP DIR
# CHDIR
# UNZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("unzip " + epub_file)
# UPDATE TOC
# TODO
# ZIP
# see http://fr.flossmanuals.net/creer-un-epub/ch011_extraire-et-recompresser-un-epub
os.system("zip -X -Z store " + epub_file + " mimetype")
os.system("zip -r " + epub_file + " META-INF/ OEBPS/")
# CHECK
os.system("epubcheck " + epub_file)
# MOVE EPUBFILE
# REMOVE TMPDIR
if __name__ == '__main__':
main()
|
|
829ed9d8de8f1df73d24edbd2f9c52438eab8858
|
common_configs/utils.py
|
common_configs/utils.py
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
def merge_items(base, new_items):
"""
Merges two lists and eliminates duplicates
:type base: list
:type new_items: list
:rtype: list
"""
for item in new_items:
if not item in base:
base = base + [item]
return base
|
Include merge_items helper for merging config lists and eliminating dupes
|
Include merge_items helper for merging config lists and eliminating dupes
|
Python
|
bsd-3-clause
|
nigma/django-common-configs,nigma/django-common-configs
|
Include merge_items helper for merging config lists and eliminating dupes
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
def merge_items(base, new_items):
"""
Merges two lists and eliminates duplicates
:type base: list
:type new_items: list
:rtype: list
"""
for item in new_items:
if not item in base:
base = base + [item]
return base
|
<commit_before><commit_msg>Include merge_items helper for merging config lists and eliminating dupes<commit_after>
|
#-*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
def merge_items(base, new_items):
"""
Merges two lists and eliminates duplicates
:type base: list
:type new_items: list
:rtype: list
"""
for item in new_items:
if not item in base:
base = base + [item]
return base
|
Include merge_items helper for merging config lists and eliminating dupes#-*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
def merge_items(base, new_items):
"""
Merges two lists and eliminates duplicates
:type base: list
:type new_items: list
:rtype: list
"""
for item in new_items:
if not item in base:
base = base + [item]
return base
|
<commit_before><commit_msg>Include merge_items helper for merging config lists and eliminating dupes<commit_after>#-*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
def merge_items(base, new_items):
"""
Merges two lists and eliminates duplicates
:type base: list
:type new_items: list
:rtype: list
"""
for item in new_items:
if not item in base:
base = base + [item]
return base
|
|
6edee679e6c3478369175fa53fd3520d9b39abf1
|
gen_transfer.py
|
gen_transfer.py
|
from redis import Redis
from chain import Transaction
from user import LazyUser
from config import *
import json
if __name__ == '__main__':
r = Redis()
hero = LazyUser()
receiver = LazyUser()
t = Transaction(
prev_hash=r.get(PREV_HASH_KEY),
transaction_type='SEND',
sender=hero.pub,
receiver=receiver.pub,
)
message, signature = hero.sign(t)
t.add_signature(signature)
print(json.dumps(t.to_redis(), indent=4))
t.write_to_redis(r)
print(r.llen(SEND_TRANSACTION_QUEUE_KEY))
|
Add a script that allows transfer of bitcoins between two users
|
Add a script that allows transfer of bitcoins between two users
|
Python
|
mit
|
paramsingh/lazycoin,paramsingh/lazycoin,paramsingh/lazycoin
|
Add a script that allows transfer of bitcoins between two users
|
from redis import Redis
from chain import Transaction
from user import LazyUser
from config import *
import json
if __name__ == '__main__':
r = Redis()
hero = LazyUser()
receiver = LazyUser()
t = Transaction(
prev_hash=r.get(PREV_HASH_KEY),
transaction_type='SEND',
sender=hero.pub,
receiver=receiver.pub,
)
message, signature = hero.sign(t)
t.add_signature(signature)
print(json.dumps(t.to_redis(), indent=4))
t.write_to_redis(r)
print(r.llen(SEND_TRANSACTION_QUEUE_KEY))
|
<commit_before><commit_msg>Add a script that allows transfer of bitcoins between two users<commit_after>
|
from redis import Redis
from chain import Transaction
from user import LazyUser
from config import *
import json
if __name__ == '__main__':
r = Redis()
hero = LazyUser()
receiver = LazyUser()
t = Transaction(
prev_hash=r.get(PREV_HASH_KEY),
transaction_type='SEND',
sender=hero.pub,
receiver=receiver.pub,
)
message, signature = hero.sign(t)
t.add_signature(signature)
print(json.dumps(t.to_redis(), indent=4))
t.write_to_redis(r)
print(r.llen(SEND_TRANSACTION_QUEUE_KEY))
|
Add a script that allows transfer of bitcoins between two usersfrom redis import Redis
from chain import Transaction
from user import LazyUser
from config import *
import json
if __name__ == '__main__':
r = Redis()
hero = LazyUser()
receiver = LazyUser()
t = Transaction(
prev_hash=r.get(PREV_HASH_KEY),
transaction_type='SEND',
sender=hero.pub,
receiver=receiver.pub,
)
message, signature = hero.sign(t)
t.add_signature(signature)
print(json.dumps(t.to_redis(), indent=4))
t.write_to_redis(r)
print(r.llen(SEND_TRANSACTION_QUEUE_KEY))
|
<commit_before><commit_msg>Add a script that allows transfer of bitcoins between two users<commit_after>from redis import Redis
from chain import Transaction
from user import LazyUser
from config import *
import json
if __name__ == '__main__':
r = Redis()
hero = LazyUser()
receiver = LazyUser()
t = Transaction(
prev_hash=r.get(PREV_HASH_KEY),
transaction_type='SEND',
sender=hero.pub,
receiver=receiver.pub,
)
message, signature = hero.sign(t)
t.add_signature(signature)
print(json.dumps(t.to_redis(), indent=4))
t.write_to_redis(r)
print(r.llen(SEND_TRANSACTION_QUEUE_KEY))
|
|
6cd85f616d933c006abb45f4d682194157df4005
|
morse_trainer/test_receive_group.py
|
morse_trainer/test_receive_group.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Test of grouping - make the receive boxgroup.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget, QRadioButton
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QGridLayout, QGroupBox
class GroupBox(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Test of Receive group control')
layout = QGridLayout()
self.setLayout(layout)
groupbox = QGroupBox("Receive")
# groupbox.setCheckable(True)
layout.addWidget(groupbox)
vbox = QVBoxLayout()
groupbox.setLayout(vbox)
radiobutton = QRadioButton("Characters")
radiobutton.setChecked(True)
vbox.addWidget(radiobutton)
radiobutton = QRadioButton("Groups")
vbox.addWidget(radiobutton)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
screen = GroupBox()
screen.show()
sys.exit(app.exec())
|
Test program for the 'Receive' group control
|
Test program for the 'Receive' group control
|
Python
|
mit
|
rzzzwilson/morse,rzzzwilson/morse
|
Test program for the 'Receive' group control
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Test of grouping - make the receive boxgroup.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget, QRadioButton
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QGridLayout, QGroupBox
class GroupBox(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Test of Receive group control')
layout = QGridLayout()
self.setLayout(layout)
groupbox = QGroupBox("Receive")
# groupbox.setCheckable(True)
layout.addWidget(groupbox)
vbox = QVBoxLayout()
groupbox.setLayout(vbox)
radiobutton = QRadioButton("Characters")
radiobutton.setChecked(True)
vbox.addWidget(radiobutton)
radiobutton = QRadioButton("Groups")
vbox.addWidget(radiobutton)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
screen = GroupBox()
screen.show()
sys.exit(app.exec())
|
<commit_before><commit_msg>Test program for the 'Receive' group control<commit_after>
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Test of grouping - make the receive boxgroup.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget, QRadioButton
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QGridLayout, QGroupBox
class GroupBox(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Test of Receive group control')
layout = QGridLayout()
self.setLayout(layout)
groupbox = QGroupBox("Receive")
# groupbox.setCheckable(True)
layout.addWidget(groupbox)
vbox = QVBoxLayout()
groupbox.setLayout(vbox)
radiobutton = QRadioButton("Characters")
radiobutton.setChecked(True)
vbox.addWidget(radiobutton)
radiobutton = QRadioButton("Groups")
vbox.addWidget(radiobutton)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
screen = GroupBox()
screen.show()
sys.exit(app.exec())
|
Test program for the 'Receive' group control#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Test of grouping - make the receive boxgroup.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget, QRadioButton
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QGridLayout, QGroupBox
class GroupBox(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Test of Receive group control')
layout = QGridLayout()
self.setLayout(layout)
groupbox = QGroupBox("Receive")
# groupbox.setCheckable(True)
layout.addWidget(groupbox)
vbox = QVBoxLayout()
groupbox.setLayout(vbox)
radiobutton = QRadioButton("Characters")
radiobutton.setChecked(True)
vbox.addWidget(radiobutton)
radiobutton = QRadioButton("Groups")
vbox.addWidget(radiobutton)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
screen = GroupBox()
screen.show()
sys.exit(app.exec())
|
<commit_before><commit_msg>Test program for the 'Receive' group control<commit_after>#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Test of grouping - make the receive boxgroup.
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget, QRadioButton
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QGridLayout, QGroupBox
class GroupBox(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Test of Receive group control')
layout = QGridLayout()
self.setLayout(layout)
groupbox = QGroupBox("Receive")
# groupbox.setCheckable(True)
layout.addWidget(groupbox)
vbox = QVBoxLayout()
groupbox.setLayout(vbox)
radiobutton = QRadioButton("Characters")
radiobutton.setChecked(True)
vbox.addWidget(radiobutton)
radiobutton = QRadioButton("Groups")
vbox.addWidget(radiobutton)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
screen = GroupBox()
screen.show()
sys.exit(app.exec())
|
|
85896d2fc168278c2cadeb3ddb02830591c2fd8e
|
tests/mpath_test.py
|
tests/mpath_test.py
|
import unittest
import os
from utils import create_sparse_tempfile
from gi.repository import BlockDev
if not BlockDev.is_initialized():
BlockDev.init(None, None)
class MpathTestCase(unittest.TestCase):
def setUp(self):
self.dev_file = create_sparse_tempfile("mpath_test", 1024**3)
succ, loop = BlockDev.loop_setup(self.dev_file)
if not succ:
raise RuntimeError("Failed to setup loop device for testing")
self.loop_dev = "/dev/%s" % loop
def test_is_mpath_member(self):
"""Verify that is_mpath_member works as expected"""
# just test that some non-mpath is not reported as a multipath member
# device and no error is reported
self.assertFalse(BlockDev.mpath_is_mpath_member("/dev/loop0"))
def tearDown(self):
succ = BlockDev.loop_teardown(self.loop_dev)
if not succ:
os.unlink(self.dev_file)
raise RuntimeError("Failed to tear down loop device used for testing")
os.unlink(self.dev_file)
|
Add tests for what we can easily test from the mpath plugin
|
Add tests for what we can easily test from the mpath plugin
Testing mpath is generally not easy as we would need actual multipath devices
that are kind of hard to setup on top of loop devices.
|
Python
|
lgpl-2.1
|
atodorov/libblockdev,rhinstaller/libblockdev,dashea/libblockdev,dashea/libblockdev,rhinstaller/libblockdev,rhinstaller/libblockdev,vpodzime/libblockdev,snbueno/libblockdev,atodorov/libblockdev,snbueno/libblockdev,vpodzime/libblockdev,vpodzime/libblockdev,atodorov/libblockdev
|
Add tests for what we can easily test from the mpath plugin
Testing mpath is generally not easy as we would need actual multipath devices
that are kind of hard to setup on top of loop devices.
|
import unittest
import os
from utils import create_sparse_tempfile
from gi.repository import BlockDev
if not BlockDev.is_initialized():
BlockDev.init(None, None)
class MpathTestCase(unittest.TestCase):
def setUp(self):
self.dev_file = create_sparse_tempfile("mpath_test", 1024**3)
succ, loop = BlockDev.loop_setup(self.dev_file)
if not succ:
raise RuntimeError("Failed to setup loop device for testing")
self.loop_dev = "/dev/%s" % loop
def test_is_mpath_member(self):
"""Verify that is_mpath_member works as expected"""
# just test that some non-mpath is not reported as a multipath member
# device and no error is reported
self.assertFalse(BlockDev.mpath_is_mpath_member("/dev/loop0"))
def tearDown(self):
succ = BlockDev.loop_teardown(self.loop_dev)
if not succ:
os.unlink(self.dev_file)
raise RuntimeError("Failed to tear down loop device used for testing")
os.unlink(self.dev_file)
|
<commit_before><commit_msg>Add tests for what we can easily test from the mpath plugin
Testing mpath is generally not easy as we would need actual multipath devices
that are kind of hard to setup on top of loop devices.<commit_after>
|
import unittest
import os
from utils import create_sparse_tempfile
from gi.repository import BlockDev
if not BlockDev.is_initialized():
BlockDev.init(None, None)
class MpathTestCase(unittest.TestCase):
def setUp(self):
self.dev_file = create_sparse_tempfile("mpath_test", 1024**3)
succ, loop = BlockDev.loop_setup(self.dev_file)
if not succ:
raise RuntimeError("Failed to setup loop device for testing")
self.loop_dev = "/dev/%s" % loop
def test_is_mpath_member(self):
"""Verify that is_mpath_member works as expected"""
# just test that some non-mpath is not reported as a multipath member
# device and no error is reported
self.assertFalse(BlockDev.mpath_is_mpath_member("/dev/loop0"))
def tearDown(self):
succ = BlockDev.loop_teardown(self.loop_dev)
if not succ:
os.unlink(self.dev_file)
raise RuntimeError("Failed to tear down loop device used for testing")
os.unlink(self.dev_file)
|
Add tests for what we can easily test from the mpath plugin
Testing mpath is generally not easy as we would need actual multipath devices
that are kind of hard to setup on top of loop devices.import unittest
import os
from utils import create_sparse_tempfile
from gi.repository import BlockDev
if not BlockDev.is_initialized():
BlockDev.init(None, None)
class MpathTestCase(unittest.TestCase):
def setUp(self):
self.dev_file = create_sparse_tempfile("mpath_test", 1024**3)
succ, loop = BlockDev.loop_setup(self.dev_file)
if not succ:
raise RuntimeError("Failed to setup loop device for testing")
self.loop_dev = "/dev/%s" % loop
def test_is_mpath_member(self):
"""Verify that is_mpath_member works as expected"""
# just test that some non-mpath is not reported as a multipath member
# device and no error is reported
self.assertFalse(BlockDev.mpath_is_mpath_member("/dev/loop0"))
def tearDown(self):
succ = BlockDev.loop_teardown(self.loop_dev)
if not succ:
os.unlink(self.dev_file)
raise RuntimeError("Failed to tear down loop device used for testing")
os.unlink(self.dev_file)
|
<commit_before><commit_msg>Add tests for what we can easily test from the mpath plugin
Testing mpath is generally not easy as we would need actual multipath devices
that are kind of hard to setup on top of loop devices.<commit_after>import unittest
import os
from utils import create_sparse_tempfile
from gi.repository import BlockDev
if not BlockDev.is_initialized():
BlockDev.init(None, None)
class MpathTestCase(unittest.TestCase):
def setUp(self):
self.dev_file = create_sparse_tempfile("mpath_test", 1024**3)
succ, loop = BlockDev.loop_setup(self.dev_file)
if not succ:
raise RuntimeError("Failed to setup loop device for testing")
self.loop_dev = "/dev/%s" % loop
def test_is_mpath_member(self):
"""Verify that is_mpath_member works as expected"""
# just test that some non-mpath is not reported as a multipath member
# device and no error is reported
self.assertFalse(BlockDev.mpath_is_mpath_member("/dev/loop0"))
def tearDown(self):
succ = BlockDev.loop_teardown(self.loop_dev)
if not succ:
os.unlink(self.dev_file)
raise RuntimeError("Failed to tear down loop device used for testing")
os.unlink(self.dev_file)
|
|
bc7f9eba295cf1a3d9d8036ac1db993b6a37b58c
|
umibukela/models.py
|
umibukela/models.py
|
from django.db import models
class Partner(models.Model):
short_name = models.CharField(max_length=200)
full_name = models.CharField(max_length=200)
physical_address = models.CharField(max_length=200)
contact_person = models.CharField(max_length=200)
telephone = models.CharField(max_length=200)
email_address = models.EmailField(max_length=200)
intro_title = models.CharField(max_length=200)
intro_statement = models.CharField(max_length=200)
intro_image = models.ImageField()
context_quote = models.CharField(max_length=200)
context_statement = models.CharField(max_length=200)
context_image = models.ImageField()
|
Add very rough, potientially unusable model for field names
|
Add very rough, potientially unusable model for field names
|
Python
|
mit
|
Code4SA/umibukela,Code4SA/umibukela,Code4SA/umibukela,Code4SA/umibukela
|
Add very rough, potientially unusable model for field names
|
from django.db import models
class Partner(models.Model):
short_name = models.CharField(max_length=200)
full_name = models.CharField(max_length=200)
physical_address = models.CharField(max_length=200)
contact_person = models.CharField(max_length=200)
telephone = models.CharField(max_length=200)
email_address = models.EmailField(max_length=200)
intro_title = models.CharField(max_length=200)
intro_statement = models.CharField(max_length=200)
intro_image = models.ImageField()
context_quote = models.CharField(max_length=200)
context_statement = models.CharField(max_length=200)
context_image = models.ImageField()
|
<commit_before><commit_msg>Add very rough, potientially unusable model for field names<commit_after>
|
from django.db import models
class Partner(models.Model):
short_name = models.CharField(max_length=200)
full_name = models.CharField(max_length=200)
physical_address = models.CharField(max_length=200)
contact_person = models.CharField(max_length=200)
telephone = models.CharField(max_length=200)
email_address = models.EmailField(max_length=200)
intro_title = models.CharField(max_length=200)
intro_statement = models.CharField(max_length=200)
intro_image = models.ImageField()
context_quote = models.CharField(max_length=200)
context_statement = models.CharField(max_length=200)
context_image = models.ImageField()
|
Add very rough, potientially unusable model for field namesfrom django.db import models
class Partner(models.Model):
short_name = models.CharField(max_length=200)
full_name = models.CharField(max_length=200)
physical_address = models.CharField(max_length=200)
contact_person = models.CharField(max_length=200)
telephone = models.CharField(max_length=200)
email_address = models.EmailField(max_length=200)
intro_title = models.CharField(max_length=200)
intro_statement = models.CharField(max_length=200)
intro_image = models.ImageField()
context_quote = models.CharField(max_length=200)
context_statement = models.CharField(max_length=200)
context_image = models.ImageField()
|
<commit_before><commit_msg>Add very rough, potientially unusable model for field names<commit_after>from django.db import models
class Partner(models.Model):
short_name = models.CharField(max_length=200)
full_name = models.CharField(max_length=200)
physical_address = models.CharField(max_length=200)
contact_person = models.CharField(max_length=200)
telephone = models.CharField(max_length=200)
email_address = models.EmailField(max_length=200)
intro_title = models.CharField(max_length=200)
intro_statement = models.CharField(max_length=200)
intro_image = models.ImageField()
context_quote = models.CharField(max_length=200)
context_statement = models.CharField(max_length=200)
context_image = models.ImageField()
|
|
c434d182d2f6e0535314e8fcdbac01324bf9395b
|
tasks.py
|
tasks.py
|
from invoke import run, task
@task
def deploy():
print('So you want to deploy? Let\'s get started.')
# Static Files.
print('- Run the stylesheets through Compass using "Production" settings...')
run('compass compile -e production --force -q')
print('- Collecting the static files and throwing them on S3...')
run('python manage.py collectstatic --configuration=Production --noinput -v 0')
# Heroku.
print('- Deploying Hello! Base to Heroku...')
run('git push heroku master')
# Done!
print('')
print('All done!')
|
Add our little deploy script to H!B.
|
Add our little deploy script to H!B.
|
Python
|
apache-2.0
|
hello-base/web,hello-base/web,hello-base/web,hello-base/web
|
Add our little deploy script to H!B.
|
from invoke import run, task
@task
def deploy():
print('So you want to deploy? Let\'s get started.')
# Static Files.
print('- Run the stylesheets through Compass using "Production" settings...')
run('compass compile -e production --force -q')
print('- Collecting the static files and throwing them on S3...')
run('python manage.py collectstatic --configuration=Production --noinput -v 0')
# Heroku.
print('- Deploying Hello! Base to Heroku...')
run('git push heroku master')
# Done!
print('')
print('All done!')
|
<commit_before><commit_msg>Add our little deploy script to H!B.<commit_after>
|
from invoke import run, task
@task
def deploy():
print('So you want to deploy? Let\'s get started.')
# Static Files.
print('- Run the stylesheets through Compass using "Production" settings...')
run('compass compile -e production --force -q')
print('- Collecting the static files and throwing them on S3...')
run('python manage.py collectstatic --configuration=Production --noinput -v 0')
# Heroku.
print('- Deploying Hello! Base to Heroku...')
run('git push heroku master')
# Done!
print('')
print('All done!')
|
Add our little deploy script to H!B.from invoke import run, task
@task
def deploy():
print('So you want to deploy? Let\'s get started.')
# Static Files.
print('- Run the stylesheets through Compass using "Production" settings...')
run('compass compile -e production --force -q')
print('- Collecting the static files and throwing them on S3...')
run('python manage.py collectstatic --configuration=Production --noinput -v 0')
# Heroku.
print('- Deploying Hello! Base to Heroku...')
run('git push heroku master')
# Done!
print('')
print('All done!')
|
<commit_before><commit_msg>Add our little deploy script to H!B.<commit_after>from invoke import run, task
@task
def deploy():
print('So you want to deploy? Let\'s get started.')
# Static Files.
print('- Run the stylesheets through Compass using "Production" settings...')
run('compass compile -e production --force -q')
print('- Collecting the static files and throwing them on S3...')
run('python manage.py collectstatic --configuration=Production --noinput -v 0')
# Heroku.
print('- Deploying Hello! Base to Heroku...')
run('git push heroku master')
# Done!
print('')
print('All done!')
|
|
3f7b1ceb95b0f918f03d08e9b796c88d85764280
|
logicaldelete/managers.py
|
logicaldelete/managers.py
|
from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_query_set(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_query_set().filter(*args, **kwargs)
|
from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_queryset(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_queryset().filter(*args, **kwargs)
|
Use get_queryset instead of get_query_set
|
Use get_queryset instead of get_query_set
Starting in Django 1.8, get_query_set will now be get_queryset. This change was
announced as part of the Django 1.6 release and now in Django 1.7 throws a
RemovedInDjango18Warning. See:
https://docs.djangoproject.com/en/1.7/releases/1.6/#get-query-set-and-similar-methods-renamed-to-get-queryset
|
Python
|
mit
|
pinax/pinax-models,naringas/pinax-models,Ubiwhere/pinax-models
|
from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_query_set(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_query_set().filter(*args, **kwargs)
Use get_queryset instead of get_query_set
Starting in Django 1.8, get_query_set will now be get_queryset. This change was
announced as part of the Django 1.6 release and now in Django 1.7 throws a
RemovedInDjango18Warning. See:
https://docs.djangoproject.com/en/1.7/releases/1.6/#get-query-set-and-similar-methods-renamed-to-get-queryset
|
from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_queryset(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_queryset().filter(*args, **kwargs)
|
<commit_before>from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_query_set(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_query_set().filter(*args, **kwargs)
<commit_msg>Use get_queryset instead of get_query_set
Starting in Django 1.8, get_query_set will now be get_queryset. This change was
announced as part of the Django 1.6 release and now in Django 1.7 throws a
RemovedInDjango18Warning. See:
https://docs.djangoproject.com/en/1.7/releases/1.6/#get-query-set-and-similar-methods-renamed-to-get-queryset<commit_after>
|
from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_queryset(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_queryset().filter(*args, **kwargs)
|
from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_query_set(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_query_set().filter(*args, **kwargs)
Use get_queryset instead of get_query_set
Starting in Django 1.8, get_query_set will now be get_queryset. This change was
announced as part of the Django 1.6 release and now in Django 1.7 throws a
RemovedInDjango18Warning. See:
https://docs.djangoproject.com/en/1.7/releases/1.6/#get-query-set-and-similar-methods-renamed-to-get-querysetfrom django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_queryset(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_queryset().filter(*args, **kwargs)
|
<commit_before>from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_query_set(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_query_set().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_query_set().filter(*args, **kwargs)
<commit_msg>Use get_queryset instead of get_query_set
Starting in Django 1.8, get_query_set will now be get_queryset. This change was
announced as part of the Django 1.6 release and now in Django 1.7 throws a
RemovedInDjango18Warning. See:
https://docs.djangoproject.com/en/1.7/releases/1.6/#get-query-set-and-similar-methods-renamed-to-get-queryset<commit_after>from django.db import models
from logicaldelete.query import LogicalDeleteQuerySet
class LogicalDeletedManager(models.Manager):
"""
A manager that serves as the default manager for `logicaldelete.models.Model`
providing the filtering out of logically deleted objects. In addition, it
provides named querysets for getting the deleted objects.
"""
def get_queryset(self):
if self.model:
return LogicalDeleteQuerySet(self.model, using=self._db).filter(
date_removed__isnull=True
)
def all_with_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset()
def only_deleted(self):
if self.model:
return super(LogicalDeletedManager, self).get_queryset().filter(
date_removed__isnull=False
)
def get(self, *args, **kwargs):
return self.all_with_deleted().get(*args, **kwargs)
def filter(self, *args, **kwargs):
if "pk" in kwargs:
return self.all_with_deleted().filter(*args, **kwargs)
return self.get_queryset().filter(*args, **kwargs)
|
debc76258e35aaa9ebc1eaeb717b82d7bded2bb5
|
ceph_deploy/tests/test_cli_admin.py
|
ceph_deploy/tests/test_cli_admin.py
|
import pytest
import subprocess
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'admin', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy admin' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_hosts(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy admin' in result
assert 'too few arguments' in result
assert err.value.status == 2
def test_bad_no_conf(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'No such file or directory: \'ceph.conf\'' in result
assert err.value.status == 1
def test_bad_no_key(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'ceph.client.admin.keyring not found' in result
assert err.value.status == 1
|
Add CLI test for "ceph-deploy admin"
|
[RM-11694] Add CLI test for "ceph-deploy admin"
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
Vicente-Cheng/ceph-deploy,SUSE/ceph-deploy,alfredodeza/ceph-deploy,ceph/ceph-deploy,isyippee/ceph-deploy,imzhulei/ceph-deploy,trhoden/ceph-deploy,isyippee/ceph-deploy,ghxandsky/ceph-deploy,Vicente-Cheng/ceph-deploy,zhouyuan/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ghxandsky/ceph-deploy,osynge/ceph-deploy,SUSE/ceph-deploy,codenrhoden/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,zhouyuan/ceph-deploy,shenhequnying/ceph-deploy,shenhequnying/ceph-deploy,ceph/ceph-deploy,ddiss/ceph-deploy,branto1/ceph-deploy,branto1/ceph-deploy,codenrhoden/ceph-deploy,ddiss/ceph-deploy,osynge/ceph-deploy,alfredodeza/ceph-deploy,imzhulei/ceph-deploy,trhoden/ceph-deploy
|
[RM-11694] Add CLI test for "ceph-deploy admin"
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import pytest
import subprocess
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'admin', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy admin' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_hosts(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy admin' in result
assert 'too few arguments' in result
assert err.value.status == 2
def test_bad_no_conf(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'No such file or directory: \'ceph.conf\'' in result
assert err.value.status == 1
def test_bad_no_key(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'ceph.client.admin.keyring not found' in result
assert err.value.status == 1
|
<commit_before><commit_msg>[RM-11694] Add CLI test for "ceph-deploy admin"
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import pytest
import subprocess
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'admin', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy admin' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_hosts(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy admin' in result
assert 'too few arguments' in result
assert err.value.status == 2
def test_bad_no_conf(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'No such file or directory: \'ceph.conf\'' in result
assert err.value.status == 1
def test_bad_no_key(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'ceph.client.admin.keyring not found' in result
assert err.value.status == 1
|
[RM-11694] Add CLI test for "ceph-deploy admin"
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>import pytest
import subprocess
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'admin', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy admin' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_hosts(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy admin' in result
assert 'too few arguments' in result
assert err.value.status == 2
def test_bad_no_conf(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'No such file or directory: \'ceph.conf\'' in result
assert err.value.status == 1
def test_bad_no_key(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'ceph.client.admin.keyring not found' in result
assert err.value.status == 1
|
<commit_before><commit_msg>[RM-11694] Add CLI test for "ceph-deploy admin"
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import pytest
import subprocess
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'admin', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy admin' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_hosts(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy admin' in result
assert 'too few arguments' in result
assert err.value.status == 2
def test_bad_no_conf(tmpdir, cli):
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'No such file or directory: \'ceph.conf\'' in result
assert err.value.status == 1
def test_bad_no_key(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'admin', 'host1'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'ceph.client.admin.keyring not found' in result
assert err.value.status == 1
|
|
a79c484349e852551e84411125bd88978a2fe52b
|
makemigrations.py
|
makemigrations.py
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.documents",
"pinax.documents.tests"
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.documents.tests.urls",
SECRET_KEY="notasecret",
)
def run(*args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
django.core.management.call_command(
"makemigrations",
"pinax_documents",
*args
)
if __name__ == "__main__":
run(*sys.argv[1:])
|
Add a script to help manage migrations
|
Add a script to help manage migrations
|
Python
|
mit
|
pinax/pinax-documents
|
Add a script to help manage migrations
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.documents",
"pinax.documents.tests"
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.documents.tests.urls",
SECRET_KEY="notasecret",
)
def run(*args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
django.core.management.call_command(
"makemigrations",
"pinax_documents",
*args
)
if __name__ == "__main__":
run(*sys.argv[1:])
|
<commit_before><commit_msg>Add a script to help manage migrations<commit_after>
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.documents",
"pinax.documents.tests"
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.documents.tests.urls",
SECRET_KEY="notasecret",
)
def run(*args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
django.core.management.call_command(
"makemigrations",
"pinax_documents",
*args
)
if __name__ == "__main__":
run(*sys.argv[1:])
|
Add a script to help manage migrations#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.documents",
"pinax.documents.tests"
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.documents.tests.urls",
SECRET_KEY="notasecret",
)
def run(*args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
django.core.management.call_command(
"makemigrations",
"pinax_documents",
*args
)
if __name__ == "__main__":
run(*sys.argv[1:])
|
<commit_before><commit_msg>Add a script to help manage migrations<commit_after>#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.documents",
"pinax.documents.tests"
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.documents.tests.urls",
SECRET_KEY="notasecret",
)
def run(*args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
django.core.management.call_command(
"makemigrations",
"pinax_documents",
*args
)
if __name__ == "__main__":
run(*sys.argv[1:])
|
|
ee6da6e9004dd0ef38e1ab62fd39825efd3f9a92
|
program_synthesis/naps/pipelines/print_naps.py
|
program_synthesis/naps/pipelines/print_naps.py
|
"""
Print several solutions from the NAPS dataset.
"""
from program_synthesis.naps.pipelines.read_naps import read_naps_dataset
from program_synthesis.naps.uast import uast_pprint
if __name__ == "__main__":
for name, ds in zip(("trainA", "trainB", "test"), read_naps_dataset()):
print("DATASET %s" % name)
with ds:
for d, _ in zip(ds, range(5)):
if "is_partial" in d and d["is_partial"]:
continue
print(' '.join(d["text"]))
uast_pprint.pprint(d["code_tree"])
print()
|
Add demo of how to print/view UAST
|
Add demo of how to print/view UAST
|
Python
|
apache-2.0
|
nearai/program_synthesis,nearai/program_synthesis
|
Add demo of how to print/view UAST
|
"""
Print several solutions from the NAPS dataset.
"""
from program_synthesis.naps.pipelines.read_naps import read_naps_dataset
from program_synthesis.naps.uast import uast_pprint
if __name__ == "__main__":
for name, ds in zip(("trainA", "trainB", "test"), read_naps_dataset()):
print("DATASET %s" % name)
with ds:
for d, _ in zip(ds, range(5)):
if "is_partial" in d and d["is_partial"]:
continue
print(' '.join(d["text"]))
uast_pprint.pprint(d["code_tree"])
print()
|
<commit_before><commit_msg>Add demo of how to print/view UAST<commit_after>
|
"""
Print several solutions from the NAPS dataset.
"""
from program_synthesis.naps.pipelines.read_naps import read_naps_dataset
from program_synthesis.naps.uast import uast_pprint
if __name__ == "__main__":
for name, ds in zip(("trainA", "trainB", "test"), read_naps_dataset()):
print("DATASET %s" % name)
with ds:
for d, _ in zip(ds, range(5)):
if "is_partial" in d and d["is_partial"]:
continue
print(' '.join(d["text"]))
uast_pprint.pprint(d["code_tree"])
print()
|
Add demo of how to print/view UAST"""
Print several solutions from the NAPS dataset.
"""
from program_synthesis.naps.pipelines.read_naps import read_naps_dataset
from program_synthesis.naps.uast import uast_pprint
if __name__ == "__main__":
for name, ds in zip(("trainA", "trainB", "test"), read_naps_dataset()):
print("DATASET %s" % name)
with ds:
for d, _ in zip(ds, range(5)):
if "is_partial" in d and d["is_partial"]:
continue
print(' '.join(d["text"]))
uast_pprint.pprint(d["code_tree"])
print()
|
<commit_before><commit_msg>Add demo of how to print/view UAST<commit_after>"""
Print several solutions from the NAPS dataset.
"""
from program_synthesis.naps.pipelines.read_naps import read_naps_dataset
from program_synthesis.naps.uast import uast_pprint
if __name__ == "__main__":
for name, ds in zip(("trainA", "trainB", "test"), read_naps_dataset()):
print("DATASET %s" % name)
with ds:
for d, _ in zip(ds, range(5)):
if "is_partial" in d and d["is_partial"]:
continue
print(' '.join(d["text"]))
uast_pprint.pprint(d["code_tree"])
print()
|
|
9a6ad07049dc4bfdb2677f7a503851a193ee454d
|
iteration_schemes.py
|
iteration_schemes.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 14:51:07 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from fuel.schemes import BatchSizeScheme, ShuffledScheme
class NPairLossScheme(BatchSizeScheme):
def __init__(self, labels, batch_size):
self._labels = np.array(labels)
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size % 2 == 0, ("batch_size must be an even number.")
assert batch_size <= self.num_classes * 2, (
"batch_size must not exceed twice the number of classes"
"(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __iter__(self):
return self
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return indexes
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
def get_request_iterator(self):
return self
if __name__ == '__main__':
batch_size = 20
# s = ShuffledScheme(10, 3)
labels = sum([[i]*10 for i in range(10)], [])
s = NPairLossScheme(labels, batch_size)
i = 0
for indexes in s.get_request_iterator():
a_indexes = indexes[:batch_size / 2]
p_indexes = indexes[batch_size / 2:]
print a_indexes
print p_indexes
print
i += 1
if i > 5:
break
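The request yielded by the scheme is a flat list whose first half holds anchor indexes and whose second half the matching positives, so a consumer zips the two halves back into (anchor, positive) pairs. A small sketch of that pairing, assuming the scheme defined above; the variable names are illustrative:
labels = sum([[i] * 10 for i in range(10)], [])
scheme = NPairLossScheme(labels, batch_size=8)
indexes = scheme.next()
half = len(indexes) // 2
for anchor, positive in zip(indexes[:half], indexes[half:]):
    assert labels[anchor] == labels[positive]  # each pair comes from one class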
|
Implement an iteration scheme for N-pair-mc-loss
|
Implement an iteration scheme for N-pair-mc-loss
|
Python
|
mit
|
ronekko/deep_metric_learning
|
Implement an iteration scheme for N-pair-mc-loss
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 14:51:07 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from fuel.schemes import BatchSizeScheme, ShuffledScheme
class NPairLossScheme(BatchSizeScheme):
def __init__(self, labels, batch_size):
self._labels = np.array(labels)
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size % 2 == 0, ("batch_size must be an even number.")
assert batch_size <= self.num_classes * 2, (
"batch_size must not exceed twice the number of classes"
"(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __iter__(self):
return self
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return indexes
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
def get_request_iterator(self):
return self
if __name__ == '__main__':
batch_size = 20
# s = ShuffledScheme(10, 3)
labels = sum([[i]*10 for i in range(10)], [])
s = NPairLossScheme(labels, batch_size)
i = 0
for indexes in s.get_request_iterator():
a_indexes = indexes[:batch_size / 2]
p_indexes = indexes[batch_size / 2:]
print a_indexes
print p_indexes
print
i += 1
if i > 5:
break
|
<commit_before><commit_msg>Implement an iteration scheme for N-pair-mc-loss<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 14:51:07 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from fuel.schemes import BatchSizeScheme, ShuffledScheme
class NPairLossScheme(BatchSizeScheme):
def __init__(self, labels, batch_size):
self._labels = np.array(labels)
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size % 2 == 0, ("batch_size must be an even number.")
assert batch_size <= self.num_classes * 2, (
"batch_size must not exceed twice the number of classes"
"(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __iter__(self):
return self
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return indexes
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
def get_request_iterator(self):
return self
if __name__ == '__main__':
batch_size = 20
# s = ShuffledScheme(10, 3)
labels = sum([[i]*10 for i in range(10)], [])
s = NPairLossScheme(labels, batch_size)
i = 0
for indexes in s.get_request_iterator():
a_indexes = indexes[:batch_size / 2]
p_indexes = indexes[batch_size / 2:]
print a_indexes
print p_indexes
print
i += 1
if i > 5:
break
|
Implement an iteration scheme for N-pair-mc-loss# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 14:51:07 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from fuel.schemes import BatchSizeScheme, ShuffledScheme
class NPairLossScheme(BatchSizeScheme):
def __init__(self, labels, batch_size):
self._labels = np.array(labels)
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size % 2 == 0, ("batch_size must be an even number.")
assert batch_size <= self.num_classes * 2, (
"batch_size must not exceed twice the number of classes"
"(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __iter__(self):
return self
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return indexes
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
def get_request_iterator(self):
return self
if __name__ == '__main__':
batch_size = 20
# s = ShuffledScheme(10, 3)
labels = sum([[i]*10 for i in range(10)], [])
s = NPairLossScheme(labels, batch_size)
i = 0
for indexes in s.get_request_iterator():
a_indexes = indexes[:batch_size / 2]
p_indexes = indexes[batch_size / 2:]
print a_indexes
print p_indexes
print
i += 1
if i > 5:
break
|
<commit_before><commit_msg>Implement an iteration scheme for N-pair-mc-loss<commit_after># -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 14:51:07 2017
@author: sakurai
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from fuel.schemes import BatchSizeScheme, ShuffledScheme
class NPairLossScheme(BatchSizeScheme):
def __init__(self, labels, batch_size):
self._labels = np.array(labels)
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size % 2 == 0, ("batch_size must be an even number.")
assert batch_size <= self.num_classes * 2, (
"batch_size must not exceed twice the number of classes"
"(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __iter__(self):
return self
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return indexes
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
def get_request_iterator(self):
return self
if __name__ == '__main__':
batch_size = 20
# s = ShuffledScheme(10, 3)
labels = sum([[i]*10 for i in range(10)], [])
s = NPairLossScheme(labels, batch_size)
i = 0
for indexes in s.get_request_iterator():
a_indexes = indexes[:batch_size / 2]
p_indexes = indexes[batch_size / 2:]
print a_indexes
print p_indexes
print
i += 1
if i > 5:
break
|
|
06166ca6e48831510525bbffe14a9c84b3bfd098
|
randomDistributionTest.py
|
randomDistributionTest.py
|
import random
import sys
testSizes = [ 5, 13, 23, 37, 47, 61, 111 ]
usernameList = map(lambda u: u[0:-1], sys.stdin.readlines())
userrand = random.Random()
for numElements in testSizes:
summary = dict()
for i in range(numElements):
summary[i] = 0
for username in usernameList:
userrand.seed(username)
selected = userrand.sample(range(numElements),3)
for slot in selected:
summary[slot] += 1
counts = summary.values()
counts.sort()
lowest = counts[0]
byslot = summary.items()
byslot.sort(cmp=lambda (k1,v1), (k2,v2): cmp(k1,k2) )
peaks = map (lambda (s, c): (s, (c - lowest + 0.0) / len(usernameList)), byslot)
print numElements, peaks
|
Check the spread of the sampled nodes given keys as input
|
Check the spread of the sampled nodes given keys as input
|
Python
|
mit
|
danianr/NINJa,danianr/NINJa,danianr/NINJa
|
Check the spread of the sampled nodes given keys as input
|
import random
import sys
testSizes = [ 5, 13, 23, 37, 47, 61, 111 ]
usernameList = map(lambda u: u[0:-1], sys.stdin.readlines())
userrand = random.Random()
for numElements in testSizes:
summary = dict()
for i in range(numElements):
summary[i] = 0
for username in usernameList:
userrand.seed(username)
selected = userrand.sample(range(numElements),3)
for slot in selected:
summary[slot] += 1
counts = summary.values()
counts.sort()
lowest = counts[0]
byslot = summary.items()
byslot.sort(cmp=lambda (k1,v1), (k2,v2): cmp(k1,k2) )
peaks = map (lambda (s, c): (s, (c - lowest + 0.0) / len(usernameList)), byslot)
print numElements, peaks
|
<commit_before><commit_msg>Check the spread of the sampled nodes given keys as input<commit_after>
|
import random
import sys
testSizes = [ 5, 13, 23, 37, 47, 61, 111 ]
usernameList = map(lambda u: u[0:-1], sys.stdin.readlines())
userrand = random.Random()
for numElements in testSizes:
summary = dict()
for i in range(numElements):
summary[i] = 0
for username in usernameList:
userrand.seed(username)
selected = userrand.sample(range(numElements),3)
for slot in selected:
summary[slot] += 1
counts = summary.values()
counts.sort()
lowest = counts[0]
byslot = summary.items()
byslot.sort(cmp=lambda (k1,v1), (k2,v2): cmp(k1,k2) )
peaks = map (lambda (s, c): (s, (c - lowest + 0.0) / len(usernameList)), byslot)
print numElements, peaks
|
Check the spread of the sampled nodes given keys as inputimport random
import sys
testSizes = [ 5, 13, 23, 37, 47, 61, 111 ]
usernameList = map(lambda u: u[0:-1], sys.stdin.readlines())
userrand = random.Random()
for numElements in testSizes:
summary = dict()
for i in range(numElements):
summary[i] = 0
for username in usernameList:
userrand.seed(username)
selected = userrand.sample(range(numElements),3)
for slot in selected:
summary[slot] += 1
counts = summary.values()
counts.sort()
lowest = counts[0]
byslot = summary.items()
byslot.sort(cmp=lambda (k1,v1), (k2,v2): cmp(k1,k2) )
peaks = map (lambda (s, c): (s, (c - lowest + 0.0) / len(usernameList)), byslot)
print numElements, peaks
|
<commit_before><commit_msg>Check the spread of the sampled nodes given keys as input<commit_after>import random
import sys
testSizes = [ 5, 13, 23, 37, 47, 61, 111 ]
usernameList = map(lambda u: u[0:-1], sys.stdin.readlines())
userrand = random.Random()
for numElements in testSizes:
summary = dict()
for i in range(numElements):
summary[i] = 0
for username in usernameList:
userrand.seed(username)
selected = userrand.sample(range(numElements),3)
for slot in selected:
summary[slot] += 1
counts = summary.values()
counts.sort()
lowest = counts[0]
byslot = summary.items()
byslot.sort(cmp=lambda (k1,v1), (k2,v2): cmp(k1,k2) )
peaks = map (lambda (s, c): (s, (c - lowest + 0.0) / len(usernameList)), byslot)
print numElements, peaks
|
|
93faafef2686802743dd6b58ca7bbbe53672cf9f
|
py/shortest-unsorted-continuous-subarray.py
|
py/shortest-unsorted-continuous-subarray.py
|
class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
stackl, stackr = [], []
stopl, stopr = False, False
for i, n in enumerate(nums):
if not stackl or stackl[-1] <= n:
if not stopl:
stackl.append(n)
else:
while stackl and stackl[-1] > n:
stackl.pop()
stopl = True
for i in xrange(len(nums) - 1, -1, -1):
n = nums[i]
if not stackr or stackr[-1] >= n:
if not stopr:
stackr.append(n)
else:
while stackr and stackr[-1] < n:
stackr.pop()
stopr = True
return max(len(nums) - len(stackl) - len(stackr), 0)
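A quick sanity check of the solution above against the example from the problem statement, where sorting the middle run [6, 4, 8, 10, 9] in place is enough to sort the whole array:
assert Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15]) == 5
assert Solution().findUnsortedSubarray([1, 2, 3, 4]) == 0  # already sorted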
|
Add py solution for 581. Shortest Unsorted Continuous Subarray
|
Add py solution for 581. Shortest Unsorted Continuous Subarray
581. Shortest Unsorted Continuous Subarray: https://leetcode.com/problems/shortest-unsorted-continuous-subarray/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 581. Shortest Unsorted Continuous Subarray
581. Shortest Unsorted Continuous Subarray: https://leetcode.com/problems/shortest-unsorted-continuous-subarray/
|
class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
stackl, stackr = [], []
stopl, stopr = False, False
for i, n in enumerate(nums):
if not stackl or stackl[-1] <= n:
if not stopl:
stackl.append(n)
else:
while stackl and stackl[-1] > n:
stackl.pop()
stopl = True
for i in xrange(len(nums) - 1, -1, -1):
n = nums[i]
if not stackr or stackr[-1] >= n:
if not stopr:
stackr.append(n)
else:
while stackr and stackr[-1] < n:
stackr.pop()
stopr = True
return max(len(nums) - len(stackl) - len(stackr), 0)
|
<commit_before><commit_msg>Add py solution for 581. Shortest Unsorted Continuous Subarray
581. Shortest Unsorted Continuous Subarray: https://leetcode.com/problems/shortest-unsorted-continuous-subarray/<commit_after>
|
class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
stackl, stackr = [], []
stopl, stopr = False, False
for i, n in enumerate(nums):
if not stackl or stackl[-1] <= n:
if not stopl:
stackl.append(n)
else:
while stackl and stackl[-1] > n:
stackl.pop()
stopl = True
for i in xrange(len(nums) - 1, -1, -1):
n = nums[i]
if not stackr or stackr[-1] >= n:
if not stopr:
stackr.append(n)
else:
while stackr and stackr[-1] < n:
stackr.pop()
stopr = True
return max(len(nums) - len(stackl) - len(stackr), 0)
|
Add py solution for 581. Shortest Unsorted Continuous Subarray
581. Shortest Unsorted Continuous Subarray: https://leetcode.com/problems/shortest-unsorted-continuous-subarray/class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
stackl, stackr = [], []
stopl, stopr = False, False
for i, n in enumerate(nums):
if not stackl or stackl[-1] <= n:
if not stopl:
stackl.append(n)
else:
while stackl and stackl[-1] > n:
stackl.pop()
stopl = True
for i in xrange(len(nums) - 1, -1, -1):
n = nums[i]
if not stackr or stackr[-1] >= n:
if not stopr:
stackr.append(n)
else:
while stackr and stackr[-1] < n:
stackr.pop()
stopr = True
return max(len(nums) - len(stackl) - len(stackr), 0)
|
<commit_before><commit_msg>Add py solution for 581. Shortest Unsorted Continuous Subarray
581. Shortest Unsorted Continuous Subarray: https://leetcode.com/problems/shortest-unsorted-continuous-subarray/<commit_after>class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
stackl, stackr = [], []
stopl, stopr = False, False
for i, n in enumerate(nums):
if not stackl or stackl[-1] <= n:
if not stopl:
stackl.append(n)
else:
while stackl and stackl[-1] > n:
stackl.pop()
stopl = True
for i in xrange(len(nums) - 1, -1, -1):
n = nums[i]
if not stackr or stackr[-1] >= n:
if not stopr:
stackr.append(n)
else:
while stackr and stackr[-1] < n:
stackr.pop()
stopr = True
return max(len(nums) - len(stackl) - len(stackr), 0)
|
|
671cb655ce975931bb15bf17f1feffe5b89c9a75
|
bd_rate_jm.py
|
bd_rate_jm.py
|
#!/usr/bin/env python3
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
import sys
a = loadtxt(sys.argv[1]);
b = loadtxt(sys.argv[2]);
rates = [0.005,0.02,0.06,0.2];
ra = a[:,2]*8./a[:,1]
rb = b[:,2]*8./b[:,1];
interp_type = 'cubic';
met_name = [' PSNR', ' PSNRHVS', ' SSIM', 'FASTSSIM'];
print(" LOW (%%)\tMEDIUM (%%)\tHIGH (%%)");
bdr = zeros((4,4))
for m in range(0,4):
ya = a[:,3+m];
yb = b[:,3+m];
for k in range(0,len(rates)-1):
try:
p0 = interp1d(ra, ya, interp_type)(rates[k]);
p1 = interp1d(ra, ya, interp_type)(rates[k+1]);
except ValueError:
bdr[m,k] = NaN;
continue
a_rate = interp1d(ya, log(ra), interp_type)(arange(p0,p1,0.01));
b_rate = interp1d(yb, log(rb), interp_type)(arange(p0,p1,0.01));
if not len(a_rate) or not len(b_rate):
bdr[m,k] = NaN;
else:
bdr[m,k]=100 * (exp(mean(b_rate-a_rate))-1);
print("%s\t%4f%%\t%4f%%\t%4f%%" % (met_name[m], bdr[m, 0], bdr[m, 1], bdr[m, 2]));
|
Add Python implementation of bd_rate.
|
Add Python implementation of bd_rate.
|
Python
|
mit
|
tdaede/awcy,tdaede/awcy,tdaede/awcy,tdaede/awcy,tdaede/awcy,tdaede/awcy
|
Add Python implementation of bd_rate.
|
#!/usr/bin/env python3
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
import sys
a = loadtxt(sys.argv[1]);
b = loadtxt(sys.argv[2]);
rates = [0.005,0.02,0.06,0.2];
ra = a[:,2]*8./a[:,1]
rb = b[:,2]*8./b[:,1];
interp_type = 'cubic';
met_name = [' PSNR', ' PSNRHVS', ' SSIM', 'FASTSSIM'];
print(" LOW (%%)\tMEDIUM (%%)\tHIGH (%%)");
bdr = zeros((4,4))
for m in range(0,4):
ya = a[:,3+m];
yb = b[:,3+m];
for k in range(0,len(rates)-1):
try:
p0 = interp1d(ra, ya, interp_type)(rates[k]);
p1 = interp1d(ra, ya, interp_type)(rates[k+1]);
except ValueError:
bdr[m,k] = NaN;
continue
a_rate = interp1d(ya, log(ra), interp_type)(arange(p0,p1,0.01));
b_rate = interp1d(yb, log(rb), interp_type)(arange(p0,p1,0.01));
if not len(a_rate) or not len(b_rate):
bdr[m,k] = NaN;
else:
bdr[m,k]=100 * (exp(mean(b_rate-a_rate))-1);
print("%s\t%4f%%\t%4f%%\t%4f%%" % (met_name[m], bdr[m, 0], bdr[m, 1], bdr[m, 2]));
|
<commit_before><commit_msg>Add Python implementation of bd_rate.<commit_after>
|
#!/usr/bin/env python3
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
import sys
a = loadtxt(sys.argv[1]);
b = loadtxt(sys.argv[2]);
rates = [0.005,0.02,0.06,0.2];
ra = a[:,2]*8./a[:,1]
rb = b[:,2]*8./b[:,1];
interp_type = 'cubic';
met_name = [' PSNR', ' PSNRHVS', ' SSIM', 'FASTSSIM'];
print(" LOW (%%)\tMEDIUM (%%)\tHIGH (%%)");
bdr = zeros((4,4))
for m in range(0,4):
ya = a[:,3+m];
yb = b[:,3+m];
for k in range(0,len(rates)-1):
try:
p0 = interp1d(ra, ya, interp_type)(rates[k]);
p1 = interp1d(ra, ya, interp_type)(rates[k+1]);
except ValueError:
bdr[m,k] = NaN;
continue
a_rate = interp1d(ya, log(ra), interp_type)(arange(p0,p1,0.01));
b_rate = interp1d(yb, log(rb), interp_type)(arange(p0,p1,0.01));
if not len(a_rate) or not len(b_rate):
bdr[m,k] = NaN;
else:
bdr[m,k]=100 * (exp(mean(b_rate-a_rate))-1);
print("%s\t%4f%%\t%4f%%\t%4f%%" % (met_name[m], bdr[m, 0], bdr[m, 1], bdr[m, 2]));
|
Add Python implementation of bd_rate.#!/usr/bin/env python3
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
import sys
a = loadtxt(sys.argv[1]);
b = loadtxt(sys.argv[2]);
rates = [0.005,0.02,0.06,0.2];
ra = a[:,2]*8./a[:,1]
rb = b[:,2]*8./b[:,1];
interp_type = 'cubic';
met_name = [' PSNR', ' PSNRHVS', ' SSIM', 'FASTSSIM'];
print(" LOW (%%)\tMEDIUM (%%)\tHIGH (%%)");
bdr = zeros((4,4))
for m in range(0,4):
ya = a[:,3+m];
yb = b[:,3+m];
for k in range(0,len(rates)-1):
try:
p0 = interp1d(ra, ya, interp_type)(rates[k]);
p1 = interp1d(ra, ya, interp_type)(rates[k+1]);
except ValueError:
bdr[m,k] = NaN;
continue
a_rate = interp1d(ya, log(ra), interp_type)(arange(p0,p1,0.01));
b_rate = interp1d(yb, log(rb), interp_type)(arange(p0,p1,0.01));
if not len(a_rate) or not len(b_rate):
bdr[m,k] = NaN;
else:
bdr[m,k]=100 * (exp(mean(b_rate-a_rate))-1);
print("%s\t%4f%%\t%4f%%\t%4f%%" % (met_name[m], bdr[m, 0], bdr[m, 1], bdr[m, 2]));
|
<commit_before><commit_msg>Add Python implementation of bd_rate.<commit_after>#!/usr/bin/env python3
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
import sys
a = loadtxt(sys.argv[1]);
b = loadtxt(sys.argv[2]);
rates = [0.005,0.02,0.06,0.2];
ra = a[:,2]*8./a[:,1]
rb = b[:,2]*8./b[:,1];
interp_type = 'cubic';
met_name = [' PSNR', ' PSNRHVS', ' SSIM', 'FASTSSIM'];
print(" LOW (%%)\tMEDIUM (%%)\tHIGH (%%)");
bdr = zeros((4,4))
for m in range(0,4):
ya = a[:,3+m];
yb = b[:,3+m];
for k in range(0,len(rates)-1):
try:
p0 = interp1d(ra, ya, interp_type)(rates[k]);
p1 = interp1d(ra, ya, interp_type)(rates[k+1]);
except ValueError:
bdr[m,k] = NaN;
continue
a_rate = interp1d(ya, log(ra), interp_type)(arange(p0,p1,0.01));
b_rate = interp1d(yb, log(rb), interp_type)(arange(p0,p1,0.01));
if not len(a_rate) or not len(b_rate):
bdr[m,k] = NaN;
else:
bdr[m,k]=100 * (exp(mean(b_rate-a_rate))-1);
print("%s\t%4f%%\t%4f%%\t%4f%%" % (met_name[m], bdr[m, 0], bdr[m, 1], bdr[m, 2]));
|
|
4d5d931c03d742588454e53ee7a579fce400edca
|
tests/test_units/test_route_escapes.py
|
tests/test_units/test_route_escapes.py
|
import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(r.routelist, ['/prefix/escaped:escaped/foo=', {'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/', {'name': 'brackets', 'type': ':'}, '/', {'name': 'colon', 'type': ':'}])
|
Add tests for backslash escapes in route paths
|
Add tests for backslash escapes in route paths
|
Python
|
mit
|
webknjaz/routes,bbangert/routes
|
Add tests for backslash escapes in route paths
|
import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(r.routelist, ['/prefix/escaped:escaped/foo=', {'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/', {'name': 'brackets', 'type': ':'}, '/', {'name': 'colon', 'type': ':'}])
|
<commit_before><commit_msg>Add tests for backslash escapes in route paths<commit_after>
|
import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(r.routelist, ['/prefix/escaped:escaped/foo=', {'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/', {'name': 'brackets', 'type': ':'}, '/', {'name': 'colon', 'type': ':'}])
|
Add tests for backslash escapes in route pathsimport unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(r.routelist, ['/prefix/escaped:escaped/foo=', {'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/', {'name': 'brackets', 'type': ':'}, '/', {'name': 'colon', 'type': ':'}])
|
<commit_before><commit_msg>Add tests for backslash escapes in route paths<commit_after>import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(r.routelist, ['/prefix/escaped:escaped/foo=', {'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/', {'name': 'brackets', 'type': ':'}, '/', {'name': 'colon', 'type': ':'}])
|
|
f1f8a455b71980ec3df83db3ff1d9175f0c3d900
|
openpathsampling/visit_all_states.py
|
openpathsampling/visit_all_states.py
|
import openpathsampling as paths
def default_state_progress_report(n_steps, found_states, all_states,
timestep=None):
report_str = "Ran {n_steps} steps "
if timestep is not None:
report_str += "[{traj_time}]".format(str(n_steps * timestep))
report_str += (". Found states [{found_states}]. "
"Looking for [{missing_states}]")
found_states_str = ",".join([s.name for s in found_states])
# list comprehension instead of sets (to preserve order)
missing_states = [s for s in all_states if s not in found_states]
missing_states_str = ",".join([s.name for s in missing_states])
return report_str.format(n_steps=n_steps,
found_states=found_states_str,
missing_states=missing_states_str)
class VisitAllStatesEnsemble(paths.WrappedEnsemble):
def __init__(self, states, progress='default', timestep=None):
self.states = states
self.all_states = paths.join_volumes(states)
all_states_ens = paths.join_ensemble([paths.AllOutXEnsemble(s)
for s in states])
ensemble = paths.SequentialEnsemble([
all_states_ens,
paths.AllInXEnsemble(self.all_states) & paths.LengthEnsemble(1)
])
super(VisitAllStatesEnsemble, self).__init__(ensemble)
self.timestep = timestep
self.report_frequency = 10
self.progress = self._progress_indicator(progress)
self.found_states = set()  # states already visited, updated while appending
@staticmethod
def _progress_indicator(progress):
indicator_dict = {
None: None,
'default': default_state_progress_report,
}
try:
indicator = indicator_dict[progress]
except KeyError:
indicator = progress
return indicator
def _state_for_frame(self, snapshot):
in_states = [state for state in self.states if state(snapshot)]
if len(in_states) > 1:
raise RuntimeError("Frame is in more than one state.")
elif len(in_states) == 1:
state = set(in_states)
else:
state = set([])
return state
def _update_for_progress(self, trajectory, frame_number):
len_traj = len(trajectory)
if (len_traj - 1) % self.report_frequency == 0:
frame = trajectory[frame_number]
self.found_states.update(self._state_for_frame(trajectory[-1]))
report_string = self.progress(n_steps=len_traj - 1,
timestep=self.timestep,
found_states=self.found_states,
all_states=self.states)
paths.tools.refresh_output(report_string)
def can_append(self, trajectory, trusted):
return_value = super(VisitAllStatesEnsemble, self).can_append(
trajectory=trajectory,
trusted=trusted
)
if self.progress:
self._update_for_progress(trajectory, frame_number=-1)
return return_value
strict_can_append = can_append
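A sketch of the intended use, with hypothetical stateA/stateB volumes, an engine, and an initial snapshot, assuming the usual OpenPathSampling pattern of passing can_append as a stopping condition for trajectory generation:
# stateA, stateB, engine, and snapshot are assumed to exist already.
visit_all = VisitAllStatesEnsemble([stateA, stateB], timestep=0.002)
trajectory = engine.generate(snapshot, [visit_all.can_append])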
|
Add start to visit all states ensemble
|
Add start to visit all states ensemble
|
Python
|
mit
|
dwhswenson/openpathsampling,choderalab/openpathsampling,dwhswenson/openpathsampling,choderalab/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling,choderalab/openpathsampling,openpathsampling/openpathsampling,dwhswenson/openpathsampling,openpathsampling/openpathsampling,openpathsampling/openpathsampling
|
Add start to visit all states ensemble
|
import openpathsampling as paths
def default_state_progress_report(n_steps, found_states, all_states,
timestep=None):
report_str = "Ran {n_steps} steps "
if timestep is not None:
report_str += "[{traj_time}]".format(str(n_steps * timestep))
report_str += (". Found states [{found_states}]. "
"Looking for [{missing_states}]")
found_states_str = ",".join([s.name for s in found_states])
# list comprehension instead of sets (to preserve order)
missing_states = [s for s in all_states if s not in found_states]
missing_states_str = ",".join([s.name for s in missing_states])
return report_str.format(n_steps=n_steps,
found_states=found_states_str,
missing_states=missing_states_str)
class VisitAllStatesEnsemble(paths.WrappedEnsemble):
def __init__(self, states, progress='default', timestep=None):
self.states = states
self.all_states = paths.join_volumes(states)
all_states_ens = paths.join_ensemble([paths.AllOutXEnsemble(s)
for s in states])
ensemble = paths.SequentialEnsemble([
all_states_ens,
paths.AllInXEnsemble(self.all_states) & paths.LengthEnsemble(1)
])
super(VisitAllStatesEnsemble, self).__init__(ensemble)
self.timestep = timestep
self.report_frequency = 10
self.progress = self._progress_indicator(progress)
self.found_states = set()  # states already visited, updated while appending
@staticmethod
def _progress_indicator(progress):
indicator_dict = {
None: None,
'default': default_state_progress_report,
}
try:
indicator = indicator_dict[progress]
except KeyError:
indicator = progress
return indicator
def _state_for_frame(self, snapshot):
in_states = [state for state in self.states if state(snapshot)]
if len(in_states) > 1:
raise RuntimeError("Frame is in more than one state.")
elif len(in_states) == 1:
state = set(in_states)
else:
state = set([])
return state
def _update_for_progress(self, trajectory, frame_number):
len_traj = len(trajectory)
if (len_traj - 1) % self.report_frequency == 0:
frame = trajectory[frame_number]
self.found_states.update(self._state_for_frame(trajectory[-1]))
report_string = self.progress(n_steps=len_traj - 1,
timestep=self.timestep,
found_states=self.found_states,
all_states=self.states)
paths.tools.refresh_output(report_string)
def can_append(self, trajectory, trusted):
return_value = super(VisitAllStatesEnsemble, self).can_append(
trajectory=trajectory,
trusted=trusted
)
if self.progress:
self._update_for_progress(trajectory, frame_number=-1)
return return_value
strict_can_append = can_append
|
<commit_before><commit_msg>Add start to visit all states ensemble<commit_after>
|
import openpathsampling as paths
def default_state_progress_report(n_steps, found_states, all_states,
timestep=None):
report_str = "Ran {n_steps} steps "
if timestep is not None:
report_str += "[{traj_time}]".format(str(n_steps * timestep))
report_str += (". Found states [{found_states}]. "
"Looking for [{missing_states}]")
found_states_str = ",".join([s.name for s in found_states])
# list comprehension instead of sets (to preserve order)
missing_states = [s for s in all_states if s not in found_states]
missing_states_str = ",".join([s.name for s in missing_states])
return report_str.format(n_steps=n_steps,
found_states=found_states_str,
missing_states=missing_states_str)
class VisitAllStatesEnsemble(paths.WrappedEnsemble):
def __init__(self, states, progress='default', timestep=None):
self.states = states
self.all_states = paths.join_volumes(states)
all_states_ens = paths.join_ensemble([paths.AllOutXEnsemble(s)
for s in states])
ensemble = paths.SequentialEnsemble([
all_states_ens,
paths.AllInXEnsemble(self.all_states) & paths.LengthEnsemble(1)
])
super(VisitAllStatesEnsemble, self).__init__(ensemble)
self.timestep = timestep
self.report_frequency = 10
self.progress = self._progress_indicator(progress)
self.found_states = set()  # states already visited, updated while appending
@staticmethod
def _progress_indicator(progress):
indicator_dict = {
None: None,
'default': default_state_progress_report,
}
try:
indicator = indicator_dict[progress]
except KeyError:
indicator = progress
return indicator
def _state_for_frame(self, snapshot):
in_states = [state for state in self.states if state(snapshot)]
if len(in_states) > 1:
raise RuntimeError("Frame is in more than one state.")
elif len(in_states) == 1:
state = set(in_states)
else:
state = set([])
return state
def _update_for_progress(self, trajectory, frame_number):
len_traj = len(trajectory)
        if (len_traj - 1) % self.report_frequency == 0:
            frame = trajectory[frame_number]
            self.found_states.update(self._state_for_frame(frame))
report_string = self.progress(n_steps=len_traj - 1,
timestep=self.timestep,
found_states=self.found_states,
all_states=self.states)
paths.tools.refresh_output(report_string)
def can_append(self, trajectory, trusted):
return_value = super(VisitAllStatesEnsemble, self).can_append(
trajectory=trajectory,
trusted=trusted
)
if self.progress:
self._update_for_progress(trajectory, frame_number=-1)
return return_value
strict_can_append = can_append
|
Add start to visit all states ensembleimport openpathsampling as paths
def default_state_progress_report(n_steps, found_states, all_states,
timestep=None):
report_str = "Ran {n_steps} steps "
if timestep is not None:
        report_str += "[{traj_time}]".format(traj_time=n_steps * timestep)
report_str += (". Found states [{found_states}]. "
"Looking for [{missing_states}]")
found_states_str = ",".join([s.name for s in found_states])
    # list comprehension instead of sets (to preserve order)
missing_states = [s for s in all_states if s not in found_states]
missing_states_str = ",".join([s.name for s in missing_states])
return report_str.format(n_steps=n_steps,
found_states=found_states_str,
missing_states=missing_states_str)
class VisitAllStatesEnsemble(paths.WrappedEnsemble):
def __init__(self, states, progress='default', timestep=None):
self.states = states
self.all_states = paths.join_volumes(states)
all_states_ens = paths.join_ensemble([paths.AllOutXEnsemble(s)
for s in states])
ensemble = paths.SequentialEnsemble([
all_states_ens,
paths.AllInXEnsemble(self.all_states) & paths.LengthEnsemble(1)
])
super(VisitAllStatesEnsemble, self).__init__(ensemble)
self.timestep = timestep
        self.report_frequency = 10
        self.found_states = set()
self.progress = self._progress_indicator(progress)
@staticmethod
def _progress_indicator(progress):
indicator_dict = {
None: None,
'default': default_state_progress_report,
}
try:
indicator = indicator_dict[progress]
except KeyError:
indicator = progress
return indicator
def _state_for_frame(self, snapshot):
in_states = [state for state in self.states if state(snapshot)]
if len(in_states) > 1:
raise RuntimeError("Frame is in more than one state.")
elif len(in_states) == 1:
state = set(in_states)
else:
state = set([])
return state
def _update_for_progress(self, trajectory, frame_number):
len_traj = len(trajectory)
        if (len_traj - 1) % self.report_frequency == 0:
            frame = trajectory[frame_number]
            self.found_states.update(self._state_for_frame(frame))
report_string = self.progress(n_steps=len_traj - 1,
timestep=self.timestep,
found_states=self.found_states,
all_states=self.states)
paths.tools.refresh_output(report_string)
def can_append(self, trajectory, trusted):
return_value = super(VisitAllStatesEnsemble, self).can_append(
trajectory=trajectory,
trusted=trusted
)
if self.progress:
self._update_for_progress(trajectory, frame_number=-1)
return return_value
strict_can_append = can_append
|
<commit_before><commit_msg>Add start to visit all states ensemble<commit_after>import openpathsampling as paths
def default_state_progress_report(n_steps, found_states, all_states,
timestep=None):
report_str = "Ran {n_steps} steps "
if timestep is not None:
        report_str += "[{traj_time}]".format(traj_time=n_steps * timestep)
report_str += (". Found states [{found_states}]. "
"Looking for [{missing_states}]")
found_states_str = ",".join([s.name for s in found_states])
    # list comprehension instead of sets (to preserve order)
missing_states = [s for s in all_states if s not in found_states]
missing_states_str = ",".join([s.name for s in missing_states])
return report_str.format(n_steps=n_steps,
found_states=found_states_str,
missing_states=missing_states_str)
class VisitAllStatesEnsemble(paths.WrappedEnsemble):
def __init__(self, states, progress='default', timestep=None):
self.states = states
self.all_states = paths.join_volumes(states)
all_states_ens = paths.join_ensemble([paths.AllOutXEnsemble(s)
for s in states])
ensemble = paths.SequentialEnsemble([
all_states_ens,
paths.AllInXEnsemble(self.all_states) & paths.LengthEnsemble(1)
])
super(VisitAllStatesEnsemble, self).__init__(ensemble)
self.timestep = timestep
        self.report_frequency = 10
        self.found_states = set()
self.progress = self._progress_indicator(progress)
@staticmethod
def _progress_indicator(progress):
indicator_dict = {
None: None,
'default': default_state_progress_report,
}
try:
indicator = indicator_dict[progress]
except KeyError:
indicator = progress
return indicator
def _state_for_frame(self, snapshot):
in_states = [state for state in self.states if state(snapshot)]
if len(in_states) > 1:
raise RuntimeError("Frame is in more than one state.")
elif len(in_states) == 1:
state = set(in_states)
else:
state = set([])
return state
def _update_for_progress(self, trajectory, frame_number):
len_traj = len(trajectory)
        if (len_traj - 1) % self.report_frequency == 0:
            frame = trajectory[frame_number]
            self.found_states.update(self._state_for_frame(frame))
report_string = self.progress(n_steps=len_traj - 1,
timestep=self.timestep,
found_states=self.found_states,
all_states=self.states)
paths.tools.refresh_output(report_string)
def can_append(self, trajectory, trusted):
return_value = super(VisitAllStatesEnsemble, self).can_append(
trajectory=trajectory,
trusted=trusted
)
if self.progress:
self._update_for_progress(trajectory, frame_number=-1)
return return_value
strict_can_append = can_append
|
|
14926079ea76bbeba718d4ce59e944fa40a7581f
|
prepopulate-boundaries.py
|
prepopulate-boundaries.py
|
#!/usr/bin/env python
from osgeo import ogr
from osgeo import osr
from glob import glob
import os
import os.path
import re
import shutil
shutil.rmtree('boundaries', True)
os.mkdir('boundaries')
t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(26915)
def get_zips():
for fn in glob('counties/*.txt'):
for line in open(fn).readlines():
yield line.strip()
name_regex = re.compile(r't(\d+)r(\d+)([we])(\d).*')
features = {}
ds = ogr.Open('/vsizip/trs.zip/tr.shp')
layer = ds.GetLayerByIndex(0)
t_driver = ogr.GetDriverByName('GeoJSON')
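# Group the shapefile features by (township, range, E/W direction) so each TRS id below can look up its matching polygons.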
for feature in layer:
twp = feature.GetField('TOWN')
rang = feature.GetField('RANG')
ew = 'w' if feature.GetField('DIR') == 2 else 'e'
featid = (twp, rang, ew)
features[featid] = features.get(featid, []) + [feature]
zips = set(get_zips())
for i in zips:
trsid = os.path.splitext(i)[0]
twp, rng, ew, meridian = name_regex.match(trsid).groups()
featid = (int(twp), int(rng), ew)
t_ds = t_driver.CreateDataSource("boundaries/{0}.geojson".format(trsid))
t_layer = t_ds.CreateLayer('tiles', t_srs, ogr.wkbPolygon)
field_trsid = ogr.FieldDefn('trsid', ogr.OFTString)
field_trsid.SetWidth(16)
t_layer.CreateField(field_trsid)
for feature in features[featid]:
t_feature = ogr.Feature(t_layer.GetLayerDefn())
t_feature.SetGeometry(feature.GetGeometryRef())
t_feature.SetField('trsid', trsid)
t_layer.CreateFeature(t_feature)
t_ds.Destroy()
|
Create a python script to create initial boundaries faster
|
Create a python script to create initial boundaries faster
It also makes the boundaries in the EPSG:26915 SRS
|
Python
|
mit
|
simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic
|
Create a python script to create initial boundaries faster
It also makes the boundaries in the EPSG:26915 SRS
|
#!/usr/bin/env python
from osgeo import ogr
from osgeo import osr
from glob import glob
import os
import os.path
import re
import shutil
shutil.rmtree('boundaries', True)
os.mkdir('boundaries')
t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(26915)
def get_zips():
for fn in glob('counties/*.txt'):
for line in open(fn).readlines():
yield line.strip()
name_regex = re.compile(r't(\d+)r(\d+)([we])(\d).*')
features = {}
ds = ogr.Open('/vsizip/trs.zip/tr.shp')
layer = ds.GetLayerByIndex(0)
t_driver = ogr.GetDriverByName('GeoJSON')
for feature in layer:
twp = feature.GetField('TOWN')
rang = feature.GetField('RANG')
ew = 'w' if feature.GetField('DIR') == 2 else 'e'
featid = (twp, rang, ew)
features[featid] = features.get(featid, []) + [feature]
zips = set(get_zips())
for i in zips:
trsid = os.path.splitext(i)[0]
twp, rng, ew, meridian = name_regex.match(trsid).groups()
featid = (int(twp), int(rng), ew)
t_ds = t_driver.CreateDataSource("boundaries/{0}.geojson".format(trsid))
t_layer = t_ds.CreateLayer('tiles', t_srs, ogr.wkbPolygon)
field_trsid = ogr.FieldDefn('trsid', ogr.OFTString)
field_trsid.SetWidth(16)
t_layer.CreateField(field_trsid)
for feature in features[featid]:
t_feature = ogr.Feature(t_layer.GetLayerDefn())
t_feature.SetGeometry(feature.GetGeometryRef())
t_feature.SetField('trsid', trsid)
t_layer.CreateFeature(t_feature)
t_ds.Destroy()
|
<commit_before><commit_msg>Create a python script to create initial boundaries faster
It also makes the boundaries in the EPSG:26915 SRS<commit_after>
|
#!/usr/bin/env python
from osgeo import ogr
from osgeo import osr
from glob import glob
import os
import os.path
import re
import shutil
shutil.rmtree('boundaries', True)
os.mkdir('boundaries')
t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(26915)
def get_zips():
for fn in glob('counties/*.txt'):
for line in open(fn).readlines():
yield line.strip()
name_regex = re.compile(r't(\d+)r(\d+)([we])(\d).*')
features = {}
ds = ogr.Open('/vsizip/trs.zip/tr.shp')
layer = ds.GetLayerByIndex(0)
t_driver = ogr.GetDriverByName('GeoJSON')
for feature in layer:
twp = feature.GetField('TOWN')
rang = feature.GetField('RANG')
ew = 'w' if feature.GetField('DIR') == 2 else 'e'
featid = (twp, rang, ew)
features[featid] = features.get(featid, []) + [feature]
zips = set(get_zips())
for i in zips:
trsid = os.path.splitext(i)[0]
twp, rng, ew, meridian = name_regex.match(trsid).groups()
featid = (int(twp), int(rng), ew)
t_ds = t_driver.CreateDataSource("boundaries/{0}.geojson".format(trsid))
t_layer = t_ds.CreateLayer('tiles', t_srs, ogr.wkbPolygon)
field_trsid = ogr.FieldDefn('trsid', ogr.OFTString)
field_trsid.SetWidth(16)
t_layer.CreateField(field_trsid)
for feature in features[featid]:
t_feature = ogr.Feature(t_layer.GetLayerDefn())
t_feature.SetGeometry(feature.GetGeometryRef())
t_feature.SetField('trsid', trsid)
t_layer.CreateFeature(t_feature)
t_ds.Destroy()
|
Create a python script to create initial boundaries faster
It also makes the boundaries in the EPSG:26915 SRS#!/usr/bin/env python
from osgeo import ogr
from osgeo import osr
from glob import glob
import os
import os.path
import re
import shutil
shutil.rmtree('boundaries', True)
os.mkdir('boundaries')
t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(26915)
def get_zips():
for fn in glob('counties/*.txt'):
for line in open(fn).readlines():
yield line.strip()
name_regex = re.compile(r't(\d+)r(\d+)([we])(\d).*')
features = {}
ds = ogr.Open('/vsizip/trs.zip/tr.shp')
layer = ds.GetLayerByIndex(0)
t_driver = ogr.GetDriverByName('GeoJSON')
for feature in layer:
twp = feature.GetField('TOWN')
rang = feature.GetField('RANG')
ew = 'w' if feature.GetField('DIR') == 2 else 'e'
featid = (twp, rang, ew)
features[featid] = features.get(featid, []) + [feature]
zips = set(get_zips())
for i in zips:
trsid = os.path.splitext(i)[0]
twp, rng, ew, meridian = name_regex.match(trsid).groups()
featid = (int(twp), int(rng), ew)
t_ds = t_driver.CreateDataSource("boundaries/{0}.geojson".format(trsid))
t_layer = t_ds.CreateLayer('tiles', t_srs, ogr.wkbPolygon)
field_trsid = ogr.FieldDefn('trsid', ogr.OFTString)
field_trsid.SetWidth(16)
t_layer.CreateField(field_trsid)
for feature in features[featid]:
t_feature = ogr.Feature(t_layer.GetLayerDefn())
t_feature.SetGeometry(feature.GetGeometryRef())
t_feature.SetField('trsid', trsid)
t_layer.CreateFeature(t_feature)
t_ds.Destroy()
|
<commit_before><commit_msg>Create a python script to create initial boundaries faster
It also makes the boundaries in the EPSG:26915 SRS<commit_after>#!/usr/bin/env python
from osgeo import ogr
from osgeo import osr
from glob import glob
import os
import os.path
import re
import shutil
shutil.rmtree('boundaries', True)
os.mkdir('boundaries')
t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(26915)
def get_zips():
for fn in glob('counties/*.txt'):
for line in open(fn).readlines():
yield line.strip()
name_regex = re.compile(r't(\d+)r(\d+)([we])(\d).*')
features = {}
ds = ogr.Open('/vsizip/trs.zip/tr.shp')
layer = ds.GetLayerByIndex(0)
t_driver = ogr.GetDriverByName('GeoJSON')
for feature in layer:
twp = feature.GetField('TOWN')
rang = feature.GetField('RANG')
ew = 'w' if feature.GetField('DIR') == 2 else 'e'
featid = (twp, rang, ew)
features[featid] = features.get(featid, []) + [feature]
zips = set(get_zips())
for i in zips:
trsid = os.path.splitext(i)[0]
twp, rng, ew, meridian = name_regex.match(trsid).groups()
featid = (int(twp), int(rng), ew)
t_ds = t_driver.CreateDataSource("boundaries/{0}.geojson".format(trsid))
t_layer = t_ds.CreateLayer('tiles', t_srs, ogr.wkbPolygon)
field_trsid = ogr.FieldDefn('trsid', ogr.OFTString)
field_trsid.SetWidth(16)
t_layer.CreateField(field_trsid)
for feature in features[featid]:
t_feature = ogr.Feature(t_layer.GetLayerDefn())
t_feature.SetGeometry(feature.GetGeometryRef())
t_feature.SetField('trsid', trsid)
t_layer.CreateFeature(t_feature)
t_ds.Destroy()
|
|
0051c05964220ef8e80bddda2a1010a088aa60c7
|
spacy/tests/serialize/test_serialize_parser.py
|
spacy/tests/serialize/test_serialize_parser.py
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralDependencyParser as Parser
import pytest
def test_serialize_parser_roundtrip_bytes(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
parser_b = parser.to_bytes()
new_parser = Parser(en_vocab)
new_parser.model, _ = new_parser.Model(0)
new_parser = new_parser.from_bytes(parser_b)
assert new_parser.to_bytes() == parser_b
def test_serialize_parser_roundtrip_disk(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
with make_tempdir() as d:
file_path = d / 'parser'
parser.to_disk(file_path)
parser_d = Parser(en_vocab)
parser_d.model, _ = parser_d.Model(0)
parser_d = parser_d.from_disk(file_path)
assert parser.to_bytes() == parser_d.to_bytes()
|
Add tests for serializing parser
|
Add tests for serializing parser
|
Python
|
mit
|
explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,aikramer2/spaCy,recognai/spaCy,explosion/spaCy,recognai/spaCy,honnibal/spaCy,honnibal/spaCy,explosion/spaCy,aikramer2/spaCy,honnibal/spaCy,recognai/spaCy,honnibal/spaCy,recognai/spaCy,aikramer2/spaCy,explosion/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy
|
Add tests for serializing parser
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralDependencyParser as Parser
import pytest
def test_serialize_parser_roundtrip_bytes(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
parser_b = parser.to_bytes()
new_parser = Parser(en_vocab)
new_parser.model, _ = new_parser.Model(0)
new_parser = new_parser.from_bytes(parser_b)
assert new_parser.to_bytes() == parser_b
def test_serialize_parser_roundtrip_disk(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
with make_tempdir() as d:
file_path = d / 'parser'
parser.to_disk(file_path)
parser_d = Parser(en_vocab)
parser_d.model, _ = parser_d.Model(0)
parser_d = parser_d.from_disk(file_path)
assert parser.to_bytes() == parser_d.to_bytes()
|
<commit_before><commit_msg>Add tests for serializing parser<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralDependencyParser as Parser
import pytest
def test_serialize_parser_roundtrip_bytes(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
parser_b = parser.to_bytes()
new_parser = Parser(en_vocab)
new_parser.model, _ = new_parser.Model(0)
new_parser = new_parser.from_bytes(parser_b)
assert new_parser.to_bytes() == parser_b
def test_serialize_parser_roundtrip_disk(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
with make_tempdir() as d:
file_path = d / 'parser'
parser.to_disk(file_path)
parser_d = Parser(en_vocab)
parser_d.model, _ = parser_d.Model(0)
parser_d = parser_d.from_disk(file_path)
assert parser.to_bytes() == parser_d.to_bytes()
|
Add tests for serializing parser# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralDependencyParser as Parser
import pytest
def test_serialize_parser_roundtrip_bytes(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
parser_b = parser.to_bytes()
new_parser = Parser(en_vocab)
new_parser.model, _ = new_parser.Model(0)
new_parser = new_parser.from_bytes(parser_b)
assert new_parser.to_bytes() == parser_b
def test_serialize_parser_roundtrip_disk(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
with make_tempdir() as d:
file_path = d / 'parser'
parser.to_disk(file_path)
parser_d = Parser(en_vocab)
parser_d.model, _ = parser_d.Model(0)
parser_d = parser_d.from_disk(file_path)
assert parser.to_bytes() == parser_d.to_bytes()
|
<commit_before><commit_msg>Add tests for serializing parser<commit_after># coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralDependencyParser as Parser
import pytest
def test_serialize_parser_roundtrip_bytes(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
parser_b = parser.to_bytes()
new_parser = Parser(en_vocab)
new_parser.model, _ = new_parser.Model(0)
new_parser = new_parser.from_bytes(parser_b)
assert new_parser.to_bytes() == parser_b
def test_serialize_parser_roundtrip_disk(en_vocab):
parser = Parser(en_vocab)
parser.model, _ = parser.Model(0)
with make_tempdir() as d:
file_path = d / 'parser'
parser.to_disk(file_path)
parser_d = Parser(en_vocab)
parser_d.model, _ = parser_d.Model(0)
parser_d = parser_d.from_disk(file_path)
assert parser.to_bytes() == parser_d.to_bytes()
|
|
e5007a5729fdf6cdd743b247cb0ab65613f90656
|
bumblebee_status/modules/contrib/optman.py
|
bumblebee_status/modules/contrib/optman.py
|
"""Displays currently active gpu by optimus-manager
Requires the following packages:
* optimus-manager
"""
import subprocess
import core.module
import core.widget
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.output))
self.__gpumode = ""
def output(self, _):
return "GPU: {}".format(self.__gpumode)
def update(self):
cmd = ["optimus-manager", "--print-mode"]
output = (
subprocess.Popen(cmd, stdout=subprocess.PIPE)
.communicate()[0]
.decode("utf-8")
.lower()
)
if "intel" in output:
self.__gpumode = "Intel"
elif "nvidia" in output:
self.__gpumode = "Nvidia"
elif "amd" in output:
self.__gpumode = "AMD"
|
Add active gpu module using optimus-manager
|
Add active gpu module using optimus-manager
|
Python
|
mit
|
tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status
|
Add active gpu module using optimus-manager
|
"""Displays currently active gpu by optimus-manager
Requires the following packages:
* optimus-manager
"""
import subprocess
import core.module
import core.widget
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.output))
self.__gpumode = ""
def output(self, _):
return "GPU: {}".format(self.__gpumode)
def update(self):
cmd = ["optimus-manager", "--print-mode"]
output = (
subprocess.Popen(cmd, stdout=subprocess.PIPE)
.communicate()[0]
.decode("utf-8")
.lower()
)
if "intel" in output:
self.__gpumode = "Intel"
elif "nvidia" in output:
self.__gpumode = "Nvidia"
elif "amd" in output:
self.__gpumode = "AMD"
|
<commit_before><commit_msg>Add active gpu module using optimus-manager<commit_after>
|
"""Displays currently active gpu by optimus-manager
Requires the following packages:
* optimus-manager
"""
import subprocess
import core.module
import core.widget
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.output))
self.__gpumode = ""
def output(self, _):
return "GPU: {}".format(self.__gpumode)
def update(self):
cmd = ["optimus-manager", "--print-mode"]
output = (
subprocess.Popen(cmd, stdout=subprocess.PIPE)
.communicate()[0]
.decode("utf-8")
.lower()
)
if "intel" in output:
self.__gpumode = "Intel"
elif "nvidia" in output:
self.__gpumode = "Nvidia"
elif "amd" in output:
self.__gpumode = "AMD"
|
Add active gpu module using optimus-manager"""Displays currently active gpu by optimus-manager
Requires the following packages:
* optimus-manager
"""
import subprocess
import core.module
import core.widget
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.output))
self.__gpumode = ""
def output(self, _):
return "GPU: {}".format(self.__gpumode)
def update(self):
cmd = ["optimus-manager", "--print-mode"]
output = (
subprocess.Popen(cmd, stdout=subprocess.PIPE)
.communicate()[0]
.decode("utf-8")
.lower()
)
if "intel" in output:
self.__gpumode = "Intel"
elif "nvidia" in output:
self.__gpumode = "Nvidia"
elif "amd" in output:
self.__gpumode = "AMD"
|
<commit_before><commit_msg>Add active gpu module using optimus-manager<commit_after>"""Displays currently active gpu by optimus-manager
Requires the following packages:
* optimus-manager
"""
import subprocess
import core.module
import core.widget
class Module(core.module.Module):
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.output))
self.__gpumode = ""
def output(self, _):
return "GPU: {}".format(self.__gpumode)
def update(self):
cmd = ["optimus-manager", "--print-mode"]
output = (
subprocess.Popen(cmd, stdout=subprocess.PIPE)
.communicate()[0]
.decode("utf-8")
.lower()
)
if "intel" in output:
self.__gpumode = "Intel"
elif "nvidia" in output:
self.__gpumode = "Nvidia"
elif "amd" in output:
self.__gpumode = "AMD"
|
|
78d19287bd1e560ae063d2971e393df54b87f93d
|
py/number-complement.py
|
py/number-complement.py
|
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
n = num
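        # Smear the highest set bit downward so n becomes an all-ones mask covering num's bit length (inputs up to 32 bits); XOR with num then flips exactly those bits.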
n |= (n >> 1)
n |= (n >> 2)
n |= (n >> 4)
n |= (n >> 8)
n |= (n >> 16)
return n ^ num
|
Add py solution for 476. Number Complement
|
Add py solution for 476. Number Complement
476. Number Complement: https://leetcode.com/problems/number-complement/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 476. Number Complement
476. Number Complement: https://leetcode.com/problems/number-complement/
|
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
n = num
n |= (n >> 1)
n |= (n >> 2)
n |= (n >> 4)
n |= (n >> 8)
n |= (n >> 16)
return n ^ num
|
<commit_before><commit_msg>Add py solution for 476. Number Complement
476. Number Complement: https://leetcode.com/problems/number-complement/<commit_after>
|
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
n = num
n |= (n >> 1)
n |= (n >> 2)
n |= (n >> 4)
n |= (n >> 8)
n |= (n >> 16)
return n ^ num
|
Add py solution for 476. Number Complement
476. Number Complement: https://leetcode.com/problems/number-complement/class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
n = num
n |= (n >> 1)
n |= (n >> 2)
n |= (n >> 4)
n |= (n >> 8)
n |= (n >> 16)
return n ^ num
|
<commit_before><commit_msg>Add py solution for 476. Number Complement
476. Number Complement: https://leetcode.com/problems/number-complement/<commit_after>class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
n = num
n |= (n >> 1)
n |= (n >> 2)
n |= (n >> 4)
n |= (n >> 8)
n |= (n >> 16)
return n ^ num
|
|
42b651bd1a3d6994c6eb727ff8cc66e5699d9cd2
|
sahgutils/spatialtools.py
|
sahgutils/spatialtools.py
|
"""A collection of tools for spatial data and GIS tasks.
"""
def point_in_poly(pnt, poly):
"""Calculate whether a point lies inside a polygon
Algorithm is based on the ray-tracing procedure described at
http://geospatialpython.com/2011/01/point-in-polygon.html
Parameters
----------
pnt : seq
A point (x, y), which should be a two-element sequence object.
poly : seq
A sequence of points describing the polygon.
Returns
-------
True if `pnt` is inside `poly`, False otherwise.
"""
x, y = pnt
n = len(poly)
inside = False
p1x,p1y = poly[0]
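    # Cast a horizontal ray from the point; every polygon edge that crosses it toggles the inside/outside flag.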
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
|
Add a point in polygon routine
|
ENH: Add a point in polygon routine
|
Python
|
bsd-3-clause
|
sahg/SAHGutils
|
ENH: Add a point in polygon routine
|
"""A collection of tools for spatial data and GIS tasks.
"""
def point_in_poly(pnt, poly):
"""Calculate whether a point lies inside a polygon
Algorithm is based on the ray-tracing procedure described at
http://geospatialpython.com/2011/01/point-in-polygon.html
Parameters
----------
pnt : seq
A point (x, y), which should be a two-element sequence object.
poly : seq
A sequence of points describing the polygon.
Returns
-------
True if `pnt` is inside `poly`, False otherwise.
"""
x, y = pnt
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
|
<commit_before><commit_msg>ENH: Add a point in polygon routine<commit_after>
|
"""A collection of tools for spatial data and GIS tasks.
"""
def point_in_poly(pnt, poly):
"""Calculate whether a point lies inside a polygon
Algorithm is based on the ray-tracing procedure described at
http://geospatialpython.com/2011/01/point-in-polygon.html
Parameters
----------
pnt : seq
A point (x, y), which should be a two-element sequence object.
poly : seq
A sequence of points describing the polygon.
Returns
-------
True if `pnt` is inside `poly`, False otherwise.
"""
x, y = pnt
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
|
ENH: Add a point in polygon routine"""A collection of tools for spatial data and GIS tasks.
"""
def point_in_poly(pnt, poly):
"""Calculate whether a point lies inside a polygon
Algorithm is based on the ray-tracing procedure described at
http://geospatialpython.com/2011/01/point-in-polygon.html
Parameters
----------
pnt : seq
A point (x, y), which should be a two-element sequence object.
poly : seq
A sequence of points describing the polygon.
Returns
-------
True if `pnt` is inside `poly`, False otherwise.
"""
x, y = pnt
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
|
<commit_before><commit_msg>ENH: Add a point in polygon routine<commit_after>"""A collection of tools for spatial data and GIS tasks.
"""
def point_in_poly(pnt, poly):
"""Calculate whether a point lies inside a polygon
Algorithm is based on the ray-tracing procedure described at
http://geospatialpython.com/2011/01/point-in-polygon.html
Parameters
----------
pnt : seq
A point (x, y), which should be a two-element sequence object.
poly : seq
A sequence of points describing the polygon.
Returns
-------
True if `pnt` is inside `poly`, False otherwise.
"""
x, y = pnt
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
|
|
dc5d1341031e642ccf1b586dd2e81e6e99e76e0d
|
scripts/setup-tracing-for-project.py
|
scripts/setup-tracing-for-project.py
|
#!/usr/bin/python
# This is a small helper script to add the required database entries
# to enable tracing of a particular project with treelines,
# connectors, etc. This should really be done in a larger project
# creation script.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
import sys
import psycopg2
import os
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
conn = psycopg2.connect(database="catmaid",user=catmaid_db_user,password=catmaid_db_password)
c = conn.cursor()
classes_required = [ ( "skeleton", True ),
( "neuron", True ),
( "group", True ),
( "label", False ),
( "root", False ),
( "synapse", True ),
( "presynaptic terminal", True ),
( "postsynaptic terminal", True ) ]
class_dictionary = {}
for required_class, show_in_tree in classes_required:
class_dictionary[required_class] = {'show_in_tree': show_in_tree};
c.execute("INSERT INTO class (user_id, project_id, class_name, showintree) "+
"VALUES (%s, %s, %s, %s) RETURNING id",
(user_id, project_id, required_class, show_in_tree))
class_dictionary[required_class]['id'] = c.fetchone()[0]
c.execute("INSERT INTO class_instance (user_id, project_id, class_id, name) "+
"VALUES (%s, %s, %s, %s)",
(user_id,
project_id,
class_dictionary['root']['id'],
'neuropile'))
relations_required = (
"labeled_as",
"postsynaptic_to",
"presynaptic_to",
"element_of",
"model_of",
"part_of",
"is_a"
)
for required_relation in relations_required:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, required_relation))
|
Add a helper script to add the required db entries to enable tracing for a project
|
Add a helper script to add the required db entries to enable tracing for a project
|
Python
|
agpl-3.0
|
fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID
|
Add a helper script to add the required db entries to enable tracing for a project
|
#!/usr/bin/python
# This is a small helper script to add the required database entries
# to enable tracing of a particular project with treelines,
# connectors, etc. This should really be done in a larger project
# creation script.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
import sys
import psycopg2
import os
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
conn = psycopg2.connect(database="catmaid",user=catmaid_db_user,password=catmaid_db_password)
c = conn.cursor()
classes_required = [ ( "skeleton", True ),
( "neuron", True ),
( "group", True ),
( "label", False ),
( "root", False ),
( "synapse", True ),
( "presynaptic terminal", True ),
( "postsynaptic terminal", True ) ]
class_dictionary = {}
for required_class, show_in_tree in classes_required:
class_dictionary[required_class] = {'show_in_tree': show_in_tree};
c.execute("INSERT INTO class (user_id, project_id, class_name, showintree) "+
"VALUES (%s, %s, %s, %s) RETURNING id",
(user_id, project_id, required_class, show_in_tree))
class_dictionary[required_class]['id'] = c.fetchone()[0]
c.execute("INSERT INTO class_instance (user_id, project_id, class_id, name) "+
"VALUES (%s, %s, %s, %s)",
(user_id,
project_id,
class_dictionary['root']['id'],
'neuropile'))
relations_required = (
"labeled_as",
"postsynaptic_to",
"presynaptic_to",
"element_of",
"model_of",
"part_of",
"is_a"
)
for required_relation in relations_required:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, required_relation))
|
<commit_before><commit_msg>Add a helper script to add the required db entries to enable tracing for a project<commit_after>
|
#!/usr/bin/python
# This is a small helper script to add the required database entries
# to enable tracing of a particular project with treelines,
# connectors, etc. This should really be done in a larger project
# creation script.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
import sys
import psycopg2
import os
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
conn = psycopg2.connect(database="catmaid",user=catmaid_db_user,password=catmaid_db_password)
c = conn.cursor()
classes_required = [ ( "skeleton", True ),
( "neuron", True ),
( "group", True ),
( "label", False ),
( "root", False ),
( "synapse", True ),
( "presynaptic terminal", True ),
( "postsynaptic terminal", True ) ]
class_dictionary = {}
for required_class, show_in_tree in classes_required:
class_dictionary[required_class] = {'show_in_tree': show_in_tree};
c.execute("INSERT INTO class (user_id, project_id, class_name, showintree) "+
"VALUES (%s, %s, %s, %s) RETURNING id",
(user_id, project_id, required_class, show_in_tree))
class_dictionary[required_class]['id'] = c.fetchone()[0]
c.execute("INSERT INTO class_instance (user_id, project_id, class_id, name) "+
"VALUES (%s, %s, %s, %s)",
(user_id,
project_id,
class_dictionary['root']['id'],
'neuropile'))
relations_required = (
"labeled_as",
"postsynaptic_to",
"presynaptic_to",
"element_of",
"model_of",
"part_of",
"is_a"
)
for required_relation in relations_required:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, required_relation))
|
Add a helper script to add the required db entries to enable tracing for a project#!/usr/bin/python
# This is a small helper script to add the required database entries
# to enable tracing of a particular project with treelines,
# connectors, etc. This should really be done in a larger project
# creation script.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
import sys
import psycopg2
import os
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
conn = psycopg2.connect(database="catmaid",user=catmaid_db_user,password=catmaid_db_password)
c = conn.cursor()
classes_required = [ ( "skeleton", True ),
( "neuron", True ),
( "group", True ),
( "label", False ),
( "root", False ),
( "synapse", True ),
( "presynaptic terminal", True ),
( "postsynaptic terminal", True ) ]
class_dictionary = {}
for required_class, show_in_tree in classes_required:
class_dictionary[required_class] = {'show_in_tree': show_in_tree};
c.execute("INSERT INTO class (user_id, project_id, class_name, showintree) "+
"VALUES (%s, %s, %s, %s) RETURNING id",
(user_id, project_id, required_class, show_in_tree))
class_dictionary[required_class]['id'] = c.fetchone()[0]
c.execute("INSERT INTO class_instance (user_id, project_id, class_id, name) "+
"VALUES (%s, %s, %s, %s)",
(user_id,
project_id,
class_dictionary['root']['id'],
'neuropile'))
relations_required = (
"labeled_as",
"postsynaptic_to",
"presynaptic_to",
"element_of",
"model_of",
"part_of",
"is_a"
)
for required_relation in relations_required:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, required_relation))
|
<commit_before><commit_msg>Add a helper script to add the required db entries to enable tracing for a project<commit_after>#!/usr/bin/python
# This is a small helper script to add the required database entries
# to enable tracing of a particular project with treelines,
# connectors, etc. This should really be done in a larger project
# creation script.
# You may need to install psycopg2, e.g. with:
# sudo apt-get install python-psycopg2
import sys
import psycopg2
import os
if len(sys.argv) != 3:
    print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % sys.argv[0]
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
db_login_filename = os.path.join(os.environ['HOME'],'.catmaid-db')
fp = open(db_login_filename)
for i, line in enumerate(fp):
if i == 0:
catmaid_db_user = line.strip()
elif i == 1:
catmaid_db_password = line.strip()
conn = psycopg2.connect(database="catmaid",user=catmaid_db_user,password=catmaid_db_password)
c = conn.cursor()
classes_required = [ ( "skeleton", True ),
( "neuron", True ),
( "group", True ),
( "label", False ),
( "root", False ),
( "synapse", True ),
( "presynaptic terminal", True ),
( "postsynaptic terminal", True ) ]
class_dictionary = {}
for required_class, show_in_tree in classes_required:
class_dictionary[required_class] = {'show_in_tree': show_in_tree};
c.execute("INSERT INTO class (user_id, project_id, class_name, showintree) "+
"VALUES (%s, %s, %s, %s) RETURNING id",
(user_id, project_id, required_class, show_in_tree))
class_dictionary[required_class]['id'] = c.fetchone()[0]
c.execute("INSERT INTO class_instance (user_id, project_id, class_id, name) "+
"VALUES (%s, %s, %s, %s)",
(user_id,
project_id,
class_dictionary['root']['id'],
'neuropile'))
relations_required = (
"labeled_as",
"postsynaptic_to",
"presynaptic_to",
"element_of",
"model_of",
"part_of",
"is_a"
)
for required_relation in relations_required:
c.execute("INSERT INTO relation (user_id, project_id, relation_name) "+
"VALUES (%s, %s, %s)",
(user_id, project_id, required_relation))
|
|
a468de144929b7d5633dd0738e9eaf5940d1b6c7
|
other/unterm-generator.py
|
other/unterm-generator.py
|
"""Demonstrated an error garbage collecting an h5py object at Python shutdown
https://github.com/h5py/h5py/issues/1495
"""
import h5py
def yield_groups(filename):
with h5py.File(filename, 'r') as fh:
for group in fh:
yield group
filename = "file_with_10_groups.hdf5"
grp_generator = yield_groups(filename)
next(grp_generator)
|
Add script demonstrating issue gh-1495
|
Add script demonstrating issue gh-1495
|
Python
|
bsd-3-clause
|
h5py/h5py,h5py/h5py,h5py/h5py
|
Add script demonstrating issue gh-1495
|
"""Demonstrated an error garbage collecting an h5py object at Python shutdown
https://github.com/h5py/h5py/issues/1495
"""
import h5py
def yield_groups(filename):
with h5py.File(filename, 'r') as fh:
for group in fh:
yield group
filename = "file_with_10_groups.hdf5"
grp_generator = yield_groups(filename)
next(grp_generator)
|
<commit_before><commit_msg>Add script demonstrating issue gh-1495<commit_after>
|
"""Demonstrated an error garbage collecting an h5py object at Python shutdown
https://github.com/h5py/h5py/issues/1495
"""
import h5py
def yield_groups(filename):
with h5py.File(filename, 'r') as fh:
for group in fh:
yield group
filename = "file_with_10_groups.hdf5"
grp_generator = yield_groups(filename)
next(grp_generator)
|
Add script demonstrating issue gh-1495"""Demonstrated an error garbage collecting an h5py object at Python shutdown
https://github.com/h5py/h5py/issues/1495
"""
import h5py
def yield_groups(filename):
with h5py.File(filename, 'r') as fh:
for group in fh:
yield group
filename = "file_with_10_groups.hdf5"
grp_generator = yield_groups(filename)
next(grp_generator)
|
<commit_before><commit_msg>Add script demonstrating issue gh-1495<commit_after>"""Demonstrated an error garbage collecting an h5py object at Python shutdown
https://github.com/h5py/h5py/issues/1495
"""
import h5py
def yield_groups(filename):
with h5py.File(filename, 'r') as fh:
for group in fh:
yield group
filename = "file_with_10_groups.hdf5"
grp_generator = yield_groups(filename)
next(grp_generator)
|
|
f35b1282eba2d305d18c5b041518cdecd86dab59
|
tests/test_views.py
|
tests/test_views.py
|
"""
Tests for the frontend.
"""
import unittest
import ga4gh.server as server
import ga4gh.cli
class TestFrontend(unittest.TestCase):
def setUp(self):
self.app = server.app.test_client()
def testServer(self):
self.assertTrue('404' in self.app.get('/').data)
# TODO: Fill in actual test cases here...
if __name__ == '__main__':
unittest.main()
|
Add a small unit test for the frontend.
|
Add a small unit test for the frontend.
|
Python
|
apache-2.0
|
dcolligan/server,hjellinek/server,hjellinek/server,ekalosak/server,ohsu-computational-biology/server,ekalosak/server,srblum/server,pcingola/server,adamnovak/server,ga4gh/server,adamnovak/server,shajoezhu/server,shajoezhu/server,srblum/hackathon-server,pcingola/server,diekhans/ga4gh-server,saupchurch/server,saupchurch/server,jeromekelleher/server,diekhans/ga4gh-server,dcolligan/server,ga4gh/server,UMMS-Biocore/ga4gh-server,jeromekelleher/server,diekhans/ga4gh-server,srblum/server,macieksmuga/server,david4096/ga4gh-server,srblum/hackathon-server,pansapiens/server,ohsu-computational-biology/server,macieksmuga/server,pansapiens/server,macieksmuga/server,pcingola/server,ekalosak/server,ohsu-computational-biology/server,srblum/server,UMMS-Biocore/ga4gh-server,hjellinek/server,saupchurch/server,dcolligan/server,adamnovak/server,pansapiens/server,david4096/ga4gh-server,srblum/hackathon-server,ga4gh/server,david4096/ga4gh-server,UMMS-Biocore/ga4gh-server
|
Add a small unit test for the frontend.
|
"""
Tests for the frontend.
"""
import unittest
import ga4gh.server as server
import ga4gh.cli
class TestFrontend(unittest.TestCase):
def setUp(self):
self.app = server.app.test_client()
def testServer(self):
self.assertTrue('404' in self.app.get('/').data)
# TODO: Fill in actual test cases here...
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a small unit test for the frontend.<commit_after>
|
"""
Tests for the frontend.
"""
import unittest
import ga4gh.server as server
import ga4gh.cli
class TestFrontend(unittest.TestCase):
def setUp(self):
self.app = server.app.test_client()
def testServer(self):
self.assertTrue('404' in self.app.get('/').data)
# TODO: Fill in actual test cases here...
if __name__ == '__main__':
unittest.main()
|
Add a small unit test for the frontend."""
Tests for the frontend.
"""
import unittest
import ga4gh.server as server
import ga4gh.cli
class TestFrontend(unittest.TestCase):
def setUp(self):
self.app = server.app.test_client()
def testServer(self):
self.assertTrue('404' in self.app.get('/').data)
# TODO: Fill in actual test cases here...
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a small unit test for the frontend.<commit_after>"""
Tests for the frontend.
"""
import unittest
import ga4gh.server as server
import ga4gh.cli
class TestFrontend(unittest.TestCase):
def setUp(self):
self.app = server.app.test_client()
def testServer(self):
self.assertTrue('404' in self.app.get('/').data)
# TODO: Fill in actual test cases here...
if __name__ == '__main__':
unittest.main()
|
|
05648421c1fa77f6f339f68be2c43bb7952e918a
|
pymatgen/__init__.py
|
pymatgen/__init__.py
|
__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .util.io_utils import zopen
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
|
__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
|
Remove zopen in pymatgen root.
|
Remove zopen in pymatgen root.
|
Python
|
mit
|
sonium0/pymatgen,rousseab/pymatgen,rousseab/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,yanikou19/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,migueldiascosta/pymatgen,sonium0/pymatgen,ctoher/pymatgen,Dioptas/pymatgen,sonium0/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,rousseab/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,Dioptas/pymatgen
|
__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .util.io_utils import zopen
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
Remove zopen in pymatgen root.
|
__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
|
<commit_before>__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .util.io_utils import zopen
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
<commit_msg>Remove zopen in pymatgen root.<commit_after>
|
__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
|
__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .util.io_utils import zopen
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
Remove zopen in pymatgen root.__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
|
<commit_before>__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .util.io_utils import zopen
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
<commit_msg>Remove zopen in pymatgen root.<commit_after>__author__ = ", ".join(["Shyue Ping Ong", "Anubhav Jain", "Geoffroy Hautier",
"William Davidson Richard", "Stephen Dacek",
"Sai Jayaraman", "Michael Kocher", "Dan Gunter",
"Shreyas Cholia", "Vincent L Chevrier",
"Rickard Armiento"])
__date__ = "Dec 18 2013"
__version__ = "2.8.10"
#Useful aliases for commonly used objects and modules.
from .core import *
from .serializers.json_coders import PMGJSONEncoder, PMGJSONDecoder, \
pmg_dump, pmg_load
from .electronic_structure.core import Spin, Orbital
from .io.smartio import read_structure, write_structure, read_mol, write_mol
from .matproj.rest import MPRester
|
aad87fe5e924de73eea7b1a34ea9412d2cc0705d
|
scripts/simple_blend.py
|
scripts/simple_blend.py
|
import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from utils.data_paths import SUBMISSIONS_DIR_PATH
OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta')
PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'),
os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')]
PREDICTION_COEFFICIENTS = [0.4,
0.6]
def main():
predictions = get_predictions()
write(predictions)
def get_predictions():
predictions = np.array([])
for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS):
with open(prediction_file_path, 'r') as prediction_file:
prediction = np.transpose(np.array([prediction_file.read().split()],
dtype=np.float32))
if predictions.size == 0:
predictions = prediction
else:
predictions = np.append(predictions, prediction, axis=1)
return np.matrix(predictions)
def write(predictions):
coefficients = np.array(PREDICTION_COEFFICIENTS)
with open(OUTPUT_FILE_PATH, 'w+') as output_file:
for prediction_set in predictions:
prediction = np.dot(np.ravel(prediction_set), coefficients)
output_file.write('{}\n'.format(prediction))
if __name__ == '__main__':
main()
|
Add simplest blending script to apply fixed weights
|
Add simplest blending script to apply fixed weights
|
Python
|
mit
|
jvanbrug/netflix,jvanbrug/netflix
|
Add simplest blending script to apply fixed weights
|
import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from utils.data_paths import SUBMISSIONS_DIR_PATH
OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta')
PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'),
os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')]
PREDICTION_COEFFICIENTS = [0.4,
0.6]
def main():
predictions = get_predictions()
write(predictions)
def get_predictions():
predictions = np.array([])
for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS):
with open(prediction_file_path, 'r') as prediction_file:
prediction = np.transpose(np.array([prediction_file.read().split()],
dtype=np.float32))
if predictions.size == 0:
predictions = prediction
else:
predictions = np.append(predictions, prediction, axis=1)
return np.matrix(predictions)
def write(predictions):
coefficients = np.array(PREDICTION_COEFFICIENTS)
with open(OUTPUT_FILE_PATH, 'w+') as output_file:
for prediction_set in predictions:
prediction = np.dot(np.ravel(prediction_set), coefficients)
output_file.write('{}\n'.format(prediction))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simplest blending script to apply fixed weights<commit_after>
|
import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from utils.data_paths import SUBMISSIONS_DIR_PATH
OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta')
PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'),
os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')]
PREDICTION_COEFFICIENTS = [0.4,
0.6]
def main():
predictions = get_predictions()
write(predictions)
def get_predictions():
predictions = np.array([])
for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS):
with open(prediction_file_path, 'r') as prediction_file:
prediction = np.transpose(np.array([prediction_file.read().split()],
dtype=np.float32))
if predictions.size == 0:
predictions = prediction
else:
predictions = np.append(predictions, prediction, axis=1)
return np.matrix(predictions)
def write(predictions):
coefficients = np.array(PREDICTION_COEFFICIENTS)
with open(OUTPUT_FILE_PATH, 'w+') as output_file:
for prediction_set in predictions:
prediction = np.dot(np.ravel(prediction_set), coefficients)
output_file.write('{}\n'.format(prediction))
if __name__ == '__main__':
main()
|
Add simplest blending script to apply fixed weightsimport numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from utils.data_paths import SUBMISSIONS_DIR_PATH
OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta')
PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'),
os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')]
PREDICTION_COEFFICIENTS = [0.4,
0.6]
def main():
predictions = get_predictions()
write(predictions)
def get_predictions():
predictions = np.array([])
for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS):
with open(prediction_file_path, 'r') as prediction_file:
prediction = np.transpose(np.array([prediction_file.read().split()],
dtype=np.float32))
if predictions.size == 0:
predictions = prediction
else:
predictions = np.append(predictions, prediction, axis=1)
return np.matrix(predictions)
def write(predictions):
coefficients = np.array(PREDICTION_COEFFICIENTS)
with open(OUTPUT_FILE_PATH, 'w+') as output_file:
for prediction_set in predictions:
prediction = np.dot(np.ravel(prediction_set), coefficients)
output_file.write('{}\n'.format(prediction))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simplest blending script to apply fixed weights<commit_after>import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from utils.data_paths import SUBMISSIONS_DIR_PATH
OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta')
PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'),
os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')]
PREDICTION_COEFFICIENTS = [0.4,
0.6]
def main():
predictions = get_predictions()
write(predictions)
def get_predictions():
predictions = np.array([])
for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS):
with open(prediction_file_path, 'r') as prediction_file:
prediction = np.transpose(np.array([prediction_file.read().split()],
dtype=np.float32))
if predictions.size == 0:
predictions = prediction
else:
predictions = np.append(predictions, prediction, axis=1)
return np.matrix(predictions)
def write(predictions):
coefficients = np.array(PREDICTION_COEFFICIENTS)
with open(OUTPUT_FILE_PATH, 'w+') as output_file:
for prediction_set in predictions:
prediction = np.dot(np.ravel(prediction_set), coefficients)
output_file.write('{}\n'.format(prediction))
if __name__ == '__main__':
main()
|
|
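As a side note on the fixed-weight blending commit recorded above, NumPy can express the same per-row combination in a single call. A minimal sketch with invented prediction columns and weights (nothing here is taken from the repository's data files):

import numpy as np

# Two models' predictions for three items, one column per model.
predictions = np.array([[3.1, 3.5],
                        [4.0, 4.2],
                        [2.8, 2.6]])

# np.average with the weights argument applies the fixed linear blend row by row.
blended = np.average(predictions, axis=1, weights=[0.4, 0.6])
print(blended)  # [3.34 4.12 2.68]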
6810fdd7156b9839acee7a27aae57041850603e0
|
examples/fosdem_2015/power.py
|
examples/fosdem_2015/power.py
|
from zeroservices import ZeroMQMedium, ResourceService
from zeroservices.backend.mongodb import MongoDBCollection
class PowerCollection(MongoDBCollection):
def __init__(self, *args, **kwargs):
super(PowerCollection, self).__init__(*args, **kwargs)
self.collection.ensure_index([('description', 'text')])
if __name__ == '__main__':
todo = ResourceService('fosdem_2015_power', ZeroMQMedium(port_random=True))
todo.register_resource(PowerCollection("power", "fosdem_db"))
todo.main()
|
Add missing file in fosdem 2015 example
|
Add missing file in fosdem 2015 example
|
Python
|
mit
|
Lothiraldan/ZeroServices
|
Add missing file in fosdem 2015 example
|
from zeroservices import ZeroMQMedium, ResourceService
from zeroservices.backend.mongodb import MongoDBCollection
class PowerCollection(MongoDBCollection):
def __init__(self, *args, **kwargs):
super(PowerCollection, self).__init__(*args, **kwargs)
self.collection.ensure_index([('description', 'text')])
if __name__ == '__main__':
todo = ResourceService('fosdem_2015_power', ZeroMQMedium(port_random=True))
todo.register_resource(PowerCollection("power", "fosdem_db"))
todo.main()
|
<commit_before><commit_msg>Add missing file in fosdem 2015 example<commit_after>
|
from zeroservices import ZeroMQMedium, ResourceService
from zeroservices.backend.mongodb import MongoDBCollection
class PowerCollection(MongoDBCollection):
def __init__(self, *args, **kwargs):
super(PowerCollection, self).__init__(*args, **kwargs)
self.collection.ensure_index([('description', 'text')])
if __name__ == '__main__':
todo = ResourceService('fosdem_2015_power', ZeroMQMedium(port_random=True))
todo.register_resource(PowerCollection("power", "fosdem_db"))
todo.main()
|
Add missing file in fosdem 2015 examplefrom zeroservices import ZeroMQMedium, ResourceService
from zeroservices.backend.mongodb import MongoDBCollection
class PowerCollection(MongoDBCollection):
def __init__(self, *args, **kwargs):
super(PowerCollection, self).__init__(*args, **kwargs)
self.collection.ensure_index([('description', 'text')])
if __name__ == '__main__':
todo = ResourceService('fosdem_2015_power', ZeroMQMedium(port_random=True))
todo.register_resource(PowerCollection("power", "fosdem_db"))
todo.main()
|
<commit_before><commit_msg>Add missing file in fosdem 2015 example<commit_after>from zeroservices import ZeroMQMedium, ResourceService
from zeroservices.backend.mongodb import MongoDBCollection
class PowerCollection(MongoDBCollection):
def __init__(self, *args, **kwargs):
super(PowerCollection, self).__init__(*args, **kwargs)
self.collection.ensure_index([('description', 'text')])
if __name__ == '__main__':
todo = ResourceService('fosdem_2015_power', ZeroMQMedium(port_random=True))
todo.register_resource(PowerCollection("power", "fosdem_db"))
todo.main()
|
|
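The ensure_index call in the commit above is what enables MongoDB text search on the description field. A rough, self-contained PyMongo illustration follows; the database name, document, and query string are invented, and current PyMongo spells the call create_index:

from pymongo import MongoClient

client = MongoClient()  # assumes a local mongod on the default port
power = client['example_db']['power']

# A text index over 'description' allows $text queries against that field.
power.create_index([('description', 'text')])
power.insert_one({'description': 'solar panel on the roof'})

for doc in power.find({'$text': {'$search': 'solar'}}):
    print(doc['description'])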
72f484c35a79d7be6dc04e7ac108b05652bc1f36
|
readthedocs/core/tasks.py
|
readthedocs/core/tasks.py
|
"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
@task(queue='web')
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
|
"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
EMAIL_TIME_LIMIT = 30
@task(queue='web', time_limit=EMAIL_TIME_LIMIT)
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
|
Add timeout to email task
|
Add timeout to email task
Email task needs a timeout, as there were some cases where celery caused the
task to hang.
|
Python
|
mit
|
safwanrahman/readthedocs.org,pombredanne/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,tddv/readthedocs.org,safwanrahman/readthedocs.org,tddv/readthedocs.org,davidfischer/readthedocs.org,davidfischer/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,tddv/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,rtfd/readthedocs.org
|
"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
@task(queue='web')
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
Add timeout to email task
Email task needs a timeout, as there were some cases where celery caused the
task to hang.
|
"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
EMAIL_TIME_LIMIT = 30
@task(queue='web', time_limit=EMAIL_TIME_LIMIT)
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
|
<commit_before>"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
@task(queue='web')
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
<commit_msg>Add timeout to email task
Email task needs a timeout, as there were some cases where celery caused the
task to hang.<commit_after>
|
"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
EMAIL_TIME_LIMIT = 30
@task(queue='web', time_limit=EMAIL_TIME_LIMIT)
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
|
"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
@task(queue='web')
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
Add timeout to email task
Email task needs a timeout, as there were some cases where celery caused the
task to hang."""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
EMAIL_TIME_LIMIT = 30
@task(queue='web', time_limit=EMAIL_TIME_LIMIT)
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
|
<commit_before>"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
@task(queue='web')
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
<commit_msg>Add timeout to email task
Email task needs a timeout, as there were some cases where celery caused the
task to hang.<commit_after>"""Basic tasks"""
import logging
from celery import task
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
log = logging.getLogger(__name__)
EMAIL_TIME_LIMIT = 30
@task(queue='web', time_limit=EMAIL_TIME_LIMIT)
def send_email_task(recipient, subject, template, template_html, context=None):
"""Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
"""
msg = EmailMultiAlternatives(
subject,
get_template(template).render(context),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(context),
'text/html')
msg.send()
log.info('Sent email to recipient: %s', recipient)
|
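The record above caps the Read the Docs email task with a hard time_limit. For readers unfamiliar with the option, here is a stand-alone sketch of how Celery's time limits behave; the app name, broker URL, and deliver() helper are invented for illustration and are not part of that codebase:

from celery import Celery
from celery.exceptions import SoftTimeLimitExceeded

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task(time_limit=30, soft_time_limit=25)
def send_email_sketch(recipient):
    # soft_time_limit raises SoftTimeLimitExceeded inside the task after 25 seconds,
    # giving it a chance to clean up; time_limit kills the worker process at 30 seconds.
    try:
        deliver(recipient)  # hypothetical stand-in for the real SMTP call
    except SoftTimeLimitExceeded:
        pass  # give up quietly instead of hanging the worker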
00ff016a7f45784a786c0bbebfe31a64291e9bbd
|
logistic_regression/logistic_regression.py
|
logistic_regression/logistic_regression.py
|
#!/usr/bin/env python
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def test_sigmoid():
x = 0
print("Input x: {}, the sigmoid value is: {}".format(x, sigmoid(x)))
def main():
# Prepare dataset
train_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
train_labels = np.array([1, 0], dtype=np.int)
test_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
test_labels = np.array([1, 0], dtype=np.int)
feature_size = 3
batch_size = 2
# Define hyperparameters
epoch_number = 10
learning_rate = 0.01
weights = np.ones(feature_size)
# Start training
for epoch_index in range(epoch_number):
print("Start the epoch: {}".format(epoch_index))
''' Implement with batch size
# [2, 3] = [3] * [2, 3]
multiple_weights_result = weights * train_features
# [2] = [2, 3]
predict = np.sum(multiple_weights_result, 1)
# [2] = [2]
sigmoid_predict = sigmoid(predict)
# [2] = [2]
predict_difference = train_labels - sigmoid_predict
# TODO: [2, 3, 1] = [2, 3] * [2]
batch_grad = train_features * predict_difference
# TODO: fix that
grad = batch_grad
# [3, 1] = [3, 1]
weights += learning_rate * grad
'''
# Train with single example
train_features = np.array([1, 0, 25], dtype=np.float)
train_labels = np.array([0], dtype=np.int)
# [3] = [3] * [3]
multiple_weights_result = train_features * weights
# [1] = [3]
predict = np.sum(multiple_weights_result)
# [1] = [1]
sigmoid_predict = sigmoid(predict)
# [1] = [1]
predict_difference = train_labels - sigmoid_predict
# [3] = [3] * [1]
grad = train_features * predict_difference
# [3] = [3]
weights += learning_rate * grad
print("Current weights is: {}".format(weights))
# TODO: Predict with validate dataset
predict_true_probability = sigmoid(np.sum(train_features * weights))
print("Current predict true probability is: {}".format(
predict_true_probability))
        likelihood = 1 - predict_true_probability
        print("Current likelihood is: {}\n".format(likelihood))
if __name__ == "__main__":
main()
|
Add logistic regression without batch
|
Add logistic regression without batch
|
Python
|
mit
|
tobegit3hub/ml_implementation,erwin00776/copy_ml_implements
|
Add logistic regression without batch
|
#!/usr/bin/env python
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def test_sigmoid():
x = 0
print("Input x: {}, the sigmoid value is: {}".format(x, sigmoid(x)))
def main():
# Prepare dataset
train_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
train_labels = np.array([1, 0], dtype=np.int)
test_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
test_labels = np.array([1, 0], dtype=np.int)
feature_size = 3
batch_size = 2
# Define hyperparameters
epoch_number = 10
learning_rate = 0.01
weights = np.ones(feature_size)
# Start training
for epoch_index in range(epoch_number):
print("Start the epoch: {}".format(epoch_index))
''' Implement with batch size
# [2, 3] = [3] * [2, 3]
multiple_weights_result = weights * train_features
# [2] = [2, 3]
predict = np.sum(multiple_weights_result, 1)
# [2] = [2]
sigmoid_predict = sigmoid(predict)
# [2] = [2]
predict_difference = train_labels - sigmoid_predict
# TODO: [2, 3, 1] = [2, 3] * [2]
batch_grad = train_features * predict_difference
# TODO: fix that
grad = batch_grad
# [3, 1] = [3, 1]
weights += learning_rate * grad
'''
# Train with single example
train_features = np.array([1, 0, 25], dtype=np.float)
train_labels = np.array([0], dtype=np.int)
# [3] = [3] * [3]
multiple_weights_result = train_features * weights
# [1] = [3]
predict = np.sum(multiple_weights_result)
# [1] = [1]
sigmoid_predict = sigmoid(predict)
# [1] = [1]
predict_difference = train_labels - sigmoid_predict
# [3] = [3] * [1]
grad = train_features * predict_difference
# [3] = [3]
weights += learning_rate * grad
print("Current weights is: {}".format(weights))
# TODO: Predict with validate dataset
predict_true_probability = sigmoid(np.sum(train_features * weights))
print("Current predict true probability is: {}".format(
predict_true_probability))
        likelihood = 1 - predict_true_probability
        print("Current likelihood is: {}\n".format(likelihood))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add logistic regression without batch<commit_after>
|
#!/usr/bin/env python
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def test_sigmoid():
x = 0
print("Input x: {}, the sigmoid value is: {}".format(x, sigmoid(x)))
def main():
# Prepare dataset
train_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
train_labels = np.array([1, 0], dtype=np.int)
test_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
test_labels = np.array([1, 0], dtype=np.int)
feature_size = 3
batch_size = 2
# Define hyperparameters
epoch_number = 10
learning_rate = 0.01
weights = np.ones(feature_size)
# Start training
for epoch_index in range(epoch_number):
print("Start the epoch: {}".format(epoch_index))
''' Implement with batch size
# [2, 3] = [3] * [2, 3]
multiple_weights_result = weights * train_features
# [2] = [2, 3]
predict = np.sum(multiple_weights_result, 1)
# [2] = [2]
sigmoid_predict = sigmoid(predict)
# [2] = [2]
predict_difference = train_labels - sigmoid_predict
# TODO: [2, 3, 1] = [2, 3] * [2]
batch_grad = train_features * predict_difference
# TODO: fix that
grad = batch_grad
# [3, 1] = [3, 1]
weights += learning_rate * grad
'''
# Train with single example
train_features = np.array([1, 0, 25], dtype=np.float)
train_labels = np.array([0], dtype=np.int)
# [3] = [3] * [3]
multiple_weights_result = train_features * weights
# [1] = [3]
predict = np.sum(multiple_weights_result)
# [1] = [1]
sigmoid_predict = sigmoid(predict)
# [1] = [1]
predict_difference = train_labels - sigmoid_predict
# [3] = [3] * [1]
grad = train_features * predict_difference
# [3] = [3]
weights += learning_rate * grad
print("Current weights is: {}".format(weights))
# TODO: Predict with validate dataset
predict_true_probability = sigmoid(np.sum(train_features * weights))
print("Current predict true probability is: {}".format(
predict_true_probability))
        likelihood = 1 - predict_true_probability
        print("Current likelihood is: {}\n".format(likelihood))
if __name__ == "__main__":
main()
|
Add logistic regression without batch#!/usr/bin/env python
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def test_sigmoid():
x = 0
print("Input x: {}, the sigmoid value is: {}".format(x, sigmoid(x)))
def main():
# Prepare dataset
train_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
train_labels = np.array([1, 0], dtype=np.int)
test_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
test_labels = np.array([1, 0], dtype=np.int)
feature_size = 3
batch_size = 2
# Define hyperparameters
epoch_number = 10
learning_rate = 0.01
weights = np.ones(feature_size)
# Start training
for epoch_index in range(epoch_number):
print("Start the epoch: {}".format(epoch_index))
''' Implement with batch size
# [2, 3] = [3] * [2, 3]
multiple_weights_result = weights * train_features
# [2] = [2, 3]
predict = np.sum(multiple_weights_result, 1)
# [2] = [2]
sigmoid_predict = sigmoid(predict)
# [2] = [2]
predict_difference = train_labels - sigmoid_predict
# TODO: [2, 3, 1] = [2, 3] * [2]
batch_grad = train_features * predict_difference
# TODO: fix that
grad = batch_grad
# [3, 1] = [3, 1]
weights += learning_rate * grad
'''
# Train with single example
train_features = np.array([1, 0, 25], dtype=np.float)
train_labels = np.array([0], dtype=np.int)
# [3] = [3] * [3]
multiple_weights_result = train_features * weights
# [1] = [3]
predict = np.sum(multiple_weights_result)
# [1] = [1]
sigmoid_predict = sigmoid(predict)
# [1] = [1]
predict_difference = train_labels - sigmoid_predict
# [3] = [3] * [1]
grad = train_features * predict_difference
# [3] = [3]
weights += learning_rate * grad
print("Current weights is: {}".format(weights))
# TODO: Predict with validate dataset
predict_true_probability = sigmoid(np.sum(train_features * weights))
print("Current predict true probability is: {}".format(
predict_true_probability))
        likelihood = 1 - predict_true_probability
        print("Current likelihood is: {}\n".format(likelihood))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add logistic regression without batch<commit_after>#!/usr/bin/env python
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def test_sigmoid():
x = 0
print("Input x: {}, the sigmoid value is: {}".format(x, sigmoid(x)))
def main():
# Prepare dataset
train_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
train_labels = np.array([1, 0], dtype=np.int)
test_features = np.array([[1, 0, 26], [0, 1, 25]], dtype=np.float)
test_labels = np.array([1, 0], dtype=np.int)
feature_size = 3
batch_size = 2
# Define hyperparameters
epoch_number = 10
learning_rate = 0.01
weights = np.ones(feature_size)
# Start training
for epoch_index in range(epoch_number):
print("Start the epoch: {}".format(epoch_index))
''' Implement with batch size
# [2, 3] = [3] * [2, 3]
multiple_weights_result = weights * train_features
# [2] = [2, 3]
predict = np.sum(multiple_weights_result, 1)
# [2] = [2]
sigmoid_predict = sigmoid(predict)
# [2] = [2]
predict_difference = train_labels - sigmoid_predict
# TODO: [2, 3, 1] = [2, 3] * [2]
batch_grad = train_features * predict_difference
# TODO: fix that
grad = batch_grad
# [3, 1] = [3, 1]
weights += learning_rate * grad
'''
# Train with single example
train_features = np.array([1, 0, 25], dtype=np.float)
train_labels = np.array([0], dtype=np.int)
# [3] = [3] * [3]
multiple_weights_result = train_features * weights
# [1] = [3]
predict = np.sum(multiple_weights_result)
# [1] = [1]
sigmoid_predict = sigmoid(predict)
# [1] = [1]
predict_difference = train_labels - sigmoid_predict
# [3] = [3] * [1]
grad = train_features * predict_difference
# [3] = [3]
weights += learning_rate * grad
print("Current weights is: {}".format(weights))
# TODO: Predict with validate dataset
predict_true_probability = sigmoid(np.sum(train_features * weights))
print("Current predict true probability is: {}".format(
predict_true_probability))
        likelihood = 1 - predict_true_probability
        print("Current likelihood is: {}\n".format(likelihood))
if __name__ == "__main__":
main()
|
|
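The logistic-regression commit above leaves its batch branch commented out because the gradient shapes never line up. Purely as a hedged sketch on synthetic data (not the repository's), the batched update can be written with a transposed matrix product:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

features = np.array([[1, 0, 26], [0, 1, 25]], dtype=float)  # 2 examples, 3 features
labels = np.array([1, 0], dtype=float)
weights = np.ones(3)
learning_rate = 0.01

for _ in range(10):
    predictions = sigmoid(features @ weights)  # shape (2,)
    errors = labels - predictions              # shape (2,)
    grad = features.T @ errors                 # shape (3,), gradients summed over the batch
    weights += learning_rate * grad

print(weights)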
8d3c6d208027d4e19592a3e849f4ec2700722613
|
tests/test_add_language/test_main.py
|
tests/test_add_language/test_main.py
|
# tests.test_add_language.test_save_bible_data
# coding=utf-8
from __future__ import unicode_literals
import nose.tools as nose
from mock import patch
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@patch('utilities.add_language.update_language_list')
@patch('utilities.add_language.save_bible_data')
@patch('utilities.add_language.get_bible_data', return_value={})
@patch('utilities.add_language.get_language_name', return_value='Swedish')
@redirect_stdout
def test_add_language(out, get_language_name, get_bible_data, save_bible_data,
update_language_list):
"""should perform all necessary steps to add a language"""
language_id = 'swe'
default_version = 33
max_version_id = 500
add_lang.add_language(
language_id, default_version, max_version_id)
get_language_name.assert_called_once_with(language_id)
get_bible_data.assert_called_once_with(
language_id, default_version, max_version_id)
update_language_list.assert_called_once_with(
language_id, get_language_name.return_value)
@patch('sys.argv', [add_lang.__file__, 'swe',
'--default-version', '33', '--max-version-id', '500'])
def test_parse_cli_args():
"""should parse command line arguments"""
cli_args = add_lang.parse_cli_args()
nose.assert_equal(cli_args.language_id, 'swe')
nose.assert_equal(cli_args.default_version, 33)
nose.assert_equal(cli_args.max_version_id, 500)
|
Add initial tests for main add_language functions
|
Add initial tests for main add_language functions
|
Python
|
mit
|
caleb531/youversion-suggest,caleb531/youversion-suggest
|
Add initial tests for main add_language functions
|
# tests.test_add_language.test_save_bible_data
# coding=utf-8
from __future__ import unicode_literals
import nose.tools as nose
from mock import patch
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@patch('utilities.add_language.update_language_list')
@patch('utilities.add_language.save_bible_data')
@patch('utilities.add_language.get_bible_data', return_value={})
@patch('utilities.add_language.get_language_name', return_value='Swedish')
@redirect_stdout
def test_add_language(out, get_language_name, get_bible_data, save_bible_data,
update_language_list):
"""should perform all necessary steps to add a language"""
language_id = 'swe'
default_version = 33
max_version_id = 500
add_lang.add_language(
language_id, default_version, max_version_id)
get_language_name.assert_called_once_with(language_id)
get_bible_data.assert_called_once_with(
language_id, default_version, max_version_id)
update_language_list.assert_called_once_with(
language_id, get_language_name.return_value)
@patch('sys.argv', [add_lang.__file__, 'swe',
'--default-version', '33', '--max-version-id', '500'])
def test_parse_cli_args():
"""should parse command line arguments"""
cli_args = add_lang.parse_cli_args()
nose.assert_equal(cli_args.language_id, 'swe')
nose.assert_equal(cli_args.default_version, 33)
nose.assert_equal(cli_args.max_version_id, 500)
|
<commit_before><commit_msg>Add initial tests for main add_language functions<commit_after>
|
# tests.test_add_language.test_save_bible_data
# coding=utf-8
from __future__ import unicode_literals
import nose.tools as nose
from mock import patch
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@patch('utilities.add_language.update_language_list')
@patch('utilities.add_language.save_bible_data')
@patch('utilities.add_language.get_bible_data', return_value={})
@patch('utilities.add_language.get_language_name', return_value='Swedish')
@redirect_stdout
def test_add_language(out, get_language_name, get_bible_data, save_bible_data,
update_language_list):
"""should perform all necessary steps to add a language"""
language_id = 'swe'
default_version = 33
max_version_id = 500
add_lang.add_language(
language_id, default_version, max_version_id)
get_language_name.assert_called_once_with(language_id)
get_bible_data.assert_called_once_with(
language_id, default_version, max_version_id)
update_language_list.assert_called_once_with(
language_id, get_language_name.return_value)
@patch('sys.argv', [add_lang.__file__, 'swe',
'--default-version', '33', '--max-version-id', '500'])
def test_parse_cli_args():
"""should parse command line arguments"""
cli_args = add_lang.parse_cli_args()
nose.assert_equal(cli_args.language_id, 'swe')
nose.assert_equal(cli_args.default_version, 33)
nose.assert_equal(cli_args.max_version_id, 500)
|
Add initial tests for main add_language functions# tests.test_add_language.test_save_bible_data
# coding=utf-8
from __future__ import unicode_literals
import nose.tools as nose
from mock import patch
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@patch('utilities.add_language.update_language_list')
@patch('utilities.add_language.save_bible_data')
@patch('utilities.add_language.get_bible_data', return_value={})
@patch('utilities.add_language.get_language_name', return_value='Swedish')
@redirect_stdout
def test_add_language(out, get_language_name, get_bible_data, save_bible_data,
update_language_list):
"""should perform all necessary steps to add a language"""
language_id = 'swe'
default_version = 33
max_version_id = 500
add_lang.add_language(
language_id, default_version, max_version_id)
get_language_name.assert_called_once_with(language_id)
get_bible_data.assert_called_once_with(
language_id, default_version, max_version_id)
update_language_list.assert_called_once_with(
language_id, get_language_name.return_value)
@patch('sys.argv', [add_lang.__file__, 'swe',
'--default-version', '33', '--max-version-id', '500'])
def test_parse_cli_args():
"""should parse command line arguments"""
cli_args = add_lang.parse_cli_args()
nose.assert_equal(cli_args.language_id, 'swe')
nose.assert_equal(cli_args.default_version, 33)
nose.assert_equal(cli_args.max_version_id, 500)
|
<commit_before><commit_msg>Add initial tests for main add_language functions<commit_after># tests.test_add_language.test_save_bible_data
# coding=utf-8
from __future__ import unicode_literals
import nose.tools as nose
from mock import patch
import utilities.add_language as add_lang
from tests.test_add_language import set_up, tear_down
from tests.test_add_language.decorators import redirect_stdout
@nose.with_setup(set_up, tear_down)
@patch('utilities.add_language.update_language_list')
@patch('utilities.add_language.save_bible_data')
@patch('utilities.add_language.get_bible_data', return_value={})
@patch('utilities.add_language.get_language_name', return_value='Swedish')
@redirect_stdout
def test_add_language(out, get_language_name, get_bible_data, save_bible_data,
update_language_list):
"""should perform all necessary steps to add a language"""
language_id = 'swe'
default_version = 33
max_version_id = 500
add_lang.add_language(
language_id, default_version, max_version_id)
get_language_name.assert_called_once_with(language_id)
get_bible_data.assert_called_once_with(
language_id, default_version, max_version_id)
update_language_list.assert_called_once_with(
language_id, get_language_name.return_value)
@patch('sys.argv', [add_lang.__file__, 'swe',
'--default-version', '33', '--max-version-id', '500'])
def test_parse_cli_args():
"""should parse command line arguments"""
cli_args = add_lang.parse_cli_args()
nose.assert_equal(cli_args.language_id, 'swe')
nose.assert_equal(cli_args.default_version, 33)
nose.assert_equal(cli_args.max_version_id, 500)
|
|
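The stacked @patch decorators in the test above hand mocks to the test function bottom-up: the decorator nearest the def supplies the first argument. A tiny self-contained sketch of that ordering, using the standard-library unittest.mock rather than the standalone mock package the repository imports:

import os
from unittest import mock

@mock.patch('os.remove')   # outermost decorator -> last mock argument
@mock.patch('os.rename')   # innermost decorator -> first mock argument
def demo(rename_mock, remove_mock):
    os.rename('a', 'b')    # hits the mock, not the filesystem
    os.remove('b')
    rename_mock.assert_called_once_with('a', 'b')
    remove_mock.assert_called_once_with('b')

demo()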
7418fb63eda14f277bcfb66cea27838736d0f43a
|
tools/compare_branches.py
|
tools/compare_branches.py
|
#!/usr/bin/env python
import argparse
import subprocess
from collections import OrderedDict
def main():
args = get_args()
local_commits = get_local_commits()
other_commits = get_other_commits(args.branch, args.path)
local_subjects = set(local_commits.values())
for commitish, subject in other_commits.items():
if subject not in local_subjects:
print('%s %s' % (commitish, subject))
def get_local_commits():
cmd = [
'git',
'log',
'--format=%h %s',
'--no-merges',
'--max-count=2000',
]
return parse_commits(subprocess.check_output(cmd))
def get_other_commits(branch, path):
cmd = [
'git',
'log',
'..%s' % branch,
'--no-merges',
'--format=%h %s',
'--max-count=2000',
]
if path:
cmd.append('--')
cmd.append(path)
return parse_commits(subprocess.check_output(cmd))
def parse_commits(git_log_output):
commits = OrderedDict()
for line in git_log_output.split('\n'):
line = line.strip()
if not line:
continue
commitish, subject = line.split(' ', 1)
commits[commitish] = subject
return commits
def get_args():
parser = argparse.ArgumentParser(description='Find commits on a given branch missing '
'from the current branch')
parser.add_argument('branch')
parser.add_argument('path', nargs='?', help='Restrict search by path')
return parser.parse_args()
if __name__ == '__main__':
main()
|
Add helper to compare branches
|
Add helper to compare branches
|
Python
|
mit
|
thusoy/salt-states,thusoy/salt-states,thusoy/salt-states,thusoy/salt-states
|
Add helper to compare branches
|
#!/usr/bin/env python
import argparse
import subprocess
from collections import OrderedDict
def main():
args = get_args()
local_commits = get_local_commits()
other_commits = get_other_commits(args.branch, args.path)
local_subjects = set(local_commits.values())
for commitish, subject in other_commits.items():
if subject not in local_subjects:
print('%s %s' % (commitish, subject))
def get_local_commits():
cmd = [
'git',
'log',
'--format=%h %s',
'--no-merges',
'--max-count=2000',
]
return parse_commits(subprocess.check_output(cmd))
def get_other_commits(branch, path):
cmd = [
'git',
'log',
'..%s' % branch,
'--no-merges',
'--format=%h %s',
'--max-count=2000',
]
if path:
cmd.append('--')
cmd.append(path)
return parse_commits(subprocess.check_output(cmd))
def parse_commits(git_log_output):
commits = OrderedDict()
for line in git_log_output.split('\n'):
line = line.strip()
if not line:
continue
commitish, subject = line.split(' ', 1)
commits[commitish] = subject
return commits
def get_args():
parser = argparse.ArgumentParser(description='Find commits on a given branch missing '
'from the current branch')
parser.add_argument('branch')
parser.add_argument('path', nargs='?', help='Restrict search by path')
return parser.parse_args()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add helper to compare branches<commit_after>
|
#!/usr/bin/env python
import argparse
import subprocess
from collections import OrderedDict
def main():
args = get_args()
local_commits = get_local_commits()
other_commits = get_other_commits(args.branch, args.path)
local_subjects = set(local_commits.values())
for commitish, subject in other_commits.items():
if subject not in local_subjects:
print('%s %s' % (commitish, subject))
def get_local_commits():
cmd = [
'git',
'log',
'--format=%h %s',
'--no-merges',
'--max-count=2000',
]
return parse_commits(subprocess.check_output(cmd))
def get_other_commits(branch, path):
cmd = [
'git',
'log',
'..%s' % branch,
'--no-merges',
'--format=%h %s',
'--max-count=2000',
]
if path:
cmd.append('--')
cmd.append(path)
return parse_commits(subprocess.check_output(cmd))
def parse_commits(git_log_output):
commits = OrderedDict()
for line in git_log_output.split('\n'):
line = line.strip()
if not line:
continue
commitish, subject = line.split(' ', 1)
commits[commitish] = subject
return commits
def get_args():
parser = argparse.ArgumentParser(description='Find commits on a given branch missing '
'from the current branch')
parser.add_argument('branch')
parser.add_argument('path', nargs='?', help='Restrict search by path')
return parser.parse_args()
if __name__ == '__main__':
main()
|
Add helper to compare branches#!/usr/bin/env python
import argparse
import subprocess
from collections import OrderedDict
def main():
args = get_args()
local_commits = get_local_commits()
other_commits = get_other_commits(args.branch, args.path)
local_subjects = set(local_commits.values())
for commitish, subject in other_commits.items():
if subject not in local_subjects:
print('%s %s' % (commitish, subject))
def get_local_commits():
cmd = [
'git',
'log',
'--format=%h %s',
'--no-merges',
'--max-count=2000',
]
return parse_commits(subprocess.check_output(cmd))
def get_other_commits(branch, path):
cmd = [
'git',
'log',
'..%s' % branch,
'--no-merges',
'--format=%h %s',
'--max-count=2000',
]
if path:
cmd.append('--')
cmd.append(path)
return parse_commits(subprocess.check_output(cmd))
def parse_commits(git_log_output):
commits = OrderedDict()
for line in git_log_output.split('\n'):
line = line.strip()
if not line:
continue
commitish, subject = line.split(' ', 1)
commits[commitish] = subject
return commits
def get_args():
parser = argparse.ArgumentParser(description='Find commits on a given branch missing '
'from the current branch')
parser.add_argument('branch')
parser.add_argument('path', nargs='?', help='Restrict search by path')
return parser.parse_args()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add helper to compare branches<commit_after>#!/usr/bin/env python
import argparse
import subprocess
from collections import OrderedDict
def main():
args = get_args()
local_commits = get_local_commits()
other_commits = get_other_commits(args.branch, args.path)
local_subjects = set(local_commits.values())
for commitish, subject in other_commits.items():
if subject not in local_subjects:
print('%s %s' % (commitish, subject))
def get_local_commits():
cmd = [
'git',
'log',
'--format=%h %s',
'--no-merges',
'--max-count=2000',
]
return parse_commits(subprocess.check_output(cmd))
def get_other_commits(branch, path):
cmd = [
'git',
'log',
'..%s' % branch,
'--no-merges',
'--format=%h %s',
'--max-count=2000',
]
if path:
cmd.append('--')
cmd.append(path)
return parse_commits(subprocess.check_output(cmd))
def parse_commits(git_log_output):
commits = OrderedDict()
for line in git_log_output.split('\n'):
line = line.strip()
if not line:
continue
commitish, subject = line.split(' ', 1)
commits[commitish] = subject
return commits
def get_args():
parser = argparse.ArgumentParser(description='Find commits on a given branch missing '
'from the current branch')
parser.add_argument('branch')
parser.add_argument('path', nargs='?', help='Restrict search by path')
return parser.parse_args()
if __name__ == '__main__':
main()
|
|
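The compare script above keys its diff on commit subjects parsed out of git log. As a self-contained sketch of just that parsing step, with a fabricated two-line log instead of a real subprocess call:

from collections import OrderedDict

sample_log = "abc1234 Fix typo in docs\ndef5678 Add integration tests\n"

commits = OrderedDict()
for line in sample_log.split('\n'):
    line = line.strip()
    if not line:
        continue
    commitish, subject = line.split(' ', 1)
    commits[commitish] = subject

assert list(commits.items()) == [('abc1234', 'Fix typo in docs'),
                                 ('def5678', 'Add integration tests')]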
4a44069abbb88f29a1fa236511f2f31c10564cad
|
strong_typing/versioned_struct.py
|
strong_typing/versioned_struct.py
|
# -*- coding: utf-8 -*-
# Standard library
from distutils.version import StrictVersion
from copy import deepcopy
from struct import StructMeta, Struct
class VersionedStructMeta(StructMeta):
def __init__(cls, name, bases, attrs, **kwargs):
docu = """
%s (current version: %s):
Description: %s
:Parameters:
"""%(name, cls.__VERSION__, cls.__DESCRIPTION__)
for parameter, version in zip(cls.__ATTRIBUTES__,cls.__ATT_VERSIONS__):
docu += """
``%s`` %s
%s
Default: %s
"""%(parameter.id,\
"" if version is None else "(appeared in version %s)"%version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
docu+="""
:Deprecated parameters:
"""
for parameter, first_version, last_version in cls.__DEPRECATED_ATT_N_VERSIONS__:
docu += """
``%s``%s%s
%s
Default: %s
"""%(parameter.id,\
"" if first_version is None else " (appeared in version %s)"%version,\
" (deprecated since version %s)"%last_version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
cls.__doc__ = docu
@property
def version(cls):
return StrictVersion(cls.__VERSION__)
class VersionedStruct(Struct):
__metaclass__= VersionedStructMeta
__VERSION__="1.0"
__DESCRIPTION__ = ""
__ATTRIBUTES__ = []
__ATT_VERSIONS__ = []
__DEPRECATED_ATT_N_VERSIONS__ = []
@classmethod
def fromDict(cls, data=dict()):
if data.has_key("version"):
version = StrictVersion(data.pop("version"))
if version < cls.version:
return cls._fromOldDict(data, version)
return cls(**data)
@property
def version(self):
return StrictVersion(self.__VERSION__)
|
Add the untested, undocumented feature: VersionedStruct
|
Add the untested, undocumented feature: VersionedStruct
|
Python
|
bsd-3-clause
|
aldebaran/strong_typing
|
Add the untested, undocumented feature: VersionedStruct
|
# -*- coding: utf-8 -*-
# Standard library
from distutils.version import StrictVersion
from copy import deepcopy
from struct import StructMeta, Struct
class VersionedStructMeta(StructMeta):
def __init__(cls, name, bases, attrs, **kwargs):
docu = """
%s (current version: %s):
Description: %s
:Parameters:
"""%(name, cls.__VERSION__, cls.__DESCRIPTION__)
for parameter, version in zip(cls.__ATTRIBUTES__,cls.__ATT_VERSIONS__):
docu += """
``%s`` %s
%s
Default: %s
"""%(parameter.id,\
"" if version is None else "(appeared in version %s)"%version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
docu+="""
:Deprecated parameters:
"""
for parameter, first_version, last_version in cls.__DEPRECATED_ATT_N_VERSIONS__:
docu += """
``%s``%s%s
%s
Default: %s
"""%(parameter.id,\
"" if first_version is None else " (appeared in version %s)"%version,\
" (deprecated since version %s)"%last_version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
cls.__doc__ = docu
@property
def version(cls):
return StrictVersion(cls.__VERSION__)
class VersionedStruct(Struct):
__metaclass__= VersionedStructMeta
__VERSION__="1.0"
__DESCRIPTION__ = ""
__ATTRIBUTES__ = []
__ATT_VERSIONS__ = []
__DEPRECATED_ATT_N_VERSIONS__ = []
@classmethod
def fromDict(cls, data=dict()):
if data.has_key("version"):
version = StrictVersion(data.pop("version"))
if version < cls.version:
return cls._fromOldDict(data, version)
return cls(**data)
@property
def version(self):
return StrictVersion(self.__VERSION__)
|
<commit_before><commit_msg>Add the untested, undocumented feature: VersionedStruct<commit_after>
|
# -*- coding: utf-8 -*-
# Standard library
from distutils.version import StrictVersion
from copy import deepcopy
from struct import StructMeta, Struct
class VersionedStructMeta(StructMeta):
def __init__(cls, name, bases, attrs, **kwargs):
docu = """
%s (current version: %s):
Description: %s
:Parameters:
"""%(name, cls.__VERSION__, cls.__DESCRIPTION__)
for parameter, version in zip(cls.__ATTRIBUTES__,cls.__ATT_VERSIONS__):
docu += """
``%s`` %s
%s
Default: %s
"""%(parameter.id,\
"" if version is None else "(appeared in version %s)"%version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
docu+="""
:Deprecated parameters:
"""
for parameter, first_version, last_version in cls.__DEPRECATED_ATT_N_VERSIONS__:
docu += """
``%s``%s%s
%s
Default: %s
"""%(parameter.id,\
"" if first_version is None else " (appeared in version %s)"%version,\
" (deprecated since version %s)"%last_version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
cls.__doc__ = docu
@property
def version(cls):
return StrictVersion(cls.__VERSION__)
class VersionedStruct(Struct):
__metaclass__= VersionedStructMeta
__VERSION__="1.0"
__DESCRIPTION__ = ""
__ATTRIBUTES__ = []
__ATT_VERSIONS__ = []
__DEPRECATED_ATT_N_VERSIONS__ = []
@classmethod
def fromDict(cls, data=dict()):
if data.has_key("version"):
version = StrictVersion(data.pop("version"))
if version < cls.version:
return cls._fromOldDict(data, version)
return cls(**data)
@property
def version(self):
return StrictVersion(self.__VERSION__)
|
Add the untested, undocumented feature: VersionedStruct# -*- coding: utf-8 -*-
# Standard library
from distutils.version import StrictVersion
from copy import deepcopy
from struct import StructMeta, Struct
class VersionedStructMeta(StructMeta):
def __init__(cls, name, bases, attrs, **kwargs):
docu = """
%s (current version: %s):
Description: %s
:Parameters:
"""%(name, cls.__VERSION__, cls.__DESCRIPTION__)
for parameter, version in zip(cls.__ATTRIBUTES__,cls.__ATT_VERSIONS__):
docu += """
``%s`` %s
%s
Default: %s
"""%(parameter.id,\
"" if version is None else "(appeared in version %s)"%version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
docu+="""
:Deprecated parameters:
"""
for parameter, first_version, last_version in cls.__DEPRECATED_ATT_N_VERSIONS__:
docu += """
``%s``%s%s
%s
Default: %s
"""%(parameter.id,\
"" if first_version is None else " (appeared in version %s)"%version,\
" (deprecated since version %s)"%last_version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
cls.__doc__ = docu
@property
def version(cls):
return StrictVersion(cls.__VERSION__)
class VersionedStruct(Struct):
__metaclass__= VersionedStructMeta
__VERSION__="1.0"
__DESCRIPTION__ = ""
__ATTRIBUTES__ = []
__ATT_VERSIONS__ = []
__DEPRECATED_ATT_N_VERSIONS__ = []
@classmethod
def fromDict(cls, data=dict()):
if data.has_key("version"):
version = StrictVersion(data.pop("version"))
if version < cls.version:
return cls._fromOldDict(data, version)
return cls(**data)
@property
def version(self):
return StrictVersion(self.__VERSION__)
|
<commit_before><commit_msg>Add the untested, undocumented feature: VersionedStruct<commit_after># -*- coding: utf-8 -*-
# Standard library
from distutils.version import StrictVersion
from copy import deepcopy
from struct import StructMeta, Struct
class VersionedStructMeta(StructMeta):
def __init__(cls, name, bases, attrs, **kwargs):
docu = """
%s (current version: %s):
Description: %s
:Parameters:
"""%(name, cls.__VERSION__, cls.__DESCRIPTION__)
for parameter, version in zip(cls.__ATTRIBUTES__,cls.__ATT_VERSIONS__):
docu += """
``%s`` %s
%s
Default: %s
"""%(parameter.id,\
"" if version is None else "(appeared in version %s)"%version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
docu+="""
:Deprecated parameters:
"""
for parameter, first_version, last_version in cls.__DEPRECATED_ATT_N_VERSIONS__:
docu += """
``%s``%s%s
%s
Default: %s
"""%(parameter.id,\
"" if first_version is None else " (appeared in version %s)"%version,\
" (deprecated since version %s)"%last_version,\
parameter.description,\
str(parameter.default) if parameter.default != "" else "\"\"")
cls.__doc__ = docu
@property
def version(cls):
return StrictVersion(cls.__VERSION__)
class VersionedStruct(Struct):
__metaclass__= VersionedStructMeta
__VERSION__="1.0"
__DESCRIPTION__ = ""
__ATTRIBUTES__ = []
__ATT_VERSIONS__ = []
__DEPRECATED_ATT_N_VERSIONS__ = []
@classmethod
def fromDict(cls, data=dict()):
if data.has_key("version"):
version = StrictVersion(data.pop("version"))
if version < cls.version:
return cls._fromOldDict(data, version)
return cls(**data)
@property
def version(self):
return StrictVersion(self.__VERSION__)
|
|
deffe34fe1c9255697d37390a95b157b9b068923
|
dcore/linear_solvers.py
|
dcore/linear_solvers.py
|
class TimesteppingSolver(object):
"""
Base class for timestepping linear solvers for dcore.
This is a dummy base class where the input is just copied to the output.
:arg state: x_in :class:`.Function` object for the input
:arg state: x_out :class:`.Function` object for the output
"""
    def __init__(self, x_in, x_out):
self.x_in = x_in
self.x_out = x_out
    def solve(self):
"""
Function to execute the solver.
"""
#This is a base class so we just copy x_in to x_out
        self.x_out.assign(self.x_in)
|
Create a base class for timestepping solvers
|
Create a base class for timestepping solvers
|
Python
|
mit
|
firedrakeproject/dcore,firedrakeproject/gusto
|
Create a base class for timestepping solvers
|
class TimesteppingSolver(object):
"""
Base class for timestepping linear solvers for dcore.
This is a dummy base class where the input is just copied to the output.
:arg state: x_in :class:`.Function` object for the input
:arg state: x_out :class:`.Function` object for the output
"""
    def __init__(self, x_in, x_out):
self.x_in = x_in
self.x_out = x_out
    def solve(self):
"""
Function to execute the solver.
"""
#This is a base class so we just copy x_in to x_out
        self.x_out.assign(self.x_in)
|
<commit_before><commit_msg>Create a base class for timestepping solvers<commit_after>
|
class TimesteppingSolver(object):
"""
Base class for timestepping linear solvers for dcore.
This is a dummy base class where the input is just copied to the output.
:arg state: x_in :class:`.Function` object for the input
:arg state: x_out :class:`.Function` object for the output
"""
    def __init__(self, x_in, x_out):
self.x_in = x_in
self.x_out = x_out
    def solve(self):
"""
Function to execute the solver.
"""
#This is a base class so we just copy x_in to x_out
        self.x_out.assign(self.x_in)
|
Create a base class for timestepping solversclass TimesteppingSolver(object):
"""
Base class for timestepping linear solvers for dcore.
This is a dummy base class where the input is just copied to the output.
:arg state: x_in :class:`.Function` object for the input
:arg state: x_out :class:`.Function` object for the output
"""
    def __init__(self, x_in, x_out):
self.x_in = x_in
self.x_out = x_out
    def solve(self):
"""
Function to execute the solver.
"""
#This is a base class so we just copy x_in to x_out
        self.x_out.assign(self.x_in)
|
<commit_before><commit_msg>Create a base class for timestepping solvers<commit_after>class TimesteppingSolver(object):
"""
Base class for timestepping linear solvers for dcore.
This is a dummy base class where the input is just copied to the output.
:arg state: x_in :class:`.Function` object for the input
:arg state: x_out :class:`.Function` object for the output
"""
    def __init__(self, x_in, x_out):
self.x_in = x_in
self.x_out = x_out
    def solve(self):
"""
Function to execute the solver.
"""
#This is a base class so we just copy x_in to x_out
        self.x_out.assign(self.x_in)
|
|
3ac421e5adcd5780985a6598acb60b8f0447f048
|
src/scripts/build_tests.py
|
src/scripts/build_tests.py
|
#!/usr/bin/python3
"""
This configures and builds with many different sub-configurations
in an attempt to flush out missing feature macro checks, etc.
There is probably no reason for you to run this. Unless you want to.
(C) 2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import sys
import subprocess
def get_module_list(configure_py):
configure = subprocess.Popen([configure_py, '--list-modules'], stdout=subprocess.PIPE)
(stdout, _) = configure.communicate()
if configure.returncode != 0:
raise Exception("Running configure.py --list-modules failed")
modules = [s.decode('ascii') for s in stdout.split()]
return modules
def get_concurrency():
def_concurrency = 2
try:
import multiprocessing
return max(def_concurrency, multiprocessing.cpu_count())
except ImportError:
return def_concurrency
def run_test_build(configure_py, modules):
cmdline = [configure_py, '--minimized']
if modules:
cmdline.append('--enable-modules=' + ','.join(modules))
print("Testing", cmdline)
configure = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
configure.communicate()
if configure.returncode != 0:
raise Exception("Running %s failed" % (' '.join(cmdline)))
make = subprocess.Popen(['make', '-j', str(get_concurrency())],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = make.communicate()
if make.returncode != 0:
print("Build failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
tests = subprocess.Popen(['./botan-test'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = tests.communicate()
if tests.returncode != 0:
print("Tests failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
sys.stdout.flush()
def main(args):
# TODO take configure.py and botan-test paths via options
configure_py = './configure.py'
modules = get_module_list(configure_py)
for module in sorted(modules):
if module in ['bearssl']:
continue
extra = ['sha2_32', 'sha2_64', 'aes']
if module == 'auto_rng':
extra.append('dev_random')
run_test_build(configure_py, [module] + extra)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script that runs build+test across many configs, reports errors
|
Add script that runs build+test across many configs, reports errors
|
Python
|
bsd-2-clause
|
Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,Rohde-Schwarz-Cybersecurity/botan,randombit/botan,randombit/botan,webmaster128/botan,webmaster128/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,randombit/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,Rohde-Schwarz-Cybersecurity/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan
|
Add script that runs build+test across many configs, reports errors
|
#!/usr/bin/python3
"""
This configures and builds with many different sub-configurations
in an attempt to flush out missing feature macro checks, etc.
There is probably no reason for you to run this. Unless you want to.
(C) 2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import sys
import subprocess
def get_module_list(configure_py):
configure = subprocess.Popen([configure_py, '--list-modules'], stdout=subprocess.PIPE)
(stdout, _) = configure.communicate()
if configure.returncode != 0:
raise Exception("Running configure.py --list-modules failed")
modules = [s.decode('ascii') for s in stdout.split()]
return modules
def get_concurrency():
def_concurrency = 2
try:
import multiprocessing
return max(def_concurrency, multiprocessing.cpu_count())
except ImportError:
return def_concurrency
def run_test_build(configure_py, modules):
cmdline = [configure_py, '--minimized']
if modules:
cmdline.append('--enable-modules=' + ','.join(modules))
print("Testing", cmdline)
configure = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
configure.communicate()
if configure.returncode != 0:
raise Exception("Running %s failed" % (' '.join(cmdline)))
make = subprocess.Popen(['make', '-j', str(get_concurrency())],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = make.communicate()
if make.returncode != 0:
print("Build failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
tests = subprocess.Popen(['./botan-test'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = tests.communicate()
if tests.returncode != 0:
print("Tests failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
sys.stdout.flush()
def main(args):
# TODO take configure.py and botan-test paths via options
configure_py = './configure.py'
modules = get_module_list(configure_py)
for module in sorted(modules):
if module in ['bearssl']:
continue
extra = ['sha2_32', 'sha2_64', 'aes']
if module == 'auto_rng':
extra.append('dev_random')
run_test_build(configure_py, [module] + extra)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script that runs build+test across many configs, reports errors<commit_after>
|
#!/usr/bin/python3
"""
This configures and builds with many different sub-configurations
in an attempt to flush out missing feature macro checks, etc.
There is probably no reason for you to run this. Unless you want to.
(C) 2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import sys
import subprocess
def get_module_list(configure_py):
configure = subprocess.Popen([configure_py, '--list-modules'], stdout=subprocess.PIPE)
(stdout, _) = configure.communicate()
if configure.returncode != 0:
raise Exception("Running configure.py --list-modules failed")
modules = [s.decode('ascii') for s in stdout.split()]
return modules
def get_concurrency():
def_concurrency = 2
try:
import multiprocessing
return max(def_concurrency, multiprocessing.cpu_count())
except ImportError:
return def_concurrency
def run_test_build(configure_py, modules):
cmdline = [configure_py, '--minimized']
if modules:
cmdline.append('--enable-modules=' + ','.join(modules))
print("Testing", cmdline)
configure = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
configure.communicate()
if configure.returncode != 0:
raise Exception("Running %s failed" % (' '.join(cmdline)))
make = subprocess.Popen(['make', '-j', str(get_concurrency())],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = make.communicate()
if make.returncode != 0:
print("Build failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
tests = subprocess.Popen(['./botan-test'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = tests.communicate()
if tests.returncode != 0:
print("Tests failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
sys.stdout.flush()
def main(args):
# TODO take configure.py and botan-test paths via options
configure_py = './configure.py'
modules = get_module_list(configure_py)
for module in sorted(modules):
if module in ['bearssl']:
continue
extra = ['sha2_32', 'sha2_64', 'aes']
if module == 'auto_rng':
extra.append('dev_random')
run_test_build(configure_py, [module] + extra)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add script that runs build+test across many configs, reports errors#!/usr/bin/python3
"""
This configures and builds with many different sub-configurations
in an attempt to flush out missing feature macro checks, etc.
There is probably no reason for you to run this. Unless you want to.
(C) 2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import sys
import subprocess
def get_module_list(configure_py):
configure = subprocess.Popen([configure_py, '--list-modules'], stdout=subprocess.PIPE)
(stdout, _) = configure.communicate()
if configure.returncode != 0:
raise Exception("Running configure.py --list-modules failed")
modules = [s.decode('ascii') for s in stdout.split()]
return modules
def get_concurrency():
def_concurrency = 2
try:
import multiprocessing
return max(def_concurrency, multiprocessing.cpu_count())
except ImportError:
return def_concurrency
def run_test_build(configure_py, modules):
cmdline = [configure_py, '--minimized']
if modules:
cmdline.append('--enable-modules=' + ','.join(modules))
print("Testing", cmdline)
configure = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
configure.communicate()
if configure.returncode != 0:
raise Exception("Running %s failed" % (' '.join(cmdline)))
make = subprocess.Popen(['make', '-j', str(get_concurrency())],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = make.communicate()
if make.returncode != 0:
print("Build failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
tests = subprocess.Popen(['./botan-test'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = tests.communicate()
if tests.returncode != 0:
print("Tests failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
sys.stdout.flush()
def main(args):
# TODO take configure.py and botan-test paths via options
configure_py = './configure.py'
modules = get_module_list(configure_py)
for module in sorted(modules):
if module in ['bearssl']:
continue
extra = ['sha2_32', 'sha2_64', 'aes']
if module == 'auto_rng':
extra.append('dev_random')
run_test_build(configure_py, [module] + extra)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script that runs build+test across many configs, reports errors<commit_after>#!/usr/bin/python3
"""
This configures and builds with many different sub-configurations
in an attempt to flush out missing feature macro checks, etc.
There is probably no reason for you to run this. Unless you want to.
(C) 2017 Jack Lloyd
Botan is released under the Simplified BSD License (see license.txt)
"""
import sys
import subprocess
def get_module_list(configure_py):
configure = subprocess.Popen([configure_py, '--list-modules'], stdout=subprocess.PIPE)
(stdout, _) = configure.communicate()
if configure.returncode != 0:
raise Exception("Running configure.py --list-modules failed")
modules = [s.decode('ascii') for s in stdout.split()]
return modules
def get_concurrency():
def_concurrency = 2
try:
import multiprocessing
return max(def_concurrency, multiprocessing.cpu_count())
except ImportError:
return def_concurrency
def run_test_build(configure_py, modules):
cmdline = [configure_py, '--minimized']
if modules:
cmdline.append('--enable-modules=' + ','.join(modules))
print("Testing", cmdline)
configure = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
configure.communicate()
if configure.returncode != 0:
raise Exception("Running %s failed" % (' '.join(cmdline)))
make = subprocess.Popen(['make', '-j', str(get_concurrency())],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = make.communicate()
if make.returncode != 0:
print("Build failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
tests = subprocess.Popen(['./botan-test'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = tests.communicate()
if tests.returncode != 0:
print("Tests failed:")
print(stdout.decode('ascii'))
print(stderr.decode('ascii'))
sys.stdout.flush()
def main(args):
# TODO take configure.py and botan-test paths via options
configure_py = './configure.py'
modules = get_module_list(configure_py)
for module in sorted(modules):
if module in ['bearssl']:
continue
extra = ['sha2_32', 'sha2_64', 'aes']
if module == 'auto_rng':
extra.append('dev_random')
run_test_build(configure_py, [module] + extra)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
06d763cd3a9651a1af785216b8665d09f11c4e1a
|
src/postgres/migrations/0007_fix_user_id_proposal_id_in_review.py
|
src/postgres/migrations/0007_fix_user_id_proposal_id_in_review.py
|
from django.db import migrations
ALTER_REVIEW_PROPOSAL_ID = """
ALTER TABLE "reviews_review" alter COLUMN "proposal_id"
SET DATA TYPE bigint;
"""
ALTER_REVIEW_REVIEWER_ID = """
ALTER TABLE "reviews_review" alter COLUMN "reviewer_id"
SET DATA TYPE bigint;
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0006_fix_user_id_in_permission'),
]
operations = [
migrations.RunSQL(
ALTER_REVIEW_PROPOSAL_ID,
migrations.RunSQL.noop,
),
migrations.RunSQL(
ALTER_REVIEW_REVIEWER_ID,
migrations.RunSQL.noop,
),
]
|
Fix review and user pk in postgresql
|
Fix review and user pk in postgresql
|
Python
|
mit
|
pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016
|
Fix review and user pk in postgresql
|
from django.db import migrations
ALTER_REVIEW_PROPOSAL_ID = """
ALTER TABLE "reviews_review" alter COLUMN "proposal_id"
SET DATA TYPE bigint;
"""
ALTER_REVIEW_REVIEWER_ID = """
ALTER TABLE "reviews_review" alter COLUMN "reviewer_id"
SET DATA TYPE bigint;
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0006_fix_user_id_in_permission'),
]
operations = [
migrations.RunSQL(
ALTER_REVIEW_PROPOSAL_ID,
migrations.RunSQL.noop,
),
migrations.RunSQL(
ALTER_REVIEW_REVIEWER_ID,
migrations.RunSQL.noop,
),
]
|
<commit_before><commit_msg>Fix review and user pk in postgresql<commit_after>
|
from django.db import migrations
ALTER_REVIEW_PROPOSAL_ID = """
ALTER TABLE "reviews_review" alter COLUMN "proposal_id"
SET DATA TYPE bigint;
"""
ALTER_REVIEW_REVIEWER_ID = """
ALTER TABLE "reviews_review" alter COLUMN "reviewer_id"
SET DATA TYPE bigint;
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0006_fix_user_id_in_permission'),
]
operations = [
migrations.RunSQL(
ALTER_REVIEW_PROPOSAL_ID,
migrations.RunSQL.noop,
),
migrations.RunSQL(
ALTER_REVIEW_REVIEWER_ID,
migrations.RunSQL.noop,
),
]
|
Fix review and user pk in postgresqlfrom django.db import migrations
ALTER_REVIEW_PROPOSAL_ID = """
ALTER TABLE "reviews_review" alter COLUMN "proposal_id"
SET DATA TYPE bigint;
"""
ALTER_REVIEW_REVIEWER_ID = """
ALTER TABLE "reviews_review" alter COLUMN "reviewer_id"
SET DATA TYPE bigint;
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0006_fix_user_id_in_permission'),
]
operations = [
migrations.RunSQL(
ALTER_REVIEW_PROPOSAL_ID,
migrations.RunSQL.noop,
),
migrations.RunSQL(
ALTER_REVIEW_REVIEWER_ID,
migrations.RunSQL.noop,
),
]
|
<commit_before><commit_msg>Fix review and user pk in postgresql<commit_after>from django.db import migrations
ALTER_REVIEW_PROPOSAL_ID = """
ALTER TABLE "reviews_review" alter COLUMN "proposal_id"
SET DATA TYPE bigint;
"""
ALTER_REVIEW_REVIEWER_ID = """
ALTER TABLE "reviews_review" alter COLUMN "reviewer_id"
SET DATA TYPE bigint;
"""
class Migration(migrations.Migration):
dependencies = [
('postgres', '0006_fix_user_id_in_permission'),
]
operations = [
migrations.RunSQL(
ALTER_REVIEW_PROPOSAL_ID,
migrations.RunSQL.noop,
),
migrations.RunSQL(
ALTER_REVIEW_REVIEWER_ID,
migrations.RunSQL.noop,
),
]
|
|
634440b794598b8a3c935a1ee6e4fa722d1c1cea
|
direct/src/directtools/DirectGlobals.py
|
direct/src/directtools/DirectGlobals.py
|
from PandaModules import Vec3, Point3
UNPICKABLE = ['x-disc-visible', 'y-disc-visible', 'z-disc-visible',
'gridBack', 'unpickable']
# For linmath operations
X_AXIS = Vec3(1,0,0)
Y_AXIS = Vec3(0,1,0)
Z_AXIS = Vec3(0,0,1)
NEG_X_AXIS = Vec3(-1,0,0)
NEG_Y_AXIS = Vec3(0,-1,0)
NEG_Z_AXIS = Vec3(0,0,-1)
ZERO_VEC = ORIGIN = Vec3(0)
UNIT_VEC = Vec3(1)
ZERO_POINT = Point3(0)
DIRECT_FLASH_DURATION = 1.5
MANIPULATION_MOVE_DELAY = 0.65
Q_EPSILON = 1e-10
DIRECT_NO_MOD = 0
DIRECT_SHIFT_MOD = 1
DIRECT_CONTROL_MOD = 2
DIRECT_ALT_MOD = 4
|
Put all direct globals in one file
|
Put all direct globals in one file
|
Python
|
bsd-3-clause
|
jjkoletar/panda3d,matthiascy/panda3d,mgracer48/panda3d,brakhane/panda3d,grimfang/panda3d,grimfang/panda3d,matthiascy/panda3d,grimfang/panda3d,chandler14362/panda3d,Wilee999/panda3d,cc272309126/panda3d,tobspr/panda3d,chandler14362/panda3d,grimfang/panda3d,hj3938/panda3d,brakhane/panda3d,brakhane/panda3d,Wilee999/panda3d,matthiascy/panda3d,tobspr/panda3d,chandler14362/panda3d,ee08b397/panda3d,brakhane/panda3d,tobspr/panda3d,cc272309126/panda3d,jjkoletar/panda3d,cc272309126/panda3d,Wilee999/panda3d,jjkoletar/panda3d,grimfang/panda3d,grimfang/panda3d,chandler14362/panda3d,tobspr/panda3d,hj3938/panda3d,Wilee999/panda3d,chandler14362/panda3d,jjkoletar/panda3d,jjkoletar/panda3d,hj3938/panda3d,brakhane/panda3d,tobspr/panda3d,matthiascy/panda3d,tobspr/panda3d,grimfang/panda3d,matthiascy/panda3d,mgracer48/panda3d,grimfang/panda3d,mgracer48/panda3d,hj3938/panda3d,mgracer48/panda3d,brakhane/panda3d,matthiascy/panda3d,cc272309126/panda3d,matthiascy/panda3d,cc272309126/panda3d,jjkoletar/panda3d,Wilee999/panda3d,mgracer48/panda3d,chandler14362/panda3d,jjkoletar/panda3d,mgracer48/panda3d,chandler14362/panda3d,grimfang/panda3d,cc272309126/panda3d,Wilee999/panda3d,ee08b397/panda3d,cc272309126/panda3d,hj3938/panda3d,brakhane/panda3d,ee08b397/panda3d,chandler14362/panda3d,mgracer48/panda3d,jjkoletar/panda3d,mgracer48/panda3d,ee08b397/panda3d,ee08b397/panda3d,tobspr/panda3d,ee08b397/panda3d,cc272309126/panda3d,Wilee999/panda3d,tobspr/panda3d,brakhane/panda3d,jjkoletar/panda3d,chandler14362/panda3d,hj3938/panda3d,Wilee999/panda3d,hj3938/panda3d,brakhane/panda3d,cc272309126/panda3d,matthiascy/panda3d,tobspr/panda3d,Wilee999/panda3d,matthiascy/panda3d,tobspr/panda3d,hj3938/panda3d,ee08b397/panda3d,chandler14362/panda3d,grimfang/panda3d,ee08b397/panda3d,mgracer48/panda3d,hj3938/panda3d,ee08b397/panda3d
|
Put all direct globals in one file
|
from PandaModules import Vec3, Point3
UNPICKABLE = ['x-disc-visible', 'y-disc-visible', 'z-disc-visible',
'gridBack', 'unpickable']
# For linmath operations
X_AXIS = Vec3(1,0,0)
Y_AXIS = Vec3(0,1,0)
Z_AXIS = Vec3(0,0,1)
NEG_X_AXIS = Vec3(-1,0,0)
NEG_Y_AXIS = Vec3(0,-1,0)
NEG_Z_AXIS = Vec3(0,0,-1)
ZERO_VEC = ORIGIN = Vec3(0)
UNIT_VEC = Vec3(1)
ZERO_POINT = Point3(0)
DIRECT_FLASH_DURATION = 1.5
MANIPULATION_MOVE_DELAY = 0.65
Q_EPSILON = 1e-10
DIRECT_NO_MOD = 0
DIRECT_SHIFT_MOD = 1
DIRECT_CONTROL_MOD = 2
DIRECT_ALT_MOD = 4
|
<commit_before><commit_msg>Put all direct globals in one file<commit_after>
|
from PandaModules import Vec3, Point3
UNPICKABLE = ['x-disc-visible', 'y-disc-visible', 'z-disc-visible',
'gridBack', 'unpickable']
# For linmath operations
X_AXIS = Vec3(1,0,0)
Y_AXIS = Vec3(0,1,0)
Z_AXIS = Vec3(0,0,1)
NEG_X_AXIS = Vec3(-1,0,0)
NEG_Y_AXIS = Vec3(0,-1,0)
NEG_Z_AXIS = Vec3(0,0,-1)
ZERO_VEC = ORIGIN = Vec3(0)
UNIT_VEC = Vec3(1)
ZERO_POINT = Point3(0)
DIRECT_FLASH_DURATION = 1.5
MANIPULATION_MOVE_DELAY = 0.65
Q_EPSILON = 1e-10
DIRECT_NO_MOD = 0
DIRECT_SHIFT_MOD = 1
DIRECT_CONTROL_MOD = 2
DIRECT_ALT_MOD = 4
|
Put all direct globals in one filefrom PandaModules import Vec3, Point3
UNPICKABLE = ['x-disc-visible', 'y-disc-visible', 'z-disc-visible',
'gridBack', 'unpickable']
# For linmath operations
X_AXIS = Vec3(1,0,0)
Y_AXIS = Vec3(0,1,0)
Z_AXIS = Vec3(0,0,1)
NEG_X_AXIS = Vec3(-1,0,0)
NEG_Y_AXIS = Vec3(0,-1,0)
NEG_Z_AXIS = Vec3(0,0,-1)
ZERO_VEC = ORIGIN = Vec3(0)
UNIT_VEC = Vec3(1)
ZERO_POINT = Point3(0)
DIRECT_FLASH_DURATION = 1.5
MANIPULATION_MOVE_DELAY = 0.65
Q_EPSILON = 1e-10
DIRECT_NO_MOD = 0
DIRECT_SHIFT_MOD = 1
DIRECT_CONTROL_MOD = 2
DIRECT_ALT_MOD = 4
|
<commit_before><commit_msg>Put all direct globals in one file<commit_after>from PandaModules import Vec3, Point3
UNPICKABLE = ['x-disc-visible', 'y-disc-visible', 'z-disc-visible',
'gridBack', 'unpickable']
# For linmath operations
X_AXIS = Vec3(1,0,0)
Y_AXIS = Vec3(0,1,0)
Z_AXIS = Vec3(0,0,1)
NEG_X_AXIS = Vec3(-1,0,0)
NEG_Y_AXIS = Vec3(0,-1,0)
NEG_Z_AXIS = Vec3(0,0,-1)
ZERO_VEC = ORIGIN = Vec3(0)
UNIT_VEC = Vec3(1)
ZERO_POINT = Point3(0)
DIRECT_FLASH_DURATION = 1.5
MANIPULATION_MOVE_DELAY = 0.65
Q_EPSILON = 1e-10
DIRECT_NO_MOD = 0
DIRECT_SHIFT_MOD = 1
DIRECT_CONTROL_MOD = 2
DIRECT_ALT_MOD = 4
|
|
e0e94c2196b6df22763d84564913440f55ad4977
|
RFTOCSV.py
|
RFTOCSV.py
|
import os
import glob
import csv
from string import Template
import sys, traceback
import datetime
from collections import OrderedDict
try :
CSV_FILE_PATH = 'DailyRainTest.csv'
RF_DIR_PATH = './OUTPUT/RF/'
UPPER_CATCHMENT_WEIGHTS = {
'Attanagalla' : 1/7, # 1
'Daraniyagala' : 1/7, # 2
'Glencourse' : 1/7, # 3
'Hanwella' : 1/7, # 4
'Holombuwa' : 1/7, # 5
'Kitulgala' : 1/7, # 6
'Norwood' : 1/7 # 7
}
UPPER_CATCHMENTS = UPPER_CATCHMENT_WEIGHTS.keys()
# now = datetime.datetime.now()
now = datetime.datetime(2017, 3, 22)
date = now.strftime("%Y-%m-%d")
THEISSEN_VALUES = OrderedDict()
for catchment in UPPER_CATCHMENTS :
for filename in glob.glob(os.path.join(RF_DIR_PATH, catchment+date+'*.txt')):
print('Start Operating on ', filename)
csvCatchment = csv.reader(open(filename, 'r'), delimiter=',', skipinitialspace=True)
csvCatchment = list(csvCatchment)
for row in csvCatchment :
# print(row[0].replace('_', ' '), row[1].strip(' \t'))
d = datetime.datetime.strptime(row[0].replace('_', ' '), '%Y-%m-%d %H:%M:%S')
key = d.timestamp()
if key not in THEISSEN_VALUES :
THEISSEN_VALUES[key] = 0
THEISSEN_VALUES[key] += float(row[1].strip(' \t')) * UPPER_CATCHMENT_WEIGHTS[catchment]
print('Finished processing files. Start Writing Theissen polygon avg in to CSV')
# print(THEISSEN_VALUES)
csvWriter = csv.writer(open(CSV_FILE_PATH, 'w'), delimiter=',', quotechar='|')
for avg in THEISSEN_VALUES :
# print(avg, THEISSEN_VALUES[avg])
d = datetime.datetime.fromtimestamp(avg)
csvWriter.writerow([d.strftime('%Y-%m-%d %H:%M:%S'), "%.2f" % THEISSEN_VALUES[avg]])
except Exception as e :
traceback.print_exc()
finally:
print('Completed ', RF_DIR_PATH, ' to ', CSV_FILE_PATH)
|
Read WRF output and write to CSV
|
Read WRF output and write to CSV
- Read data values of Upper Catchment stations and take the avg
    using Theissen polygon method and write into CSV file
|
Python
|
apache-2.0
|
gihankarunarathne/udp,gihankarunarathne/udp
|
Read WRF output and write to CSV
- Read data values of Upper Catchment stations and take the avg
    using Theissen polygon method and write into CSV file
|
import os
import glob
import csv
from string import Template
import sys, traceback
import datetime
from collections import OrderedDict
try :
CSV_FILE_PATH = 'DailyRainTest.csv'
RF_DIR_PATH = './OUTPUT/RF/'
UPPER_CATCHMENT_WEIGHTS = {
'Attanagalla' : 1/7, # 1
'Daraniyagala' : 1/7, # 2
'Glencourse' : 1/7, # 3
'Hanwella' : 1/7, # 4
'Holombuwa' : 1/7, # 5
'Kitulgala' : 1/7, # 6
'Norwood' : 1/7 # 7
}
UPPER_CATCHMENTS = UPPER_CATCHMENT_WEIGHTS.keys()
# now = datetime.datetime.now()
now = datetime.datetime(2017, 3, 22)
date = now.strftime("%Y-%m-%d")
THEISSEN_VALUES = OrderedDict()
for catchment in UPPER_CATCHMENTS :
for filename in glob.glob(os.path.join(RF_DIR_PATH, catchment+date+'*.txt')):
print('Start Operating on ', filename)
csvCatchment = csv.reader(open(filename, 'r'), delimiter=',', skipinitialspace=True)
csvCatchment = list(csvCatchment)
for row in csvCatchment :
# print(row[0].replace('_', ' '), row[1].strip(' \t'))
d = datetime.datetime.strptime(row[0].replace('_', ' '), '%Y-%m-%d %H:%M:%S')
key = d.timestamp()
if key not in THEISSEN_VALUES :
THEISSEN_VALUES[key] = 0
THEISSEN_VALUES[key] += float(row[1].strip(' \t')) * UPPER_CATCHMENT_WEIGHTS[catchment]
print('Finished processing files. Start Writing Theissen polygon avg in to CSV')
# print(THEISSEN_VALUES)
csvWriter = csv.writer(open(CSV_FILE_PATH, 'w'), delimiter=',', quotechar='|')
for avg in THEISSEN_VALUES :
# print(avg, THEISSEN_VALUES[avg])
d = datetime.datetime.fromtimestamp(avg)
csvWriter.writerow([d.strftime('%Y-%m-%d %H:%M:%S'), "%.2f" % THEISSEN_VALUES[avg]])
except Exception as e :
traceback.print_exc()
finally:
print('Completed ', RF_DIR_PATH, ' to ', CSV_FILE_PATH)
|
<commit_before><commit_msg>Read WRF output and write to CSV
- Read data values of Upper Catchment stations and take the avg
    using Theissen polygon method and write into CSV file<commit_after>
|
import os
import glob
import csv
from string import Template
import sys, traceback
import datetime
from collections import OrderedDict
try :
CSV_FILE_PATH = 'DailyRainTest.csv'
RF_DIR_PATH = './OUTPUT/RF/'
UPPER_CATCHMENT_WEIGHTS = {
'Attanagalla' : 1/7, # 1
'Daraniyagala' : 1/7, # 2
'Glencourse' : 1/7, # 3
'Hanwella' : 1/7, # 4
'Holombuwa' : 1/7, # 5
'Kitulgala' : 1/7, # 6
'Norwood' : 1/7 # 7
}
UPPER_CATCHMENTS = UPPER_CATCHMENT_WEIGHTS.keys()
# now = datetime.datetime.now()
now = datetime.datetime(2017, 3, 22)
date = now.strftime("%Y-%m-%d")
THEISSEN_VALUES = OrderedDict()
for catchment in UPPER_CATCHMENTS :
for filename in glob.glob(os.path.join(RF_DIR_PATH, catchment+date+'*.txt')):
print('Start Operating on ', filename)
csvCatchment = csv.reader(open(filename, 'r'), delimiter=',', skipinitialspace=True)
csvCatchment = list(csvCatchment)
for row in csvCatchment :
# print(row[0].replace('_', ' '), row[1].strip(' \t'))
d = datetime.datetime.strptime(row[0].replace('_', ' '), '%Y-%m-%d %H:%M:%S')
key = d.timestamp()
if key not in THEISSEN_VALUES :
THEISSEN_VALUES[key] = 0
THEISSEN_VALUES[key] += float(row[1].strip(' \t')) * UPPER_CATCHMENT_WEIGHTS[catchment]
print('Finished processing files. Start Writing Theissen polygon avg in to CSV')
# print(THEISSEN_VALUES)
csvWriter = csv.writer(open(CSV_FILE_PATH, 'w'), delimiter=',', quotechar='|')
for avg in THEISSEN_VALUES :
# print(avg, THEISSEN_VALUES[avg])
d = datetime.datetime.fromtimestamp(avg)
csvWriter.writerow([d.strftime('%Y-%m-%d %H:%M:%S'), "%.2f" % THEISSEN_VALUES[avg]])
except Exception as e :
traceback.print_exc()
finally:
print('Completed ', RF_DIR_PATH, ' to ', CSV_FILE_PATH)
|
Read WRF output and write to CSV
- Read data values of Upper Catchment stations and take the avg
    using Theissen polygon method and write into CSV fileimport os
import glob
import csv
from string import Template
import sys, traceback
import datetime
from collections import OrderedDict
try :
CSV_FILE_PATH = 'DailyRainTest.csv'
RF_DIR_PATH = './OUTPUT/RF/'
UPPER_CATCHMENT_WEIGHTS = {
'Attanagalla' : 1/7, # 1
'Daraniyagala' : 1/7, # 2
'Glencourse' : 1/7, # 3
'Hanwella' : 1/7, # 4
'Holombuwa' : 1/7, # 5
'Kitulgala' : 1/7, # 6
'Norwood' : 1/7 # 7
}
UPPER_CATCHMENTS = UPPER_CATCHMENT_WEIGHTS.keys()
# now = datetime.datetime.now()
now = datetime.datetime(2017, 3, 22)
date = now.strftime("%Y-%m-%d")
THEISSEN_VALUES = OrderedDict()
for catchment in UPPER_CATCHMENTS :
for filename in glob.glob(os.path.join(RF_DIR_PATH, catchment+date+'*.txt')):
print('Start Operating on ', filename)
csvCatchment = csv.reader(open(filename, 'r'), delimiter=',', skipinitialspace=True)
csvCatchment = list(csvCatchment)
for row in csvCatchment :
# print(row[0].replace('_', ' '), row[1].strip(' \t'))
d = datetime.datetime.strptime(row[0].replace('_', ' '), '%Y-%m-%d %H:%M:%S')
key = d.timestamp()
if key not in THEISSEN_VALUES :
THEISSEN_VALUES[key] = 0
THEISSEN_VALUES[key] += float(row[1].strip(' \t')) * UPPER_CATCHMENT_WEIGHTS[catchment]
print('Finished processing files. Start Writing Theissen polygon avg in to CSV')
# print(THEISSEN_VALUES)
csvWriter = csv.writer(open(CSV_FILE_PATH, 'w'), delimiter=',', quotechar='|')
for avg in THEISSEN_VALUES :
# print(avg, THEISSEN_VALUES[avg])
d = datetime.datetime.fromtimestamp(avg)
csvWriter.writerow([d.strftime('%Y-%m-%d %H:%M:%S'), "%.2f" % THEISSEN_VALUES[avg]])
except Exception as e :
traceback.print_exc()
finally:
print('Completed ', RF_DIR_PATH, ' to ', CSV_FILE_PATH)
|
<commit_before><commit_msg>Read WRF output and write to CSV
- Read data values of Upper Catchment stations and take the avg
    using Theissen polygon method and write into CSV file<commit_after>import os
import glob
import csv
from string import Template
import sys, traceback
import datetime
from collections import OrderedDict
try :
CSV_FILE_PATH = 'DailyRainTest.csv'
RF_DIR_PATH = './OUTPUT/RF/'
UPPER_CATCHMENT_WEIGHTS = {
'Attanagalla' : 1/7, # 1
'Daraniyagala' : 1/7, # 2
'Glencourse' : 1/7, # 3
'Hanwella' : 1/7, # 4
'Holombuwa' : 1/7, # 5
'Kitulgala' : 1/7, # 6
'Norwood' : 1/7 # 7
}
UPPER_CATCHMENTS = UPPER_CATCHMENT_WEIGHTS.keys()
# now = datetime.datetime.now()
now = datetime.datetime(2017, 3, 22)
date = now.strftime("%Y-%m-%d")
THEISSEN_VALUES = OrderedDict()
for catchment in UPPER_CATCHMENTS :
for filename in glob.glob(os.path.join(RF_DIR_PATH, catchment+date+'*.txt')):
print('Start Operating on ', filename)
csvCatchment = csv.reader(open(filename, 'r'), delimiter=',', skipinitialspace=True)
csvCatchment = list(csvCatchment)
for row in csvCatchment :
# print(row[0].replace('_', ' '), row[1].strip(' \t'))
d = datetime.datetime.strptime(row[0].replace('_', ' '), '%Y-%m-%d %H:%M:%S')
key = d.timestamp()
if key not in THEISSEN_VALUES :
THEISSEN_VALUES[key] = 0
THEISSEN_VALUES[key] += float(row[1].strip(' \t')) * UPPER_CATCHMENT_WEIGHTS[catchment]
print('Finished processing files. Start Writing Theissen polygon avg in to CSV')
# print(THEISSEN_VALUES)
csvWriter = csv.writer(open(CSV_FILE_PATH, 'w'), delimiter=',', quotechar='|')
for avg in THEISSEN_VALUES :
# print(avg, THEISSEN_VALUES[avg])
d = datetime.datetime.fromtimestamp(avg)
csvWriter.writerow([d.strftime('%Y-%m-%d %H:%M:%S'), "%.2f" % THEISSEN_VALUES[avg]])
except Exception as e :
traceback.print_exc()
finally:
print('Completed ', RF_DIR_PATH, ' to ', CSV_FILE_PATH)
|
|
8eeaa69574ee723a4e3d435140814c5776e97055
|
trunk/metpy/bl/sim/mos.py
|
trunk/metpy/bl/sim/mos.py
|
#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*9.81*theta_star(u,v,w,T))
return L
|
#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.constants import g
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*g*theta_star(u,v,w,T))
return L
|
Change to import gravity constant from constants file.
|
Change to import gravity constant from constants file.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@23 150532fb-1d5b-0410-a8ab-efec50f980d4
|
Python
|
bsd-3-clause
|
Unidata/MetPy,dopplershift/MetPy,jrleeman/MetPy,ahaberlie/MetPy,ShawnMurd/MetPy,Unidata/MetPy,ahaberlie/MetPy,deeplycloudy/MetPy,jrleeman/MetPy,ahill818/MetPy,dopplershift/MetPy
|
#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*9.81*theta_star(u,v,w,T))
return L
Change to import gravity constant from constants file.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@23 150532fb-1d5b-0410-a8ab-efec50f980d4
|
#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.constants import g
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*g*theta_star(u,v,w,T))
return L
|
<commit_before>#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*9.81*theta_star(u,v,w,T))
return L
<commit_msg>Change to import gravity constant from constants file.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@23 150532fb-1d5b-0410-a8ab-efec50f980d4<commit_after>
|
#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.constants import g
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*g*theta_star(u,v,w,T))
return L
|
#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*9.81*theta_star(u,v,w,T))
return L
Change to import gravity constant from constants file.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@23 150532fb-1d5b-0410-a8ab-efec50f980d4#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.constants import g
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*g*theta_star(u,v,w,T))
return L
|
<commit_before>#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*9.81*theta_star(u,v,w,T))
return L
<commit_msg>Change to import gravity constant from constants file.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@23 150532fb-1d5b-0410-a8ab-efec50f980d4<commit_after>#!/usr/bin/python
import numpy as N
def u_star(u,v,w):
'''
Compute the friction velocity, u_star, from the timeseries of the velocity \
components u, v, and w (an nD array)
'''
from metpy.bl.turb.fluxes import rs as R
rs = R(u,v,w)
uw = rs[3]
vw = rs[4]
us = N.power(N.power(uw,2)+N.power(vw,2),0.25)
return us
def theta_star(u,v,w,T):
'''
Compute the friction temperature, theta_star, from the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.bl.turb.fluxes import turb_covar as TC
ts = -TC(w,T)/u_star(u,v,w)
return ts
def obu_length(u,v,w,T):
'''
Compute the Obukhov Length, L, using the timeseries of the velocity \
components u, v, and w, and temperature (an nD array)
'''
from metpy.constants import g
L = N.power(u_star(u,v,w),2)*N.average(T)/(0.4*g*theta_star(u,v,w,T))
return L
|
8d00057b474f169de468225afdc72aed9f5baf93
|
vctk/backend/__init__.py
|
vctk/backend/__init__.py
|
# coding: utf-8
from vctk import Analyzer, Synthesizer, SpeechParameters
import world
class WORLD(Analyzer, Synthesizer):
"""
WORLD-based speech analyzer & synthesizer
TODO:
support platinum
"""
def __init__(self,
period=5.0,
fs=44100,
f0_floor=40.0,
f0_ceil=700.0,
channels_in_octave=2,
speed=4
):
self.period = period
self.fs = fs
self.f0_floor = f0_floor
self.f0_ceil = f0_ceil
self.channels_in_octave = channels_in_octave
self.speed = speed
def analyze(self, x):
"""
TODO
"""
opt = world.pyDioOption(self.f0_floor, self.f0_ceil,
self.channels_in_octave,
self.period, self.speed)
f0, time_axis = world.dio(x, self.fs, self.period, opt)
f0 = world.stonemask(x, self.fs, self.period, time_axis, f0)
spectrum_envelope = world.cheaptrick(x, self.fs, self.period,
time_axis, f0)
aperiodicity = world.aperiodicityratio(x, self.fs, self.period,
time_axis, f0)
# TODO
self.time_len = len(x)
return SpeechParameters(f0, spectrum_envelope, aperiodicity)
def synthesis(self, params):
"""
TODO
"""
if not isinstance(params, SpeechParameters):
raise "Not supoprted"
y = world.synthesis_from_aperiodicity(self.fs, self.period,
params.f0,
params.spectrum_envelope,
params.aperiodicity, self.time_len)
return y
|
Add concrete class `WORLD` that implements `Analyzer` and `Synthesizer`
|
Add concrete class `WORLD` that implements `Analyzer` and `Synthesizer`
|
Python
|
mit
|
k2kobayashi/sprocket
|
Add concrete class `WORLD` that implements `Analyzer` and `Synthesizer`
|
# coding: utf-8
from vctk import Analyzer, Synthesizer, SpeechParameters
import world
class WORLD(Analyzer, Synthesizer):
"""
WORLD-based speech analyzer & synthesizer
TODO:
support platinum
"""
def __init__(self,
period=5.0,
fs=44100,
f0_floor=40.0,
f0_ceil=700.0,
channels_in_octave=2,
speed=4
):
self.period = period
self.fs = fs
self.f0_floor = f0_floor
self.f0_ceil = f0_ceil
self.channels_in_octave = channels_in_octave
self.speed = speed
def analyze(self, x):
"""
TODO
"""
opt = world.pyDioOption(self.f0_floor, self.f0_ceil,
self.channels_in_octave,
self.period, self.speed)
f0, time_axis = world.dio(x, self.fs, self.period, opt)
f0 = world.stonemask(x, self.fs, self.period, time_axis, f0)
spectrum_envelope = world.cheaptrick(x, self.fs, self.period,
time_axis, f0)
aperiodicity = world.aperiodicityratio(x, self.fs, self.period,
time_axis, f0)
# TODO
self.time_len = len(x)
return SpeechParameters(f0, spectrum_envelope, aperiodicity)
def synthesis(self, params):
"""
TODO
"""
if not isinstance(params, SpeechParameters):
raise "Not supoprted"
y = world.synthesis_from_aperiodicity(self.fs, self.period,
params.f0,
params.spectrum_envelope,
params.aperiodicity, self.time_len)
return y
|
<commit_before><commit_msg>Add concrete class `WORLD` that implements `Analyzer` and `Synthesizer`<commit_after>
|
# coding: utf-8
from vctk import Analyzer, Synthesizer, SpeechParameters
import world
class WORLD(Analyzer, Synthesizer):
"""
WORLD-based speech analyzer & synthesizer
TODO:
support platinum
"""
def __init__(self,
period=5.0,
fs=44100,
f0_floor=40.0,
f0_ceil=700.0,
channels_in_octave=2,
speed=4
):
self.period = period
self.fs = fs
self.f0_floor = f0_floor
self.f0_ceil = f0_ceil
self.channels_in_octave = channels_in_octave
self.speed = speed
def analyze(self, x):
"""
TODO
"""
opt = world.pyDioOption(self.f0_floor, self.f0_ceil,
self.channels_in_octave,
self.period, self.speed)
f0, time_axis = world.dio(x, self.fs, self.period, opt)
f0 = world.stonemask(x, self.fs, self.period, time_axis, f0)
spectrum_envelope = world.cheaptrick(x, self.fs, self.period,
time_axis, f0)
aperiodicity = world.aperiodicityratio(x, self.fs, self.period,
time_axis, f0)
# TODO
self.time_len = len(x)
return SpeechParameters(f0, spectrum_envelope, aperiodicity)
def synthesis(self, params):
"""
TODO
"""
if not isinstance(params, SpeechParameters):
raise "Not supoprted"
y = world.synthesis_from_aperiodicity(self.fs, self.period,
params.f0,
params.spectrum_envelope,
params.aperiodicity, self.time_len)
return y
|
Add concrete class `WORLD` that implements `Analyzer` and `Synthesizer`# coding: utf-8
from vctk import Analyzer, Synthesizer, SpeechParameters
import world
class WORLD(Analyzer, Synthesizer):
"""
WORLD-based speech analyzer & synthesizer
TODO:
support platinum
"""
def __init__(self,
period=5.0,
fs=44100,
f0_floor=40.0,
f0_ceil=700.0,
channels_in_octave=2,
speed=4
):
self.period = period
self.fs = fs
self.f0_floor = f0_floor
self.f0_ceil = f0_ceil
self.channels_in_octave = channels_in_octave
self.speed = speed
def analyze(self, x):
"""
TODO
"""
opt = world.pyDioOption(self.f0_floor, self.f0_ceil,
self.channels_in_octave,
self.period, self.speed)
f0, time_axis = world.dio(x, self.fs, self.period, opt)
f0 = world.stonemask(x, self.fs, self.period, time_axis, f0)
spectrum_envelope = world.cheaptrick(x, self.fs, self.period,
time_axis, f0)
aperiodicity = world.aperiodicityratio(x, self.fs, self.period,
time_axis, f0)
# TODO
self.time_len = len(x)
return SpeechParameters(f0, spectrum_envelope, aperiodicity)
def synthesis(self, params):
"""
TODO
"""
if not isinstance(params, SpeechParameters):
raise "Not supoprted"
y = world.synthesis_from_aperiodicity(self.fs, self.period,
params.f0,
params.spectrum_envelope,
params.aperiodicity, self.time_len)
return y
|
<commit_before><commit_msg>Add concrete class `WORLD` that implements `Analyzer` and `Synthesizer`<commit_after># coding: utf-8
from vctk import Analyzer, Synthesizer, SpeechParameters
import world
class WORLD(Analyzer, Synthesizer):
"""
WORLD-based speech analyzer & synthesizer
TODO:
support platinum
"""
def __init__(self,
period=5.0,
fs=44100,
f0_floor=40.0,
f0_ceil=700.0,
channels_in_octave=2,
speed=4
):
self.period = period
self.fs = fs
self.f0_floor = f0_floor
self.f0_ceil = f0_ceil
self.channels_in_octave = channels_in_octave
self.speed = speed
def analyze(self, x):
"""
TODO
"""
opt = world.pyDioOption(self.f0_floor, self.f0_ceil,
self.channels_in_octave,
self.period, self.speed)
f0, time_axis = world.dio(x, self.fs, self.period, opt)
f0 = world.stonemask(x, self.fs, self.period, time_axis, f0)
spectrum_envelope = world.cheaptrick(x, self.fs, self.period,
time_axis, f0)
aperiodicity = world.aperiodicityratio(x, self.fs, self.period,
time_axis, f0)
# TODO
self.time_len = len(x)
return SpeechParameters(f0, spectrum_envelope, aperiodicity)
def synthesis(self, params):
"""
TODO
"""
if not isinstance(params, SpeechParameters):
raise "Not supoprted"
y = world.synthesis_from_aperiodicity(self.fs, self.period,
params.f0,
params.spectrum_envelope,
params.aperiodicity, self.time_len)
return y
|
|
ad5d76d82acec8fe06eec8a8f3bfb9bcfd12a70f
|
alltheitems.py
|
alltheitems.py
|
#!/usr/bin/env python3
"""
Wurstmineberg: All The Items
"""
import bottle
application = bottle.Bottle()
document_root = '/var/www/alltheitems.wurstmineberg.de'
@application.route('/')
def show_index():
"""The index page."""
return bottle.static_file('index.html', root=document_root)
@application.route('/alltheitems.png')
def image_alltheitems():
"""The “Craft ALL the items!” image."""
return bottle.static_file('alltheitems.png', root=document_root)
if __name__ == '__main__':
bottle.run(app=application, host='0.0.0.0', port=8081)
|
Make the website a bottle app
|
Make the website a bottle app
|
Python
|
mit
|
wurstmineberg/alltheitems.wurstmineberg.de,wurstmineberg/alltheitems.wurstmineberg.de
|
Make the website a bottle app
|
#!/usr/bin/env python3
"""
Wurstmineberg: All The Items
"""
import bottle
application = bottle.Bottle()
document_root = '/var/www/alltheitems.wurstmineberg.de'
@application.route('/')
def show_index():
"""The index page."""
return bottle.static_file('index.html', root=document_root)
@application.route('/alltheitems.png')
def image_alltheitems():
"""The “Craft ALL the items!” image."""
return bottle.static_file('alltheitems.png', root=document_root)
if __name__ == '__main__':
bottle.run(app=application, host='0.0.0.0', port=8081)
|
<commit_before><commit_msg>Make the website a bottle app<commit_after>
|
#!/usr/bin/env python3
"""
Wurstmineberg: All The Items
"""
import bottle
application = bottle.Bottle()
document_root = '/var/www/alltheitems.wurstmineberg.de'
@application.route('/')
def show_index():
"""The index page."""
return bottle.static_file('index.html', root=document_root)
@application.route('/alltheitems.png')
def image_alltheitems():
"""The “Craft ALL the items!” image."""
return bottle.static_file('alltheitems.png', root=document_root)
if __name__ == '__main__':
bottle.run(app=application, host='0.0.0.0', port=8081)
|
Make the website a bottle app#!/usr/bin/env python3
"""
Wurstmineberg: All The Items
"""
import bottle
application = bottle.Bottle()
document_root = '/var/www/alltheitems.wurstmineberg.de'
@application.route('/')
def show_index():
"""The index page."""
return bottle.static_file('index.html', root=document_root)
@application.route('/alltheitems.png')
def image_alltheitems():
"""The “Craft ALL the items!” image."""
return bottle.static_file('alltheitems.png', root=document_root)
if __name__ == '__main__':
bottle.run(app=application, host='0.0.0.0', port=8081)
|
<commit_before><commit_msg>Make the website a bottle app<commit_after>#!/usr/bin/env python3
"""
Wurstmineberg: All The Items
"""
import bottle
application = bottle.Bottle()
document_root = '/var/www/alltheitems.wurstmineberg.de'
@application.route('/')
def show_index():
"""The index page."""
return bottle.static_file('index.html', root=document_root)
@application.route('/alltheitems.png')
def image_alltheitems():
"""The “Craft ALL the items!” image."""
return bottle.static_file('alltheitems.png', root=document_root)
if __name__ == '__main__':
bottle.run(app=application, host='0.0.0.0', port=8081)
|
|
3e5e5fe542ed30274604b580cb6b0ff816bcda2a
|
tests/example_peninsula.py
|
tests/example_peninsula.py
|
from parcels import NEMOGrid, Particle
from argparse import ArgumentParser
def pensinsula_example(filename, npart):
"""Example configuration of particle flow around an idealised Peninsula
:arg filename: Basename of the input grid file set
:arg npart: Number of particles to intialise"""
# Open grid file set
grid = NEMOGrid(filename)
# Initialise particles
for p in range(npart):
y = p * grid.lat_u.valid_max / npart + 0.45 / 1.852 / 60.
grid.add_particle(Particle(x=3 / 1.852 / 60., y=y))
print "Initial particle positions:"
for p in grid._particles:
print p
# Advect the particles for 24h
time = 86400.
dt = 36.
ntimesteps = int(time / dt)
for t in range(ntimesteps):
for p in grid._particles:
p.advect_rk4(grid, dt)
print "Final particle positions:"
for p in grid._particles:
print p
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
args = p.parse_args()
pensinsula_example('peninsula', args.particles)
|
Add example configuration for the peninsula test case
|
Peninsula: Add example configuration for the peninsula test case
This initialises a set of particles, advects them for 24h and plots their
final position to screen.
|
Python
|
mit
|
OceanPARCELS/parcels,OceanPARCELS/parcels
|
Peninsula: Add example configuration for the peninsula test case
This initialises a set of particles, advects them for 24h and plots their
final position to screen.
|
from parcels import NEMOGrid, Particle
from argparse import ArgumentParser
def pensinsula_example(filename, npart):
"""Example configuration of particle flow around an idealised Peninsula
:arg filename: Basename of the input grid file set
:arg npart: Number of particles to intialise"""
# Open grid file set
grid = NEMOGrid(filename)
# Initialise particles
for p in range(npart):
y = p * grid.lat_u.valid_max / npart + 0.45 / 1.852 / 60.
grid.add_particle(Particle(x=3 / 1.852 / 60., y=y))
print "Initial particle positions:"
for p in grid._particles:
print p
# Advect the particles for 24h
time = 86400.
dt = 36.
ntimesteps = int(time / dt)
for t in range(ntimesteps):
for p in grid._particles:
p.advect_rk4(grid, dt)
print "Final particle positions:"
for p in grid._particles:
print p
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
args = p.parse_args()
pensinsula_example('peninsula', args.particles)
|
<commit_before><commit_msg>Peninsula: Add example configuration for the peninsula test case
This initialises a set of particles, advects them for 24h and plots their
final position to screen.<commit_after>
|
from parcels import NEMOGrid, Particle
from argparse import ArgumentParser
def pensinsula_example(filename, npart):
"""Example configuration of particle flow around an idealised Peninsula
:arg filename: Basename of the input grid file set
:arg npart: Number of particles to intialise"""
# Open grid file set
grid = NEMOGrid(filename)
# Initialise particles
for p in range(npart):
y = p * grid.lat_u.valid_max / npart + 0.45 / 1.852 / 60.
grid.add_particle(Particle(x=3 / 1.852 / 60., y=y))
print "Initial particle positions:"
for p in grid._particles:
print p
# Advect the particles for 24h
time = 86400.
dt = 36.
ntimesteps = int(time / dt)
for t in range(ntimesteps):
for p in grid._particles:
p.advect_rk4(grid, dt)
print "Final particle positions:"
for p in grid._particles:
print p
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
args = p.parse_args()
pensinsula_example('peninsula', args.particles)
|
Peninsula: Add example configuration for the peninsula test case
This initialises a set of particles, advects them for 24h and plots their
final position to screen.from parcels import NEMOGrid, Particle
from argparse import ArgumentParser
def pensinsula_example(filename, npart):
"""Example configuration of particle flow around an idealised Peninsula
:arg filename: Basename of the input grid file set
:arg npart: Number of particles to intialise"""
# Open grid file set
grid = NEMOGrid(filename)
# Initialise particles
for p in range(npart):
y = p * grid.lat_u.valid_max / npart + 0.45 / 1.852 / 60.
grid.add_particle(Particle(x=3 / 1.852 / 60., y=y))
print "Initial particle positions:"
for p in grid._particles:
print p
# Advect the particles for 24h
time = 86400.
dt = 36.
ntimesteps = int(time / dt)
for t in range(ntimesteps):
for p in grid._particles:
p.advect_rk4(grid, dt)
print "Final particle positions:"
for p in grid._particles:
print p
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
args = p.parse_args()
pensinsula_example('peninsula', args.particles)
|
<commit_before><commit_msg>Peninsula: Add example configuration for the peninsula test case
This initialises a set of particles, advects them for 24h and plots their
final position to screen.<commit_after>from parcels import NEMOGrid, Particle
from argparse import ArgumentParser
def pensinsula_example(filename, npart):
"""Example configuration of particle flow around an idealised Peninsula
:arg filename: Basename of the input grid file set
:arg npart: Number of particles to intialise"""
# Open grid file set
grid = NEMOGrid(filename)
# Initialise particles
for p in range(npart):
y = p * grid.lat_u.valid_max / npart + 0.45 / 1.852 / 60.
grid.add_particle(Particle(x=3 / 1.852 / 60., y=y))
print "Initial particle positions:"
for p in grid._particles:
print p
# Advect the particles for 24h
time = 86400.
dt = 36.
ntimesteps = int(time / dt)
for t in range(ntimesteps):
for p in grid._particles:
p.advect_rk4(grid, dt)
print "Final particle positions:"
for p in grid._particles:
print p
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
args = p.parse_args()
pensinsula_example('peninsula', args.particles)
|
|
34c3f085add407f7fe2860ef9abc06bdd47482df
|
examples/rotatinglog.py
|
examples/rotatinglog.py
|
# Use with --log-config option:
# volttron --log-config rotatinglog.py
{
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'agent': {
'()': 'volttron.platform.agent.utils.AgentFormatter',
},
},
'handlers': {
'rotating': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'level': 'DEBUG',
'formatter': 'agent',
'filename': 'volttron.log',
'encoding': 'utf-8',
'when': 'midnight',
'backupCount': 7,
},
},
'root': {
'handlers': ['rotating'],
'level': 'DEBUG',
},
}
|
Implement log rotation in config file.
|
Implement log rotation in config file.
|
Python
|
bsd-2-clause
|
schandrika/volttron,schandrika/volttron,schandrika/volttron,schandrika/volttron
|
Implement log rotation in config file.
|
# Use with --log-config option:
# volttron --log-config rotatinglog.py
{
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'agent': {
'()': 'volttron.platform.agent.utils.AgentFormatter',
},
},
'handlers': {
'rotating': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'level': 'DEBUG',
'formatter': 'agent',
'filename': 'volttron.log',
'encoding': 'utf-8',
'when': 'midnight',
'backupCount': 7,
},
},
'root': {
'handlers': ['rotating'],
'level': 'DEBUG',
},
}
|
<commit_before><commit_msg>Implement log rotation in config file.<commit_after>
|
# Use with --log-config option:
# volttron --log-config rotatinglog.py
{
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'agent': {
'()': 'volttron.platform.agent.utils.AgentFormatter',
},
},
'handlers': {
'rotating': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'level': 'DEBUG',
'formatter': 'agent',
'filename': 'volttron.log',
'encoding': 'utf-8',
'when': 'midnight',
'backupCount': 7,
},
},
'root': {
'handlers': ['rotating'],
'level': 'DEBUG',
},
}
|
Implement log rotation in config file.# Use with --log-config option:
# volttron --log-config rotatinglog.py
{
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'agent': {
'()': 'volttron.platform.agent.utils.AgentFormatter',
},
},
'handlers': {
'rotating': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'level': 'DEBUG',
'formatter': 'agent',
'filename': 'volttron.log',
'encoding': 'utf-8',
'when': 'midnight',
'backupCount': 7,
},
},
'root': {
'handlers': ['rotating'],
'level': 'DEBUG',
},
}
|
<commit_before><commit_msg>Implement log rotation in config file.<commit_after># Use with --log-config option:
# volttron --log-config rotatinglog.py
{
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'agent': {
'()': 'volttron.platform.agent.utils.AgentFormatter',
},
},
'handlers': {
'rotating': {
'class': 'logging.handlers.TimedRotatingFileHandler',
'level': 'DEBUG',
'formatter': 'agent',
'filename': 'volttron.log',
'encoding': 'utf-8',
'when': 'midnight',
'backupCount': 7,
},
},
'root': {
'handlers': ['rotating'],
'level': 'DEBUG',
},
}
|
|
ef337b8fa5234f8a89178d24449e0146fa92e19c
|
farmers_api/farmers/tests.py
|
farmers_api/farmers/tests.py
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from .models import Farmer
class FarmerModelTest(TestCase):
def test_string_representation(self):
farmer = Farmer(first_name='John', surname='Smith', town='Harrogate')
self.assertEqual(str(farmer), '%s %s' %
(farmer.first_name, farmer.surname))
def test_verbose_name(self):
self.assertEqual(Farmer._meta.verbose_name, 'farmer')
def test_verbose_plural_name(self):
self.assertEqual(Farmer._meta.verbose_name_plural, 'farmers')
def test_getting_full_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_full_name(), 'Tom Doggo Pupper')
def test_getting_short_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_short_name(), 'T. Pupper')
def test_fail_if_surname_is_not_supplied(self):
farmer = Farmer(first_name='Tom', town='Leeds')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_first_name_is_not_supplied(self):
farmer = Farmer(surname='Pupper', town='Harrogate')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_town_is_not_supplied(self):
farmer = Farmer(first_name='Test', surname='Family Name')
with self.assertRaises(ValidationError):
farmer.full_clean()
|
Add unit test for the Farmer model
|
Add unit test for the Farmer model
|
Python
|
bsd-2-clause
|
tm-kn/farmers-api
|
Add unit test for the Farmer model
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from .models import Farmer
class FarmerModelTest(TestCase):
def test_string_representation(self):
farmer = Farmer(first_name='John', surname='Smith', town='Harrogate')
self.assertEqual(str(farmer), '%s %s' %
(farmer.first_name, farmer.surname))
def test_verbose_name(self):
self.assertEqual(Farmer._meta.verbose_name, 'farmer')
def test_verbose_plural_name(self):
self.assertEqual(Farmer._meta.verbose_name_plural, 'farmers')
def test_getting_full_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_full_name(), 'Tom Doggo Pupper')
def test_getting_short_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_short_name(), 'T. Pupper')
def test_fail_if_surname_is_not_supplied(self):
farmer = Farmer(first_name='Tom', town='Leeds')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_first_name_is_not_supplied(self):
farmer = Farmer(surname='Pupper', town='Harrogate')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_town_is_not_supplied(self):
farmer = Farmer(first_name='Test', surname='Family Name')
with self.assertRaises(ValidationError):
farmer.full_clean()
|
<commit_before><commit_msg>Add unit test for the Farmer model<commit_after>
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from .models import Farmer
class FarmerModelTest(TestCase):
def test_string_representation(self):
farmer = Farmer(first_name='John', surname='Smith', town='Harrogate')
self.assertEqual(str(farmer), '%s %s' %
(farmer.first_name, farmer.surname))
def test_verbose_name(self):
self.assertEqual(Farmer._meta.verbose_name, 'farmer')
def test_verbose_plural_name(self):
self.assertEqual(Farmer._meta.verbose_name_plural, 'farmers')
def test_getting_full_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_full_name(), 'Tom Doggo Pupper')
def test_getting_short_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_short_name(), 'T. Pupper')
def test_fail_if_surname_is_not_supplied(self):
farmer = Farmer(first_name='Tom', town='Leeds')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_first_name_is_not_supplied(self):
farmer = Farmer(surname='Pupper', town='Harrogate')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_town_is_not_supplied(self):
farmer = Farmer(first_name='Test', surname='Family Name')
with self.assertRaises(ValidationError):
farmer.full_clean()
|
Add unit test for the Farmer modelfrom django.core.exceptions import ValidationError
from django.test import TestCase
from .models import Farmer
class FarmerModelTest(TestCase):
def test_string_representation(self):
farmer = Farmer(first_name='John', surname='Smith', town='Harrogate')
self.assertEqual(str(farmer), '%s %s' %
(farmer.first_name, farmer.surname))
def test_verbose_name(self):
self.assertEqual(Farmer._meta.verbose_name, 'farmer')
def test_verbose_plural_name(self):
self.assertEqual(Farmer._meta.verbose_name_plural, 'farmers')
def test_getting_full_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_full_name(), 'Tom Doggo Pupper')
def test_getting_short_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_short_name(), 'T. Pupper')
def test_fail_if_surname_is_not_supplied(self):
farmer = Farmer(first_name='Tom', town='Leeds')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_first_name_is_not_supplied(self):
farmer = Farmer(surname='Pupper', town='Harrogate')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_town_is_not_supplied(self):
farmer = Farmer(first_name='Test', surname='Family Name')
with self.assertRaises(ValidationError):
farmer.full_clean()
|
<commit_before><commit_msg>Add unit test for the Farmer model<commit_after>from django.core.exceptions import ValidationError
from django.test import TestCase
from .models import Farmer
class FarmerModelTest(TestCase):
def test_string_representation(self):
farmer = Farmer(first_name='John', surname='Smith', town='Harrogate')
self.assertEqual(str(farmer), '%s %s' %
(farmer.first_name, farmer.surname))
def test_verbose_name(self):
self.assertEqual(Farmer._meta.verbose_name, 'farmer')
def test_verbose_plural_name(self):
self.assertEqual(Farmer._meta.verbose_name_plural, 'farmers')
def test_getting_full_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_full_name(), 'Tom Doggo Pupper')
def test_getting_short_name(self):
farmer = Farmer(first_name='Tom Doggo',
surname='Pupper', town='Harrogate')
self.assertEqual(farmer.get_short_name(), 'T. Pupper')
def test_fail_if_surname_is_not_supplied(self):
farmer = Farmer(first_name='Tom', town='Leeds')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_first_name_is_not_supplied(self):
farmer = Farmer(surname='Pupper', town='Harrogate')
with self.assertRaises(ValidationError):
farmer.full_clean()
def test_fail_if_town_is_not_supplied(self):
farmer = Farmer(first_name='Test', surname='Family Name')
with self.assertRaises(ValidationError):
farmer.full_clean()
|
|
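For context, the Farmer model exercised by these tests is not included in this record. A minimal core/models.py that would satisfy the assertions could look like the sketch below; the field types and method bodies are inferred from the tests, not taken from the actual repository.

# Hypothetical sketch of core/models.py, inferred from the tests above.
from django.db import models


class Farmer(models.Model):
    # blank=False is the CharField default, so full_clean() raises
    # ValidationError when any field is left empty, as the tests expect.
    first_name = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    town = models.CharField(max_length=100)

    # The default verbose_name ('farmer') and plural ('farmers') already
    # match the assertions, so no Meta class is needed.
    def __str__(self):
        return '%s %s' % (self.first_name, self.surname)

    def get_full_name(self):
        return '%s %s' % (self.first_name, self.surname)

    def get_short_name(self):
        # 'Tom Doggo' / 'Pupper' -> 'T. Pupper', as asserted above.
        return '%s. %s' % (self.first_name[0], self.surname)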
d99170f1aa6dd885354804f20c82cfff7662266c
|
tempest/tests/services/compute/test_limits_client.py
|
tempest/tests/services/compute/test_limits_client.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import limits_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestLimitsClient(base.TestCase):
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(
fake_auth, 'compute', 'regionOne')
def _test_show_limits(self, bytes_body=False):
expected = {"rate": [],
"absolute": {"maxServerMeta": 128,
"maxPersonality": 5,
"totalServerGroupsUsed": 0,
"maxImageMeta": 128,
"maxPersonalitySize": 10240,
"maxServerGroups": 10,
"maxSecurityGroupRules": 20,
"maxTotalKeypairs": 100,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalInstancesUsed": 0,
"maxSecurityGroups": 10,
"totalFloatingIpsUsed": 0,
"maxTotalCores": 20,
"totalSecurityGroupsUsed": 0,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalRAMSize": 51200,
"maxServerGroupMembers": 10}}
serialized_body = json.dumps({"limits": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_limits()
self.assertEqual(expected, resp)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
Add unit test for method show_limits
|
Add unit test for method show_limits
This patch adds unit test for limits_client.
Specific about method show_limits.
Change-Id: I03acbf807d388d5d6c61dafff0f2d9a07520775f
|
Python
|
apache-2.0
|
tonyli71/tempest,Juniper/tempest,tonyli71/tempest,flyingfish007/tempest,openstack/tempest,JioCloud/tempest,roopali8/tempest,LIS/lis-tempest,Tesora/tesora-tempest,hayderimran7/tempest,bigswitch/tempest,Juniper/tempest,vedujoshi/tempest,zsoltdudas/lis-tempest,masayukig/tempest,pczerkas/tempest,dkalashnik/tempest,hayderimran7/tempest,vedujoshi/tempest,flyingfish007/tempest,openstack/tempest,dkalashnik/tempest,alinbalutoiu/tempest,sebrandon1/tempest,rakeshmi/tempest,bigswitch/tempest,masayukig/tempest,cisco-openstack/tempest,JioCloud/tempest,roopali8/tempest,tudorvio/tempest,rakeshmi/tempest,manasi24/jiocloud-tempest-qatempest,Tesora/tesora-tempest,nunogt/tempest,xbezdick/tempest,tudorvio/tempest,zsoltdudas/lis-tempest,manasi24/jiocloud-tempest-qatempest,manasi24/tempest,manasi24/tempest,akash1808/tempest,pczerkas/tempest,LIS/lis-tempest,izadorozhna/tempest,akash1808/tempest,pandeyop/tempest,pandeyop/tempest,nunogt/tempest,izadorozhna/tempest,varunarya10/tempest,cisco-openstack/tempest,varunarya10/tempest,sebrandon1/tempest,xbezdick/tempest,alinbalutoiu/tempest
|
Add unit test for method show_limits
This patch adds unit test for limits_client.
Specific about method show_limits.
Change-Id: I03acbf807d388d5d6c61dafff0f2d9a07520775f
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import limits_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestLimitsClient(base.TestCase):
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(
fake_auth, 'compute', 'regionOne')
def _test_show_limits(self, bytes_body=False):
expected = {"rate": [],
"absolute": {"maxServerMeta": 128,
"maxPersonality": 5,
"totalServerGroupsUsed": 0,
"maxImageMeta": 128,
"maxPersonalitySize": 10240,
"maxServerGroups": 10,
"maxSecurityGroupRules": 20,
"maxTotalKeypairs": 100,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalInstancesUsed": 0,
"maxSecurityGroups": 10,
"totalFloatingIpsUsed": 0,
"maxTotalCores": 20,
"totalSecurityGroupsUsed": 0,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalRAMSize": 51200,
"maxServerGroupMembers": 10}}
serialized_body = json.dumps({"limits": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_limits()
self.assertEqual(expected, resp)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for method show_limits
This patch adds unit test for limits_client.
Specific about method show_limits.
Change-Id: I03acbf807d388d5d6c61dafff0f2d9a07520775f<commit_after>
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import limits_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestLimitsClient(base.TestCase):
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(
fake_auth, 'compute', 'regionOne')
def _test_show_limits(self, bytes_body=False):
expected = {"rate": [],
"absolute": {"maxServerMeta": 128,
"maxPersonality": 5,
"totalServerGroupsUsed": 0,
"maxImageMeta": 128,
"maxPersonalitySize": 10240,
"maxServerGroups": 10,
"maxSecurityGroupRules": 20,
"maxTotalKeypairs": 100,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalInstancesUsed": 0,
"maxSecurityGroups": 10,
"totalFloatingIpsUsed": 0,
"maxTotalCores": 20,
"totalSecurityGroupsUsed": 0,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalRAMSize": 51200,
"maxServerGroupMembers": 10}}
serialized_body = json.dumps({"limits": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_limits()
self.assertEqual(expected, resp)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
Add unit test for method show_limits
This patch adds unit test for limits_client.
Specific about method show_limits.
Change-Id: I03acbf807d388d5d6c61dafff0f2d9a07520775f# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import limits_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestLimitsClient(base.TestCase):
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(
fake_auth, 'compute', 'regionOne')
def _test_show_limits(self, bytes_body=False):
expected = {"rate": [],
"absolute": {"maxServerMeta": 128,
"maxPersonality": 5,
"totalServerGroupsUsed": 0,
"maxImageMeta": 128,
"maxPersonalitySize": 10240,
"maxServerGroups": 10,
"maxSecurityGroupRules": 20,
"maxTotalKeypairs": 100,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalInstancesUsed": 0,
"maxSecurityGroups": 10,
"totalFloatingIpsUsed": 0,
"maxTotalCores": 20,
"totalSecurityGroupsUsed": 0,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalRAMSize": 51200,
"maxServerGroupMembers": 10}}
serialized_body = json.dumps({"limits": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_limits()
self.assertEqual(expected, resp)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for method show_limits
This patch adds unit test for limits_client.
Specific about method show_limits.
Change-Id: I03acbf807d388d5d6c61dafff0f2d9a07520775f<commit_after># Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import limits_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestLimitsClient(base.TestCase):
def setUp(self):
super(TestLimitsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = limits_client.LimitsClient(
fake_auth, 'compute', 'regionOne')
def _test_show_limits(self, bytes_body=False):
expected = {"rate": [],
"absolute": {"maxServerMeta": 128,
"maxPersonality": 5,
"totalServerGroupsUsed": 0,
"maxImageMeta": 128,
"maxPersonalitySize": 10240,
"maxServerGroups": 10,
"maxSecurityGroupRules": 20,
"maxTotalKeypairs": 100,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalInstancesUsed": 0,
"maxSecurityGroups": 10,
"totalFloatingIpsUsed": 0,
"maxTotalCores": 20,
"totalSecurityGroupsUsed": 0,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalRAMSize": 51200,
"maxServerGroupMembers": 10}}
serialized_body = json.dumps({"limits": expected})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_limits()
self.assertEqual(expected, resp)
def test_show_limits_with_str_body(self):
self._test_show_limits()
def test_show_limits_with_bytes_body(self):
self._test_show_limits(bytes_body=True)
|
|
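The pattern in this record is to patch ServiceClient.get so it returns a pair of (HTTP response, serialized body) and then assert that the client method returns the inner "limits" mapping. Stripped of the tempest plumbing, the behaviour being asserted reduces to roughly the following; this is a hypothetical reduction for orientation, not tempest source code.

# Reduced illustration of the assertion above: deserialize the body returned
# by the (patched) transport and hand back only the "limits" payload.
import json

expected = {"rate": [], "absolute": {"maxTotalInstances": 10}}
serialized_body = json.dumps({"limits": expected})

def show_limits(get):
    resp, body = get("limits")            # stands in for the patched ServiceClient.get
    if isinstance(body, bytes):
        body = body.decode('utf-8')       # covers the bytes-body variant of the test
    return json.loads(body)["limits"]

assert show_limits(lambda url: ({'status': 200}, serialized_body)) == expected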
e3ec476cd844235ce8a8e9b089a4c0d46185eb72
|
core/management/commands/delete_old_sessions.py
|
core/management/commands/delete_old_sessions.py
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session

"""
>>> def clean(count):
...     for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
...         s.delete()
...         if str(idx).endswith('000'): print idx
...     print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""

class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"

    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
Add delete old sessions command
|
Add delete old sessions command
|
Python
|
mit
|
pydanny/djangopackages,pydanny/djangopackages,nanuxbe/djangopackages,nanuxbe/djangopackages,QLGu/djangopackages,pydanny/djangopackages,QLGu/djangopackages,QLGu/djangopackages,nanuxbe/djangopackages
|
Add delete old sessions command
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session

"""
>>> def clean(count):
...     for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
...         s.delete()
...         if str(idx).endswith('000'): print idx
...     print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""

class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"

    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
<commit_before><commit_msg>Add delete old sessions command<commit_after>
|
from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session

"""
>>> def clean(count):
...     for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
...         s.delete()
...         if str(idx).endswith('000'): print idx
...     print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""

class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"

    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
Add delete old sessions commandfrom datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session

"""
>>> def clean(count):
...     for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
...         s.delete()
...         if str(idx).endswith('000'): print idx
...     print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""

class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"

    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
<commit_before><commit_msg>Add delete old sessions command<commit_after>from datetime import datetime
from django.core.management.base import BaseCommand
from django.contrib.sessions.models import Session

"""
>>> def clean(count):
...     for idx, s in enumerate(Session.objects.filter(expire_date__lt=now)[:count+1]):
...         s.delete()
...         if str(idx).endswith('000'): print idx
...     print "{0} records left".format(Session.objects.filter(expire_date__lt=now).count())
...
"""

class Command(BaseCommand):
    args = '<count count ...>'
    help = "Delete old sessions"

    def handle(self, *args, **options):
        old_sessions = Session.objects.filter(expire_date__lt=datetime.now())
        self.stdout.write("Deleting {0} expired sessions".format(
            old_sessions.count()
        ))
        for index, session in enumerate(old_sessions):
            session.delete()
            if str(index).endswith('000'):
                self.stdout.write("{0} records deleted".format(index))
        self.stdout.write("{0} expired sessions remaining".format(
            Session.objects.filter(expire_date__lt=datetime.now()).count()
        ))
|
|
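Once the file above lives in an app's management/commands/ directory, the cleanup runs as python manage.py delete_old_sessions (the command name comes from the file name). It can also be triggered programmatically, for example from a periodic task; a minimal sketch, assuming Django settings are already configured:

# Invoke the management command from Python, e.g. from a scheduled job.
from django.core.management import call_command

call_command('delete_old_sessions')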
fec4ff648c65decf17707929e3d44789c50fde4f
|
carseour/__init__.py
|
carseour/__init__.py
|
import mmap
from carseour.definitions import GameInstance, SHARED_MEMORY_VERSION
def _get_mmapped():
# could we use ctypes.sizeof(GameInstance) instead here? A too large value results in access denied,
# 8k works for now
return mmap.mmap(0, 8000, tagname='$pcars$')
def _validate_instance(instance):
if instance.mVersion != SHARED_MEMORY_VERSION:
raise InvalidSharedMemoryVersionException("""
Mismatch between library data structure version and game data structure version.
Retrieve new SharedMemory.h and run bin/generate_classes.py to regenerate the definitions file.
""")
return instance
def get_live():
return _validate_instance(GameInstance.from_buffer(_get_mmapped()))
def get_snapshot():
return _validate_instance(GameInstance.from_buffer_copy(_get_mmapped()))
class InvalidSharedMemoryVersionException(Exception):
pass
|
Add methods to retrieve a game instance backed live by shared memory or to retrieve a copy of the current state of the memory.
|
Add methods to retrieve a game instance backed live by shared memory or to retrieve a copy of the current state of the memory.
|
Python
|
mit
|
matslindh/carseour,matslindh/carseour
|
Add methods to retrieve a game instance backed live by shared memory or to retrieve a copy of the current state of the memory.
|
import mmap
from carseour.definitions import GameInstance, SHARED_MEMORY_VERSION
def _get_mmapped():
# could we use ctypes.sizeof(GameInstance) instead here? A too large value results in access denied,
# 8k works for now
return mmap.mmap(0, 8000, tagname='$pcars$')
def _validate_instance(instance):
if instance.mVersion != SHARED_MEMORY_VERSION:
raise InvalidSharedMemoryVersionException("""
Mismatch between library data structure version and game data structure version.
Retrieve new SharedMemory.h and run bin/generate_classes.py to regenerate the definitions file.
""")
return instance
def get_live():
return _validate_instance(GameInstance.from_buffer(_get_mmapped()))
def get_snapshot():
return _validate_instance(GameInstance.from_buffer_copy(_get_mmapped()))
class InvalidSharedMemoryVersionException(Exception):
pass
|
<commit_before><commit_msg>Add methods to retrieve a game instance backed live by shared memory or to retrieve a copy of the current state of the memory.<commit_after>
|
import mmap
from carseour.definitions import GameInstance, SHARED_MEMORY_VERSION
def _get_mmapped():
# could we use ctypes.sizeof(GameInstance) instead here? A too large value results in access denied,
# 8k works for now
return mmap.mmap(0, 8000, tagname='$pcars$')
def _validate_instance(instance):
if instance.mVersion != SHARED_MEMORY_VERSION:
raise InvalidSharedMemoryVersionException("""
Mismatch between library data structure version and game data structure version.
Retrieve new SharedMemory.h and run bin/generate_classes.py to regenerate the definitions file.
""")
return instance
def get_live():
return _validate_instance(GameInstance.from_buffer(_get_mmapped()))
def get_snapshot():
return _validate_instance(GameInstance.from_buffer_copy(_get_mmapped()))
class InvalidSharedMemoryVersionException(Exception):
pass
|
Add methods to retrieve a game instance backed live by shared memory or to retrieve a copy of the current state of the memory.import mmap
from carseour.definitions import GameInstance, SHARED_MEMORY_VERSION
def _get_mmapped():
# could we use ctypes.sizeof(GameInstance) instead here? A too large value results in access denied,
# 8k works for now
return mmap.mmap(0, 8000, tagname='$pcars$')
def _validate_instance(instance):
if instance.mVersion != SHARED_MEMORY_VERSION:
raise InvalidSharedMemoryVersionException("""
Mismatch between library data structure version and game data structure version.
Retrieve new SharedMemory.h and run bin/generate_classes.py to regenerate the definitions file.
""")
return instance
def get_live():
return _validate_instance(GameInstance.from_buffer(_get_mmapped()))
def get_snapshot():
return _validate_instance(GameInstance.from_buffer_copy(_get_mmapped()))
class InvalidSharedMemoryVersionException(Exception):
pass
|
<commit_before><commit_msg>Add methods to retrieve a game instance backed live by shared memory or to retrieve a copy of the current state of the memory.<commit_after>import mmap
from carseour.definitions import GameInstance, SHARED_MEMORY_VERSION
def _get_mmapped():
# could we use ctypes.sizeof(GameInstance) instead here? A too large value results in access denied,
# 8k works for now
return mmap.mmap(0, 8000, tagname='$pcars$')
def _validate_instance(instance):
if instance.mVersion != SHARED_MEMORY_VERSION:
raise InvalidSharedMemoryVersionException("""
Mismatch between library data structure version and game data structure version.
Retrieve new SharedMemory.h and run bin/generate_classes.py to regenerate the definitions file.
""")
return instance
def get_live():
return _validate_instance(GameInstance.from_buffer(_get_mmapped()))
def get_snapshot():
return _validate_instance(GameInstance.from_buffer_copy(_get_mmapped()))
class InvalidSharedMemoryVersionException(Exception):
pass
|
|
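A short usage sketch for the two accessors added in this record. Only mVersion is known from the code shown here; any other attribute would depend on the generated definitions module, so none are assumed below.

# Poll the shared-memory block through the helpers defined above.
import carseour

snapshot = carseour.get_snapshot()    # detached copy of the current state
print(snapshot.mVersion)              # layout version checked by _validate_instance

live = carseour.get_live()            # ctypes view backed by the live mapping;
                                      # its fields keep updating while the game runs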
149b54a921841f546fa43ebbf37cdffe5c3a93af
|
module1-2/new_integrals.py
|
module1-2/new_integrals.py
|
# Кириллов Алексей, ИУ7-22
# Working with integrals from a menu

def f(x):
    return x*x

def parabola(a,b,n):
    s = 0
    h = (b-a)/n
    for i in range(n):
        s += f(a+i*h) + 4 * f(a+i*h+h/2) + f(a+i*h+h)
    s *= h/6
    return s

def Integral(a,b,n=0,e=1):
    if not n:
        n = 1
        while abs(parabola(a,b,n)-parabola(a,b,n+1)) > e:
            n += 1
        print('\nNumber of subdivisions =',n)
    S = parabola(a,b,n)
    return S

menu = 1
while menu:
    print('Choose an action:\n')
    print('\t1 - compute the integral to a given accuracy')
    print('\t2 - compute the integral with a given number of subdivisions')
    print('\n\t0 - exit\n')
    menu = int(input('Enter the action number: '))
    if menu:
        a,b = map(int,input('\n\nEnter the interval bounds: ').split())
    if menu == 1:
        e = float(input('Enter the required accuracy: '))
        S = Integral(a,b,0,e)
    if menu == 2:
        n = int(input('Enter the number of subdivisions: '))
        S = Integral(a,b,n)
    if menu:
        print('\nThe integral of y = x*x on [',a,',',b,'] =',S,'\n\n')
|
Work with integrals from menu
|
Work with integrals from menu
|
Python
|
mit
|
aspadm/labworks
|
Work with integrals from menu
|
# Кириллов Алексей, ИУ7-22
# Working with integrals from a menu

def f(x):
    return x*x

def parabola(a,b,n):
    s = 0
    h = (b-a)/n
    for i in range(n):
        s += f(a+i*h) + 4 * f(a+i*h+h/2) + f(a+i*h+h)
    s *= h/6
    return s

def Integral(a,b,n=0,e=1):
    if not n:
        n = 1
        while abs(parabola(a,b,n)-parabola(a,b,n+1)) > e:
            n += 1
        print('\nNumber of subdivisions =',n)
    S = parabola(a,b,n)
    return S

menu = 1
while menu:
    print('Choose an action:\n')
    print('\t1 - compute the integral to a given accuracy')
    print('\t2 - compute the integral with a given number of subdivisions')
    print('\n\t0 - exit\n')
    menu = int(input('Enter the action number: '))
    if menu:
        a,b = map(int,input('\n\nEnter the interval bounds: ').split())
    if menu == 1:
        e = float(input('Enter the required accuracy: '))
        S = Integral(a,b,0,e)
    if menu == 2:
        n = int(input('Enter the number of subdivisions: '))
        S = Integral(a,b,n)
    if menu:
        print('\nThe integral of y = x*x on [',a,',',b,'] =',S,'\n\n')
|
<commit_before><commit_msg>Work with integrals from menu<commit_after>
|
# Кириллов Алексей, ИУ7-22
# Working with integrals from a menu

def f(x):
    return x*x

def parabola(a,b,n):
    s = 0
    h = (b-a)/n
    for i in range(n):
        s += f(a+i*h) + 4 * f(a+i*h+h/2) + f(a+i*h+h)
    s *= h/6
    return s

def Integral(a,b,n=0,e=1):
    if not n:
        n = 1
        while abs(parabola(a,b,n)-parabola(a,b,n+1)) > e:
            n += 1
        print('\nNumber of subdivisions =',n)
    S = parabola(a,b,n)
    return S

menu = 1
while menu:
    print('Choose an action:\n')
    print('\t1 - compute the integral to a given accuracy')
    print('\t2 - compute the integral with a given number of subdivisions')
    print('\n\t0 - exit\n')
    menu = int(input('Enter the action number: '))
    if menu:
        a,b = map(int,input('\n\nEnter the interval bounds: ').split())
    if menu == 1:
        e = float(input('Enter the required accuracy: '))
        S = Integral(a,b,0,e)
    if menu == 2:
        n = int(input('Enter the number of subdivisions: '))
        S = Integral(a,b,n)
    if menu:
        print('\nThe integral of y = x*x on [',a,',',b,'] =',S,'\n\n')
|
Work with integrals from menu# Кириллов Алексей, ИУ7-22
# Working with integrals from a menu

def f(x):
    return x*x

def parabola(a,b,n):
    s = 0
    h = (b-a)/n
    for i in range(n):
        s += f(a+i*h) + 4 * f(a+i*h+h/2) + f(a+i*h+h)
    s *= h/6
    return s

def Integral(a,b,n=0,e=1):
    if not n:
        n = 1
        while abs(parabola(a,b,n)-parabola(a,b,n+1)) > e:
            n += 1
        print('\nNumber of subdivisions =',n)
    S = parabola(a,b,n)
    return S

menu = 1
while menu:
    print('Choose an action:\n')
    print('\t1 - compute the integral to a given accuracy')
    print('\t2 - compute the integral with a given number of subdivisions')
    print('\n\t0 - exit\n')
    menu = int(input('Enter the action number: '))
    if menu:
        a,b = map(int,input('\n\nEnter the interval bounds: ').split())
    if menu == 1:
        e = float(input('Enter the required accuracy: '))
        S = Integral(a,b,0,e)
    if menu == 2:
        n = int(input('Enter the number of subdivisions: '))
        S = Integral(a,b,n)
    if menu:
        print('\nThe integral of y = x*x on [',a,',',b,'] =',S,'\n\n')
|
<commit_before><commit_msg>Work with integrals from menu<commit_after># Кириллов Алексей, ИУ7-22
# Working with integrals from a menu

def f(x):
    return x*x

def parabola(a,b,n):
    s = 0
    h = (b-a)/n
    for i in range(n):
        s += f(a+i*h) + 4 * f(a+i*h+h/2) + f(a+i*h+h)
    s *= h/6
    return s

def Integral(a,b,n=0,e=1):
    if not n:
        n = 1
        while abs(parabola(a,b,n)-parabola(a,b,n+1)) > e:
            n += 1
        print('\nNumber of subdivisions =',n)
    S = parabola(a,b,n)
    return S

menu = 1
while menu:
    print('Choose an action:\n')
    print('\t1 - compute the integral to a given accuracy')
    print('\t2 - compute the integral with a given number of subdivisions')
    print('\n\t0 - exit\n')
    menu = int(input('Enter the action number: '))
    if menu:
        a,b = map(int,input('\n\nEnter the interval bounds: ').split())
    if menu == 1:
        e = float(input('Enter the required accuracy: '))
        S = Integral(a,b,0,e)
    if menu == 2:
        n = int(input('Enter the number of subdivisions: '))
        S = Integral(a,b,n)
    if menu:
        print('\nThe integral of y = x*x on [',a,',',b,'] =',S,'\n\n')
|
|
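As a quick sanity check on the routine above: for f(x) = x*x the exact integral over [a, b] is (b**3 - a**3)/3, and composite Simpson's rule is exact for polynomials up to degree three, so the approximation should match to rounding error. The two functions are restated so the snippet runs on its own.

# Verify the composite Simpson routine against the exact antiderivative of x**2.
def f(x):
    return x * x

def parabola(a, b, n):
    s = 0
    h = (b - a) / n
    for i in range(n):
        s += f(a + i * h) + 4 * f(a + i * h + h / 2) + f(a + i * h + h)
    return s * h / 6

a, b = 0, 3
assert abs(parabola(a, b, 100) - (b**3 - a**3) / 3) < 1e-9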
a0739545ebb9ef469be1f1c1034231b76fdb8c6a
|
Lib/test/test_plistlib.py
|
Lib/test/test_plistlib.py
|
# Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
from test import test_support
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(test_support.TESTFN)
except:
pass
def _create(self):
pl = plistlib.Plist(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=plistlib.Dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = plistlib.Data("<binary gunk>"),
someMoreData = plistlib.Data("<lots of binary gunk>" * 10),
)
pl['anotherInt'] = 42
try:
from xml.utils.iso8601 import parse
import time
except ImportError:
pass
else:
pl['aDate'] = plistlib.Date(time.mktime(time.gmtime()))
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
pl.write(test_support.TESTFN)
pl2 = plistlib.Plist.fromFile(test_support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_main():
test_support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
|
Test suite for the plistlib module.
|
Test suite for the plistlib module.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Test suite for the plistlib module.
|
# Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
from test import test_support
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(test_support.TESTFN)
except:
pass
def _create(self):
pl = plistlib.Plist(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=plistlib.Dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = plistlib.Data("<binary gunk>"),
someMoreData = plistlib.Data("<lots of binary gunk>" * 10),
)
pl['anotherInt'] = 42
try:
from xml.utils.iso8601 import parse
import time
except ImportError:
pass
else:
pl['aDate'] = plistlib.Date(time.mktime(time.gmtime()))
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
pl.write(test_support.TESTFN)
pl2 = plistlib.Plist.fromFile(test_support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_main():
test_support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
|
<commit_before><commit_msg>Test suite for the plistlib module.<commit_after>
|
# Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
from test import test_support
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(test_support.TESTFN)
except:
pass
def _create(self):
pl = plistlib.Plist(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=plistlib.Dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = plistlib.Data("<binary gunk>"),
someMoreData = plistlib.Data("<lots of binary gunk>" * 10),
)
pl['anotherInt'] = 42
try:
from xml.utils.iso8601 import parse
import time
except ImportError:
pass
else:
pl['aDate'] = plistlib.Date(time.mktime(time.gmtime()))
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
pl.write(test_support.TESTFN)
pl2 = plistlib.Plist.fromFile(test_support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_main():
test_support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
|
Test suite for the plistlib module.# Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
from test import test_support
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(test_support.TESTFN)
except:
pass
def _create(self):
pl = plistlib.Plist(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=plistlib.Dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = plistlib.Data("<binary gunk>"),
someMoreData = plistlib.Data("<lots of binary gunk>" * 10),
)
pl['anotherInt'] = 42
try:
from xml.utils.iso8601 import parse
import time
except ImportError:
pass
else:
pl['aDate'] = plistlib.Date(time.mktime(time.gmtime()))
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
pl.write(test_support.TESTFN)
pl2 = plistlib.Plist.fromFile(test_support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_main():
test_support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
|
<commit_before><commit_msg>Test suite for the plistlib module.<commit_after># Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
from test import test_support
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(test_support.TESTFN)
except:
pass
def _create(self):
pl = plistlib.Plist(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=plistlib.Dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = plistlib.Data("<binary gunk>"),
someMoreData = plistlib.Data("<lots of binary gunk>" * 10),
)
pl['anotherInt'] = 42
try:
from xml.utils.iso8601 import parse
import time
except ImportError:
pass
else:
pl['aDate'] = plistlib.Date(time.mktime(time.gmtime()))
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
pl.write(test_support.TESTFN)
pl2 = plistlib.Plist.fromFile(test_support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_main():
test_support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
|
|
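The Plist, Dict and Data wrappers exercised here are the early plistlib API. As a side note, the same write/read round-trip on current Python 3 (3.4 and later) is spelled with plistlib.dump and plistlib.load:

# Equivalent round-trip with the modern plistlib API, shown for comparison only.
import plistlib

pl = {"aString": "Doodah", "aDict": {"aTrueValue": True, "aFalseValue": False}}
with open("test.plist", "wb") as fp:
    plistlib.dump(pl, fp)
with open("test.plist", "rb") as fp:
    assert plistlib.load(fp) == pl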
aec8653089f37d53d13e1526ce2379a05e66604d
|
Utilities/Maintenance/GeneratePythonDownloadsPage.py
|
Utilities/Maintenance/GeneratePythonDownloadsPage.py
|
#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
Add script to generate download links used on simpleitk.org
|
Add script to generate download links used on simpleitk.org
|
Python
|
apache-2.0
|
InsightSoftwareConsortium/SimpleITK,blowekamp/SimpleITK,richardbeare/SimpleITK,richardbeare/SimpleITK,SimpleITK/SimpleITK,InsightSoftwareConsortium/SimpleITK,InsightSoftwareConsortium/SimpleITK,blowekamp/SimpleITK,SimpleITK/SimpleITK,blowekamp/SimpleITK,SimpleITK/SimpleITK,richardbeare/SimpleITK,blowekamp/SimpleITK,SimpleITK/SimpleITK,richardbeare/SimpleITK,blowekamp/SimpleITK,SimpleITK/SimpleITK,richardbeare/SimpleITK,SimpleITK/SimpleITK,blowekamp/SimpleITK,InsightSoftwareConsortium/SimpleITK,InsightSoftwareConsortium/SimpleITK,blowekamp/SimpleITK,richardbeare/SimpleITK,blowekamp/SimpleITK,richardbeare/SimpleITK,SimpleITK/SimpleITK,InsightSoftwareConsortium/SimpleITK,richardbeare/SimpleITK,InsightSoftwareConsortium/SimpleITK,InsightSoftwareConsortium/SimpleITK,SimpleITK/SimpleITK
|
Add script to generate download links used on simpleitk.org
|
#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
<commit_before><commit_msg>Add script to generate download links used on simpleitk.org<commit_after>
|
#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
Add script to generate download links used on simpleitk.org#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
<commit_before><commit_msg>Add script to generate download links used on simpleitk.org<commit_after>#!/usr/bin/env python
#=========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
import hashlib
import argparse
import re
import os
parser = argparse.ArgumentParser( description="Given a list of python wheels, generate a list of hyperlinks to GitHub with sha512 fragment identifier" )
parser.add_argument( '--hash', choices=['md5','sha256', 'sha512'], default='sha512')
parser.add_argument( '-f', '--format', choices=['html','md'], default='html')
parser.add_argument( 'files', metavar="python.whl", type=argparse.FileType(mode='rb'), nargs='+' )
args = parser.parse_args()
for f in args.files:
name = os.path.basename(f.name)
#version="1.1.0"
version = re.match(r'SimpleITK-([0-9]+\.[0-9]+(\.[0-9]+)?(rc[0-9]+)?)', name ).group(1)
print("version:{0}".format(version))
if args.hash == "md5":
hash_value = hashlib.md5(f.read()).hexdigest()
elif args.hash == "sha256":
hash_value = hashlib.sha256(f.read()).hexdigest()
elif args.hash == "sha512":
hash_value = hashlib.sha512(f.read()).hexdigest()
tag = "v{0}".format(version)
#host="SourceForge"
#url = "https://sourceforge.net/projects/simpleitk/files/SimpleITK/{0}/Python/{1}#{2}={3}".format(version,name,args.hash,hash_value)
host = "GitHub"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#{2}={3}".format(tag,name,args.hash,hash_value)
if args.format == 'html':
print "<li><a href=\"{0}\" title=\"Click to download {1}\">{1} (hosted at {2})</a></li>".format(url,name,host)
elif args.format == 'md':
print "[{1}]({0})".format(url,name)
f.close()
|
|
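The script is run from the command line, for example: python GeneratePythonDownloadsPage.py --hash sha512 -f md SimpleITK-1.1.0-*.whl. For a single wheel the generated markdown entry has the shape below; the file name, tag and digest are placeholders, not real release data.

# Shape of one generated markdown link; all values here are illustrative.
name = "SimpleITK-1.1.0-cp36-cp36m-manylinux1_x86_64.whl"
tag = "v1.1.0"
digest = "<sha512-of-the-wheel>"
url = "https://github.com/SimpleITK/SimpleITK/releases/download/{0}/{1}#sha512={2}".format(
    tag, name, digest)
print("[{1}]({0})".format(url, name))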
6a1755e28edcdcc7d86cce713d7fd8d640a898fd
|
tests/test_aquire.py
|
tests/test_aquire.py
|
import pytest
@pytest.fixture
def testdir_with_map(testdir):
testdir.makefile('.yaml', map='''
version: 1
environments:
mock_env:
roles:
mock:
- mocker.example:
greeting: Hello World
''')
testdir.makepyfile(mocker='''
class Greeting:
def __init__(self, greeting):
self.greeting = greeting
def example(greeting):
return Greeting(greeting)
''')
return testdir
def test_aquire(pytestconfig):
assert pytest.roles.data['mock']
assert pytest.roles.data['mock']['mocker.example']
def test_aquire(testdir_with_map):
testdir_with_map.makepyfile('''
import pytest
def test_read_environment(request):
mock = pytest.roles.aquire('mock')
assert mock.name == 'mock'
assert mock.greeting == 'Hello World'
''')
result = testdir_with_map.runpytest('--env=mock_env')
assert result.ret == 0
|
Add a basic test to make sure acquisition works
|
Add a basic test to make sure acquisition works
|
Python
|
mpl-2.0
|
sangoma/pytestlab
|
Add a basic test to make sure acquisition works
|
import pytest
@pytest.fixture
def testdir_with_map(testdir):
testdir.makefile('.yaml', map='''
version: 1
environments:
mock_env:
roles:
mock:
- mocker.example:
greeting: Hello World
''')
testdir.makepyfile(mocker='''
class Greeting:
def __init__(self, greeting):
self.greeting = greeting
def example(greeting):
return Greeting(greeting)
''')
return testdir
def test_aquire(pytestconfig):
assert pytest.roles.data['mock']
assert pytest.roles.data['mock']['mocker.example']
def test_aquire(testdir_with_map):
testdir_with_map.makepyfile('''
import pytest
def test_read_environment(request):
mock = pytest.roles.aquire('mock')
assert mock.name == 'mock'
assert mock.greeting == 'Hello World'
''')
result = testdir_with_map.runpytest('--env=mock_env')
assert result.ret == 0
|
<commit_before><commit_msg>Add a basic test to make sure acquisition works<commit_after>
|
import pytest
@pytest.fixture
def testdir_with_map(testdir):
testdir.makefile('.yaml', map='''
version: 1
environments:
mock_env:
roles:
mock:
- mocker.example:
greeting: Hello World
''')
testdir.makepyfile(mocker='''
class Greeting:
def __init__(self, greeting):
self.greeting = greeting
def example(greeting):
return Greeting(greeting)
''')
return testdir
def test_aquire(pytestconfig):
assert pytest.roles.data['mock']
assert pytest.roles.data['mock']['mocker.example']
def test_aquire(testdir_with_map):
testdir_with_map.makepyfile('''
import pytest
def test_read_environment(request):
mock = pytest.roles.aquire('mock')
assert mock.name == 'mock'
assert mock.greeting == 'Hello World'
''')
result = testdir_with_map.runpytest('--env=mock_env')
assert result.ret == 0
|
Add a basic test to make sure acquisition worksimport pytest
@pytest.fixture
def testdir_with_map(testdir):
testdir.makefile('.yaml', map='''
version: 1
environments:
mock_env:
roles:
mock:
- mocker.example:
greeting: Hello World
''')
testdir.makepyfile(mocker='''
class Greeting:
def __init__(self, greeting):
self.greeting = greeting
def example(greeting):
return Greeting(greeting)
''')
return testdir
def test_aquire(pytestconfig):
assert pytest.roles.data['mock']
assert pytest.roles.data['mock']['mocker.example']
def test_aquire(testdir_with_map):
testdir_with_map.makepyfile('''
import pytest
def test_read_environment(request):
mock = pytest.roles.aquire('mock')
assert mock.name == 'mock'
assert mock.greeting == 'Hello World'
''')
result = testdir_with_map.runpytest('--env=mock_env')
assert result.ret == 0
|
<commit_before><commit_msg>Add a basic test to make sure acquisition works<commit_after>import pytest
@pytest.fixture
def testdir_with_map(testdir):
testdir.makefile('.yaml', map='''
version: 1
environments:
mock_env:
roles:
mock:
- mocker.example:
greeting: Hello World
''')
testdir.makepyfile(mocker='''
class Greeting:
def __init__(self, greeting):
self.greeting = greeting
def example(greeting):
return Greeting(greeting)
''')
return testdir
def test_aquire(pytestconfig):
assert pytest.roles.data['mock']
assert pytest.roles.data['mock']['mocker.example']
def test_aquire(testdir_with_map):
testdir_with_map.makepyfile('''
import pytest
def test_read_environment(request):
mock = pytest.roles.aquire('mock')
assert mock.name == 'mock'
assert mock.greeting == 'Hello World'
''')
result = testdir_with_map.runpytest('--env=mock_env')
assert result.ret == 0
|
|
a5361a632ee392ee318258976fca33b8baef7fba
|
sc2reader/exceptions.py
|
sc2reader/exceptions.py
|
class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MutipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
|
class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MultipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
|
Fix a typo in MultipleMatchingFilesError.
|
Fix a typo in MultipleMatchingFilesError.
|
Python
|
mit
|
vlaufer/sc2reader,GraylinKim/sc2reader,ggtracker/sc2reader,ggtracker/sc2reader,dsjoerg/sc2reader,GraylinKim/sc2reader,StoicLoofah/sc2reader,vlaufer/sc2reader,StoicLoofah/sc2reader,dsjoerg/sc2reader
|
class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MutipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
Fix a typo in MultipleMatchingFilesError.
|
class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MultipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
|
<commit_before>class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MutipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
<commit_msg>Fix a typo in MultipleMatchingFilesError.<commit_after>
|
class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MultipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
|
class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MutipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
Fix a typo in MultipleMatchingFilesError.class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MultipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
|
<commit_before>class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MutipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
<commit_msg>Fix a typo in MultipleMatchingFilesError.<commit_after>class SC2ReaderError(Exception):
pass
class MPQError(SC2ReaderError):
pass
class NoMatchingFilesError(SC2ReaderError):
pass
class MultipleMatchingFilesError(SC2ReaderError):
pass
class ReadError(SC2ReaderError):
def __init__(self, msg, type, code, location, replay=None, game_events=[], buffer=None):
self.__dict__.update(locals())
super(ReadError, self).__init__(msg)
def __str__(self):
return "{0}, Type: {1}, Code: {2}".format(self.msg, self.type, self.code)
class ParseError(SC2ReaderError):
pass
class ProcessError(SC2ReaderError):
pass
class FileError(SC2ReaderError):
pass
|
33535ae325e15c7341c6330cf1caa756cfa09831
|
tests/lib/test_coins.py
|
tests/lib/test_coins.py
|
import electrumx.lib.coins as coins
def test_bitcoin_cash():
raw_header = bytes.fromhex(
"00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0"
"1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d"
"e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5"
)
height = 540000
electrum_header = {
'block_height': 540000,
'version': 536870912,
'prev_block_hash':
'0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df',
'merkle_root':
'81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36',
'timestamp': 1532215285,
'bits': 402774676,
'nonce': 3321400748
}
assert coins.BitcoinCash.electrum_header(
raw_header, height) == electrum_header
|
Add test for BCH electrum header
|
Add test for BCH electrum header
|
Python
|
mit
|
thelazier/electrumx,thelazier/electrumx
|
Add test for BCH electrum header
|
import electrumx.lib.coins as coins
def test_bitcoin_cash():
raw_header = bytes.fromhex(
"00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0"
"1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d"
"e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5"
)
height = 540000
electrum_header = {
'block_height': 540000,
'version': 536870912,
'prev_block_hash':
'0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df',
'merkle_root':
'81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36',
'timestamp': 1532215285,
'bits': 402774676,
'nonce': 3321400748
}
assert coins.BitcoinCash.electrum_header(
raw_header, height) == electrum_header
|
<commit_before><commit_msg>Add test for BCH electrum header<commit_after>
|
import electrumx.lib.coins as coins
def test_bitcoin_cash():
raw_header = bytes.fromhex(
"00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0"
"1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d"
"e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5"
)
height = 540000
electrum_header = {
'block_height': 540000,
'version': 536870912,
'prev_block_hash':
'0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df',
'merkle_root':
'81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36',
'timestamp': 1532215285,
'bits': 402774676,
'nonce': 3321400748
}
assert coins.BitcoinCash.electrum_header(
raw_header, height) == electrum_header
|
Add test for BCH electrum headerimport electrumx.lib.coins as coins
def test_bitcoin_cash():
raw_header = bytes.fromhex(
"00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0"
"1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d"
"e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5"
)
height = 540000
electrum_header = {
'block_height': 540000,
'version': 536870912,
'prev_block_hash':
'0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df',
'merkle_root':
'81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36',
'timestamp': 1532215285,
'bits': 402774676,
'nonce': 3321400748
}
assert coins.BitcoinCash.electrum_header(
raw_header, height) == electrum_header
|
<commit_before><commit_msg>Add test for BCH electrum header<commit_after>import electrumx.lib.coins as coins
def test_bitcoin_cash():
raw_header = bytes.fromhex(
"00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0"
"1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d"
"e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5"
)
height = 540000
electrum_header = {
'block_height': 540000,
'version': 536870912,
'prev_block_hash':
'0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df',
'merkle_root':
'81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36',
'timestamp': 1532215285,
'bits': 402774676,
'nonce': 3321400748
}
assert coins.BitcoinCash.electrum_header(
raw_header, height) == electrum_header
|
|
4fb24d18cefc76b8622b53427a064fc430b9bfee
|
tests/test_search.py
|
tests/test_search.py
|
from . import TestCase
from memopol.search.templatetags.search_tags import simple_search_shortcut
class TestSearchTemplateTags(TestCase):
def test_simple_search_shortcut(self):
url = simple_search_shortcut('country:FR or country:BR')
self.assertEqual(url, "/search/?q=country%3AFR%20or%20country%3ABR")
|
Add a basic test for simple_search_shortcut templatetag
|
[enh] Add a basic test for simple_search_shortcut templatetag
|
Python
|
agpl-3.0
|
yohanboniface/memopol-core,yohanboniface/memopol-core,yohanboniface/memopol-core
|
[enh] Add a basic test for simple_search_shortcut templatetag
|
from . import TestCase
from memopol.search.templatetags.search_tags import simple_search_shortcut
class TestSearchTemplateTags(TestCase):
def test_simple_search_shortcut(self):
url = simple_search_shortcut('country:FR or country:BR')
self.assertEqual(url, "/search/?q=country%3AFR%20or%20country%3ABR")
|
<commit_before><commit_msg>[enh] Add a basic test for simple_search_shortcut templatetag<commit_after>
|
from . import TestCase
from memopol.search.templatetags.search_tags import simple_search_shortcut
class TestSearchTemplateTags(TestCase):
def test_simple_search_shortcut(self):
url = simple_search_shortcut('country:FR or country:BR')
self.assertEqual(url, "/search/?q=country%3AFR%20or%20country%3ABR")
|
[enh] Add a basic test for simple_search_shortcut templatetagfrom . import TestCase
from memopol.search.templatetags.search_tags import simple_search_shortcut
class TestSearchTemplateTags(TestCase):
def test_simple_search_shortcut(self):
url = simple_search_shortcut('country:FR or country:BR')
self.assertEqual(url, "/search/?q=country%3AFR%20or%20country%3ABR")
|
<commit_before><commit_msg>[enh] Add a basic test for simple_search_shortcut templatetag<commit_after>from . import TestCase
from memopol.search.templatetags.search_tags import simple_search_shortcut
class TestSearchTemplateTags(TestCase):
def test_simple_search_shortcut(self):
url = simple_search_shortcut('country:FR or country:BR')
self.assertEqual(url, "/search/?q=country%3AFR%20or%20country%3ABR")
|
|
8222d728aa4a3e6032d3ea527bdaae3b053a6aed
|
tests/test_stream.py
|
tests/test_stream.py
|
#!/usr/bin/env python
import cle
import nose
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_stream():
dirpath = os.path.join(test_location, "x86_64")
filepath = os.path.join(dirpath, "fauxware")
filestream = open(filepath, 'rb')
load_opts = {'custom_ld_path': [dirpath]}
path_ld = cle.Loader(filepath, **load_opts)
stream_ld = cle.Loader(filestream, **load_opts)
nose.tools.assert_equal(path_ld.main_bin.entry, stream_ld.main_bin.entry)
nose.tools.assert_equal(path_ld.shared_objects.keys(), stream_ld.shared_objects.keys())
nose.tools.assert_equal(path_ld.memory.read_addr_at(path_ld.main_bin.entry),
stream_ld.memory.read_addr_at(stream_ld.main_bin.entry))
if __name__ == '__main__':
test_stream()
|
Add a test for loading files as a stream
|
Add a test for loading files as a stream
|
Python
|
bsd-2-clause
|
chubbymaggie/cle,angr/cle
|
Add a test for loading files as a stream
|
#!/usr/bin/env python
import cle
import nose
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_stream():
dirpath = os.path.join(test_location, "x86_64")
filepath = os.path.join(dirpath, "fauxware")
filestream = open(filepath, 'rb')
load_opts = {'custom_ld_path': [dirpath]}
path_ld = cle.Loader(filepath, **load_opts)
stream_ld = cle.Loader(filestream, **load_opts)
nose.tools.assert_equal(path_ld.main_bin.entry, stream_ld.main_bin.entry)
nose.tools.assert_equal(path_ld.shared_objects.keys(), stream_ld.shared_objects.keys())
nose.tools.assert_equal(path_ld.memory.read_addr_at(path_ld.main_bin.entry),
stream_ld.memory.read_addr_at(stream_ld.main_bin.entry))
if __name__ == '__main__':
test_stream()
|
<commit_before><commit_msg>Add a test for loading files as a stream<commit_after>
|
#!/usr/bin/env python
import cle
import nose
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_stream():
dirpath = os.path.join(test_location, "x86_64")
filepath = os.path.join(dirpath, "fauxware")
filestream = open(filepath, 'rb')
load_opts = {'custom_ld_path': [dirpath]}
path_ld = cle.Loader(filepath, **load_opts)
stream_ld = cle.Loader(filestream, **load_opts)
nose.tools.assert_equal(path_ld.main_bin.entry, stream_ld.main_bin.entry)
nose.tools.assert_equal(path_ld.shared_objects.keys(), stream_ld.shared_objects.keys())
nose.tools.assert_equal(path_ld.memory.read_addr_at(path_ld.main_bin.entry),
stream_ld.memory.read_addr_at(stream_ld.main_bin.entry))
if __name__ == '__main__':
test_stream()
|
Add a test for loading files as a stream#!/usr/bin/env python
import cle
import nose
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_stream():
dirpath = os.path.join(test_location, "x86_64")
filepath = os.path.join(dirpath, "fauxware")
filestream = open(filepath, 'rb')
load_opts = {'custom_ld_path': [dirpath]}
path_ld = cle.Loader(filepath, **load_opts)
stream_ld = cle.Loader(filestream, **load_opts)
nose.tools.assert_equal(path_ld.main_bin.entry, stream_ld.main_bin.entry)
nose.tools.assert_equal(path_ld.shared_objects.keys(), stream_ld.shared_objects.keys())
nose.tools.assert_equal(path_ld.memory.read_addr_at(path_ld.main_bin.entry),
stream_ld.memory.read_addr_at(stream_ld.main_bin.entry))
if __name__ == '__main__':
test_stream()
|
<commit_before><commit_msg>Add a test for loading files as a stream<commit_after>#!/usr/bin/env python
import cle
import nose
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_stream():
dirpath = os.path.join(test_location, "x86_64")
filepath = os.path.join(dirpath, "fauxware")
filestream = open(filepath, 'rb')
load_opts = {'custom_ld_path': [dirpath]}
path_ld = cle.Loader(filepath, **load_opts)
stream_ld = cle.Loader(filestream, **load_opts)
nose.tools.assert_equal(path_ld.main_bin.entry, stream_ld.main_bin.entry)
nose.tools.assert_equal(path_ld.shared_objects.keys(), stream_ld.shared_objects.keys())
nose.tools.assert_equal(path_ld.memory.read_addr_at(path_ld.main_bin.entry),
stream_ld.memory.read_addr_at(stream_ld.main_bin.entry))
if __name__ == '__main__':
test_stream()
|
|
a182ee1b7bbf00c3605fb363d0751b5959652263
|
ingestor/luigi_tasks.py
|
ingestor/luigi_tasks.py
|
import luigi
import logging
_log = logging.getLogger()
class ImportLandsatScene(luigi.Task):
dataset_path = luigi.Parameter()
def requires(self):
yield Reproject(self.dataset_path)
yield Tile(self.dataset_path)
yield ImportToNetCDFs(self.dataset_path)
yield RecordInDatabase(self.dataset_path)
if __name__ == '__main__':
luigi.run()
|
Add very very preliminary template for luigi-isation of ingestion
|
Add very very preliminary template for luigi-isation of ingestion
|
Python
|
bsd-3-clause
|
omad/datacube-experiments
|
Add very very preliminary template for luigi-isation of ingestion
|
import luigi
import logging
_log = logging.getLogger()
class ImportLandsatScene(luigi.Task):
dataset_path = luigi.Parameter()
def requires(self):
yield Reproject(self.dataset_path)
yield Tile(self.dataset_path)
yield ImportToNetCDFs(self.dataset_path)
yield RecordInDatabase(self.dataset_path)
if __name__ == '__main__':
luigi.run()
|
<commit_before><commit_msg>Add very very preliminary template for luigi-isation of ingestion<commit_after>
|
import luigi
import logging
_log = logging.getLogger()
class ImportLandsatScene(luigi.Task):
dataset_path = luigi.Parameter()
def requires(self):
yield Reproject(self.dataset_path)
yield Tile(self.dataset_path)
yield ImportToNetCDFs(self.dataset_path)
yield RecordInDatabase(self.dataset_path)
if __name__ == '__main__':
luigi.run()
|
Add very very preliminary template for luigi-isation of ingestion
import luigi
import logging
_log = logging.getLogger()
class ImportLandsatScene(luigi.Task):
dataset_path = luigi.Parameter()
def requires(self):
yield Reproject(self.dataset_path)
yield Tile(self.dataset_path)
yield ImportToNetCDFs(self.dataset_path)
yield RecordInDatabase(self.dataset_path)
if __name__ == '__main__':
luigi.run()
|
<commit_before><commit_msg>Add very very preliminary template for luigi-isation of ingestion<commit_after>
import luigi
import logging
_log = logging.getLogger()
class ImportLandsatScene(luigi.Task):
dataset_path = luigi.Parameter()
def requires(self):
yield Reproject(self.dataset_path)
yield Tile(self.dataset_path)
yield ImportToNetCDFs(self.dataset_path)
yield RecordInDatabase(self.dataset_path)
if __name__ == '__main__':
luigi.run()
|
|
1fcbd94d99fa7ea28330dbfb69c5b24d34ebcdd5
|
python/count-inversions.py
|
python/count-inversions.py
|
def countInversions(list):
if len(list) == 1:
return list, 0;
else:
middle = int(len(list)/2)
A = list[:middle]
B = list[middle:]
left, a = countInversions(A)
right, b = countInversions(B)
result, c = countSplitInversions(left, right)
return result, a + b + c
def countSplitInversions(A, B):
# Each copy from the right array while the left array still has items
# represents split inversions (one for each item remaining in the left array)
n = len(A) + len(B)
sorted = []
inversions = 0
for i in xrange(0, n):
if len(A) == 0:
sorted.append(B[0])
del(B[0])
elif len(B) == 0:
sorted.append(A[0])
del(A[0])
elif A[0] < B[0]:
sorted.append(A[0])
#Remove first element from array
del(A[0])
else:
sorted.append(B[0])
del(B[0])
inversions += len(A)
i+=1
# print sorted
return sorted, inversions
integers = [6, 5, 4, 3, 2, 1]  # sample input; the original snippet leaves `integers` undefined
print countInversions(integers)[1]
|
Add algorithm to count number of inversions in an array using divide and conquer
|
Add algorithm to count number of inversions in an array using divide and conquer
|
Python
|
mit
|
HiccupinGminor/tidbits,HiccupinGminor/tidbits
|
Add algorithm to count number of inversions in an array using divide and conquer
|
def countInversions(list):
if len(list) == 1:
return list, 0;
else:
middle = int(len(list)/2)
A = list[:middle]
B = list[middle:]
left, a = countInversions(A)
right, b = countInversions(B)
result, c = countSplitInversions(left, right)
return result, a + b + c
def countSplitInversions(A, B):
# Each copy from the right array while the left array still has items
# represents split inversions (one for each item remaining in the left array)
n = len(A) + len(B)
sorted = []
inversions = 0
for i in xrange(0, n):
if len(A) == 0:
sorted.append(B[0])
del(B[0])
elif len(B) == 0:
sorted.append(A[0])
del(A[0])
elif A[0] < B[0]:
sorted.append(A[0])
#Remove first element from array
del(A[0])
else:
sorted.append(B[0])
del(B[0])
inversions += len(A)
i+=1
# print sorted
return sorted, inversions
integers = [6, 5, 4, 3, 2, 1]  # sample input; the original snippet leaves `integers` undefined
print countInversions(integers)[1]
|
<commit_before><commit_msg>Add algorithm to count number of inversions in an array using divide and conquer<commit_after>
|
def countInversions(list):
if len(list) == 1:
return list, 0;
else:
middle = int(len(list)/2)
A = list[:middle]
B = list[middle:]
left, a = countInversions(A)
right, b = countInversions(B)
result, c = countSplitInversions(left, right)
return result, a + b + c
def countSplitInversions(A, B):
# Each copy from the right array while the left array still has items
# represents split inversions (one for each item remaining in the left array)
n = len(A) + len(B)
sorted = []
inversions = 0
for i in xrange(0, n):
if len(A) == 0:
sorted.append(B[0])
del(B[0])
elif len(B) == 0:
sorted.append(A[0])
del(A[0])
elif A[0] < B[0]:
sorted.append(A[0])
#Remove first element from array
del(A[0])
else:
sorted.append(B[0])
del(B[0])
inversions += len(A)
i+=1
# print sorted
return sorted, inversions
print countInversions(integers)[1]
|
Add algorithm to count number of inversions in an array using divide and conquerdef countInversions(list):
if len(list) == 1:
return list, 0;
else:
middle = int(len(list)/2)
A = list[:middle]
B = list[middle:]
left, a = countInversions(A)
right, b = countInversions(B)
result, c = countSplitInversions(left, right)
return result, a + b + c
def countSplitInversions(A, B):
# Each copy from the right array while the left array still has items
# represents split inversions (one for each item remaining in the left array)
n = len(A) + len(B)
sorted = []
inversions = 0
for i in xrange(0, n):
if len(A) == 0:
sorted.append(B[0])
del(B[0])
elif len(B) == 0:
sorted.append(A[0])
del(A[0])
elif A[0] < B[0]:
sorted.append(A[0])
#Remove first element from array
del(A[0])
else:
sorted.append(B[0])
del(B[0])
inversions += len(A)
i+=1
# print sorted
return sorted, inversions
integers = [6, 5, 4, 3, 2, 1]  # sample input; the original snippet leaves `integers` undefined
print countInversions(integers)[1]
|
<commit_before><commit_msg>Add algorithm to count number of inversions in an array using divide and conquer<commit_after>def countInversions(list):
if len(list) == 1:
return list, 0;
else:
middle = int(len(list)/2)
A = list[:middle]
B = list[middle:]
left, a = countInversions(A)
right, b = countInversions(B)
result, c = countSplitInversions(left, right)
return result, a + b + c
def countSplitInversions(A, B):
# Each copy from the right array while the left array still has items
# represents split inversions (one for each item remaining in the left array)
n = len(A) + len(B)
sorted = []
inversions = 0
for i in xrange(0, n):
if len(A) == 0:
sorted.append(B[0])
del(B[0])
elif len(B) == 0:
sorted.append(A[0])
del(A[0])
elif A[0] < B[0]:
sorted.append(A[0])
#Remove first element from array
del(A[0])
else:
sorted.append(B[0])
del(B[0])
inversions += len(A)
i+=1
# print sorted
return sorted, inversions
integers = [6, 5, 4, 3, 2, 1]  # sample input; the original snippet leaves `integers` undefined
print countInversions(integers)[1]
|
|
5a28355cf49b8e2bbc265163d5c94b470dd34ba4
|
util/noslang_scraper.py
|
util/noslang_scraper.py
|
"""
This script grabs data from the slang dictionary
http://www.noslang.com/dictionary and converts it into a python dictionary for
serialization.
Obviously contingent on noslang's format consistency.
Jesse Mu
"""
from bs4 import BeautifulSoup
import requests
try:
import cPickle as pickle
except ImportError:
import pickle
NOSLANG_URL = "http://www.noslang.com/dictionary/{}/"
def scraper(verbose=False):
"""Return a dictionary containing abbrevations from noslang."""
noslang_dict = {}
for letter in '1abcdefghijklmnopqrstuvwxyz':
url = NOSLANG_URL.format(letter)
if verbose:
print "Parsing url {}".format(url)
r = requests.get(url)
soup = BeautifulSoup(r.text)
tables = soup.find_all('table')
assert len(tables) == 2, "format discrepancy: > 2 tables on page"
# Get second table
table = tables[1]
tabletext = str(table)
# Check to make (reasonably) sure this is the correct table
assert '<a name=' in tabletext and '<abbr title=' in tabletext, \
"""format discrepancy: this doesn't seem to be an abbrevation table
(or format has changed)!"""
# noslang table has information in attributes in each dt
dts = table.find_all('dt')
abbrevations = [(dt.a['name'], dt.abbr['title']) for dt in dts]
noslang_dict.update(dict(abbrevations))
return noslang_dict
def serialize(filename, dictionary, verbose=False):
"""Output to a file or stdout with pickle."""
if filename == '-' or not filename:
if verbose: # Not sure why someone would specify verbose for stdout
print "Writing to stdout"
pickle.dump(dictionary, sys.stdout)
else:
if verbose:
print "Writing to {}".format(filename)
with open(filename, 'w') as fout:
pickle.dump(dictionary, fout)
def handle_filename(filename):
"""Prepare the filename - if directory specified, add default name"""
if filename[-1] == '/':
filename += 'noslang.p'
return filename
if __name__ == '__main__':
from argparse import ArgumentParser
import sys
parser = ArgumentParser()
parser.add_argument(
'-o', '--output', nargs="?", const='lib/noslang.p', default='-',
help="specify output file (defaults to bin/noslang.p "
"if specified without file or stdout if not specified"
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help="be verbose"
)
args = parser.parse_args()
filename = handle_filename(args.output)
noslang_dict = scraper(args.verbose)
serialize(filename, noslang_dict, args.verbose)
|
Add scraper for noslang abbrevations
|
Add scraper for noslang abbrevations
This will be used for tweet preprocessing.
|
Python
|
mit
|
jayelm/twittersa,jayelm/twittersa
|
Add scraper for noslang abbrevations
This will be used for tweet preprocessing.
|
"""
This script grabs data from the slang dictionary
http://www.noslang.com/dictionary and converts it into a python dictionary for
serialization.
Obviously contingent on noslang's format consistency.
Jesse Mu
"""
from bs4 import BeautifulSoup
import requests
try:
import cPickle as pickle
except ImportError:
import pickle
NOSLANG_URL = "http://www.noslang.com/dictionary/{}/"
def scraper(verbose=False):
"""Return a dictionary containing abbrevations from noslang."""
noslang_dict = {}
for letter in '1abcdefghijklmnopqrstuvwxyz':
url = NOSLANG_URL.format(letter)
if verbose:
print "Parsing url {}".format(url)
r = requests.get(url)
soup = BeautifulSoup(r.text)
tables = soup.find_all('table')
assert len(tables) == 2, "format discrepancy: > 2 tables on page"
# Get second table
table = tables[1]
tabletext = str(table)
# Check to make (reasonably) sure this is the correct table
assert '<a name=' in tabletext and '<abbr title=' in tabletext, \
"""format discrepancy: this doesn't seem to be an abbrevation table
(or format has changed)!"""
# noslang table has information in attributes in each dt
dts = table.find_all('dt')
abbrevations = [(dt.a['name'], dt.abbr['title']) for dt in dts]
noslang_dict.update(dict(abbrevations))
return noslang_dict
def serialize(filename, dictionary, verbose=False):
"""Output to a file or stdout with pickle."""
if filename == '-' or not filename:
if verbose: # Not sure why someone would specify verbose for stdout
print "Writing to stdout"
pickle.dump(dictionary, sys.stdout)
else:
if verbose:
print "Writing to {}".format(filename)
with open(filename, 'w') as fout:
pickle.dump(dictionary, fout)
def handle_filename(filename):
"""Prepare the filename - if directory specified, add default name"""
if filename[-1] == '/':
filename += 'noslang.p'
return filename
if __name__ == '__main__':
from argparse import ArgumentParser
import sys
parser = ArgumentParser()
parser.add_argument(
'-o', '--output', nargs="?", const='lib/noslang.p', default='-',
help="specify output file (defaults to bin/noslang.p "
"if specified without file or stdout if not specified"
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help="be verbose"
)
args = parser.parse_args()
filename = handle_filename(args.output)
noslang_dict = scraper(args.verbose)
serialize(filename, noslang_dict, args.verbose)
|
<commit_before><commit_msg>Add scraper for noslang abbrevations
This will be used for tweet preprocessing.<commit_after>
|
"""
This script grabs data from the slang dictionary
http://www.noslang.com/dictionary and converts it into a python dictionary for
serialization.
Obviously contingent on noslang's format consistency.
Jesse Mu
"""
from bs4 import BeautifulSoup
import requests
try:
import cPickle as pickle
except ImportError:
import pickle
NOSLANG_URL = "http://www.noslang.com/dictionary/{}/"
def scraper(verbose=False):
"""Return a dictionary containing abbrevations from noslang."""
noslang_dict = {}
for letter in '1abcdefghijklmnopqrstuvwxyz':
url = NOSLANG_URL.format(letter)
if verbose:
print "Parsing url {}".format(url)
r = requests.get(url)
soup = BeautifulSoup(r.text)
tables = soup.find_all('table')
assert len(tables) == 2, "format discrepancy: > 2 tables on page"
# Get second table
table = tables[1]
tabletext = str(table)
# Check to make (reasonably) sure this is the correct table
assert '<a name=' in tabletext and '<abbr title=' in tabletext, \
"""format discrepancy: this doesn't seem to be an abbrevation table
(or format has changed)!"""
# noslang table has information in attributes in each dt
dts = table.find_all('dt')
abbrevations = [(dt.a['name'], dt.abbr['title']) for dt in dts]
noslang_dict.update(dict(abbrevations))
return noslang_dict
def serialize(filename, dictionary, verbose=False):
"""Output to a file or stdout with pickle."""
if filename == '-' or not filename:
if verbose: # Not sure why someone would specify verbose for stdout
print "Writing to stdout"
pickle.dump(dictionary, sys.stdout)
else:
if verbose:
print "Writing to {}".format(filename)
with open(filename, 'w') as fout:
pickle.dump(dictionary, fout)
def handle_filename(filename):
"""Prepare the filename - if directory specified, add default name"""
if filename[-1] == '/':
filename += 'noslang.p'
return filename
if __name__ == '__main__':
from argparse import ArgumentParser
import sys
parser = ArgumentParser()
parser.add_argument(
'-o', '--output', nargs="?", const='lib/noslang.p', default='-',
help="specify output file (defaults to bin/noslang.p "
"if specified without file or stdout if not specified"
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help="be verbose"
)
args = parser.parse_args()
filename = handle_filename(args.output)
noslang_dict = scraper(args.verbose)
serialize(filename, noslang_dict, args.verbose)
|
Add scraper for noslang abbrevations
This will be used for tweet preprocessing."""
This script grabs data from the slang dictionary
http://www.noslang.com/dictionary and converts it into a python dictionary for
serialization.
Obviously contingent on noslang's format consistency.
Jesse Mu
"""
from bs4 import BeautifulSoup
import requests
try:
import cPickle as pickle
except ImportError:
import pickle
NOSLANG_URL = "http://www.noslang.com/dictionary/{}/"
def scraper(verbose=False):
"""Return a dictionary containing abbrevations from noslang."""
noslang_dict = {}
for letter in '1abcdefghijklmnopqrstuvwxyz':
url = NOSLANG_URL.format(letter)
if verbose:
print "Parsing url {}".format(url)
r = requests.get(url)
soup = BeautifulSoup(r.text)
tables = soup.find_all('table')
assert len(tables) == 2, "format discrepancy: > 2 tables on page"
# Get second table
table = tables[1]
tabletext = str(table)
# Check to make (reasonably) sure this is the correct table
assert '<a name=' in tabletext and '<abbr title=' in tabletext, \
"""format discrepancy: this doesn't seem to be an abbrevation table
(or format has changed)!"""
# noslang table has information in attributes in each dt
dts = table.find_all('dt')
abbrevations = [(dt.a['name'], dt.abbr['title']) for dt in dts]
noslang_dict.update(dict(abbrevations))
return noslang_dict
def serialize(filename, dictionary, verbose=False):
"""Output to a file or stdout with pickle."""
if filename == '-' or not filename:
if verbose: # Not sure why someone would specify verbose for stdout
print "Writing to stdout"
pickle.dump(dictionary, sys.stdout)
else:
if verbose:
print "Writing to {}".format(filename)
with open(filename, 'w') as fout:
pickle.dump(dictionary, fout)
def handle_filename(filename):
"""Prepare the filename - if directory specified, add default name"""
if filename[-1] == '/':
filename += 'noslang.p'
return filename
if __name__ == '__main__':
from argparse import ArgumentParser
import sys
parser = ArgumentParser()
parser.add_argument(
'-o', '--output', nargs="?", const='lib/noslang.p', default='-',
help="specify output file (defaults to bin/noslang.p "
"if specified without file or stdout if not specified"
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help="be verbose"
)
args = parser.parse_args()
filename = handle_filename(args.output)
noslang_dict = scraper(args.verbose)
serialize(filename, noslang_dict, args.verbose)
|
<commit_before><commit_msg>Add scraper for noslang abbrevations
This will be used for tweet preprocessing.<commit_after>"""
This script grabs data from the slang dictionary
http://www.noslang.com/dictionary and converts it into a python dictionary for
serialization.
Obviously contingent on noslang's format consistency.
Jesse Mu
"""
from bs4 import BeautifulSoup
import requests
try:
import cPickle as pickle
except ImportError:
import pickle
NOSLANG_URL = "http://www.noslang.com/dictionary/{}/"
def scraper(verbose=False):
"""Return a dictionary containing abbrevations from noslang."""
noslang_dict = {}
for letter in '1abcdefghijklmnopqrstuvwxyz':
url = NOSLANG_URL.format(letter)
if verbose:
print "Parsing url {}".format(url)
r = requests.get(url)
soup = BeautifulSoup(r.text)
tables = soup.find_all('table')
assert len(tables) == 2, "format discrepancy: > 2 tables on page"
# Get second table
table = tables[1]
tabletext = str(table)
# Check to make (reasonably) sure this is the correct table
assert '<a name=' in tabletext and '<abbr title=' in tabletext, \
"""format discrepancy: this doesn't seem to be an abbrevation table
(or format has changed)!"""
# noslang table has information in attributes in each dt
dts = table.find_all('dt')
abbrevations = [(dt.a['name'], dt.abbr['title']) for dt in dts]
noslang_dict.update(dict(abbrevations))
return noslang_dict
def serialize(filename, dictionary, verbose=False):
"""Output to a file or stdout with pickle."""
if filename == '-' or not filename:
if verbose: # Not sure why someone would specify verbose for stdout
print "Writing to stdout"
pickle.dump(dictionary, sys.stdout)
else:
if verbose:
print "Writing to {}".format(filename)
with open(filename, 'w') as fout:
pickle.dump(dictionary, fout)
def handle_filename(filename):
"""Prepare the filename - if directory specified, add default name"""
if filename[-1] == '/':
filename += 'noslang.p'
return filename
if __name__ == '__main__':
from argparse import ArgumentParser
import sys
parser = ArgumentParser()
parser.add_argument(
'-o', '--output', nargs="?", const='lib/noslang.p', default='-',
help="specify output file (defaults to bin/noslang.p "
"if specified without file or stdout if not specified"
)
parser.add_argument(
'-v', '--verbose', action='store_true',
help="be verbose"
)
args = parser.parse_args()
filename = handle_filename(args.output)
noslang_dict = scraper(args.verbose)
serialize(filename, noslang_dict, args.verbose)
|
|
6e658a93b9e91b30f0902f741b2f90d9ecc18021
|
numba/exttypes/validators.py
|
numba/exttypes/validators.py
|
# -*- coding: utf-8 -*-
"""
Validate method signatures and inheritance compatibility.
"""
import types
import warnings
import inspect
import numba
from numba import *
from numba import error
from numba import typesystem
from numba.minivect import minitypes
#------------------------------------------------------------------------
# Method Validators
#------------------------------------------------------------------------
class MethodValidator(object):
"Interface for method validators"
def validate(self, method, ext_type):
"""
Validate a Method. Raise an exception for user typing errors.
"""
class ArgcountMethodValidator(MethodValidator):
"""
Validate a signature against the number of arguments the function expects.
"""
def validate(self, method, ext_type):
"""
Validate a signature (which is None if not declared by the user)
for a method.
"""
if method.signature is None:
return
nargs = method.py_func.__code__.co_argcount - 1 + method.is_static
if len(method.signature.args) != nargs:
raise error.NumbaError(
"Expected %d argument types in function "
"%s (don't include 'self')" % (nargs, method.name))
class InitMethodValidator(MethodValidator):
"""
Validate the init method of extension classes.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and (method.is_class or method.is_static):
raise error.NumbaError("__init__ method should not be a class- "
"or staticmethod")
class JitInitMethodValidator(MethodValidator):
"""
Validate the init method for jit functions. Issue a warning when the
signature is omitted.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and method.signature is None:
self.check_init_args(method, ext_type)
def check_init_args(self, method, ext_type):
if inspect.getargspec(method.py_func).args:
warnings.warn(
"Constructor for class '%s' has no signature, "
"assuming arguments have type 'object'" %
ext_type.py_class.__name__)
jit_validators = [ArgcountMethodValidator(), InitMethodValidator(), JitInitMethodValidator()]
autojit_validators = [ArgcountMethodValidator(), InitMethodValidator()]
#------------------------------------------------------------------------
# Inheritance Validators
#------------------------------------------------------------------------
class InheritanceValidator(object):
"""
Interface for validators that check for compatible inheritance trees.
"""
def validate(self, ext_type, base_ext_type):
"""
Validate an extension type with its parents.
"""
class AttributeValidator(object):
def validate(self, ext_type):
attr_prefix = utils.get_attributes_type(base).is_prefix(struct_type)
if not attr_prefix or not method_prefix:
raise error.NumbaError(
"Multiple incompatible base classes found: "
"%s and %s" % (base, bases[-1]))
|
Add extension types validator module
|
Add extension types validator module
|
Python
|
bsd-2-clause
|
GaZ3ll3/numba,gdementen/numba,stuartarchibald/numba,cpcloud/numba,numba/numba,pitrou/numba,sklam/numba,pitrou/numba,pitrou/numba,ssarangi/numba,shiquanwang/numba,GaZ3ll3/numba,sklam/numba,numba/numba,ssarangi/numba,gdementen/numba,jriehl/numba,seibert/numba,GaZ3ll3/numba,IntelLabs/numba,stuartarchibald/numba,cpcloud/numba,stonebig/numba,pitrou/numba,gdementen/numba,ssarangi/numba,stuartarchibald/numba,stonebig/numba,gmarkall/numba,gdementen/numba,gdementen/numba,pombredanne/numba,seibert/numba,IntelLabs/numba,stefanseefeld/numba,numba/numba,jriehl/numba,stefanseefeld/numba,pitrou/numba,stefanseefeld/numba,stonebig/numba,seibert/numba,ssarangi/numba,jriehl/numba,stefanseefeld/numba,stefanseefeld/numba,GaZ3ll3/numba,jriehl/numba,stonebig/numba,stonebig/numba,cpcloud/numba,gmarkall/numba,pombredanne/numba,pombredanne/numba,pombredanne/numba,IntelLabs/numba,gmarkall/numba,GaZ3ll3/numba,shiquanwang/numba,IntelLabs/numba,cpcloud/numba,numba/numba,IntelLabs/numba,jriehl/numba,sklam/numba,pombredanne/numba,sklam/numba,seibert/numba,stuartarchibald/numba,shiquanwang/numba,sklam/numba,stuartarchibald/numba,gmarkall/numba,seibert/numba,cpcloud/numba,ssarangi/numba,numba/numba,gmarkall/numba
|
Add extension types validator module
|
# -*- coding: utf-8 -*-
"""
Validate method signatures and inheritance compatibility.
"""
import types
import warnings
import inspect
import numba
from numba import *
from numba import error
from numba import typesystem
from numba.minivect import minitypes
#------------------------------------------------------------------------
# Method Validators
#------------------------------------------------------------------------
class MethodValidator(object):
"Interface for method validators"
def validate(self, method, ext_type):
"""
Validate a Method. Raise an exception for user typing errors.
"""
class ArgcountMethodValidator(MethodValidator):
"""
Validate a signature against the number of arguments the function expects.
"""
def validate(self, method, ext_type):
"""
Validate a signature (which is None if not declared by the user)
for a method.
"""
if method.signature is None:
return
nargs = method.py_func.__code__.co_argcount - 1 + method.is_static
if len(method.signature.args) != nargs:
raise error.NumbaError(
"Expected %d argument types in function "
"%s (don't include 'self')" % (nargs, method.name))
class InitMethodValidator(MethodValidator):
"""
Validate the init method of extension classes.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and (method.is_class or method.is_static):
raise error.NumbaError("__init__ method should not be a class- "
"or staticmethod")
class JitInitMethodValidator(MethodValidator):
"""
Validate the init method for jit functions. Issue a warning when the
signature is omitted.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and method.signature is None:
self.check_init_args(method, ext_type)
def check_init_args(self, method, ext_type):
if inspect.getargspec(method.py_func).args:
warnings.warn(
"Constructor for class '%s' has no signature, "
"assuming arguments have type 'object'" %
ext_type.py_class.__name__)
jit_validators = [ArgcountMethodValidator(), InitMethodValidator(), JitInitMethodValidator()]
autojit_validators = [ArgcountMethodValidator(), InitMethodValidator()]
#------------------------------------------------------------------------
# Inheritance Validators
#------------------------------------------------------------------------
class InheritanceValidator(object):
"""
Interface for validators that check for compatible inheritance trees.
"""
def validate(self, ext_type, base_ext_type):
"""
Validate an extension type with its parents.
"""
class AttributeValidator(object):
def validate(self, ext_type):
attr_prefix = utils.get_attributes_type(base).is_prefix(struct_type)
if not attr_prefix or not method_prefix:
raise error.NumbaError(
"Multiple incompatible base classes found: "
"%s and %s" % (base, bases[-1]))
|
<commit_before><commit_msg>Add extension types validator module<commit_after>
|
# -*- coding: utf-8 -*-
"""
Validate method signatures and inheritance compatibility.
"""
import types
import warnings
import inspect
import numba
from numba import *
from numba import error
from numba import typesystem
from numba.minivect import minitypes
#------------------------------------------------------------------------
# Method Validators
#------------------------------------------------------------------------
class MethodValidator(object):
"Interface for method validators"
def validate(self, method, ext_type):
"""
Validate a Method. Raise an exception for user typing errors.
"""
class ArgcountMethodValidator(MethodValidator):
"""
Validate a signature against the number of arguments the function expects.
"""
def validate(self, method, ext_type):
"""
Validate a signature (which is None if not declared by the user)
for a method.
"""
if method.signature is None:
return
nargs = method.py_func.__code__.co_argcount - 1 + method.is_static
if len(method.signature.args) != nargs:
raise error.NumbaError(
"Expected %d argument types in function "
"%s (don't include 'self')" % (nargs, method.name))
class InitMethodValidator(MethodValidator):
"""
Validate the init method of extension classes.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and (method.is_class or method.is_static):
raise error.NumbaError("__init__ method should not be a class- "
"or staticmethod")
class JitInitMethodValidator(MethodValidator):
"""
Validate the init method for jit functions. Issue a warning when the
signature is omitted.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and method.signature is None:
self.check_init_args(method, ext_type)
def check_init_args(self, method, ext_type):
if inspect.getargspec(method.py_func).args:
warnings.warn(
"Constructor for class '%s' has no signature, "
"assuming arguments have type 'object'" %
ext_type.py_class.__name__)
jit_validators = [ArgcountMethodValidator(), InitMethodValidator(), JitInitMethodValidator()]
autojit_validators = [ArgcountMethodValidator(), InitMethodValidator()]
#------------------------------------------------------------------------
# Inheritance Validators
#------------------------------------------------------------------------
class InheritanceValidator(object):
"""
Interface for validators that check for compatible inheritance trees.
"""
def validate(self, ext_type, base_ext_type):
"""
Validate an extension type with its parents.
"""
class AttributeValidator(object):
def validate(self, ext_type):
attr_prefix = utils.get_attributes_type(base).is_prefix(struct_type)
if not attr_prefix or not method_prefix:
raise error.NumbaError(
"Multiple incompatible base classes found: "
"%s and %s" % (base, bases[-1]))
|
Add extension types validator module# -*- coding: utf-8 -*-
"""
Validate method signatures and inheritance compatibility.
"""
import types
import warnings
import inspect
import numba
from numba import *
from numba import error
from numba import typesystem
from numba.minivect import minitypes
#------------------------------------------------------------------------
# Method Validators
#------------------------------------------------------------------------
class MethodValidator(object):
"Interface for method validators"
def validate(self, method, ext_type):
"""
Validate a Method. Raise an exception for user typing errors.
"""
class ArgcountMethodValidator(MethodValidator):
"""
Validate a signature against the number of arguments the function expects.
"""
def validate(self, method, ext_type):
"""
Validate a signature (which is None if not declared by the user)
for a method.
"""
if method.signature is None:
return
nargs = method.py_func.__code__.co_argcount - 1 + method.is_static
if len(method.signature.args) != nargs:
raise error.NumbaError(
"Expected %d argument types in function "
"%s (don't include 'self')" % (nargs, method.name))
class InitMethodValidator(MethodValidator):
"""
Validate the init method of extension classes.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and (method.is_class or method.is_static):
raise error.NumbaError("__init__ method should not be a class- "
"or staticmethod")
class JitInitMethodValidator(MethodValidator):
"""
Validate the init method for jit functions. Issue a warning when the
signature is omitted.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and method.signature is None:
self.check_init_args(method, ext_type)
def check_init_args(self, method, ext_type):
if inspect.getargspec(method.py_func).args:
warnings.warn(
"Constructor for class '%s' has no signature, "
"assuming arguments have type 'object'" %
ext_type.py_class.__name__)
jit_validators = [ArgcountMethodValidator(), InitMethodValidator(), JitInitMethodValidator()]
autojit_validators = [ArgcountMethodValidator(), InitMethodValidator()]
#------------------------------------------------------------------------
# Inheritance Validators
#------------------------------------------------------------------------
class InheritanceValidator(object):
"""
Interface for validators that check for compatible inheritance trees.
"""
def validate(self, ext_type, base_ext_type):
"""
Validate an extension type with its parents.
"""
class AttributeValidator(object):
def validate(self, ext_type):
attr_prefix = utils.get_attributes_type(base).is_prefix(struct_type)
if not attr_prefix or not method_prefix:
raise error.NumbaError(
"Multiple incompatible base classes found: "
"%s and %s" % (base, bases[-1]))
|
<commit_before><commit_msg>Add extension types validator module<commit_after># -*- coding: utf-8 -*-
"""
Validate method signatures and inheritance compatibility.
"""
import types
import warnings
import inspect
import numba
from numba import *
from numba import error
from numba import typesystem
from numba.minivect import minitypes
#------------------------------------------------------------------------
# Method Validators
#------------------------------------------------------------------------
class MethodValidator(object):
"Interface for method validators"
def validate(self, method, ext_type):
"""
Validate a Method. Raise an exception for user typing errors.
"""
class ArgcountMethodValidator(MethodValidator):
"""
Validate a signature against the number of arguments the function expects.
"""
def validate(self, method, ext_type):
"""
Validate a signature (which is None if not declared by the user)
for a method.
"""
if method.signature is None:
return
nargs = method.py_func.__code__.co_argcount - 1 + method.is_static
if len(method.signature.args) != nargs:
raise error.NumbaError(
"Expected %d argument types in function "
"%s (don't include 'self')" % (nargs, method.name))
class InitMethodValidator(MethodValidator):
"""
Validate the init method of extension classes.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and (method.is_class or method.is_static):
raise error.NumbaError("__init__ method should not be a class- "
"or staticmethod")
class JitInitMethodValidator(MethodValidator):
"""
Validate the init method for jit functions. Issue a warning when the
signature is omitted.
"""
def validate(self, method, ext_type):
if method.name == '__init__' and method.signature is None:
self.check_init_args(method, ext_type)
def check_init_args(self, method, ext_type):
if inspect.getargspec(method.py_func).args:
warnings.warn(
"Constructor for class '%s' has no signature, "
"assuming arguments have type 'object'" %
ext_type.py_class.__name__)
jit_validators = [ArgcountMethodValidator(), InitMethodValidator(), JitInitMethodValidator()]
autojit_validators = [ArgcountMethodValidator(), InitMethodValidator()]
#------------------------------------------------------------------------
# Inheritance Validators
#------------------------------------------------------------------------
class InheritanceValidator(object):
"""
Interface for validators that check for compatible inheritance trees.
"""
def validate(self, ext_type, base_ext_type):
"""
Validate an extension type with its parents.
"""
class AttributeValidator(object):
def validate(self, ext_type):
attr_prefix = utils.get_attributes_type(base).is_prefix(struct_type)
if not attr_prefix or not method_prefix:
raise error.NumbaError(
"Multiple incompatible base classes found: "
"%s and %s" % (base, bases[-1]))
|
|
cd992f1fbe4d7469cdc3d9a88863c61157cf90cc
|
content/test/gpu/gpu_tests/pixel_expectations.py
|
content/test/gpu/gpu_tests/pixel_expectations.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=371592)
self.Fail('Pixel.CSS3DBlueBox', bug=371592)
self.Fail('Pixel.WebGLGreenTriangle', bug=371592)
pass
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
Remove failing expectations for pixel tests.
|
Remove failing expectations for pixel tests.
R=kbr@chromium.org
BUG=252046, 371592
NOTRY=true
Review URL: https://codereview.chromium.org/282953002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@270785 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,markYoungH/chromium.src,ltilve/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,dednal/chromium.src,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,Just-D/chromium-1,littlstar/chromium.src,littlstar/chromium.src,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,Chilledheart/chromium,dednal/chromium.src,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,jaruba/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,Chilledheart/chromium,dednal/chromium.src,bright-sparks/chromium-spacewalk,Just-D/chromium-1,M4sse/chromium.src,dednal/chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,markYoungH/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,mohamed--abdel-maksoud
/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,ltilve/chromium,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,Fireblend/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,ltilve/chromium,littlstar/chromium.src,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,dushu1203/chromium.src,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk,markYoungH/chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,ltilve/chromium,dushu1203/chromium.src
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=371592)
self.Fail('Pixel.CSS3DBlueBox', bug=371592)
self.Fail('Pixel.WebGLGreenTriangle', bug=371592)
pass
Remove failing expectations for pixel tests.
R=kbr@chromium.org
BUG=252046, 371592
NOTRY=true
Review URL: https://codereview.chromium.org/282953002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@270785 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=371592)
self.Fail('Pixel.CSS3DBlueBox', bug=371592)
self.Fail('Pixel.WebGLGreenTriangle', bug=371592)
pass
<commit_msg>Remove failing expectations for pixel tests.
R=kbr@chromium.org
BUG=252046, 371592
NOTRY=true
Review URL: https://codereview.chromium.org/282953002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@270785 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=371592)
self.Fail('Pixel.CSS3DBlueBox', bug=371592)
self.Fail('Pixel.WebGLGreenTriangle', bug=371592)
pass
Remove failing expectations for pixel tests.
R=kbr@chromium.org
BUG=252046, 371592
NOTRY=true
Review URL: https://codereview.chromium.org/282953002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@270785 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=371592)
self.Fail('Pixel.CSS3DBlueBox', bug=371592)
self.Fail('Pixel.WebGLGreenTriangle', bug=371592)
pass
<commit_msg>Remove failing expectations for pixel tests.
R=kbr@chromium.org
BUG=252046, 371592
NOTRY=true
Review URL: https://codereview.chromium.org/282953002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@270785 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
class PixelExpectations(test_expectations.TestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
5b4fdc751eecf9248a075535a86a4d40a78e737e
|
py/find-all-numbers-disappeared-in-an-array.py
|
py/find-all-numbers-disappeared-in-an-array.py
|
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, v in enumerate(nums, 1):
while v != i:
a, b = i - 1, v - 1
if nums[a] != nums[b]:
nums[a], nums[b] = nums[b], nums[a]
else:
break
v = nums[i - 1]
ans = []
for i, v in enumerate(nums, 1):
if v != i:
ans.append(i)
return ans
|
Add py solution for 448. Find All Numbers Disappeared in an Array
|
Add py solution for 448. Find All Numbers Disappeared in an Array
448. Find All Numbers Disappeared in an Array: https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 448. Find All Numbers Disappeared in an Array
448. Find All Numbers Disappeared in an Array: https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/
|
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, v in enumerate(nums, 1):
while v != i:
a, b = i - 1, v - 1
if nums[a] != nums[b]:
nums[a], nums[b] = nums[b], nums[a]
else:
break
v = nums[i - 1]
ans = []
for i, v in enumerate(nums, 1):
if v != i:
ans.append(i)
return ans
|
<commit_before><commit_msg>Add py solution for 448. Find All Numbers Disappeared in an Array
448. Find All Numbers Disappeared in an Array: https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/<commit_after>
|
class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, v in enumerate(nums, 1):
while v != i:
a, b = i - 1, v - 1
if nums[a] != nums[b]:
nums[a], nums[b] = nums[b], nums[a]
else:
break
v = nums[i - 1]
ans = []
for i, v in enumerate(nums, 1):
if v != i:
ans.append(i)
return ans
|
Add py solution for 448. Find All Numbers Disappeared in an Array
448. Find All Numbers Disappeared in an Array: https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, v in enumerate(nums, 1):
while v != i:
a, b = i - 1, v - 1
if nums[a] != nums[b]:
nums[a], nums[b] = nums[b], nums[a]
else:
break
v = nums[i - 1]
ans = []
for i, v in enumerate(nums, 1):
if v != i:
ans.append(i)
return ans
|
<commit_before><commit_msg>Add py solution for 448. Find All Numbers Disappeared in an Array
448. Find All Numbers Disappeared in an Array: https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/<commit_after>class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, v in enumerate(nums, 1):
while v != i:
a, b = i - 1, v - 1
if nums[a] != nums[b]:
nums[a], nums[b] = nums[b], nums[a]
else:
break
v = nums[i - 1]
ans = []
for i, v in enumerate(nums, 1):
if v != i:
ans.append(i)
return ans
|
|
5384fff5ceaa694109b6a6efa790877aabd5be7e
|
spacy/tests/regression/test_issue1305.py
|
spacy/tests/regression/test_issue1305.py
|
import pytest
@pytest.mark.models('en')
def test_issue1305(EN):
'''Test lemmatization of English VBZ'''
assert EN.vocab.morphology.lemmatizer('works', 'verb') == set(['work'])
doc = EN(u'This app works well')
assert doc[2].lemma_ == 'work'
|
Add test for 1305: Incorrect lemmatization of VBZ for English
|
Add test for 1305: Incorrect lemmatization of VBZ for English
|
Python
|
mit
|
honnibal/spaCy,aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,honnibal/spaCy,spacy-io/spaCy,recognai/spaCy,honnibal/spaCy,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,spacy-io/spaCy,honnibal/spaCy
|
Add test for 1305: Incorrect lemmatization of VBZ for English
|
import pytest
@pytest.mark.models('en')
def test_issue1305(EN):
'''Test lemmatization of English VBZ'''
assert EN.vocab.morphology.lemmatizer('works', 'verb') == set(['work'])
doc = EN(u'This app works well')
assert doc[2].lemma_ == 'work'
|
<commit_before><commit_msg>Add test for 1305: Incorrect lemmatization of VBZ for English<commit_after>
|
import pytest
@pytest.mark.models('en')
def test_issue1305(EN):
'''Test lemmatization of English VBZ'''
assert EN.vocab.morphology.lemmatizer('works', 'verb') == set(['work'])
doc = EN(u'This app works well')
assert doc[2].lemma_ == 'work'
|
Add test for 1305: Incorrect lemmatization of VBZ for Englishimport pytest
@pytest.mark.models('en')
def test_issue1305(EN):
'''Test lemmatization of English VBZ'''
assert EN.vocab.morphology.lemmatizer('works', 'verb') == set(['work'])
doc = EN(u'This app works well')
assert doc[2].lemma_ == 'work'
|
<commit_before><commit_msg>Add test for 1305: Incorrect lemmatization of VBZ for English<commit_after>import pytest
@pytest.mark.models('en')
def test_issue1305(EN):
'''Test lemmatization of English VBZ'''
assert EN.vocab.morphology.lemmatizer('works', 'verb') == set(['work'])
doc = EN(u'This app works well')
assert doc[2].lemma_ == 'work'
|
|
5a27efc55634efb52efe238488c6078ba397b2dc
|
ereuse_devicehub/scripts/updates/re_materialize_events_in_devices.py
|
ereuse_devicehub/scripts/updates/re_materialize_events_in_devices.py
|
import pymongo
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.device.hooks import MaterializeEvents
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.scripts.updates.update import Update
from ereuse_devicehub.utils import Naming
class ReMaterializeEventsInDevices(Update):
"""
Re-computes the *events* field in the devices. Note that events usually materialize *components* in devices and
*parent* in components, which are **not rematerialized here**.
"""
def execute(self, database):
DeviceDomain.update_many_raw({}, {'$set': {'events': []}})
for event in DeviceEventDomain.get({'$query': {}, '$orderby': {'_created': pymongo.ASCENDING}}):
MaterializeEvents.materialize_events(Naming.resource(event['@type']), [event])
|
Add ReMaterializeEventsInDevices to use in update.py
|
Add ReMaterializeEventsInDevices to use in update.py
|
Python
|
agpl-3.0
|
eReuse/DeviceHub,eReuse/DeviceHub
|
Add ReMaterializeEventsInDevices to use in update.py
|
import pymongo
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.device.hooks import MaterializeEvents
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.scripts.updates.update import Update
from ereuse_devicehub.utils import Naming
class ReMaterializeEventsInDevices(Update):
"""
Re-computes the *events* field in the devices. Note that events usually materialize *components* in devices and
*parent* in components, which are **not rematerialized here**.
"""
def execute(self, database):
DeviceDomain.update_many_raw({}, {'$set': {'events': []}})
for event in DeviceEventDomain.get({'$query': {}, '$orderby': {'_created': pymongo.ASCENDING}}):
MaterializeEvents.materialize_events(Naming.resource(event['@type']), [event])
|
<commit_before><commit_msg>Add ReMaterializeEventsInDevices to use in update.py<commit_after>
|
import pymongo
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.device.hooks import MaterializeEvents
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.scripts.updates.update import Update
from ereuse_devicehub.utils import Naming
class ReMaterializeEventsInDevices(Update):
"""
Re-computes the *events* field in the devices. Note that events usually materialize *components* in devices and
*parent* in components, which are **not rematerialized here**.
"""
def execute(self, database):
DeviceDomain.update_many_raw({}, {'$set': {'events': []}})
for event in DeviceEventDomain.get({'$query': {}, '$orderby': {'_created': pymongo.ASCENDING}}):
MaterializeEvents.materialize_events(Naming.resource(event['@type']), [event])
|
Add ReMaterializeEventsInDevices to use in update.pyimport pymongo
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.device.hooks import MaterializeEvents
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.scripts.updates.update import Update
from ereuse_devicehub.utils import Naming
class ReMaterializeEventsInDevices(Update):
"""
Re-computes the *events* field in the devices. Note that events usually materialize *components* in devices and
*parent* in components, which are **not rematerialized here**.
"""
def execute(self, database):
DeviceDomain.update_many_raw({}, {'$set': {'events': []}})
for event in DeviceEventDomain.get({'$query': {}, '$orderby': {'_created': pymongo.ASCENDING}}):
MaterializeEvents.materialize_events(Naming.resource(event['@type']), [event])
|
<commit_before><commit_msg>Add ReMaterializeEventsInDevices to use in update.py<commit_after>import pymongo
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.device.hooks import MaterializeEvents
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.scripts.updates.update import Update
from ereuse_devicehub.utils import Naming
class ReMaterializeEventsInDevices(Update):
"""
Re-computes the *events* field in the devices. Note that events usually materialize *components* in devices and
*parent* in components, which are **not rematerialized here**.
"""
def execute(self, database):
DeviceDomain.update_many_raw({}, {'$set': {'events': []}})
for event in DeviceEventDomain.get({'$query': {}, '$orderby': {'_created': pymongo.ASCENDING}}):
MaterializeEvents.materialize_events(Naming.resource(event['@type']), [event])
|
|
833a61b13dbd10bc3c3016fbd1874ecd98cc7753
|
cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py
|
cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
def create_default_contact_for_research_methods(apps, schema_editor):
ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod")
for value, name in RESEARCH_CONTACT_VIA:
ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save()
def rollback_default_contact_for_research_methods(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0021_auto_20190515_1042")]
operations = [
migrations.RunPython(
create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
)
]
|
Create default contact for research methods
|
Create default contact for research methods
|
Python
|
mit
|
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
|
Create default contact for research methods
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
def create_default_contact_for_research_methods(apps, schema_editor):
ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod")
for value, name in RESEARCH_CONTACT_VIA:
ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save()
def rollback_default_contact_for_research_methods(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0021_auto_20190515_1042")]
operations = [
migrations.RunPython(
create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
)
]
|
<commit_before><commit_msg>Create default contact for research methods<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
def create_default_contact_for_research_methods(apps, schema_editor):
ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod")
for value, name in RESEARCH_CONTACT_VIA:
ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save()
def rollback_default_contact_for_research_methods(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0021_auto_20190515_1042")]
operations = [
migrations.RunPython(
create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
)
]
|
Create default contact for research methods# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
def create_default_contact_for_research_methods(apps, schema_editor):
ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod")
for value, name in RESEARCH_CONTACT_VIA:
ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save()
def rollback_default_contact_for_research_methods(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0021_auto_20190515_1042")]
operations = [
migrations.RunPython(
create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
)
]
|
<commit_before><commit_msg>Create default contact for research methods<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
def create_default_contact_for_research_methods(apps, schema_editor):
ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod")
for value, name in RESEARCH_CONTACT_VIA:
ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save()
def rollback_default_contact_for_research_methods(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [("legalaid", "0021_auto_20190515_1042")]
operations = [
migrations.RunPython(
create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
)
]
|
|
6d7999a54745fc2d0451b8cf6eff7a87162ababa
|
tools/dmqmc/finite_temp_analysis.py
|
tools/dmqmc/finite_temp_analysis.py
|
#!/usr/bin/python
'''finite_temp_analysis.py [options] file_1 file_2 ... file_N
Analyse the output of a HANDE DMQMC calculation by averaging
temperature-dependent data across beta loops.'''
import pandas as pd
from os import path
import sys
sys.path.append(path.join(path.abspath(path.dirname(sys.argv[0])), 'pyblock'))
import pyblock
import pyhande
import optparse
def run_dmqmc_analysis(estimates):
'''Perform analysis of DMQMC data from a HANDE calculation.
Parameters
----------
estimates : :class:`pandas.DataFrame`
        All of the estimates from all beta loops, which are to be combined and analysed.
Returns
-------
None.
'''
means = estimates.mean(level=2)
def parse_args(args):
'''Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
filenames : list of strings
list of QMC output files
'''
parser = optparse.OptionParser(usage = __doc__)
(options, filenames) = parser.parse_args(args)
if not filenames:
parser.print_help()
sys.exit(1)
return (filenames)
def main(args):
'''Run data analysis on finite-temperature HANDE output.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
None.
'''
(files) = parse_args(args)
(metadata, data) = pyhande.extract.extract_data_sets(files)
data.set_index('iterations', inplace=True, append=True)
estimates = data.loc[:,'Shift':'# H psips']
run_dmqmc_analysis(estimates)
if __name__ == '__main__':
main(sys.argv[1:])
|
Work begun on replacing old DMQMC script with a pyhande-based script.
|
Work begun on replacing old DMQMC script with a pyhande-based script.
|
Python
|
lgpl-2.1
|
hande-qmc/hande,ruthfranklin/hande,hande-qmc/hande,hande-qmc/hande,hande-qmc/hande,hande-qmc/hande
|
Work begun on replacing old DMQMC script with a pyhande-based script.
|
#!/usr/bin/python
'''finite_temp_analysis.py [options] file_1 file_2 ... file_N
Analyse the output of a HANDE DMQMC calculation by averaging
temperature-dependent data across beta loops.'''
import pandas as pd
from os import path
import sys
sys.path.append(path.join(path.abspath(path.dirname(sys.argv[0])), 'pyblock'))
import pyblock
import pyhande
import optparse
def run_dmqmc_analysis(estimates):
'''Perform analysis of DMQMC data from a HANDE calculation.
Parameters
----------
estimates : :class:`pandas.DataFrame`
        All of the estimates from all beta loops, which are to be combined and analysed.
Returns
-------
None.
'''
means = estimates.mean(level=2)
def parse_args(args):
'''Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
filenames : list of strings
list of QMC output files
'''
parser = optparse.OptionParser(usage = __doc__)
(options, filenames) = parser.parse_args(args)
if not filenames:
parser.print_help()
sys.exit(1)
return (filenames)
def main(args):
'''Run data analysis on finite-temperature HANDE output.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
None.
'''
(files) = parse_args(args)
(metadata, data) = pyhande.extract.extract_data_sets(files)
data.set_index('iterations', inplace=True, append=True)
estimates = data.loc[:,'Shift':'# H psips']
run_dmqmc_analysis(estimates)
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Work begun on replacing old DMQMC script with a pyhande-based script.<commit_after>
|
#!/usr/bin/python
'''finite_temp_analysis.py [options] file_1 file_2 ... file_N
Analyse the output of a HANDE DMQMC calculation by averaging
temperature-dependent data across beta loops.'''
import pandas as pd
from os import path
import sys
sys.path.append(path.join(path.abspath(path.dirname(sys.argv[0])), 'pyblock'))
import pyblock
import pyhande
import optparse
def run_dmqmc_analysis(estimates):
'''Perform analysis of DMQMC data from a HANDE calculation.
Parameters
----------
estimates : :class:`pandas.DataFrame`
        All of the estimates from all beta loops, which are to be combined and analysed.
Returns
-------
None.
'''
means = estimates.mean(level=2)
def parse_args(args):
'''Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
filenames : list of strings
list of QMC output files
'''
parser = optparse.OptionParser(usage = __doc__)
(options, filenames) = parser.parse_args(args)
if not filenames:
parser.print_help()
sys.exit(1)
return (filenames)
def main(args):
'''Run data analysis on finite-temperature HANDE output.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
None.
'''
(files) = parse_args(args)
(metadata, data) = pyhande.extract.extract_data_sets(files)
data.set_index('iterations', inplace=True, append=True)
estimates = data.loc[:,'Shift':'# H psips']
run_dmqmc_analysis(estimates)
if __name__ == '__main__':
main(sys.argv[1:])
|
Work begun on replacing old DMQMC script with a pyhande-based script.#!/usr/bin/python
'''finite_temp_analysis.py [options] file_1 file_2 ... file_N
Analyse the output of a HANDE DMQMC calculation by averaging
temperature-dependent data across beta loops.'''
import pandas as pd
from os import path
import sys
sys.path.append(path.join(path.abspath(path.dirname(sys.argv[0])), 'pyblock'))
import pyblock
import pyhande
import optparse
def run_dmqmc_analysis(estimates):
'''Perform analysis of DMQMC data from a HANDE calculation.
Parameters
----------
estimates : :class:`pandas.DataFrame`
        All of the estimates from all beta loops, which are to be combined and analysed.
Returns
-------
None.
'''
means = estimates.mean(level=2)
def parse_args(args):
'''Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
filenames : list of strings
list of QMC output files
'''
parser = optparse.OptionParser(usage = __doc__)
(options, filenames) = parser.parse_args(args)
if not filenames:
parser.print_help()
sys.exit(1)
return (filenames)
def main(args):
'''Run data analysis on finite-temperature HANDE output.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
None.
'''
(files) = parse_args(args)
(metadata, data) = pyhande.extract.extract_data_sets(files)
data.set_index('iterations', inplace=True, append=True)
estimates = data.loc[:,'Shift':'# H psips']
run_dmqmc_analysis(estimates)
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Work begun on replacing old DMQMC script with a pyhande-based script.<commit_after>#!/usr/bin/python
'''finite_temp_analysis.py [options] file_1 file_2 ... file_N
Analyse the output of a HANDE DMQMC calculation by averaging
temperature-dependent data across beta loops.'''
import pandas as pd
from os import path
import sys
sys.path.append(path.join(path.abspath(path.dirname(sys.argv[0])), 'pyblock'))
import pyblock
import pyhande
import optparse
def run_dmqmc_analysis(estimates):
'''Perform analysis of DMQMC data from a HANDE calculation.
Parameters
----------
estimates : :class:`pandas.DataFrame`
        All of the estimates from all beta loops, which are to be combined and analysed.
Returns
-------
None.
'''
means = estimates.mean(level=2)
def parse_args(args):
'''Parse command-line arguments.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
filenames : list of strings
list of QMC output files
'''
parser = optparse.OptionParser(usage = __doc__)
(options, filenames) = parser.parse_args(args)
if not filenames:
parser.print_help()
sys.exit(1)
return (filenames)
def main(args):
'''Run data analysis on finite-temperature HANDE output.
Parameters
----------
args : list of strings
command-line arguments.
Returns
-------
None.
'''
(files) = parse_args(args)
(metadata, data) = pyhande.extract.extract_data_sets(files)
data.set_index('iterations', inplace=True, append=True)
estimates = data.loc[:,'Shift':'# H psips']
run_dmqmc_analysis(estimates)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
b7c84f47dd261e8270f2f9d4a37aacac316516a4
|
CodeFights/knapsackLight.py
|
CodeFights/knapsackLight.py
|
#!/usr/local/bin/python
# Code Fights Knapsack Problem
def knapsackLight(value1, weight1, value2, weight2, maxW):
if weight1 + weight2 <= maxW:
return value1 + value2
else:
return max([v for v, w in zip((value1, value2), (weight1, weight2))
if w <= maxW] + [0])
def main():
tests = [
[]
]
for t in tests:
res = knapsackLight(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: knapsackLight({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: knapsackLight({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights knapsack light problem
|
Set up Code Fights knapsack light problem
|
Python
|
mit
|
HKuz/Test_Code
|
Set up Code Fights knapsack light problem
|
#!/usr/local/bin/python
# Code Fights Knapsack Problem
def knapsackLight(value1, weight1, value2, weight2, maxW):
if weight1 + weight2 <= maxW:
return value1 + value2
else:
return max([v for v, w in zip((value1, value2), (weight1, weight2))
if w <= maxW] + [0])
def main():
tests = [
[]
]
for t in tests:
res = knapsackLight(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: knapsackLight({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: knapsackLight({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights knapsack light problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Knapsack Problem
def knapsackLight(value1, weight1, value2, weight2, maxW):
if weight1 + weight2 <= maxW:
return value1 + value2
else:
return max([v for v, w in zip((value1, value2), (weight1, weight2))
if w <= maxW] + [0])
def main():
tests = [
[]
]
for t in tests:
res = knapsackLight(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: knapsackLight({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: knapsackLight({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights knapsack light problem#!/usr/local/bin/python
# Code Fights Knapsack Problem
def knapsackLight(value1, weight1, value2, weight2, maxW):
if weight1 + weight2 <= maxW:
return value1 + value2
else:
return max([v for v, w in zip((value1, value2), (weight1, weight2))
if w <= maxW] + [0])
def main():
tests = [
[]
]
for t in tests:
res = knapsackLight(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: knapsackLight({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: knapsackLight({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights knapsack light problem<commit_after>#!/usr/local/bin/python
# Code Fights Knapsack Problem
def knapsackLight(value1, weight1, value2, weight2, maxW):
if weight1 + weight2 <= maxW:
return value1 + value2
else:
return max([v for v, w in zip((value1, value2), (weight1, weight2))
if w <= maxW] + [0])
def main():
tests = [
[]
]
for t in tests:
res = knapsackLight(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: knapsackLight({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: knapsackLight({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
|
d190a376442dd5e9516b7bef802369a5fe318f03
|
find_primes.py
|
find_primes.py
|
#!/usr/bin/env python2
def find_primes(limit):
primes = []
for candidate in range(2, limit + 1):
candidate_ok = True
for divisor in range(2, candidate):
if candidate % divisor == 0:
candidate_ok = False
break
if candidate_ok:
primes.append(candidate)
return primes
if __name__ == '__main__':
import sys
limit = int(sys.argv[1])
print find_primes(limit)
|
Implement the 'trial division' algorithm.
|
Implement the 'trial division' algorithm.
|
Python
|
mit
|
ipqb/bootcamp-primes-activity
|
Implement the 'trial division' algorithm.
|
#!/usr/bin/env python2
def find_primes(limit):
primes = []
for candidate in range(2, limit + 1):
candidate_ok = True
for divisor in range(2, candidate):
if candidate % divisor == 0:
candidate_ok = False
break
if candidate_ok:
primes.append(candidate)
return primes
if __name__ == '__main__':
import sys
limit = int(sys.argv[1])
print find_primes(limit)
|
<commit_before><commit_msg>Implement the 'trial division' algorithm.<commit_after>
|
#!/usr/bin/env python2
def find_primes(limit):
primes = []
for candidate in range(2, limit + 1):
candidate_ok = True
for divisor in range(2, candidate):
if candidate % divisor == 0:
candidate_ok = False
break
if candidate_ok:
primes.append(candidate)
return primes
if __name__ == '__main__':
import sys
limit = int(sys.argv[1])
print find_primes(limit)
|
Implement the 'trial division' algorithm.#!/usr/bin/env python2
def find_primes(limit):
primes = []
for candidate in range(2, limit + 1):
candidate_ok = True
for divisor in range(2, candidate):
if candidate % divisor == 0:
candidate_ok = False
break
if candidate_ok:
primes.append(candidate)
return primes
if __name__ == '__main__':
import sys
limit = int(sys.argv[1])
print find_primes(limit)
|
<commit_before><commit_msg>Implement the 'trial division' algorithm.<commit_after>#!/usr/bin/env python2
def find_primes(limit):
primes = []
for candidate in range(2, limit + 1):
candidate_ok = True
for divisor in range(2, candidate):
if candidate % divisor == 0:
candidate_ok = False
break
if candidate_ok:
primes.append(candidate)
return primes
if __name__ == '__main__':
import sys
limit = int(sys.argv[1])
print find_primes(limit)
|
|
2b880e0a06c41bb17f21fdcca979311a5a478958
|
problem_49.py
|
problem_49.py
|
from time import time
from problem_35 import is_prime
def is_list_of_permutations(ls):
permutations = set([''.join(set(str(num))) for num in ls])
return len(permutations) == 1
def main():
primes = [i for i in range(1000, 10000) if is_prime(i)]
primes_set = set(primes)
found_lists = []
while len(found_lists) < 2:
first_prime = primes[0]
primes.pop(0)
for p in primes:
prime_list = [first_prime]
diff = p - first_prime
if p + diff in primes_set:
prime_list += [p, p+diff]
if is_list_of_permutations(prime_list):
found_lists.append(prime_list)
print ''.join([str(i) for i in found_lists[1]])
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 49, prime permutations/same diff sequence
|
Add problem 49, prime permutations/same diff sequence
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 49, prime permutations/same diff sequence
|
from time import time
from problem_35 import is_prime
def is_list_of_permutations(ls):
permutations = set([''.join(set(str(num))) for num in ls])
return len(permutations) == 1
def main():
primes = [i for i in range(1000, 10000) if is_prime(i)]
primes_set = set(primes)
found_lists = []
while len(found_lists) < 2:
first_prime = primes[0]
primes.pop(0)
for p in primes:
prime_list = [first_prime]
diff = p - first_prime
if p + diff in primes_set:
prime_list += [p, p+diff]
if is_list_of_permutations(prime_list):
found_lists.append(prime_list)
print ''.join([str(i) for i in found_lists[1]])
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 49, prime permutations/same diff sequence<commit_after>
|
from time import time
from problem_35 import is_prime
def is_list_of_permutations(ls):
permutations = set([''.join(set(str(num))) for num in ls])
return len(permutations) == 1
def main():
primes = [i for i in range(1000, 10000) if is_prime(i)]
primes_set = set(primes)
found_lists = []
while len(found_lists) < 2:
first_prime = primes[0]
primes.pop(0)
for p in primes:
prime_list = [first_prime]
diff = p - first_prime
if p + diff in primes_set:
prime_list += [p, p+diff]
if is_list_of_permutations(prime_list):
found_lists.append(prime_list)
print ''.join([str(i) for i in found_lists[1]])
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 49, prime permutations/same diff sequencefrom time import time
from problem_35 import is_prime
def is_list_of_permutations(ls):
permutations = set([''.join(set(str(num))) for num in ls])
return len(permutations) == 1
def main():
primes = [i for i in range(1000, 10000) if is_prime(i)]
primes_set = set(primes)
found_lists = []
while len(found_lists) < 2:
first_prime = primes[0]
primes.pop(0)
for p in primes:
prime_list = [first_prime]
diff = p - first_prime
if p + diff in primes_set:
prime_list += [p, p+diff]
if is_list_of_permutations(prime_list):
found_lists.append(prime_list)
print ''.join([str(i) for i in found_lists[1]])
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 49, prime permutations/same diff sequence<commit_after>from time import time
from problem_35 import is_prime
def is_list_of_permutations(ls):
permutations = set([''.join(set(str(num))) for num in ls])
return len(permutations) == 1
def main():
primes = [i for i in range(1000, 10000) if is_prime(i)]
primes_set = set(primes)
found_lists = []
while len(found_lists) < 2:
first_prime = primes[0]
primes.pop(0)
for p in primes:
prime_list = [first_prime]
diff = p - first_prime
if p + diff in primes_set:
prime_list += [p, p+diff]
if is_list_of_permutations(prime_list):
found_lists.append(prime_list)
print ''.join([str(i) for i in found_lists[1]])
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
|
eb5ca1f54e5631c57a735044ca34063d72e6fc4d
|
ynr/apps/official_documents/management/commands/official_documents_check_hashes.py
|
ynr/apps/official_documents/management/commands/official_documents_check_hashes.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
import requests
import hashlib
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
class Command(BaseCommand):
help = "Check the hash of a document against the source"
fieldnames = [
'ballot_paper_id',
'Election name',
'Area name',
'Source URL',
'remote_hash',
'local_hash',
'status_code',
'notes',
]
url_info_cache = {}
def add_arguments(self, parser):
parser.add_argument(
'--election-date',
action='store',
required=True
)
def handle(self, *args, **options):
self.out_csv = BufferDictWriter(self.fieldnames)
self.out_csv.writeheader()
qs = OfficialDocument.objects.filter(
election__election_date=options['election_date'])
for document in qs:
self.check_doc(document)
self.stdout.write(self.out_csv.output)
def get_hash(self, sopn_file):
md5 = hashlib.md5()
md5.update(sopn_file)
return md5.hexdigest()
def check_doc(self, doc):
line = {
'ballot_paper_id': doc.post_election.ballot_paper_id,
'Election name': doc.post_election.election.name,
'Area name': doc.post_election.postextra.base.label,
'Source URL': doc.source_url,
}
cache = self.url_info_cache.get(doc.source_url, {})
try:
if not cache:
req = requests.get(doc.source_url, stream=True)
status_code = req.status_code
cache = {
'status_code': status_code,
}
if status_code != 200:
cache['notes'] = "Remote file missing!"
else:
cache['remote_hash'] = self.get_hash(req.raw.read())
cache['local_hash'] = self.get_hash(doc.uploaded_file.file.read())
if not cache['remote_hash'] == cache['local_hash']:
cache['notes'] = 'File hash mismatch!'
line.update(cache)
self.url_info_cache[doc.source_url] = cache
except requests.exceptions.RequestException as e:
cache['notes'] = e
self.out_csv.writerow(line)
|
Add management command for checking SOPN files after download
|
Add management command for checking SOPN files after download
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Add management command for checking SOPN files after download
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
import requests
import hashlib
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
class Command(BaseCommand):
help = "Check the hash of a document against the source"
fieldnames = [
'ballot_paper_id',
'Election name',
'Area name',
'Source URL',
'remote_hash',
'local_hash',
'status_code',
'notes',
]
url_info_cache = {}
def add_arguments(self, parser):
parser.add_argument(
'--election-date',
action='store',
required=True
)
def handle(self, *args, **options):
self.out_csv = BufferDictWriter(self.fieldnames)
self.out_csv.writeheader()
qs = OfficialDocument.objects.filter(
election__election_date=options['election_date'])
for document in qs:
self.check_doc(document)
self.stdout.write(self.out_csv.output)
def get_hash(self, sopn_file):
md5 = hashlib.md5()
md5.update(sopn_file)
return md5.hexdigest()
def check_doc(self, doc):
line = {
'ballot_paper_id': doc.post_election.ballot_paper_id,
'Election name': doc.post_election.election.name,
'Area name': doc.post_election.postextra.base.label,
'Source URL': doc.source_url,
}
cache = self.url_info_cache.get(doc.source_url, {})
try:
if not cache:
req = requests.get(doc.source_url, stream=True)
status_code = req.status_code
cache = {
'status_code': status_code,
}
if status_code != 200:
cache['notes'] = "Remote file missing!"
else:
cache['remote_hash'] = self.get_hash(req.raw.read())
cache['local_hash'] = self.get_hash(doc.uploaded_file.file.read())
if not cache['remote_hash'] == cache['local_hash']:
cache['notes'] = 'File hash mismatch!'
line.update(cache)
self.url_info_cache[doc.source_url] = cache
except requests.exceptions.RequestException as e:
cache['notes'] = e
self.out_csv.writerow(line)
|
<commit_before><commit_msg>Add management command for checking SOPN files after download<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
import requests
import hashlib
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
class Command(BaseCommand):
help = "Check the hash of a document against the source"
fieldnames = [
'ballot_paper_id',
'Election name',
'Area name',
'Source URL',
'remote_hash',
'local_hash',
'status_code',
'notes',
]
url_info_cache = {}
def add_arguments(self, parser):
parser.add_argument(
'--election-date',
action='store',
required=True
)
def handle(self, *args, **options):
self.out_csv = BufferDictWriter(self.fieldnames)
self.out_csv.writeheader()
qs = OfficialDocument.objects.filter(
election__election_date=options['election_date'])
for document in qs:
self.check_doc(document)
self.stdout.write(self.out_csv.output)
def get_hash(self, sopn_file):
md5 = hashlib.md5()
md5.update(sopn_file)
return md5.hexdigest()
def check_doc(self, doc):
line = {
'ballot_paper_id': doc.post_election.ballot_paper_id,
'Election name': doc.post_election.election.name,
'Area name': doc.post_election.postextra.base.label,
'Source URL': doc.source_url,
}
cache = self.url_info_cache.get(doc.source_url, {})
try:
if not cache:
req = requests.get(doc.source_url, stream=True)
status_code = req.status_code
cache = {
'status_code': status_code,
}
if status_code != 200:
cache['notes'] = "Remote file missing!"
else:
cache['remote_hash'] = self.get_hash(req.raw.read())
cache['local_hash'] = self.get_hash(doc.uploaded_file.file.read())
if not cache['remote_hash'] == cache['local_hash']:
cache['notes'] = 'File hash mismatch!'
line.update(cache)
self.url_info_cache[doc.source_url] = cache
except requests.exceptions.RequestException as e:
cache['notes'] = e
self.out_csv.writerow(line)
|
Add management command for checking SOPN files after download# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
import requests
import hashlib
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
class Command(BaseCommand):
help = "Check the hash of a document against the source"
fieldnames = [
'ballot_paper_id',
'Election name',
'Area name',
'Source URL',
'remote_hash',
'local_hash',
'status_code',
'notes',
]
url_info_cache = {}
def add_arguments(self, parser):
parser.add_argument(
'--election-date',
action='store',
required=True
)
def handle(self, *args, **options):
self.out_csv = BufferDictWriter(self.fieldnames)
self.out_csv.writeheader()
qs = OfficialDocument.objects.filter(
election__election_date=options['election_date'])
for document in qs:
self.check_doc(document)
self.stdout.write(self.out_csv.output)
def get_hash(self, sopn_file):
md5 = hashlib.md5()
md5.update(sopn_file)
return md5.hexdigest()
def check_doc(self, doc):
line = {
'ballot_paper_id': doc.post_election.ballot_paper_id,
'Election name': doc.post_election.election.name,
'Area name': doc.post_election.postextra.base.label,
'Source URL': doc.source_url,
}
cache = self.url_info_cache.get(doc.source_url, {})
try:
if not cache:
req = requests.get(doc.source_url, stream=True)
status_code = req.status_code
cache = {
'status_code': status_code,
}
if status_code != 200:
cache['notes'] = "Remote file missing!"
else:
cache['remote_hash'] = self.get_hash(req.raw.read())
cache['local_hash'] = self.get_hash(doc.uploaded_file.file.read())
if not cache['remote_hash'] == cache['local_hash']:
cache['notes'] = 'File hash mismatch!'
line.update(cache)
self.url_info_cache[doc.source_url] = cache
except requests.exceptions.RequestException as e:
cache['notes'] = e
self.out_csv.writerow(line)
|
<commit_before><commit_msg>Add management command for checking SOPN files after download<commit_after># -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from compat import BufferDictWriter
import requests
import hashlib
from django.core.management.base import BaseCommand
from official_documents.models import OfficialDocument
class Command(BaseCommand):
help = "Check the hash of a document against the source"
fieldnames = [
'ballot_paper_id',
'Election name',
'Area name',
'Source URL',
'remote_hash',
'local_hash',
'status_code',
'notes',
]
url_info_cache = {}
def add_arguments(self, parser):
parser.add_argument(
'--election-date',
action='store',
required=True
)
def handle(self, *args, **options):
self.out_csv = BufferDictWriter(self.fieldnames)
self.out_csv.writeheader()
qs = OfficialDocument.objects.filter(
election__election_date=options['election_date'])
for document in qs:
self.check_doc(document)
self.stdout.write(self.out_csv.output)
def get_hash(self, sopn_file):
md5 = hashlib.md5()
md5.update(sopn_file)
return md5.hexdigest()
def check_doc(self, doc):
line = {
'ballot_paper_id': doc.post_election.ballot_paper_id,
'Election name': doc.post_election.election.name,
'Area name': doc.post_election.postextra.base.label,
'Source URL': doc.source_url,
}
cache = self.url_info_cache.get(doc.source_url, {})
try:
if not cache:
req = requests.get(doc.source_url, stream=True)
status_code = req.status_code
cache = {
'status_code': status_code,
}
if status_code != 200:
cache['notes'] = "Remote file missing!"
else:
cache['remote_hash'] = self.get_hash(req.raw.read())
cache['local_hash'] = self.get_hash(doc.uploaded_file.file.read())
if not cache['remote_hash'] == cache['local_hash']:
cache['notes'] = 'File hash mismatch!'
line.update(cache)
self.url_info_cache[doc.source_url] = cache
except requests.exceptions.RequestException as e:
cache['notes'] = e
self.out_csv.writerow(line)
|
|
865eda47ddcec937ab22fa39503cebb11a754a69
|
examples/line_example.py
|
examples/line_example.py
|
# -*- coding: utf-8 -*-
'''Test of Folium polylines'''
import folium
# Coordinates are 15 points on the great circle from Boston to
# San Francisco.
# Reference: http://williams.best.vwh.net/avform.htm#Intermediate
coordinates = [
[ 42.3581 , -71.0636 ],
[ 42.82995815, -74.78991444],
[ 43.17929819, -78.56603306],
[ 43.40320216, -82.37774519],
[ 43.49975489, -86.20965845],
[ 43.46811941, -90.04569087],
[ 43.30857071, -93.86961818],
[ 43.02248456, -97.66563267],
[ 42.61228259, -101.41886832],
[ 42.08133868, -105.11585198],
[ 41.4338549 , -108.74485069],
[ 40.67471747, -112.29609954],
[ 39.8093434 , -115.76190821],
[ 38.84352776, -119.13665678],
[ 37.7833 , -122.4167 ]]
# Create the map and add the line
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
m.line(coordinates, line_color='#FF0000', line_weight=5)
m.create_map(path='line_example.html')
|
Add simple example demonstrating line()
|
Add simple example demonstrating line()
|
Python
|
mit
|
zhoujh30/folium,koldunovn/folium,shankari/folium,BibMartin/folium,QuLogic/folium,kod3r/folium,koldunovn/folium,talespaiva/folium,koldunovn/folium,talespaiva/folium,talespaiva/folium,python-visualization/folium,LACMTA/folium,QuLogic/folium,UDST/folium,andrewgiessel/folium,xujun10110/folium,themiurgo/folium,BibMartin/folium,kod3r/folium,UDST/folium,andrewgiessel/folium,ocefpaf/folium,xujun10110/folium,droythorne/folium,themiurgo/folium,themiurgo/folium,zhoujh30/folium,LACMTA/folium,droythorne/folium,ozak/folium,talespaiva/folium,andrewgiessel/folium,ozak/folium,BibMartin/folium,python-visualization/folium,LACMTA/folium,xujun10110/folium,ozak/folium,zhoujh30/folium,droythorne/folium,ocefpaf/folium,shankari/folium,shankari/folium,QuLogic/folium,kod3r/folium
|
Add simple example demonstrating line()
|
# -*- coding: utf-8 -*-
'''Test of Folium polylines'''
import folium
# Coordinates are 15 points on the great circle from Boston to
# San Francisco.
# Reference: http://williams.best.vwh.net/avform.htm#Intermediate
coordinates = [
[ 42.3581 , -71.0636 ],
[ 42.82995815, -74.78991444],
[ 43.17929819, -78.56603306],
[ 43.40320216, -82.37774519],
[ 43.49975489, -86.20965845],
[ 43.46811941, -90.04569087],
[ 43.30857071, -93.86961818],
[ 43.02248456, -97.66563267],
[ 42.61228259, -101.41886832],
[ 42.08133868, -105.11585198],
[ 41.4338549 , -108.74485069],
[ 40.67471747, -112.29609954],
[ 39.8093434 , -115.76190821],
[ 38.84352776, -119.13665678],
[ 37.7833 , -122.4167 ]]
# Create the map and add the line
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
m.line(coordinates, line_color='#FF0000', line_weight=5)
m.create_map(path='line_example.html')
|
<commit_before><commit_msg>Add simple example demonstrating line()<commit_after>
|
# -*- coding: utf-8 -*-
'''Test of Folium polylines'''
import folium
# Coordinates are 15 points on the great circle from Boston to
# San Francisco.
# Reference: http://williams.best.vwh.net/avform.htm#Intermediate
coordinates = [
[ 42.3581 , -71.0636 ],
[ 42.82995815, -74.78991444],
[ 43.17929819, -78.56603306],
[ 43.40320216, -82.37774519],
[ 43.49975489, -86.20965845],
[ 43.46811941, -90.04569087],
[ 43.30857071, -93.86961818],
[ 43.02248456, -97.66563267],
[ 42.61228259, -101.41886832],
[ 42.08133868, -105.11585198],
[ 41.4338549 , -108.74485069],
[ 40.67471747, -112.29609954],
[ 39.8093434 , -115.76190821],
[ 38.84352776, -119.13665678],
[ 37.7833 , -122.4167 ]]
# Create the map and add the line
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
m.line(coordinates, line_color='#FF0000', line_weight=5)
m.create_map(path='line_example.html')
|
Add simple example demonstrating line()# -*- coding: utf-8 -*-
'''Test of Folium polylines'''
import folium
# Coordinates are 15 points on the great circle from Boston to
# San Francisco.
# Reference: http://williams.best.vwh.net/avform.htm#Intermediate
coordinates = [
[ 42.3581 , -71.0636 ],
[ 42.82995815, -74.78991444],
[ 43.17929819, -78.56603306],
[ 43.40320216, -82.37774519],
[ 43.49975489, -86.20965845],
[ 43.46811941, -90.04569087],
[ 43.30857071, -93.86961818],
[ 43.02248456, -97.66563267],
[ 42.61228259, -101.41886832],
[ 42.08133868, -105.11585198],
[ 41.4338549 , -108.74485069],
[ 40.67471747, -112.29609954],
[ 39.8093434 , -115.76190821],
[ 38.84352776, -119.13665678],
[ 37.7833 , -122.4167 ]]
# Create the map and add the line
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
m.line(coordinates, line_color='#FF0000', line_weight=5)
m.create_map(path='line_example.html')
|
<commit_before><commit_msg>Add simple example demonstrating line()<commit_after># -*- coding: utf-8 -*-
'''Test of Folium polylines'''
import folium
# Coordinates are 15 points on the great circle from Boston to
# San Francisco.
# Reference: http://williams.best.vwh.net/avform.htm#Intermediate
coordinates = [
[ 42.3581 , -71.0636 ],
[ 42.82995815, -74.78991444],
[ 43.17929819, -78.56603306],
[ 43.40320216, -82.37774519],
[ 43.49975489, -86.20965845],
[ 43.46811941, -90.04569087],
[ 43.30857071, -93.86961818],
[ 43.02248456, -97.66563267],
[ 42.61228259, -101.41886832],
[ 42.08133868, -105.11585198],
[ 41.4338549 , -108.74485069],
[ 40.67471747, -112.29609954],
[ 39.8093434 , -115.76190821],
[ 38.84352776, -119.13665678],
[ 37.7833 , -122.4167 ]]
# Create the map and add the line
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
m.line(coordinates, line_color='#FF0000', line_weight=5)
m.create_map(path='line_example.html')
|
|
ac6bdbee50c4f5feb54d111f98a29d31099f7e7f
|
plugins/groups/migrations/0004_auto_20150508_1300.py
|
plugins/groups/migrations/0004_auto_20150508_1300.py
|
"""Move github urls into custom properties."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
import json
def migrate_github_urls(apps, schema_editor):
"""Move github urls into custom properties."""
# First, for each event - if there are no group properties - add them
from happening.models import ConfigurationVariable
Event = apps.get_model("events", "Event")
contenttype = ContentType.objects.get_for_model(Event)
for event in Event.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=event.id,
key="group_properties")[0]
if not c.value:
c.value = json.dumps([{"type": "URLField", "name": "Github URL"}])
c.save()
# Then convert the github url for each group
Group = apps.get_model("groups", "Group")
contenttype = ContentType.objects.get_for_model(Group)
for group in Group.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=group.id,
key="custom_properties")[0]
if not c.value:
c.value = json.dumps({"github_url": group.github_url})
c.save()
class Migration(migrations.Migration):
dependencies = [
('groups', '0003_auto_20150426_1231'),
]
operations = [
migrations.RunPython(migrate_github_urls),
]
|
Migrate Github URL to custom properties
|
Migrate Github URL to custom properties
|
Python
|
mit
|
happeninghq/happening,happeninghq/happening,jscott1989/happening,jscott1989/happening,jscott1989/happening,jscott1989/happening,happeninghq/happening,happeninghq/happening
|
Migrate Github URL to custom properties
|
"""Move github urls into custom properties."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
import json
def migrate_github_urls(apps, schema_editor):
"""Move github urls into custom properties."""
# First, for each event - if there are no group properties - add them
from happening.models import ConfigurationVariable
Event = apps.get_model("events", "Event")
contenttype = ContentType.objects.get_for_model(Event)
for event in Event.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=event.id,
key="group_properties")[0]
if not c.value:
c.value = json.dumps([{"type": "URLField", "name": "Github URL"}])
c.save()
# Then convert the github url for each group
Group = apps.get_model("groups", "Group")
contenttype = ContentType.objects.get_for_model(Group)
for group in Group.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=group.id,
key="custom_properties")[0]
if not c.value:
c.value = json.dumps({"github_url": group.github_url})
c.save()
class Migration(migrations.Migration):
dependencies = [
('groups', '0003_auto_20150426_1231'),
]
operations = [
migrations.RunPython(migrate_github_urls),
]
|
<commit_before><commit_msg>Migrate Github URL to custom properties<commit_after>
|
"""Move github urls into custom properties."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
import json
def migrate_github_urls(apps, schema_editor):
"""Move github urls into custom properties."""
# First, for each event - if there are no group properties - add them
from happening.models import ConfigurationVariable
Event = apps.get_model("events", "Event")
contenttype = ContentType.objects.get_for_model(Event)
for event in Event.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=event.id,
key="group_properties")[0]
if not c.value:
c.value = json.dumps([{"type": "URLField", "name": "Github URL"}])
c.save()
# Then convert the github url for each group
Group = apps.get_model("groups", "Group")
contenttype = ContentType.objects.get_for_model(Group)
for group in Group.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=group.id,
key="custom_properties")[0]
if not c.value:
c.value = json.dumps({"github_url": group.github_url})
c.save()
class Migration(migrations.Migration):
dependencies = [
('groups', '0003_auto_20150426_1231'),
]
operations = [
migrations.RunPython(migrate_github_urls),
]
|
Migrate Github URL to custom properties"""Move github urls into custom properties."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
import json
def migrate_github_urls(apps, schema_editor):
"""Move github urls into custom properties."""
# First, for each event - if there are no group properties - add them
from happening.models import ConfigurationVariable
Event = apps.get_model("events", "Event")
contenttype = ContentType.objects.get_for_model(Event)
for event in Event.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=event.id,
key="group_properties")[0]
if not c.value:
c.value = json.dumps([{"type": "URLField", "name": "Github URL"}])
c.save()
# Then convert the github url for each group
Group = apps.get_model("groups", "Group")
contenttype = ContentType.objects.get_for_model(Group)
for group in Group.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=group.id,
key="custom_properties")[0]
if not c.value:
c.value = json.dumps({"github_url": group.github_url})
c.save()
class Migration(migrations.Migration):
dependencies = [
('groups', '0003_auto_20150426_1231'),
]
operations = [
migrations.RunPython(migrate_github_urls),
]
|
<commit_before><commit_msg>Migrate Github URL to custom properties<commit_after>"""Move github urls into custom properties."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
import json
def migrate_github_urls(apps, schema_editor):
"""Move github urls into custom properties."""
# First, for each event - if there are no group properties - add them
from happening.models import ConfigurationVariable
Event = apps.get_model("events", "Event")
contenttype = ContentType.objects.get_for_model(Event)
for event in Event.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=event.id,
key="group_properties")[0]
if not c.value:
c.value = json.dumps([{"type": "URLField", "name": "Github URL"}])
c.save()
# Then convert the github url for each group
Group = apps.get_model("groups", "Group")
contenttype = ContentType.objects.get_for_model(Group)
for group in Group.objects.all():
c = ConfigurationVariable.objects.get_or_create(
content_type=contenttype,
object_id=group.id,
key="custom_properties")[0]
if not c.value:
c.value = json.dumps({"github_url": group.github_url})
c.save()
class Migration(migrations.Migration):
dependencies = [
('groups', '0003_auto_20150426_1231'),
]
operations = [
migrations.RunPython(migrate_github_urls),
]
|
|
0cdd3213d0da5241b1a266db7d698d569bf10faa
|
bin/get-ec2-ip.py
|
bin/get-ec2-ip.py
|
#!/usr/bin/env /Users/alexrudy/.pyenv/versions/bitly-boto/bin/python
import boto3
import click
import re
import tempfile
from pathlib import Path
@click.command()
@click.option("--name", type=str, help="Name of the EC2 instance to query for.", required=True)
@click.option("--hostname", type=str, help="Hostname in the SSH Config.")
@click.option("--config", type=str, help="Path to the SSH Config to update.")
def main(name, hostname, config):
"""Find an EC2 box by name and print the IP address for it.
Optionally, update the `HOSTNAME` line in an SSH config
"""
ec2 = boto3.resource('ec2')
filters = [{'Name':'tag:Name', 'Values':[name]}]
instances = ec2.instances.filter(Filters=filters)
click.echo(f"Name: {name}")
try:
instance = next(iter(instances))
except StopIteration:
click.echo("No instance found...")
raise click.Abort()
click.echo(f"IP: {instance.public_ip_address}")
if config is not None and hostname is not None:
click.echo(f"Updating {config!s}")
update_config(config, hostname, instance.public_ip_address)
def update_config(config, hostname, ip_address):
"""Update the SSH Config with a new IP address"""
target = Path(config)
with tempfile.TemporaryDirectory() as tmpdirname:
target_backup = Path(tmpdirname) / target.name
with target_backup.open("w") as out_stream, target.open("r") as in_stream:
for line in iter_new_config(in_stream, hostname, ip_address):
out_stream.write(line)
target_backup.replace(target)
def iter_new_config(lines, target_host, new_address):
host = None
for line in lines:
        m = re.match(r"\s*host(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m:
host = m.group(2)
        m = re.match(r"\s*hostname(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m and host == target_host:
oldip = m.group(2)
line = line.replace(oldip, new_address)
yield line
if __name__ == '__main__':
main(auto_envvar_prefix='GET_EC2_IP')
|
Add a script to get an EC2 IP address and add to SSH config.
|
Add a script to get an EC2 IP address and add to SSH config.
|
Python
|
mit
|
alexrudy/dotfiles,alexrudy/dotfiles,alexrudy/dotfiles
|
Add a script to get an EC2 IP address and add to SSH config.
|
#!/usr/bin/env /Users/alexrudy/.pyenv/versions/bitly-boto/bin/python
import boto3
import click
import re
import tempfile
from pathlib import Path
@click.command()
@click.option("--name", type=str, help="Name of the EC2 instance to query for.", required=True)
@click.option("--hostname", type=str, help="Hostname in the SSH Config.")
@click.option("--config", type=str, help="Path to the SSH Config to update.")
def main(name, hostname, config):
"""Find an EC2 box by name and print the IP address for it.
Optionally, update the `HOSTNAME` line in an SSH config
"""
ec2 = boto3.resource('ec2')
filters = [{'Name':'tag:Name', 'Values':[name]}]
instances = ec2.instances.filter(Filters=filters)
click.echo(f"Name: {name}")
try:
instance = next(iter(instances))
except StopIteration:
click.echo("No instance found...")
raise click.Abort()
click.echo(f"IP: {instance.public_ip_address}")
if config is not None and hostname is not None:
click.echo(f"Updating {config!s}")
update_config(config, hostname, instance.public_ip_address)
def update_config(config, hostname, ip_address):
"""Update the SSH Config with a new IP address"""
target = Path(config)
with tempfile.TemporaryDirectory() as tmpdirname:
target_backup = Path(tmpdirname) / target.name
with target_backup.open("w") as out_stream, target.open("r") as in_stream:
for line in iter_new_config(in_stream, hostname, ip_address):
out_stream.write(line)
target_backup.replace(target)
def iter_new_config(lines, target_host, new_address):
host = None
for line in lines:
        m = re.match(r"\s*host(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m:
host = m.group(2)
        m = re.match(r"\s*hostname(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m and host == target_host:
oldip = m.group(2)
line = line.replace(oldip, new_address)
yield line
if __name__ == '__main__':
main(auto_envvar_prefix='GET_EC2_IP')
|
<commit_before><commit_msg>Add a script to get an EC2 IP address and add to SSH config.<commit_after>
|
#!/usr/bin/env /Users/alexrudy/.pyenv/versions/bitly-boto/bin/python
import boto3
import click
import re
import tempfile
from pathlib import Path
@click.command()
@click.option("--name", type=str, help="Name of the EC2 instance to query for.", required=True)
@click.option("--hostname", type=str, help="Hostname in the SSH Config.")
@click.option("--config", type=str, help="Path to the SSH Config to update.")
def main(name, hostname, config):
"""Find an EC2 box by name and print the IP address for it.
Optionally, update the `HOSTNAME` line in an SSH config
"""
ec2 = boto3.resource('ec2')
filters = [{'Name':'tag:Name', 'Values':[name]}]
instances = ec2.instances.filter(Filters=filters)
click.echo(f"Name: {name}")
try:
instance = next(iter(instances))
except StopIteration:
click.echo("No instance found...")
raise click.Abort()
click.echo(f"IP: {instance.public_ip_address}")
if config is not None and hostname is not None:
click.echo(f"Updating {config!s}")
update_config(config, hostname, instance.public_ip_address)
def update_config(config, hostname, ip_address):
"""Update the SSH Config with a new IP address"""
target = Path(config)
with tempfile.TemporaryDirectory() as tmpdirname:
target_backup = Path(tmpdirname) / target.name
with target_backup.open("w") as out_stream, target.open("r") as in_stream:
for line in iter_new_config(in_stream, hostname, ip_address):
out_stream.write(line)
target_backup.replace(target)
def iter_new_config(lines, target_host, new_address):
host = None
for line in lines:
        m = re.match(r"\s*host(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m:
host = m.group(2)
        m = re.match(r"\s*hostname(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m and host == target_host:
oldip = m.group(2)
line = line.replace(oldip, new_address)
yield line
if __name__ == '__main__':
main(auto_envvar_prefix='GET_EC2_IP')
|
Add a script to get an EC2 IP address and add to SSH config.#!/usr/bin/env /Users/alexrudy/.pyenv/versions/bitly-boto/bin/python
import boto3
import click
import re
import tempfile
from pathlib import Path
@click.command()
@click.option("--name", type=str, help="Name of the EC2 instance to query for.", required=True)
@click.option("--hostname", type=str, help="Hostname in the SSH Config.")
@click.option("--config", type=str, help="Path to the SSH Config to update.")
def main(name, hostname, config):
"""Find an EC2 box by name and print the IP address for it.
Optionally, update the `HOSTNAME` line in an SSH config
"""
ec2 = boto3.resource('ec2')
filters = [{'Name':'tag:Name', 'Values':[name]}]
instances = ec2.instances.filter(Filters=filters)
click.echo(f"Name: {name}")
try:
instance = next(iter(instances))
except StopIteration:
click.echo("No instance found...")
raise click.Abort()
click.echo(f"IP: {instance.public_ip_address}")
if config is not None and hostname is not None:
click.echo(f"Updating {config!s}")
update_config(config, hostname, instance.public_ip_address)
def update_config(config, hostname, ip_address):
"""Update the SSH Config with a new IP address"""
target = Path(config)
with tempfile.TemporaryDirectory() as tmpdirname:
target_backup = Path(tmpdirname) / target.name
with target_backup.open("w") as out_stream, target.open("r") as in_stream:
for line in iter_new_config(in_stream, hostname, ip_address):
out_stream.write(line)
target_backup.replace(target)
def iter_new_config(lines, target_host, new_address):
host = None
for line in lines:
        m = re.match(r"\s*host(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m:
host = m.group(2)
        m = re.match(r"\s*hostname(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m and host == target_host:
oldip = m.group(2)
line = line.replace(oldip, new_address)
yield line
if __name__ == '__main__':
main(auto_envvar_prefix='GET_EC2_IP')
|
<commit_before><commit_msg>Add a script to get an EC2 IP address and add to SSH config.<commit_after>#!/usr/bin/env /Users/alexrudy/.pyenv/versions/bitly-boto/bin/python
import boto3
import click
import re
import tempfile
from pathlib import Path
@click.command()
@click.option("--name", type=str, help="Name of the EC2 instance to query for.", required=True)
@click.option("--hostname", type=str, help="Hostname in the SSH Config.")
@click.option("--config", type=str, help="Path to the SSH Config to update.")
def main(name, hostname, config):
"""Find an EC2 box by name and print the IP address for it.
Optionally, update the `HOSTNAME` line in an SSH config
"""
ec2 = boto3.resource('ec2')
filters = [{'Name':'tag:Name', 'Values':[name]}]
instances = ec2.instances.filter(Filters=filters)
click.echo(f"Name: {name}")
try:
instance = next(iter(instances))
except StopIteration:
click.echo("No instance found...")
raise click.Abort()
click.echo(f"IP: {instance.public_ip_address}")
if config is not None and hostname is not None:
click.echo(f"Updating {config!s}")
update_config(config, hostname, instance.public_ip_address)
def update_config(config, hostname, ip_address):
"""Update the SSH Config with a new IP address"""
target = Path(config)
with tempfile.TemporaryDirectory() as tmpdirname:
target_backup = Path(tmpdirname) / target.name
with target_backup.open("w") as out_stream, target.open("r") as in_stream:
for line in iter_new_config(in_stream, hostname, ip_address):
out_stream.write(line)
target_backup.replace(target)
def iter_new_config(lines, target_host, new_address):
host = None
for line in lines:
        m = re.match(r"\s*host(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m:
host = m.group(2)
        m = re.match(r"\s*hostname(=|\s+)(.+?)\s*(#.*)?$", line, flags=re.I)
if m and host == target_host:
oldip = m.group(2)
line = line.replace(oldip, new_address)
yield line
if __name__ == '__main__':
main(auto_envvar_prefix='GET_EC2_IP')
|
|
dc53989363609e237586ab744bc900bcb979b14c
|
fabfile.py
|
fabfile.py
|
from fabric.api import cd, run
def deploy():
with cd('who-broke-build-slack'):
run('git reset --hard HEAD')
run('git pull origin master')
run('cp ../settings.py .')
try:
run("kill $(ps -ef | grep [w]ho_broke_build | awk '{print $2}')")
except:
pass
run('bash run.sh')
|
Add fab command to deploy
|
Add fab command to deploy
|
Python
|
mit
|
prontodev/who-broke-build-slack,prontodev/who-broke-build-slack,zkan/who-broke-build-slack,mrteera/who-broke-build-slack,mrteera/who-broke-build-slack,zkan/who-broke-build-slack
|
Add fab command to deploy
|
from fabric.api import cd, run
def deploy():
with cd('who-broke-build-slack'):
run('git reset --hard HEAD')
run('git pull origin master')
run('cp ../settings.py .')
try:
run("kill $(ps -ef | grep [w]ho_broke_build | awk '{print $2}')")
except:
pass
run('bash run.sh')
|
<commit_before><commit_msg>Add fab command to deploy<commit_after>
|
from fabric.api import cd, run
def deploy():
with cd('who-broke-build-slack'):
run('git reset --hard HEAD')
run('git pull origin master')
run('cp ../settings.py .')
try:
run("kill $(ps -ef | grep [w]ho_broke_build | awk '{print $2}')")
except:
pass
run('bash run.sh')
|
Add fab command to deployfrom fabric.api import cd, run
def deploy():
with cd('who-broke-build-slack'):
run('git reset --hard HEAD')
run('git pull origin master')
run('cp ../settings.py .')
try:
run("kill $(ps -ef | grep [w]ho_broke_build | awk '{print $2}')")
except:
pass
run('bash run.sh')
|
<commit_before><commit_msg>Add fab command to deploy<commit_after>from fabric.api import cd, run
def deploy():
with cd('who-broke-build-slack'):
run('git reset --hard HEAD')
run('git pull origin master')
run('cp ../settings.py .')
try:
run("kill $(ps -ef | grep [w]ho_broke_build | awk '{print $2}')")
except:
pass
run('bash run.sh')
|
|
c27736f169330cb2ffcba3f3260c6e5242c84f4d
|
scripts/40-convert_to_tcx.py
|
scripts/40-convert_to_tcx.py
|
#!/usr/bin/python
#
# Script to run the FIT-to-TCX converter on every new FIT file that is being
# downloaded by Garmin-Extractor.
#
# Adjust the fittotcx path to point to where you have put the FIT-to-TCX files.
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import os
import subprocess
fittotcx = "/path/to/FIT-to-TCX/fittotcx.py"
action = sys.argv[1]
filename = sys.argv[2]
basedir = os.path.split(os.path.dirname(filename))[0]
basefile = os.path.basename(filename)
# Create directory
targetdir = os.path.join(basedir, "tcx")
try:
os.mkdir(targetdir)
except:
pass
targetfile = os.path.splitext(basefile)[0] + ".tcx"
# Run FIT-to-TCX
process = subprocess.Popen([fittotcx, filename], stdout=subprocess.PIPE)
(data, _) = process.communicate()
# Write result
f = file(os.path.join(targetdir, targetfile), 'w')
f.write(data)
f.close()
|
Add a script that converts FIT files to TCX files
|
Add a script that converts FIT files to TCX files
- Requires FIT-to-TCX to convert files
- Requires you to set the fittotcx variable to work
|
Python
|
mit
|
ddboline/Garmin-Forerunner-610-Extractor_fork
|
Add a script that converts FIT files to TCX files
- Requires FIT-to-TCX to convert files
- Requires you to set the fittotcx variable to work
|
#!/usr/bin/python
#
# Script to run the FIT-to-TCX converter on every new FIT file that is being
# downloaded by Garmin-Extractor.
#
# Adjust the fittotcx path to point to where you have put the FIT-to-TCX files.
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import os
import subprocess
fittotcx = "/path/to/FIT-to-TCX/fittotcx.py"
action = sys.argv[1]
filename = sys.argv[2]
basedir = os.path.split(os.path.dirname(filename))[0]
basefile = os.path.basename(filename)
# Create directory
targetdir = os.path.join(basedir, "tcx")
try:
os.mkdir(targetdir)
except:
pass
targetfile = os.path.splitext(basefile)[0] + ".tcx"
# Run FIT-to-TCX
process = subprocess.Popen([fittotcx, filename], stdout=subprocess.PIPE)
(data, _) = process.communicate()
# Write result
f = file(os.path.join(targetdir, targetfile), 'w')
f.write(data)
f.close()
|
<commit_before><commit_msg>Add a script that converts FIT files to TCX files
- Requires FIT-to-TCX to convert files
- Requires you to set the fittotcx variable to work<commit_after>
|
#!/usr/bin/python
#
# Script to run the FIT-to-TCX converter on every new FIT file that is being
# downloaded by Garmin-Extractor.
#
# Adjust the fittotcx path to point to where you have put the FIT-to-TCX files.
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import os
import subprocess
fittotcx = "/path/to/FIT-to-TCX/fittotcx.py"
action = sys.argv[1]
filename = sys.argv[2]
basedir = os.path.split(os.path.dirname(filename))[0]
basefile = os.path.basename(filename)
# Create directory
targetdir = os.path.join(basedir, "tcx")
try:
os.mkdir(targetdir)
except:
pass
targetfile = os.path.splitext(basefile)[0] + ".tcx"
# Run FIT-to-TCX
process = subprocess.Popen([fittotcx, filename], stdout=subprocess.PIPE)
(data, _) = process.communicate()
# Write result
f = file(os.path.join(targetdir, targetfile), 'w')
f.write(data)
f.close()
|
Add a script that converts FIT files to TCX files
- Requires FIT-to-TCX to convert files
- Requires you to set the fittotcx variable to work#!/usr/bin/python
#
# Script to run the FIT-to-TCX converter on every new FIT file that is being
# downloaded by Garmin-Extractor.
#
# Adjust the fittotcx path to point to where you have put the FIT-to-TCX files.
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import os
import subprocess
fittotcx = "/path/to/FIT-to-TCX/fittotcx.py"
action = sys.argv[1]
filename = sys.argv[2]
basedir = os.path.split(os.path.dirname(filename))[0]
basefile = os.path.basename(filename)
# Create directory
targetdir = os.path.join(basedir, "tcx")
try:
os.mkdir(targetdir)
except:
pass
targetfile = os.path.splitext(basefile)[0] + ".tcx"
# Run FIT-to-TCX
process = subprocess.Popen([fittotcx, filename], stdout=subprocess.PIPE)
(data, _) = process.communicate()
# Write result
f = file(os.path.join(targetdir, targetfile), 'w')
f.write(data)
f.close()
|
<commit_before><commit_msg>Add a script that converts FIT files to TCX files
- Requires FIT-to-TCX to convert files
- Requires you to set the fittotcx variable to work<commit_after>#!/usr/bin/python
#
# Script to run the FIT-to-TCX converter on every new FIT file that is being
# downloaded by Garmin-Extractor.
#
# Adjust the fittotcx path to point to where you have put the FIT-to-TCX files.
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import os
import subprocess
fittotcx = "/path/to/FIT-to-TCX/fittotcx.py"
action = sys.argv[1]
filename = sys.argv[2]
basedir = os.path.split(os.path.dirname(filename))[0]
basefile = os.path.basename(filename)
# Create directory
targetdir = os.path.join(basedir, "tcx")
try:
os.mkdir(targetdir)
except:
pass
targetfile = os.path.splitext(basefile)[0] + ".tcx"
# Run FIT-to-TCX
process = subprocess.Popen([fittotcx, filename], stdout=subprocess.PIPE)
(data, _) = process.communicate()
# Write result
f = file(os.path.join(targetdir, targetfile), 'w')
f.write(data)
f.close()
|
|
38597c1a0a65ff99f3f1779b17a98e1e3d4e8f71
|
backend/scripts/copyproj.py
|
backend/scripts/copyproj.py
|
#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
        print "You must specify a destination (--dest) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
Add script to copy over project files to a directory tree.
|
Add script to copy over project files to a directory tree.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add script to copy over project files to a directory tree.
|
#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
        print "You must specify a destination (--dest) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
<commit_before><commit_msg>Add script to copy over project files to a directory tree.<commit_after>
|
#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
        print "You must specify a destination (--dest) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
Add script to copy over project files to a directory tree.#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
        print "You must specify a destination (--dest) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
<commit_before><commit_msg>Add script to copy over project files to a directory tree.<commit_after>#!/usr/bin/env python
from optparse import OptionParser
import rethinkdb as r
import sys
import shutil
import os
import errno
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def file_dir(path, fid):
pieces = fid.split("-")
path = os.path.join(path, pieces[1][0:2], pieces[1][2:4])
return path
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-p", "--project", dest="project", type="string",
help="project id to copy over")
parser.add_option("-s", "--src", dest="src", type="string",
help="location of directory to copy from",
default="/mcfs/data/test")
parser.add_option("-d", "--dest", dest="dest", type="string",
help="directory to copy to")
(options, args) = parser.parse_args()
if options.dest is None:
        print "You must specify a destination (--dest) directory"
sys.exit(1)
if options.project is None:
print "You must specify a project id to copy (--project)"
sys.exit(1)
conn = r.connect("localhost", options.port, db="materialscommons")
rql = r.table("project2datadir")\
.get_all(options.project, index="project_id")\
.eq_join("datadir_id", r.table("datadir2datafile"),
index="datadir_id")\
.zip()\
.eq_join("datafile_id", r.table("datafiles"))\
.zip()
for fentry in rql.run(conn):
src_dir = file_dir(options.src, fentry['id'])
dest = file_dir(options.dest, fentry['id'])
print "Copy %s from %s to %s" % (fentry['name'], src_dir, dest)
mkdirp(dest)
src_file_path = os.path.join(src_dir, fentry['id'])
dest_file_path = os.path.join(dest, fentry['id'])
try:
shutil.copy(src_file_path, dest_file_path)
except:
print "Problem copying file %s" % (fentry['name'])
|
|
58895272867683d40f41e2082e9947ba1c80caeb
|
indra/tests/test_sofia.py
|
indra/tests/test_sofia.py
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from nose.plugins.attrib import attr
from indra.sources import sofia
@attr('webservice', 'nonpublic')
def test_text_process_webservice():
txt = 'rainfall causes floods'
sp = sofia.process_text(txt)
assert len(sp.statements) == 1
assert sp.statements[0].subj.name == 'rainfall'
assert sp.statements[0].obj.name == 'floods'
|
Add test for SOFIA text processing
|
Add test for SOFIA text processing
|
Python
|
bsd-2-clause
|
pvtodorov/indra,johnbachman/indra,bgyori/indra,sorgerlab/belpy,bgyori/indra,pvtodorov/indra,pvtodorov/indra,johnbachman/indra,sorgerlab/indra,johnbachman/belpy,johnbachman/belpy,pvtodorov/indra,johnbachman/indra,bgyori/indra,sorgerlab/belpy,sorgerlab/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy
|
Add test for SOFIA text processing
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from nose.plugins.attrib import attr
from indra.sources import sofia
@attr('webservice', 'nonpublic')
def test_text_process_webservice():
txt = 'rainfall causes floods'
sp = sofia.process_text(txt)
assert len(sp.statements) == 1
assert sp.statements[0].subj.name == 'rainfall'
assert sp.statements[0].obj.name == 'floods'
|
<commit_before><commit_msg>Add test for SOFIA text processing<commit_after>
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from nose.plugins.attrib import attr
from indra.sources import sofia
@attr('webservice', 'nonpublic')
def test_text_process_webservice():
txt = 'rainfall causes floods'
sp = sofia.process_text(txt)
assert len(sp.statements) == 1
assert sp.statements[0].subj.name == 'rainfall'
assert sp.statements[0].obj.name == 'floods'
|
Add test for SOFIA text processingfrom __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from nose.plugins.attrib import attr
from indra.sources import sofia
@attr('webservice', 'nonpublic')
def test_text_process_webservice():
txt = 'rainfall causes floods'
sp = sofia.process_text(txt)
assert len(sp.statements) == 1
assert sp.statements[0].subj.name == 'rainfall'
assert sp.statements[0].obj.name == 'floods'
|
<commit_before><commit_msg>Add test for SOFIA text processing<commit_after>from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from nose.plugins.attrib import attr
from indra.sources import sofia
@attr('webservice', 'nonpublic')
def test_text_process_webservice():
txt = 'rainfall causes floods'
sp = sofia.process_text(txt)
assert len(sp.statements) == 1
assert sp.statements[0].subj.name == 'rainfall'
assert sp.statements[0].obj.name == 'floods'
|
|
38481618f02521d31f69fccb3bfc72a708d25d35
|
django/publicmapping/redisutils.py
|
django/publicmapping/redisutils.py
|
## Utilities for Redis - mostly just a function for generating keys ##
def key_gen(**kwargs):
"""
Key generator for linux. Determines key based on
parameters supplied in kwargs.
Keyword Parameters:
@keyword geounit1: portable_id of a geounit
@keyword geounit2: portable_id of a geounit
@keyword region: region abbreviation
"""
if 'geounit1' in kwargs and 'geounit2' in kwargs:
return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'], kwargs['geounit2'])
if 'region' in kwargs:
return 'adj:region:%s' % kwargs['region']
|
Add redis utility for generating keys
|
Add redis utility for generating keys
|
Python
|
apache-2.0
|
JimCallahanOrlando/DistrictBuilder,JimCallahanOrlando/DistrictBuilder,JimCallahanOrlando/DistrictBuilder,JimCallahanOrlando/DistrictBuilder
|
Add redis utility for generating keys
|
## Utilities for Redis - mostly just a function for generating keys ##
def key_gen(**kwargs):
"""
Key generator for linux. Determines key based on
parameters supplied in kwargs.
Keyword Parameters:
@keyword geounit1: portable_id of a geounit
@keyword geounit2: portable_id of a geounit
@keyword region: region abbreviation
"""
if 'geounit1' in kwargs and 'geounit2' in kwargs:
return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'], kwargs['geounit2'])
if 'region' in kwargs:
return 'adj:region:%s' % kwargs['region']
|
<commit_before><commit_msg>Add redis utility for generating keys<commit_after>
|
## Utilities for Redis - mostly just a function for generating keys ##
def key_gen(**kwargs):
"""
Key generator for linux. Determines key based on
parameters supplied in kwargs.
Keyword Parameters:
@keyword geounit1: portable_id of a geounit
@keyword geounit2: portable_id of a geounit
@keyword region: region abbreviation
"""
if 'geounit1' in kwargs and 'geounit2' in kwargs:
return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'], kwargs['geounit2'])
if 'region' in kwargs:
return 'adj:region:%s' % kwargs['region']
|
Add redis utility for generating keys## Utilities for Redis - mostly just a function for generating keys ##
def key_gen(**kwargs):
"""
Key generator for linux. Determines key based on
parameters supplied in kwargs.
Keyword Parameters:
@keyword geounit1: portable_id of a geounit
@keyword geounit2: portable_id of a geounit
@keyword region: region abbreviation
"""
if 'geounit1' in kwargs and 'geounit2' in kwargs:
return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'], kwargs['geounit2'])
if 'region' in kwargs:
return 'adj:region:%s' % kwargs['region']
|
<commit_before><commit_msg>Add redis utility for generating keys<commit_after>## Utilities for Redis - mostly just a function for generating keys ##
def key_gen(**kwargs):
"""
Key generator for linux. Determines key based on
parameters supplied in kwargs.
Keyword Parameters:
@keyword geounit1: portable_id of a geounit
@keyword geounit2: portable_id of a geounit
@keyword region: region abbreviation
"""
if 'geounit1' in kwargs and 'geounit2' in kwargs:
return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'], kwargs['geounit2'])
if 'region' in kwargs:
return 'adj:region:%s' % kwargs['region']
|
|
41b27191becb90d452bbf210821f7f94eb8870f2
|
taskwiki/cache.py
|
taskwiki/cache.py
|
import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
# def update_tasks(self):
# tasks = [t
|
import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
self.uuid_cache = dict()
def update_tasks(self):
# Select all tasks in the files that have UUIDs
uuids = [t.uuid for t in self.cache.values() if t.uuid]
# Get them out of TaskWarrior at once
        tasks = self.tw.filter(uuid=','.join(uuids))
# Update each task in the cache
for task in tasks:
self.uuid_cache[task['uuid']].task = task
|
Add initial implementation of the bulk updates
|
Cache: Add initial implementation of the bulk updates
|
Python
|
mit
|
Spirotot/taskwiki,phha/taskwiki
|
import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
# def update_tasks(self):
# tasks = [t
Cache: Add initial implementation of the bulk updates
|
import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
self.uuid_cache = dict()
def update_tasks(self):
# Select all tasks in the files that have UUIDs
uuids = [t.uuid for t in self.cache.values() if t.uuid]
# Get them out of TaskWarrior at once
        tasks = self.tw.filter(uuid=','.join(uuids))
# Update each task in the cache
for task in tasks:
self.uuid_cache[task['uuid']].task = task
|
<commit_before>import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
# def update_tasks(self):
# tasks = [t
<commit_msg>Cache: Add initial implementation of the bulk updates<commit_after>
|
import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
self.uuid_cache = dict()
def update_tasks(self):
# Select all tasks in the files that have UUIDs
uuids = [t.uuid for t in self.cache.values() if t.uuid]
# Get them out of TaskWarrior at once
        tasks = self.tw.filter(uuid=','.join(uuids))
# Update each task in the cache
for task in tasks:
self.uuid_cache[task['uuid']].task = task
|
import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
# def update_tasks(self):
# tasks = [t
Cache: Add initial implementation of the bulk updatesimport copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
self.uuid_cache = dict()
def update_tasks(self):
# Select all tasks in the files that have UUIDs
uuids = [t.uuid for t in self.cache.values() if t.uuid]
# Get them out of TaskWarrior at once
        tasks = self.tw.filter(uuid=','.join(uuids))
# Update each task in the cache
for task in tasks:
self.uuid_cache[task['uuid']].task = task
|
<commit_before>import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
# def update_tasks(self):
# tasks = [t
<commit_msg>Cache: Add initial implementation of the bulk updates<commit_after>import copy
import vim
from taskwiki.task import VimwikiTask
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.uuid_cache = dict()
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = VimwikiTask(vim.current.buffer[key], key, self.tw, self)
self.cache[key] = task
if task.uuid:
self.uuid_cache[task.uuid] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
self.uuid_cache = dict()
def update_tasks(self):
# Select all tasks in the files that have UUIDs
uuids = [t.uuid for t in self.cache.values() if t.uuid]
# Get them out of TaskWarrior at once
        tasks = self.tw.filter(uuid=','.join(uuids))
# Update each task in the cache
for task in tasks:
self.uuid_cache[task['uuid']].task = task
|
20a14eb74f79eac93c73af662ab8e997ba38ed27
|
tests/cli_test.py
|
tests/cli_test.py
|
"""tldextract integration tests."""
import sys
import pytest
from tldextract.cli import main
def test_cli_no_input(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 1
def test_cli_parses_args(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract", "--some", "nonsense"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 2
def test_cli_posargs(capsys, monkeypatch):
monkeypatch.setattr(
sys, "argv", ["tldextract", "example.com", "bbc.co.uk", "forums.bbc.co.uk"]
)
main()
stdout, stderr = capsys.readouterr()
assert not stderr
assert stdout == " example com\n bbc co.uk\nforums bbc co.uk\n"
|
Add basic CLI test coverage
|
Add basic CLI test coverage
|
Python
|
bsd-3-clause
|
john-kurkowski/tldextract
|
Add basic CLI test coverage
|
"""tldextract integration tests."""
import sys
import pytest
from tldextract.cli import main
def test_cli_no_input(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 1
def test_cli_parses_args(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract", "--some", "nonsense"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 2
def test_cli_posargs(capsys, monkeypatch):
monkeypatch.setattr(
sys, "argv", ["tldextract", "example.com", "bbc.co.uk", "forums.bbc.co.uk"]
)
main()
stdout, stderr = capsys.readouterr()
assert not stderr
assert stdout == " example com\n bbc co.uk\nforums bbc co.uk\n"
|
<commit_before><commit_msg>Add basic CLI test coverage<commit_after>
|
"""tldextract integration tests."""
import sys
import pytest
from tldextract.cli import main
def test_cli_no_input(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 1
def test_cli_parses_args(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract", "--some", "nonsense"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 2
def test_cli_posargs(capsys, monkeypatch):
monkeypatch.setattr(
sys, "argv", ["tldextract", "example.com", "bbc.co.uk", "forums.bbc.co.uk"]
)
main()
stdout, stderr = capsys.readouterr()
assert not stderr
assert stdout == " example com\n bbc co.uk\nforums bbc co.uk\n"
|
Add basic CLI test coverage"""tldextract integration tests."""
import sys
import pytest
from tldextract.cli import main
def test_cli_no_input(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 1
def test_cli_parses_args(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract", "--some", "nonsense"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 2
def test_cli_posargs(capsys, monkeypatch):
monkeypatch.setattr(
sys, "argv", ["tldextract", "example.com", "bbc.co.uk", "forums.bbc.co.uk"]
)
main()
stdout, stderr = capsys.readouterr()
assert not stderr
assert stdout == " example com\n bbc co.uk\nforums bbc co.uk\n"
|
<commit_before><commit_msg>Add basic CLI test coverage<commit_after>"""tldextract integration tests."""
import sys
import pytest
from tldextract.cli import main
def test_cli_no_input(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 1
def test_cli_parses_args(monkeypatch):
monkeypatch.setattr(sys, "argv", ["tldextract", "--some", "nonsense"])
with pytest.raises(SystemExit) as ex:
main()
assert ex.value.code == 2
def test_cli_posargs(capsys, monkeypatch):
monkeypatch.setattr(
sys, "argv", ["tldextract", "example.com", "bbc.co.uk", "forums.bbc.co.uk"]
)
main()
stdout, stderr = capsys.readouterr()
assert not stderr
assert stdout == " example com\n bbc co.uk\nforums bbc co.uk\n"
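For context, a minimal argparse-based sketch that would behave consistently with the exit codes exercised above (an illustrative stand-in only, not tldextract's real CLI; the extract() helper here is a naive split that ignores the public suffix list):

import argparse
import sys

def extract(host):
    # Naive stand-in: last label is the suffix, the one before it is the domain.
    parts = host.split('.')
    return '.'.join(parts[:-2]), parts[-2], parts[-1]

def main():
    parser = argparse.ArgumentParser(description='Split hostnames into subdomain, domain, suffix.')
    parser.add_argument('hosts', nargs='*')
    args = parser.parse_args()      # an unknown flag makes argparse exit with code 2
    if not args.hosts:
        sys.exit(1)                 # no positional input exits with code 1
    for host in args.hosts:
        print(' '.join(extract(host)))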
|
|
4dae49f56d4055bbc40cf339183a659f5eb761dd
|
nbstripout.py
|
nbstripout.py
|
#!/usr/bin/env python
"""
Clear outputs of IPython notebooks.
By default, it prints the notebooks without outputs into stdout.
When the --inplace option is given, all files will be overwritten.
"""
import sys
from IPython.nbformat import current as nbformat
def clear_outputs(nb):
"""Clear output of notebook `nb` INPLACE."""
for ws in nb.worksheets:
for cell in ws.cells:
cell.outputs = []
def stripoutput(inputs, inplace=False):
"""
Strip output of notebooks.
Parameters
----------
inputs : list of string
Path to the notebooks to be processed.
inplace : bool
If this is `True`, outputs in the input files will be deleted.
Default is `False`.
"""
for inpath in inputs:
with file(inpath) as fp:
nb = nbformat.read(fp, 'ipynb')
clear_outputs(nb)
if inplace:
with file(inpath, 'w') as fp:
nbformat.write(nb, fp, 'ipynb')
else:
nbformat.write(nb, sys.stdout, 'ipynb')
def main():
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument('inputs', nargs='+', metavar='input',
help='Paths to notebook files.')
parser.add_argument('-i', '--inplace', default=False, action='store_true',
help='Overwrite existing notebook when given.')
args = parser.parse_args()
stripoutput(**vars(args))
if __name__ == '__main__':
main()
|
Add a script to clear outputs from notebooks
|
Add a script to clear outputs from notebooks
Currently, it only supports in-place conversion.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add a script to clear outputs from notebooks
Currently, it only supports in-place conversion.
|
#!/usr/bin/env python
"""
Clear outputs of IPython notebooks.
By default, it prints the notebooks without outputs into stdout.
When the --inplace option is given, all files will be overwritten.
"""
import sys
from IPython.nbformat import current as nbformat
def clear_outputs(nb):
"""Clear output of notebook `nb` INPLACE."""
for ws in nb.worksheets:
for cell in ws.cells:
cell.outputs = []
def stripoutput(inputs, inplace=False):
"""
Strip output of notebooks.
Parameters
----------
inputs : list of string
Path to the notebooks to be processed.
inplace : bool
If this is `True`, outputs in the input files will be deleted.
Default is `False`.
"""
for inpath in inputs:
with file(inpath) as fp:
nb = nbformat.read(fp, 'ipynb')
clear_outputs(nb)
if inplace:
with file(inpath, 'w') as fp:
nbformat.write(nb, fp, 'ipynb')
else:
nbformat.write(nb, sys.stdout, 'ipynb')
def main():
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument('inputs', nargs='+', metavar='input',
help='Paths to notebook files.')
parser.add_argument('-i', '--inplace', default=False, action='store_true',
help='Overwrite existing notebook when given.')
args = parser.parse_args()
stripoutput(**vars(args))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to clear outputs from notebooks
Currently, it only supports in-place conversion.<commit_after>
|
#!/usr/bin/env python
"""
Clear outputs of IPython notebooks.
By default, it prints the notebooks without outputs into stdout.
When the --inplace option is given, all files will be overwritten.
"""
import sys
from IPython.nbformat import current as nbformat
def clear_outputs(nb):
"""Clear output of notebook `nb` INPLACE."""
for ws in nb.worksheets:
for cell in ws.cells:
cell.outputs = []
def stripoutput(inputs, inplace=False):
"""
Strip output of notebooks.
Parameters
----------
inputs : list of string
Path to the notebooks to be processed.
inplace : bool
If this is `True`, outputs in the input files will be deleted.
Default is `False`.
"""
for inpath in inputs:
with file(inpath) as fp:
nb = nbformat.read(fp, 'ipynb')
clear_outputs(nb)
if inplace:
with file(inpath, 'w') as fp:
nbformat.write(nb, fp, 'ipynb')
else:
nbformat.write(nb, sys.stdout, 'ipynb')
def main():
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument('inputs', nargs='+', metavar='input',
help='Paths to notebook files.')
parser.add_argument('-i', '--inplace', default=False, action='store_true',
help='Overwrite existing notebook when given.')
args = parser.parse_args()
stripoutput(**vars(args))
if __name__ == '__main__':
main()
|
Add a script to clear outputs from notebooks
Currently, it only supports in-place conversion.#!/usr/bin/env python
"""
Clear outputs of IPython notebooks.
By default, it prints the notebooks without outputs into stdout.
When the --inplace option is given, all files will be overwritten.
"""
import sys
from IPython.nbformat import current as nbformat
def clear_outputs(nb):
"""Clear output of notebook `nb` INPLACE."""
for ws in nb.worksheets:
for cell in ws.cells:
cell.outputs = []
def stripoutput(inputs, inplace=False):
"""
Strip output of notebooks.
Parameters
----------
inputs : list of string
Path to the notebooks to be processed.
inplace : bool
If this is `True`, outputs in the input files will be deleted.
Default is `False`.
"""
for inpath in inputs:
with file(inpath) as fp:
nb = nbformat.read(fp, 'ipynb')
clear_outputs(nb)
if inplace:
with file(inpath, 'w') as fp:
nbformat.write(nb, fp, 'ipynb')
else:
nbformat.write(nb, sys.stdout, 'ipynb')
def main():
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument('inputs', nargs='+', metavar='input',
help='Paths to notebook files.')
parser.add_argument('-i', '--inplace', default=False, action='store_true',
help='Overwrite existing notebook when given.')
args = parser.parse_args()
stripoutput(**vars(args))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to clear outputs from notebooks
Currently, it only supports in-place conversion.<commit_after>#!/usr/bin/env python
"""
Clear outputs of IPython notebooks.
By default, it prints the notebooks without outputs into stdout.
When the --inplace option is given, all files will be overwritten.
"""
import sys
from IPython.nbformat import current as nbformat
def clear_outputs(nb):
"""Clear output of notebook `nb` INPLACE."""
for ws in nb.worksheets:
for cell in ws.cells:
cell.outputs = []
def stripoutput(inputs, inplace=False):
"""
Strip output of notebooks.
Parameters
----------
inputs : list of string
Path to the notebooks to be processed.
inplace : bool
If this is `True`, outputs in the input files will be deleted.
Default is `False`.
"""
for inpath in inputs:
with file(inpath) as fp:
nb = nbformat.read(fp, 'ipynb')
clear_outputs(nb)
if inplace:
with file(inpath, 'w') as fp:
nbformat.write(nb, fp, 'ipynb')
else:
nbformat.write(nb, sys.stdout, 'ipynb')
def main():
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument('inputs', nargs='+', metavar='input',
help='Paths to notebook files.')
parser.add_argument('-i', '--inplace', default=False, action='store_true',
help='Overwrite existing notebook when given.')
args = parser.parse_args()
stripoutput(**vars(args))
if __name__ == '__main__':
main()
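The script above relies on the Python 2 file() builtin and the old IPython.nbformat "current" API; with the standalone nbformat >= 4 package the same stripping could be sketched roughly as follows (untested against this commit):

import sys
import nbformat

def clear_outputs(nb):
    """Blank outputs and execution counts of all code cells in-place."""
    for cell in nb.cells:
        if cell.cell_type == 'code':
            cell.outputs = []
            cell.execution_count = None

def stripoutput(inputs, inplace=False):
    for inpath in inputs:
        nb = nbformat.read(inpath, as_version=4)
        clear_outputs(nb)
        nbformat.write(nb, inpath if inplace else sys.stdout)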
|
|
3b5e295b42cfe0ecfa868f5f602ee25c097fcd81
|
cms_genome_browser/urls.py
|
cms_genome_browser/urls.py
|
from django.conf.urls import patterns, url
from cms_genome_browser.views import BrowserListView, BrowserDetailView
urlpatterns = patterns('',
url(r'^$', BrowserListView.as_view(), name='browser_list'),
url(r'^(?P<slug>[^/]+)$', BrowserDetailView.as_view(), name='browser_detail'),
)
|
Define URL patterns for Browser list and detail views
|
Define URL patterns for Browser list and detail views
|
Python
|
bsd-3-clause
|
mfcovington/djangocms-genome-browser,mfcovington/djangocms-genome-browser,mfcovington/djangocms-genome-browser
|
Define URL patterns for Browser list and detail views
|
from django.conf.urls import patterns, url
from cms_genome_browser.views import BrowserListView, BrowserDetailView
urlpatterns = patterns('',
url(r'^$', BrowserListView.as_view(), name='browser_list'),
url(r'^(?P<slug>[^/]+)$', BrowserDetailView.as_view(), name='browser_detail'),
)
|
<commit_before><commit_msg>Define URL patterns for Browser list and detail views<commit_after>
|
from django.conf.urls import patterns, url
from cms_genome_browser.views import BrowserListView, BrowserDetailView
urlpatterns = patterns('',
url(r'^$', BrowserListView.as_view(), name='browser_list'),
url(r'^(?P<slug>[^/]+)$', BrowserDetailView.as_view(), name='browser_detail'),
)
|
Define URL patterns for Browser list and detail viewsfrom django.conf.urls import patterns, url
from cms_genome_browser.views import BrowserListView, BrowserDetailView
urlpatterns = patterns('',
url(r'^$', BrowserListView.as_view(), name='browser_list'),
url(r'^(?P<slug>[^/]+)$', BrowserDetailView.as_view(), name='browser_detail'),
)
|
<commit_before><commit_msg>Define URL patterns for Browser list and detail views<commit_after>from django.conf.urls import patterns, url
from cms_genome_browser.views import BrowserListView, BrowserDetailView
urlpatterns = patterns('',
url(r'^$', BrowserListView.as_view(), name='browser_list'),
url(r'^(?P<slug>[^/]+)$', BrowserDetailView.as_view(), name='browser_detail'),
)
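django.conf.urls.patterns() was removed in Django 1.10; on current Django the same two routes would normally be written with path()/re_path(), roughly like this (approximate equivalent, not part of the original commit):

from django.urls import path, re_path
from cms_genome_browser.views import BrowserListView, BrowserDetailView

urlpatterns = [
    path('', BrowserListView.as_view(), name='browser_list'),
    re_path(r'^(?P<slug>[^/]+)$', BrowserDetailView.as_view(), name='browser_detail'),
]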
|
|
249728ae9888eaa83c0e22d60b0ed443de1ae707
|
cms_lab_members/cms_app.py
|
cms_lab_members/cms_app.py
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms_lab_members.menu import LabMembersMenu
class LabMembersApp(CMSApp):
name = _("Lab Member App")
urls = ["lab_members.urls"]
app_name = "lab_members"
menus = [LabMembersMenu]
apphook_pool.register(LabMembersApp)
|
Add Lab Members CMS app
|
Add Lab Members CMS app
|
Python
|
bsd-3-clause
|
mfcovington/djangocms-lab-members,mfcovington/djangocms-lab-members
|
Add Lab Members CMS app
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms_lab_members.menu import LabMembersMenu
class LabMembersApp(CMSApp):
name = _("Lab Member App")
urls = ["lab_members.urls"]
app_name = "lab_members"
menus = [LabMembersMenu]
apphook_pool.register(LabMembersApp)
|
<commit_before><commit_msg>Add Lab Members CMS app<commit_after>
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms_lab_members.menu import LabMembersMenu
class LabMembersApp(CMSApp):
name = _("Lab Member App")
urls = ["lab_members.urls"]
app_name = "lab_members"
menus = [LabMembersMenu]
apphook_pool.register(LabMembersApp)
|
Add Lab Members CMS app# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms_lab_members.menu import LabMembersMenu
class LabMembersApp(CMSApp):
name = _("Lab Member App")
urls = ["lab_members.urls"]
app_name = "lab_members"
menus = [LabMembersMenu]
apphook_pool.register(LabMembersApp)
|
<commit_before><commit_msg>Add Lab Members CMS app<commit_after># -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms_lab_members.menu import LabMembersMenu
class LabMembersApp(CMSApp):
name = _("Lab Member App")
urls = ["lab_members.urls"]
app_name = "lab_members"
menus = [LabMembersMenu]
apphook_pool.register(LabMembersApp)
|
|
155966c9d2e02714988e9284dffe6ad25c26dd58
|
salt/modules/slsutil.py
|
salt/modules/slsutil.py
|
# -*- coding: utf-8 -*-
'''
Utility functions for use with or in SLS files
'''
from __future__ import absolute_import
from salt.utils.dictupdate import merge, update
update.__doc__ = update.__doc__ + '''\
CLI Example:
.. code-block:: shell
salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'
'''
merge.__doc__ = '''\
Merge a data structure into another by choosing a merge strategy
Strategies:
* aggregate
* list
* overwrite
* recurse
* smart
CLI Example:
.. code-block:: shell
salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
'''
|
Add execution module for working in sls files
|
Add execution module for working in sls files
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add execution module for working in sls files
|
# -*- coding: utf-8 -*-
'''
Utility functions for use with or in SLS files
'''
from __future__ import absolute_import
from salt.utils.dictupdate import merge, update
update.__doc__ = update.__doc__ + '''\
CLI Example:
.. code-block:: shell
salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'
'''
merge.__doc__ = '''\
Merge a data structure into another by choosing a merge strategy
Strategies:
* aggregate
* list
* overwrite
* recurse
* smart
CLI Example:
.. code-block:: shell
salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
'''
|
<commit_before><commit_msg>Add execution module for working in sls files<commit_after>
|
# -*- coding: utf-8 -*-
'''
Utility functions for use with or in SLS files
'''
from __future__ import absolute_import
from salt.utils.dictupdate import merge, update
update.__doc__ = update.__doc__ + '''\
CLI Example:
.. code-block:: shell
salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'
'''
merge.__doc__ = '''\
Merge a data structure into another by choosing a merge strategy
Strategies:
* aggregate
* list
* overwrite
* recurse
* smart
CLI Example:
.. code-block:: shell
salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
'''
|
Add execution module for working in sls files# -*- coding: utf-8 -*-
'''
Utility functions for use with or in SLS files
'''
from __future__ import absolute_import
from salt.utils.dictupdate import merge, update
update.__doc__ = update.__doc__ + '''\
CLI Example:
.. code-block:: shell
salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'
'''
merge.__doc__ = '''\
Merge a data structure into another by choosing a merge strategy
Strategies:
* aggregate
* list
* overwrite
* recurse
* smart
CLI Example:
.. code-block:: shell
salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
'''
|
<commit_before><commit_msg>Add execution module for working in sls files<commit_after># -*- coding: utf-8 -*-
'''
Utility functions for use with or in SLS files
'''
from __future__ import absolute_import
from salt.utils.dictupdate import merge, update
update.__doc__ = update.__doc__ + '''\
CLI Example:
.. code-block:: shell
salt '*' slsutil.update '{foo: Foo}' '{bar: Bar}'
'''
merge.__doc__ = '''\
Merge a data structure into another by choosing a merge strategy
Strategies:
* aggregate
* list
* overwrite
* recurse
* smart
CLI Example:
.. code-block:: shell
salt '*' slsutil.merge '{foo: Foo}' '{bar: Bar}'
'''
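Since the module only re-exports the salt.utils.dictupdate helpers, calling them from Python (rather than the salt CLI) looks roughly like the following; exact keyword arguments can vary between Salt releases:

from salt.utils.dictupdate import merge, update

base = {'foo': {'a': 1}, 'bar': [1, 2]}
extra = {'foo': {'b': 2}, 'bar': [3]}

print(update(dict(base), extra))                     # recursive dictionary update
print(merge(dict(base), extra, strategy='recurse'))  # pick one of the strategies listed above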
|
|
962b6374a1fe81a0650c7a840e8cc990ede6b65a
|
sharing_variables.py
|
sharing_variables.py
|
# Sharing variables
# Example code to use a shared threshold variable for all ReLUs
# Create var and pass it to relu() function
import tensorflow as tf
import numpy as np
def relu(X, threshold):
with tf.name_scope('relu'):
#[...........]
return tf.maximum(z, threshold, name='max')
threshold = tf.Variable(0.0, name='threshold')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
relus = [relu(X, threshold) for i in range(5)]
output = tf.add_n(relus, name='output')
# Another alternative
# Set the shared var as an attribute of relu() upon first call
def relu(X):
with tf.name_scope('relu'):
if not hasattr(relu, 'threshold'):
relu.threshold = tf.Variable(0.0, name='threshold')
# [.........]
return tf.maximum(z, relu.threshold, name='max')
# Third option use get_variable() to create shared var if it does not exist
# or re-use if it does exist
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
# If var has been created by an earlier call to get_variable(),
# it will raise an exception. If you want to re-use, need to be explicit:
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# Alternatively:
with tf.variable_scope('relu') as scope:
scope.reuse_variables()
threshold = tf.get_variable('threshold')
# Pulling it all together
def relu(X):
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# [......]
return tf.maximum(z, threshold, name='max')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
relus = [relu(X) for relu_index in range(5)]
output = tf.add_n(relus, name='output')
|
Add code on how to share variables
|
Add code on how to share variables
Add code on sharing variables between different nodes in the graph
|
Python
|
mit
|
KT12/hands_on_machine_learning
|
Add code on how to share variables
Add code on sharing variables between different nodes in the graph
|
# Sharing variables
# Example code to use a shared threshold variable for all ReLUs
# Create var and pass it to relu() function
import tensorflow as tf
import numpy as np
def relu(X, threshold):
with tf.name_scope('relu'):
#[...........]
return tf.maximum(z, threshold, name='max')
threshold = tf.Variable(0.0, name='threshold')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
relus = [relu(X, threshold) for i in range(5)]
output = tf.add_n(relus, name='output')
# Another alternative
# Set the shared var as an attribute of relu() upon first call
def relu(X):
with tf.name_scope('relu'):
if not hasattr(relu, 'threshold'):
relu.threshold = tf.Variable(0.0, name='threshold')
# [.........]
return tf.maximum(z, relu.threshold, name='max')
# Third option use get_variable() to create shared var if it does not exist
# or re-use if it does exist
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
# If var has been created by an earlier call to get_variable(),
# it will raise an exception. If you want to re-use, need to be explicit:
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# Alternatively:
with tf.variable_scope('relu') as scope:
scope.reuse_variables()
threshold = tf.get_variable('threshold')
# Pulling it all together
def relu(X):
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# [......]
return tf.maximum(z, threshold, name='max')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
relus = [relu(X) for relu_index in range(5)]
output = tf.add_n(relus, name='output')
|
<commit_before><commit_msg>Add code on how to share variables
Add code on sharing variables between different nodes in the graph<commit_after>
|
# Sharing variables
# Example code to use a shared threshold variable for all ReLUs
# Create var and pass it to relu() function
import tensorflow as tf
import numpy as np
def relu(X, threshold):
with tf.name_scope('relu'):
#[...........]
return tf.maximum(z, threshold, name='max')
threshold = tf.Variable(0.0, name='threshold')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
relus = [relu(X, threshold) for i in range(5)]
output = tf.add_n(relus, name='output')
# Another alternative
# Set the shared var as an attribute of relu() upon first call
def relu(X):
with tf.name_scope('relu'):
if not hasattr(relu, 'threshold'):
relu.threshold = tf.Variable(0.0, name='threshold')
# [.........]
return tf.maximum(z, relu.threshold, name='max')
# Third option use get_variable() to create shared var if it does not exist
# or re-use if it does exist
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
# If var has been created by an earlier call to get_variable(),
# it will raise an exception. If you want to re-use, need to be explicit:
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# Alternatively:
with tf.variable_scope('relu') as scope:
scope.reuse_variables()
threshold = tf.get_variable('threshold')
# Pulling it all together
def relu(X):
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# [......]
return tf.maximum(z, threshold, name='max')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
relus = [relu(X) for relu_index in range(5)]
output = tf.add_n(relus, name='output')
|
Add code on how to share variables
Add code on sharing variables between different nodes in the graph# Sharing variables
# Example code to use a shared threshold variable for all ReLUs
# Create var and pass it to relu() function
import tensorflow as tf
import numpy as np
def relu(X, threshold):
with tf.name_scope('relu'):
#[...........]
return tf.maximum(z, threshold, name='max')
threshold = tf.Variable(0.0, name='threshold')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
relus = [relu(X, threshold) for i in range(5)]
output = tf.add_n(relus, name='output')
# Another alternative
# Set the shared var as an attribute of relu() upon first call
def relu(X):
with tf.name_scope('relu'):
if not hasattr(relu, 'threshold'):
relu.threshold = tf.Variable(0.0, name='threshold')
# [.........]
return tf.maximum(z, relu.threshold, name='max')
# Third option use get_variable() to create shared var if it does not exist
# or re-use if it does exist
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
# If var has been created by an earlier call to get_variable(),
# it will raise an exception. If you want to re-use, need to be explicit:
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# Alternatively:
with tf.variable_scope('relu') as scope:
scope.reuse_variables()
threshold = tf.get_variable('threshold')
# Pulling it all together
def relu(X):
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# [......]
return tf.maximum(z, threshold, name='max')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
relus = [relu(X) for relu_index in range(5)]
output = tf.add_n(relus, name='output')
|
<commit_before><commit_msg>Add code on how to share variables
Add code on sharing variables between different nodes in the graph<commit_after># Sharing variables
# Example code to use a shared threshold variable for all ReLUs
# Create var and pass it to relu() function
import tensorflow as tf
import numpy as np
def relu(X, threshold):
with tf.name_scope('relu'):
#[...........]
return tf.maximum(z, threshold, name='max')
threshold = tf.Variable(0.0, name='threshold')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
relus = [relu(X, threshold) for i in range(5)]
output = tf.add_n(relus, name='output')
# Another alternative
# Set the shared var as an attribute of relu() upon first call
def relu(X):
with tf.name_scope('relu'):
if not hasattr(relu, 'threshold'):
relu.threshold = tf.Variable(0.0, name='threshold')
# [.........]
return tf.maximum(z, relu.threshold, name='max')
# Third option use get_variable() to create shared var if it does not exist
# or re-use if it does exist
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
# If var has been created by an earlier call to get_variable(),
# it will raise an exception. If you want to re-use, need to be explicit:
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# Alternatively:
with tf.variable_scope('relu') as scope:
scope.reuse_variables()
threshold = tf.get_variable('threshold')
# Pulling it all together
def relu(X):
with tf.variable_scope('relu', reuse=True):
threshold = tf.get_variable('threshold')
# [......]
return tf.maximum(z, threshold, name='max')
X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
with tf.variable_scope('relu'):
threshold = tf.get_variable('threshold', shape=(),
initializer=tf.constant_initializer(0.0))
relus = [relu(X) for relu_index in range(5)]
output = tf.add_n(relus, name='output')
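The relu() bodies above are elided with placeholder comments; one way to fill them in so the final pattern actually runs under TensorFlow 1.x graph mode (the weight/bias math here is illustrative, not taken from the source) is:

import tensorflow as tf

n_features = 3

def relu(X):
    with tf.variable_scope('relu', reuse=True):
        threshold = tf.get_variable('threshold')        # shared across all five ReLUs
        w = tf.Variable(tf.random_normal((n_features, 1)), name='weights')
        b = tf.Variable(0.0, name='bias')
        z = tf.add(tf.matmul(X, w), b, name='z')
        return tf.maximum(z, threshold, name='max')

X = tf.placeholder(tf.float32, shape=(None, n_features), name='X')
with tf.variable_scope('relu'):
    tf.get_variable('threshold', shape=(), initializer=tf.constant_initializer(0.0))
relus = [relu(X) for _ in range(5)]
output = tf.add_n(relus, name='output')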
|
|
ed33f5cd2871cd2e484119dbe3ce962f3ae15c5d
|
core/tests/test_mngment.py
|
core/tests/test_mngment.py
|
from django.core.management import call_command
from .utils import BaseTestCase as TestCase
from core.models import User, Group, Game, GameTag
import sys
from cStringIO import StringIO
class CommandsTestCase(TestCase):
def test_makedata(self):
"Test makedata command."
# Want to hide STDOUT because the command prints
sys.stdout = StringIO()
# Some Asserts.
self.assertEqual(User.objects.count(), 0)
self.assertEqual(Group.objects.count(), 0)
self.assertEqual(Game.objects.count(), 0)
self.assertEqual(GameTag.objects.count(), 0)
args = []
opts = {}
call_command('makedata', *args, **opts)
# Some Asserts.
self.assertEqual(User.objects.count(), 100)
self.assertEqual(Group.objects.count(), 20)
self.assertEqual(Game.objects.count(), 100)
self.assertEqual(GameTag.objects.count(), 30)
|
Add mngment command test case.
|
Add mngment command test case.
|
Python
|
mit
|
joshsamara/game-website,joshsamara/game-website,joshsamara/game-website
|
Add mngment command test case.
|
from django.core.management import call_command
from .utils import BaseTestCase as TestCase
from core.models import User, Group, Game, GameTag
import sys
from cStringIO import StringIO
class CommandsTestCase(TestCase):
def test_makedata(self):
"Test makedata command."
# Want to hide STDOUT because the command prints
sys.stdout = StringIO()
# Some Asserts.
self.assertEqual(User.objects.count(), 0)
self.assertEqual(Group.objects.count(), 0)
self.assertEqual(Game.objects.count(), 0)
self.assertEqual(GameTag.objects.count(), 0)
args = []
opts = {}
call_command('makedata', *args, **opts)
# Some Asserts.
self.assertEqual(User.objects.count(), 100)
self.assertEqual(Group.objects.count(), 20)
self.assertEqual(Game.objects.count(), 100)
self.assertEqual(GameTag.objects.count(), 30)
|
<commit_before><commit_msg>Add mngment command test case.<commit_after>
|
from django.core.management import call_command
from .utils import BaseTestCase as TestCase
from core.models import User, Group, Game, GameTag
import sys
from cStringIO import StringIO
class CommandsTestCase(TestCase):
def test_makedata(self):
"Test makedata command."
# Want to hide STDOUT because the command prints
sys.stdout = StringIO()
# Some Asserts.
self.assertEqual(User.objects.count(), 0)
self.assertEqual(Group.objects.count(), 0)
self.assertEqual(Game.objects.count(), 0)
self.assertEqual(GameTag.objects.count(), 0)
args = []
opts = {}
call_command('makedata', *args, **opts)
# Some Asserts.
self.assertEqual(User.objects.count(), 100)
self.assertEqual(Group.objects.count(), 20)
self.assertEqual(Game.objects.count(), 100)
self.assertEqual(GameTag.objects.count(), 30)
|
Add mngment command test case.from django.core.management import call_command
from .utils import BaseTestCase as TestCase
from core.models import User, Group, Game, GameTag
import sys
from cStringIO import StringIO
class CommandsTestCase(TestCase):
def test_makedata(self):
"Test makedata command."
# Want to hide STDOUT because the command prints
sys.stdout = StringIO()
# Some Asserts.
self.assertEqual(User.objects.count(), 0)
self.assertEqual(Group.objects.count(), 0)
self.assertEqual(Game.objects.count(), 0)
self.assertEqual(GameTag.objects.count(), 0)
args = []
opts = {}
call_command('makedata', *args, **opts)
# Some Asserts.
self.assertEqual(User.objects.count(), 100)
self.assertEqual(Group.objects.count(), 20)
self.assertEqual(Game.objects.count(), 100)
self.assertEqual(GameTag.objects.count(), 30)
|
<commit_before><commit_msg>Add mngment command test case.<commit_after>from django.core.management import call_command
from .utils import BaseTestCase as TestCase
from core.models import User, Group, Game, GameTag
import sys
from cStringIO import StringIO
class CommandsTestCase(TestCase):
def test_makedata(self):
"Test makedata command."
# Want to hide STDOUT because the command prints
sys.stdout = StringIO()
# Some Asserts.
self.assertEqual(User.objects.count(), 0)
self.assertEqual(Group.objects.count(), 0)
self.assertEqual(Game.objects.count(), 0)
self.assertEqual(GameTag.objects.count(), 0)
args = []
opts = {}
call_command('makedata', *args, **opts)
# Some Asserts.
self.assertEqual(User.objects.count(), 100)
self.assertEqual(Group.objects.count(), 20)
self.assertEqual(Game.objects.count(), 100)
self.assertEqual(GameTag.objects.count(), 30)
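One caveat with the test above: sys.stdout is replaced but never restored, which can swallow output from later tests. A safer sketch of the same silencing, reusing the module's imports, is:

def test_makedata(self):
    "Test makedata command without leaking the stdout patch."
    old_stdout, sys.stdout = sys.stdout, StringIO()
    try:
        call_command('makedata')
    finally:
        sys.stdout = old_stdout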
|
|
2312feefcac891645c5e02d3b75a97bbc96bc205
|
uniqueids/tasks.py
|
uniqueids/tasks.py
|
from celery.task import Task
from celery.utils.log import get_task_logger
from familyconnect_registration import utils
logger = get_task_logger(__name__)
class AddUniqueIDToIdentity(Task):
def run(self, identity, unique_id, write_to, **kwargs):
"""
identity: the identity to receive the payload.
unique_id: the unique_id to add to the identity
write_to: the key to write the unique_id to
"""
details = utils.get_identity(identity)
if "details" in details:
# not a 404
payload = {
"details": details["details"]
}
payload["details"][write_to] = unique_id
utils.patch_identity(identity, payload)
return "Identity <%s> now has <%s> of <%s>" % (
identity, write_to, str(unique_id))
else:
return "Identity <%s> not found" % (identity,)
add_unique_id_to_identity = AddUniqueIDToIdentity()
|
Add identity patching for post unique ID generation
|
Add identity patching for post unique ID generation
|
Python
|
bsd-3-clause
|
praekelt/familyconnect-registration,praekelt/familyconnect-registration
|
Add identity patching for post unique ID generation
|
from celery.task import Task
from celery.utils.log import get_task_logger
from familyconnect_registration import utils
logger = get_task_logger(__name__)
class AddUniqueIDToIdentity(Task):
def run(self, identity, unique_id, write_to, **kwargs):
"""
identity: the identity to receive the payload.
unique_id: the unique_id to add to the identity
write_to: the key to write the unique_id to
"""
details = utils.get_identity(identity)
if "details" in details:
# not a 404
payload = {
"details": details["details"]
}
payload["details"][write_to] = unique_id
utils.patch_identity(identity, payload)
return "Identity <%s> now has <%s> of <%s>" % (
identity, write_to, str(unique_id))
else:
return "Identity <%s> not found" % (identity,)
add_unique_id_to_identity = AddUniqueIDToIdentity()
|
<commit_before><commit_msg>Add identity patching for post unique ID generation<commit_after>
|
from celery.task import Task
from celery.utils.log import get_task_logger
from familyconnect_registration import utils
logger = get_task_logger(__name__)
class AddUniqueIDToIdentity(Task):
def run(self, identity, unique_id, write_to, **kwargs):
"""
identity: the identity to receive the payload.
unique_id: the unique_id to add to the identity
write_to: the key to write the unique_id to
"""
details = utils.get_identity(identity)
if "details" in details:
# not a 404
payload = {
"details": details["details"]
}
payload["details"][write_to] = unique_id
utils.patch_identity(identity, payload)
return "Identity <%s> now has <%s> of <%s>" % (
identity, write_to, str(unique_id))
else:
return "Identity <%s> not found" % (identity,)
add_unique_id_to_identity = AddUniqueIDToIdentity()
|
Add identity patching for post unique ID generationfrom celery.task import Task
from celery.utils.log import get_task_logger
from familyconnect_registration import utils
logger = get_task_logger(__name__)
class AddUniqueIDToIdentity(Task):
def run(self, identity, unique_id, write_to, **kwargs):
"""
identity: the identity to receive the payload.
unique_id: the unique_id to add to the identity
write_to: the key to write the unique_id to
"""
details = utils.get_identity(identity)
if "details" in details:
# not a 404
payload = {
"details": details["details"]
}
payload["details"][write_to] = unique_id
utils.patch_identity(identity, payload)
return "Identity <%s> now has <%s> of <%s>" % (
identity, write_to, str(unique_id))
else:
return "Identity <%s> not found" % (identity,)
add_unique_id_to_identity = AddUniqueIDToIdentity()
|
<commit_before><commit_msg>Add identity patching for post unique ID generation<commit_after>from celery.task import Task
from celery.utils.log import get_task_logger
from familyconnect_registration import utils
logger = get_task_logger(__name__)
class AddUniqueIDToIdentity(Task):
def run(self, identity, unique_id, write_to, **kwargs):
"""
identity: the identity to receive the payload.
unique_id: the unique_id to add to the identity
write_to: the key to write the unique_id to
"""
details = utils.get_identity(identity)
if "details" in details:
# not a 404
payload = {
"details": details["details"]
}
payload["details"][write_to] = unique_id
utils.patch_identity(identity, payload)
return "Identity <%s> now has <%s> of <%s>" % (
identity, write_to, str(unique_id))
else:
return "Identity <%s> not found" % (identity,)
add_unique_id_to_identity = AddUniqueIDToIdentity()
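Queueing the task from other code would look roughly like this (the identity UUID and details key below are made-up placeholders, and a configured Celery broker is assumed):

add_unique_id_to_identity.delay(
    identity='00000000-0000-0000-0000-000000000000',  # placeholder identity id
    unique_id=1234567890,
    write_to='health_id',                             # hypothetical details key
)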
|
|
60368c4b7d3d48945381aac89e95217a58c1e3a4
|
politics/__init__.py
|
politics/__init__.py
|
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1"
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
Add module info for politics.
|
Add module info for politics.
|
Python
|
apache-2.0
|
chrismattmann/politics-hacking
|
Add module info for politics.
|
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1"
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
<commit_before><commit_msg>Add module info for politics.<commit_after>
|
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1"
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
Add module info for politics.# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1"
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
<commit_before><commit_msg>Add module info for politics.<commit_after># encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1"
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
|
3cb26b7b3d62dbff5e457ee6b3ed30283c8d0e62
|
python/util/Openable.py
|
python/util/Openable.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
class Openable(object):
def __init__(self):
self.is_open = True
def close(self):
self.is_open = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
|
Add missing files from Mac
|
Add missing files from Mac
|
Python
|
mit
|
rec/echomesh,rec/echomesh,rec/echomesh,rec/echomesh,rec/echomesh,rec/echomesh
|
Add missing files from Mac
|
from __future__ import absolute_import, division, print_function, unicode_literals
class Openable(object):
def __init__(self):
self.is_open = True
def close(self):
self.is_open = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
|
<commit_before><commit_msg>Add missing files from Mac<commit_after>
|
from __future__ import absolute_import, division, print_function, unicode_literals
class Openable(object):
def __init__(self):
self.is_open = True
def close(self):
self.is_open = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
|
Add missing files from Macfrom __future__ import absolute_import, division, print_function, unicode_literals
class Openable(object):
def __init__(self):
self.is_open = True
def close(self):
self.is_open = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
|
<commit_before><commit_msg>Add missing files from Mac<commit_after>from __future__ import absolute_import, division, print_function, unicode_literals
class Openable(object):
def __init__(self):
self.is_open = True
def close(self):
self.is_open = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
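Usage is the ordinary context-manager pattern:

with Openable() as o:
    assert o.is_open
assert not o.is_open  # close() ran on exiting the block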
|
|
cb0f24cd872b254ecbf8697a2d3cc09a4512d523
|
py/relative-ranks.py
|
py/relative-ranks.py
|
from operator import itemgetter
class Solution(object):
def findRelativeRanks(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
withRank = list(enumerate(sorted(list(enumerate(nums)), key=itemgetter(1), reverse=True), 1))
top3 = ["Gold Medal", "Silver Medal", "Bronze Medal"]
for i in xrange(min(len(withRank), 3)):
withRank[i] = (top3[i], withRank[i][1])
return map(lambda x:str(x[0]), sorted(withRank, key=lambda x:x[1][0]))
|
Add py solution for 506. Relative Ranks
|
Add py solution for 506. Relative Ranks
506. Relative Ranks: https://leetcode.com/problems/relative-ranks/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 506. Relative Ranks
506. Relative Ranks: https://leetcode.com/problems/relative-ranks/
|
from operator import itemgetter
class Solution(object):
def findRelativeRanks(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
withRank = list(enumerate(sorted(list(enumerate(nums)), key=itemgetter(1), reverse=True), 1))
top3 = ["Gold Medal", "Silver Medal", "Bronze Medal"]
for i in xrange(min(len(withRank), 3)):
withRank[i] = (top3[i], withRank[i][1])
return map(lambda x:str(x[0]), sorted(withRank, key=lambda x:x[1][0]))
|
<commit_before><commit_msg>Add py solution for 506. Relative Ranks
506. Relative Ranks: https://leetcode.com/problems/relative-ranks/<commit_after>
|
from operator import itemgetter
class Solution(object):
def findRelativeRanks(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
withRank = list(enumerate(sorted(list(enumerate(nums)), key=itemgetter(1), reverse=True), 1))
top3 = ["Gold Medal", "Silver Medal", "Bronze Medal"]
for i in xrange(min(len(withRank), 3)):
withRank[i] = (top3[i], withRank[i][1])
return map(lambda x:str(x[0]), sorted(withRank, key=lambda x:x[1][0]))
|
Add py solution for 506. Relative Ranks
506. Relative Ranks: https://leetcode.com/problems/relative-ranks/from operator import itemgetter
class Solution(object):
def findRelativeRanks(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
withRank = list(enumerate(sorted(list(enumerate(nums)), key=itemgetter(1), reverse=True), 1))
top3 = ["Gold Medal", "Silver Medal", "Bronze Medal"]
for i in xrange(min(len(withRank), 3)):
withRank[i] = (top3[i], withRank[i][1])
return map(lambda x:str(x[0]), sorted(withRank, key=lambda x:x[1][0]))
|
<commit_before><commit_msg>Add py solution for 506. Relative Ranks
506. Relative Ranks: https://leetcode.com/problems/relative-ranks/<commit_after>from operator import itemgetter
class Solution(object):
def findRelativeRanks(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
withRank = list(enumerate(sorted(list(enumerate(nums)), key=itemgetter(1), reverse=True), 1))
top3 = ["Gold Medal", "Silver Medal", "Bronze Medal"]
for i in xrange(min(len(withRank), 3)):
withRank[i] = (top3[i], withRank[i][1])
return map(lambda x:str(x[0]), sorted(withRank, key=lambda x:x[1][0]))
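A quick sanity check of the expected mapping (Python 2, matching the xrange/map usage above):

s = Solution()
print(s.findRelativeRanks([5, 4, 3, 2, 1]))
# ['Gold Medal', 'Silver Medal', 'Bronze Medal', '4', '5']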
|
|
fb3c09c08e4c40230f6434708c61f27587c7e5d7
|
lintcode/Medium/179_Update_Bits.py
|
lintcode/Medium/179_Update_Bits.py
|
class Solution:
#@param n, m: Two integer
#@param i, j: Two bit positions
#return: An integer
def updateBits(self, n, m, i, j):
# write your code here
def twosComplement(num):
result = num
if (result < 0):
result *= -1
result = result ^ 0xffffffff
result += 1
return result
n = twosComplement(n)
m = twosComplement(m)
mask = ''
for k in range(32):
if (k <= j and k >= i):
mask = '0' + mask
else:
mask = '1' + mask
mask = int(mask, 2)
m = m << i
n = n & mask
n = n | m
if (n >> 31 == 1):
n -= 1
n = n ^ 0xffffffff
n *= -1
return n
|
Add solution to lintcode problem 179
|
Add solution to lintcode problem 179
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode problem 179
|
class Solution:
#@param n, m: Two integer
#@param i, j: Two bit positions
#return: An integer
def updateBits(self, n, m, i, j):
# write your code here
def twosComplement(num):
result = num
if (result < 0):
result *= -1
result = result ^ 0xffffffff
result += 1
return result
n = twosComplement(n)
m = twosComplement(m)
mask = ''
for k in range(32):
if (k <= j and k >= i):
mask = '0' + mask
else:
mask = '1' + mask
mask = int(mask, 2)
m = m << i
n = n & mask
n = n | m
if (n >> 31 == 1):
n -= 1
n = n ^ 0xffffffff
n *= -1
return n
|
<commit_before><commit_msg>Add solution to lintcode problem 179<commit_after>
|
class Solution:
#@param n, m: Two integer
#@param i, j: Two bit positions
#return: An integer
def updateBits(self, n, m, i, j):
# write your code here
def twosComplement(num):
result = num
if (result < 0):
result *= -1
result = result ^ 0xffffffff
result += 1
return result
n = twosComplement(n)
m = twosComplement(m)
mask = ''
for k in range(32):
if (k <= j and k >= i):
mask = '0' + mask
else:
mask = '1' + mask
mask = int(mask, 2)
m = m << i
n = n & mask
n = n | m
if (n >> 31 == 1):
n -= 1
n = n ^ 0xffffffff
n *= -1
return n
|
Add solution to lintcode problem 179class Solution:
#@param n, m: Two integer
#@param i, j: Two bit positions
#return: An integer
def updateBits(self, n, m, i, j):
# write your code here
def twosComplement(num):
result = num
if (result < 0):
result *= -1
result = result ^ 0xffffffff
result += 1
return result
n = twosComplement(n)
m = twosComplement(m)
mask = ''
for k in range(32):
if (k <= j and k >= i):
mask = '0' + mask
else:
mask = '1' + mask
mask = int(mask, 2)
m = m << i
n = n & mask
n = n | m
if (n >> 31 == 1):
n -= 1
n = n ^ 0xffffffff
n *= -1
return n
|
<commit_before><commit_msg>Add solution to lintcode problem 179<commit_after>class Solution:
#@param n, m: Two integer
#@param i, j: Two bit positions
#return: An integer
def updateBits(self, n, m, i, j):
# write your code here
def twosComplement(num):
result = num
if (result < 0):
result *= -1
result = result ^ 0xffffffff
result += 1
return result
n = twosComplement(n)
m = twosComplement(m)
mask = ''
for k in range(32):
if (k <= j and k >= i):
mask = '0' + mask
else:
mask = '1' + mask
mask = int(mask, 2)
m = m << i
n = n & mask
n = n | m
if (n >> 31 == 1):
n -= 1
n = n ^ 0xffffffff
n *= -1
return n
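For comparison, the same clear-then-OR idea can be written more compactly by masking Python's arbitrary-precision ints down to 32 bits explicitly; this is an alternative sketch, not the submitted solution:

def update_bits(n, m, i, j):
    # Clear bits i..j of n, OR in m shifted into place, then reinterpret as signed 32-bit.
    mask = ~(((1 << (j - i + 1)) - 1) << i) & 0xffffffff
    result = ((n & mask) | ((m << i) & 0xffffffff)) & 0xffffffff
    return result - (1 << 32) if result & (1 << 31) else result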
|
|
8cd3f3370bcf89e4b78a0bbffa2246f467facb52
|
problem1/steiner-prim.py
|
problem1/steiner-prim.py
|
import networkx as nx
from heapq import *
from sys import argv
def main():
G = nx.read_gml(argv[1])
terms = [i for i,d in G.nodes(data=True) if d['T']]
edges = []
number_of_components = len(terms)
heaps = {}
for t in terms:
edge = min(G.edges(terms[0], data=True), key = lambda e: e[2]['c'])
edges.append(edge)
heaps[t] = [(0,t)]
for i,t in G.nodes_iter(data=True):
t['c'] = None
for t in terms:
G.nodes(data=True)[t][1]['c'] = {'val':t}
while number_of_components != 1:
for i in terms:
print ("das: ", i, number_of_components, heaps)
if heaps[i] == []:
continue
cost,v = heappop(heaps[i])
comp = G.nodes(data = True)[v][1]['c']
while heaps[i] != [] and comp != None and comp['val'] == i:
cost,v = heappop(heaps[i])
if comp == None:
for v1,v2,d in G.edges(v, data=True):
heappush(heaps[i], (d['c'], v2))
G.nodes(data =True)[v][1]['c'] = G.nodes(data = True)[i][1]['c']
elif comp['val'] != i:
print("merge")
h1 = heaps[i]
h2 = heaps[comp['val']]
hm = h1 + h2
heapify(hm)
heaps[i] = hm
heaps[comp['val']] = hm
comp['val'] = G.nodes(data = True)[i][1]['c']['val']
number_of_components -= 1
if __name__ == '__main__':
main()
|
Add partial solution using combination of Prim
|
Add partial solution using combination of Prim
|
Python
|
mit
|
karulont/combopt
|
Add partial solution using combination of Prim
|
import networkx as nx
from heapq import *
from sys import argv
def main():
G = nx.read_gml(argv[1])
terms = [i for i,d in G.nodes(data=True) if d['T']]
edges = []
number_of_components = len(terms)
heaps = {}
for t in terms:
edge = min(G.edges(terms[0], data=True), key = lambda e: e[2]['c'])
edges.append(edge)
heaps[t] = [(0,t)]
for i,t in G.nodes_iter(data=True):
t['c'] = None
for t in terms:
G.nodes(data=True)[t][1]['c'] = {'val':t}
while number_of_components != 1:
for i in terms:
print ("das: ", i, number_of_components, heaps)
if heaps[i] == []:
continue
cost,v = heappop(heaps[i])
comp = G.nodes(data = True)[v][1]['c']
while heaps[i] != [] and comp != None and comp['val'] == i:
cost,v = heappop(heaps[i])
if comp == None:
for v1,v2,d in G.edges(v, data=True):
heappush(heaps[i], (d['c'], v2))
G.nodes(data =True)[v][1]['c'] = G.nodes(data = True)[i][1]['c']
elif comp['val'] != i:
print("merge")
h1 = heaps[i]
h2 = heaps[comp['val']]
hm = h1 + h2
heapify(hm)
heaps[i] = hm
heaps[comp['val']] = hm
comp['val'] = G.nodes(data = True)[i][1]['c']['val']
number_of_components -= 1
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add partial solution using combination of Prim<commit_after>
|
import networkx as nx
from heapq import *
from sys import argv
def main():
G = nx.read_gml(argv[1])
terms = [i for i,d in G.nodes(data=True) if d['T']]
edges = []
number_of_components = len(terms)
heaps = {}
for t in terms:
edge = min(G.edges(terms[0], data=True), key = lambda e: e[2]['c'])
edges.append(edge)
heaps[t] = [(0,t)]
for i,t in G.nodes_iter(data=True):
t['c'] = None
for t in terms:
G.nodes(data=True)[t][1]['c'] = {'val':t}
while number_of_components != 1:
for i in terms:
print ("das: ", i, number_of_components, heaps)
if heaps[i] == []:
continue
cost,v = heappop(heaps[i])
comp = G.nodes(data = True)[v][1]['c']
while heaps[i] != [] and comp != None and comp['val'] == i:
cost,v = heappop(heaps[i])
if comp == None:
for v1,v2,d in G.edges(v, data=True):
heappush(heaps[i], (d['c'], v2))
G.nodes(data =True)[v][1]['c'] = G.nodes(data = True)[i][1]['c']
elif comp['val'] != i:
print("merge")
h1 = heaps[i]
h2 = heaps[comp['val']]
hm = h1 + h2
heapify(hm)
heaps[i] = hm
heaps[comp['val']] = hm
comp['val'] = G.nodes(data = True)[i][1]['c']['val']
number_of_components -= 1
if __name__ == '__main__':
main()
|
Add partial solution using combination of Primimport networkx as nx
from heapq import *
from sys import argv
def main():
G = nx.read_gml(argv[1])
terms = [i for i,d in G.nodes(data=True) if d['T']]
edges = []
number_of_components = len(terms)
heaps = {}
for t in terms:
edge = min(G.edges(terms[0], data=True), key = lambda e: e[2]['c'])
edges.append(edge)
heaps[t] = [(0,t)]
for i,t in G.nodes_iter(data=True):
t['c'] = None
for t in terms:
G.nodes(data=True)[t][1]['c'] = {'val':t}
while number_of_components != 1:
for i in terms:
print ("das: ", i, number_of_components, heaps)
if heaps[i] == []:
continue
cost,v = heappop(heaps[i])
comp = G.nodes(data = True)[v][1]['c']
while heaps[i] != [] and comp != None and comp['val'] == i:
cost,v = heappop(heaps[i])
if comp == None:
for v1,v2,d in G.edges(v, data=True):
heappush(heaps[i], (d['c'], v2))
G.nodes(data =True)[v][1]['c'] = G.nodes(data = True)[i][1]['c']
elif comp['val'] != i:
print("merge")
h1 = heaps[i]
h2 = heaps[comp['val']]
hm = h1 + h2
heapify(hm)
heaps[i] = hm
heaps[comp['val']] = hm
comp['val'] = G.nodes(data = True)[i][1]['c']['val']
number_of_components -= 1
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add partial solution using combination of Prim<commit_after>import networkx as nx
from heapq import *
from sys import argv
def main():
G = nx.read_gml(argv[1])
terms = [i for i,d in G.nodes(data=True) if d['T']]
edges = []
number_of_components = len(terms)
heaps = {}
for t in terms:
edge = min(G.edges(terms[0], data=True), key = lambda e: e[2]['c'])
edges.append(edge)
heaps[t] = [(0,t)]
for i,t in G.nodes_iter(data=True):
t['c'] = None
for t in terms:
G.nodes(data=True)[t][1]['c'] = {'val':t}
while number_of_components != 1:
for i in terms:
print ("das: ", i, number_of_components, heaps)
if heaps[i] == []:
continue
cost,v = heappop(heaps[i])
comp = G.nodes(data = True)[v][1]['c']
while heaps[i] != [] and comp != None and comp['val'] == i:
cost,v = heappop(heaps[i])
if comp == None:
for v1,v2,d in G.edges(v, data=True):
heappush(heaps[i], (d['c'], v2))
G.nodes(data =True)[v][1]['c'] = G.nodes(data = True)[i][1]['c']
elif comp['val'] != i:
print("merge")
h1 = heaps[i]
h2 = heaps[comp['val']]
hm = h1 + h2
heapify(hm)
heaps[i] = hm
heaps[comp['val']] = hm
comp['val'] = G.nodes(data = True)[i][1]['c']['val']
number_of_components -= 1
if __name__ == '__main__':
main()
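For debugging the partial implementation above, networkx's built-in 2-approximation can provide a reference result on the same GML input (networkx >= 2.x; the 'c' edge weight and boolean 'T' terminal attribute are assumed to match the file, and the path is a placeholder):

import networkx as nx
from networkx.algorithms.approximation import steiner_tree

G = nx.read_gml('instance.gml')  # placeholder path
terminals = [n for n, d in G.nodes(data=True) if d['T']]
T = steiner_tree(G, terminals, weight='c')
print(sum(d['c'] for _, _, d in T.edges(data=True)))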
|
|
d31fb1a875cbec26a6bcc4a0401467ed02fc4051
|
examples/pos/postagging.py
|
examples/pos/postagging.py
|
import collections
import itertools
from nltk.corpus import brown
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
import chainer.optimizers as O
class CRF(chainer.Chain):
def __init__(self, n_vocab, n_pos):
super(CRF, self).__init__(
embed=L.EmbedID(n_vocab, n_pos),
crf=L.CRF1d(n_pos),
)
def __call__(self, xs, ys):
hs = [self.embed(x) for x in xs]
return self.crf(hs, ys)
def viterbi(self, xs):
hs = [self.embed(x) for x in xs]
return self.crf.viterbi(hs)
vocab = collections.defaultdict(lambda: len(vocab))
pos_vocab = collections.defaultdict(lambda: len(pos_vocab))
data = []
for sentence in brown.tagged_sents():
s = [(vocab[lex], pos_vocab[pos]) for lex, pos in sentence]
data.append(s)
if len(data) >= 100:
break
print('# of sentences: {}'.format(len(data)))
print('# of words: {}'.format(len(vocab)))
print('# of pos: {}'.format(len(pos_vocab)))
data.sort(key=len)
groups = []
for length, group in itertools.groupby(data, key=len):
groups.append(list(group))
model = CRF(len(vocab), len(pos_vocab))
#model.to_gpu()
xp = numpy
opt = O.Adam()
opt.setup(model)
opt.add_hook(chainer.optimizer.WeightDecay(0.1))
n_epoch = 1000
for epoch in range(n_epoch):
accum_loss = 0
correct = 0
total = 0
for sentences in groups:
length = len(sentences[0])
xs = []
ys = []
for i in range(length):
x_data = xp.array([s[i][0] for s in sentences], numpy.int32)
y_data = xp.array([s[i][1] for s in sentences], numpy.int32)
xs.append(chainer.Variable(x_data))
ys.append(chainer.Variable(y_data))
loss = model(xs, ys)
accum_loss += loss.data
model.zerograds()
loss.backward()
opt.update()
_, path = model.viterbi(xs)
assert len(ys) == len(path)
for y, p in zip(ys, path):
correct += xp.sum(y.data == p)
total += len(y.data)
accuracy = float(correct) / total
print('Accuracy: {}'.format(accuracy))
print('Total loss: {}'.format(accum_loss))
|
Make a pos-tagging example using CRF
|
Make a pos-tagging example using CRF
|
Python
|
mit
|
okuta/chainer,keisuke-umezawa/chainer,hvy/chainer,niboshi/chainer,okuta/chainer,okuta/chainer,okuta/chainer,chainer/chainer,chainer/chainer,ronekko/chainer,jnishi/chainer,wkentaro/chainer,hvy/chainer,keisuke-umezawa/chainer,wkentaro/chainer,anaruse/chainer,keisuke-umezawa/chainer,niboshi/chainer,tkerola/chainer,pfnet/chainer,rezoo/chainer,chainer/chainer,ktnyt/chainer,hvy/chainer,ktnyt/chainer,jnishi/chainer,jnishi/chainer,niboshi/chainer,ktnyt/chainer,hvy/chainer,ktnyt/chainer,jnishi/chainer,keisuke-umezawa/chainer,wkentaro/chainer,chainer/chainer,aonotas/chainer,wkentaro/chainer,niboshi/chainer
|
Make a pos-tagging example using CRF
|
import collections
import itertools
from nltk.corpus import brown
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
import chainer.optimizers as O
class CRF(chainer.Chain):
def __init__(self, n_vocab, n_pos):
super(CRF, self).__init__(
embed=L.EmbedID(n_vocab, n_pos),
crf=L.CRF1d(n_pos),
)
def __call__(self, xs, ys):
hs = [self.embed(x) for x in xs]
return self.crf(hs, ys)
def viterbi(self, xs):
hs = [self.embed(x) for x in xs]
return self.crf.viterbi(hs)
vocab = collections.defaultdict(lambda: len(vocab))
pos_vocab = collections.defaultdict(lambda: len(pos_vocab))
data = []
for sentence in brown.tagged_sents():
s = [(vocab[lex], pos_vocab[pos]) for lex, pos in sentence]
data.append(s)
if len(data) >= 100:
break
print('# of sentences: {}'.format(len(data)))
print('# of words: {}'.format(len(vocab)))
print('# of pos: {}'.format(len(pos_vocab)))
data.sort(key=len)
groups = []
for length, group in itertools.groupby(data, key=len):
groups.append(list(group))
model = CRF(len(vocab), len(pos_vocab))
#model.to_gpu()
xp = numpy
opt = O.Adam()
opt.setup(model)
opt.add_hook(chainer.optimizer.WeightDecay(0.1))
n_epoch = 1000
for epoch in range(n_epoch):
accum_loss = 0
correct = 0
total = 0
for sentences in groups:
length = len(sentences[0])
xs = []
ys = []
for i in range(length):
x_data = xp.array([s[i][0] for s in sentences], numpy.int32)
y_data = xp.array([s[i][1] for s in sentences], numpy.int32)
xs.append(chainer.Variable(x_data))
ys.append(chainer.Variable(y_data))
loss = model(xs, ys)
accum_loss += loss.data
model.zerograds()
loss.backward()
opt.update()
_, path = model.viterbi(xs)
assert len(ys) == len(path)
for y, p in zip(ys, path):
correct += xp.sum(y.data == p)
total += len(y.data)
accuracy = float(correct) / total
print('Accuracy: {}'.format(accuracy))
print('Total loss: {}'.format(accum_loss))
|
<commit_before><commit_msg>Make a pos-tagging example using CRF<commit_after>
|
import collections
import itertools
from nltk.corpus import brown
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
import chainer.optimizers as O
class CRF(chainer.Chain):
def __init__(self, n_vocab, n_pos):
super(CRF, self).__init__(
embed=L.EmbedID(n_vocab, n_pos),
crf=L.CRF1d(n_pos),
)
def __call__(self, xs, ys):
hs = [self.embed(x) for x in xs]
return self.crf(hs, ys)
def viterbi(self, xs):
hs = [self.embed(x) for x in xs]
return self.crf.viterbi(hs)
vocab = collections.defaultdict(lambda: len(vocab))
pos_vocab = collections.defaultdict(lambda: len(pos_vocab))
data = []
for sentence in brown.tagged_sents():
s = [(vocab[lex], pos_vocab[pos]) for lex, pos in sentence]
data.append(s)
if len(data) >= 100:
break
print('# of sentences: {}'.format(len(data)))
print('# of words: {}'.format(len(vocab)))
print('# of pos: {}'.format(len(pos_vocab)))
data.sort(key=len)
groups = []
for length, group in itertools.groupby(data, key=len):
groups.append(list(group))
model = CRF(len(vocab), len(pos_vocab))
#model.to_gpu()
xp = numpy
opt = O.Adam()
opt.setup(model)
opt.add_hook(chainer.optimizer.WeightDecay(0.1))
n_epoch = 1000
for epoch in range(n_epoch):
accum_loss = 0
correct = 0
total = 0
for sentences in groups:
length = len(sentences[0])
xs = []
ys = []
for i in range(length):
x_data = xp.array([s[i][0] for s in sentences], numpy.int32)
y_data = xp.array([s[i][1] for s in sentences], numpy.int32)
xs.append(chainer.Variable(x_data))
ys.append(chainer.Variable(y_data))
loss = model(xs, ys)
accum_loss += loss.data
model.zerograds()
loss.backward()
opt.update()
_, path = model.viterbi(xs)
assert len(ys) == len(path)
for y, p in zip(ys, path):
correct += xp.sum(y.data == p)
total += len(y.data)
accuracy = float(correct) / total
print('Accuracy: {}'.format(accuracy))
print('Total loss: {}'.format(accum_loss))
|
Make a pos-tagging example using CRFimport collections
import itertools
from nltk.corpus import brown
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
import chainer.optimizers as O
class CRF(chainer.Chain):
def __init__(self, n_vocab, n_pos):
super(CRF, self).__init__(
embed=L.EmbedID(n_vocab, n_pos),
crf=L.CRF1d(n_pos),
)
def __call__(self, xs, ys):
hs = [self.embed(x) for x in xs]
return self.crf(hs, ys)
def viterbi(self, xs):
hs = [self.embed(x) for x in xs]
return self.crf.viterbi(hs)
vocab = collections.defaultdict(lambda: len(vocab))
pos_vocab = collections.defaultdict(lambda: len(pos_vocab))
data = []
for sentence in brown.tagged_sents():
s = [(vocab[lex], pos_vocab[pos]) for lex, pos in sentence]
data.append(s)
if len(data) >= 100:
break
print('# of sentences: {}'.format(len(data)))
print('# of words: {}'.format(len(vocab)))
print('# of pos: {}'.format(len(pos_vocab)))
data.sort(key=len)
groups = []
for length, group in itertools.groupby(data, key=len):
groups.append(list(group))
model = CRF(len(vocab), len(pos_vocab))
#model.to_gpu()
xp = numpy
opt = O.Adam()
opt.setup(model)
opt.add_hook(chainer.optimizer.WeightDecay(0.1))
n_epoch = 1000
for epoch in range(n_epoch):
accum_loss = 0
correct = 0
total = 0
for sentences in groups:
length = len(sentences[0])
xs = []
ys = []
for i in range(length):
x_data = xp.array([s[i][0] for s in sentences], numpy.int32)
y_data = xp.array([s[i][1] for s in sentences], numpy.int32)
xs.append(chainer.Variable(x_data))
ys.append(chainer.Variable(y_data))
loss = model(xs, ys)
accum_loss += loss.data
model.zerograds()
loss.backward()
opt.update()
_, path = model.viterbi(xs)
assert len(ys) == len(path)
for y, p in zip(ys, path):
correct += xp.sum(y.data == p)
total += len(y.data)
accuracy = float(correct) / total
print('Accuracy: {}'.format(accuracy))
print('Total loss: {}'.format(accum_loss))
|
<commit_before><commit_msg>Make a pos-tagging example using CRF<commit_after>import collections
import itertools
from nltk.corpus import brown
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
import chainer.optimizers as O
class CRF(chainer.Chain):
def __init__(self, n_vocab, n_pos):
super(CRF, self).__init__(
embed=L.EmbedID(n_vocab, n_pos),
crf=L.CRF1d(n_pos),
)
def __call__(self, xs, ys):
hs = [self.embed(x) for x in xs]
return self.crf(hs, ys)
def viterbi(self, xs):
hs = [self.embed(x) for x in xs]
return self.crf.viterbi(hs)
vocab = collections.defaultdict(lambda: len(vocab))
pos_vocab = collections.defaultdict(lambda: len(pos_vocab))
data = []
for sentence in brown.tagged_sents():
s = [(vocab[lex], pos_vocab[pos]) for lex, pos in sentence]
data.append(s)
if len(data) >= 100:
break
print('# of sentences: {}'.format(len(data)))
print('# of words: {}'.format(len(vocab)))
print('# of pos: {}'.format(len(pos_vocab)))
data.sort(key=len)
groups = []
for length, group in itertools.groupby(data, key=len):
groups.append(list(group))
model = CRF(len(vocab), len(pos_vocab))
#model.to_gpu()
xp = numpy
opt = O.Adam()
opt.setup(model)
opt.add_hook(chainer.optimizer.WeightDecay(0.1))
n_epoch = 1000
for epoch in range(n_epoch):
accum_loss = 0
correct = 0
total = 0
for sentences in groups:
length = len(sentences[0])
xs = []
ys = []
for i in range(length):
x_data = xp.array([s[i][0] for s in sentences], numpy.int32)
y_data = xp.array([s[i][1] for s in sentences], numpy.int32)
xs.append(chainer.Variable(x_data))
ys.append(chainer.Variable(y_data))
loss = model(xs, ys)
accum_loss += loss.data
model.zerograds()
loss.backward()
opt.update()
_, path = model.viterbi(xs)
assert len(ys) == len(path)
for y, p in zip(ys, path):
correct += xp.sum(y.data == p)
total += len(y.data)
accuracy = float(correct) / total
print('Accuracy: {}'.format(accuracy))
print('Total loss: {}'.format(accum_loss))
|
|
282a8d120d486f15cc1e5cd9e1432c1100e722bf
|
examples/nogallery/create_a_dummy_hdf5_file.py
|
examples/nogallery/create_a_dummy_hdf5_file.py
|
import numpy as np
import km3pipe as kp
class APump(kp.Pump):
def configure(self):
self.index = 0
def process(self, blob):
data = {'a': self.index * np.arange(5),
'b': np.arange(5) ** self.index}
data2 = {'c': self.index * np.arange(10, dtype='f4') + 0.1,
'd': np.arange(10, dtype='f4') ** self.index + 0.2}
print(data2)
blob['Tablelike'] = kp.Table(data,
h5loc='/tablelike',
name='2D Table')
print(blob['Tablelike'])
blob['Columnwise'] = kp.Table(data2,
h5loc='/columnwise',
split_h5=True,
name='Column-wise Split')
self.index += 1
return blob
pipe = kp.Pipeline()
pipe.attach(APump)
pipe.attach(kp.io.HDF5Sink, filename='km3hdf5_example.h5')
pipe.drain(13)
|
Add a dummy script which creates an hdf5 file
|
Add a dummy script which creates an hdf5 file
|
Python
|
mit
|
tamasgal/km3pipe,tamasgal/km3pipe
|
Add a dummy script which creates an hdf5 file
|
import numpy as np
import km3pipe as kp
class APump(kp.Pump):
def configure(self):
self.index = 0
def process(self, blob):
data = {'a': self.index * np.arange(5),
'b': np.arange(5) ** self.index}
data2 = {'c': self.index * np.arange(10, dtype='f4') + 0.1,
'd': np.arange(10, dtype='f4') ** self.index + 0.2}
print(data2)
blob['Tablelike'] = kp.Table(data,
h5loc='/tablelike',
name='2D Table')
print(blob['Tablelike'])
blob['Columnwise'] = kp.Table(data2,
h5loc='/columnwise',
split_h5=True,
name='Column-wise Split')
self.index += 1
return blob
pipe = kp.Pipeline()
pipe.attach(APump)
pipe.attach(kp.io.HDF5Sink, filename='km3hdf5_example.h5')
pipe.drain(13)
|
<commit_before><commit_msg>Add a dummy script which creates an hdf5 file<commit_after>
|
import numpy as np
import km3pipe as kp
class APump(kp.Pump):
def configure(self):
self.index = 0
def process(self, blob):
data = {'a': self.index * np.arange(5),
'b': np.arange(5) ** self.index}
data2 = {'c': self.index * np.arange(10, dtype='f4') + 0.1,
'd': np.arange(10, dtype='f4') ** self.index + 0.2}
print(data2)
blob['Tablelike'] = kp.Table(data,
h5loc='/tablelike',
name='2D Table')
print(blob['Tablelike'])
blob['Columnwise'] = kp.Table(data2,
h5loc='/columnwise',
split_h5=True,
name='Column-wise Split')
self.index += 1
return blob
pipe = kp.Pipeline()
pipe.attach(APump)
pipe.attach(kp.io.HDF5Sink, filename='km3hdf5_example.h5')
pipe.drain(13)
|
Add a dummy script which creates an hdf5 fileimport numpy as np
import km3pipe as kp
class APump(kp.Pump):
def configure(self):
self.index = 0
def process(self, blob):
data = {'a': self.index * np.arange(5),
'b': np.arange(5) ** self.index}
data2 = {'c': self.index * np.arange(10, dtype='f4') + 0.1,
'd': np.arange(10, dtype='f4') ** self.index + 0.2}
print(data2)
blob['Tablelike'] = kp.Table(data,
h5loc='/tablelike',
name='2D Table')
print(blob['Tablelike'])
blob['Columnwise'] = kp.Table(data2,
h5loc='/columnwise',
split_h5=True,
name='Column-wise Split')
self.index += 1
return blob
pipe = kp.Pipeline()
pipe.attach(APump)
pipe.attach(kp.io.HDF5Sink, filename='km3hdf5_example.h5')
pipe.drain(13)
|
<commit_before><commit_msg>Add a dummy script which creates an hdf5 file<commit_after>import numpy as np
import km3pipe as kp
class APump(kp.Pump):
def configure(self):
self.index = 0
def process(self, blob):
data = {'a': self.index * np.arange(5),
'b': np.arange(5) ** self.index}
data2 = {'c': self.index * np.arange(10, dtype='f4') + 0.1,
'd': np.arange(10, dtype='f4') ** self.index + 0.2}
print(data2)
blob['Tablelike'] = kp.Table(data,
h5loc='/tablelike',
name='2D Table')
print(blob['Tablelike'])
blob['Columnwise'] = kp.Table(data2,
h5loc='/columnwise',
split_h5=True,
name='Column-wise Split')
self.index += 1
return blob
pipe = kp.Pipeline()
pipe.attach(APump)
pipe.attach(kp.io.HDF5Sink, filename='km3hdf5_example.h5')
pipe.drain(13)
|
|
6ee8ee2467d9c61b03a268a6b8d8ea9bc3cfe9e0
|
Lib/defcon/tools/fuzzyNumber.py
|
Lib/defcon/tools/fuzzyNumber.py
|
class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return '[%d %d]' % (self.value, self.threshold)
def __cmp__(self, other):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)
|
class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return "[%f %f]" % (self.value, self.threshold)
def __cmp__(self, other):
if hasattr(other, "value"):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)
return cmp(self.value, other)
|
Allow for comparing to objects other than FuzzyNumber objects.
|
Allow for comparing to objects other than FuzzyNumber objects.
|
Python
|
mit
|
anthrotype/defcon,moyogo/defcon,typemytype/defcon,typesupply/defcon,adrientetar/defcon
|
class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return '[%d %d]' % (self.value, self.threshold)
def __cmp__(self, other):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)Allow for comparing to objects other than FuzzyNumber objects.
|
class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return "[%f %f]" % (self.value, self.threshold)
def __cmp__(self, other):
if hasattr(other, "value"):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)
return cmp(self.value, other)
|
<commit_before>class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return '[%d %d]' % (self.value, self.threshold)
def __cmp__(self, other):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)<commit_msg>Allow for comparing to objects other than FuzzyNumber objects.<commit_after>
|
class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return "[%f %f]" % (self.value, self.threshold)
def __cmp__(self, other):
if hasattr(other, "value"):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)
return cmp(self.value, other)
|
class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return '[%d %d]' % (self.value, self.threshold)
def __cmp__(self, other):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)Allow for comparing to objects other than FuzzyNumber objects.class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return "[%f %f]" % (self.value, self.threshold)
def __cmp__(self, other):
if hasattr(other, "value"):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)
return cmp(self.value, other)
|
<commit_before>class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return '[%d %d]' % (self.value, self.threshold)
def __cmp__(self, other):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)<commit_msg>Allow for comparing to objects other than FuzzyNumber objects.<commit_after>class FuzzyNumber(object):
def __init__(self, value, threshold):
self.value = value
self.threshold = threshold
def __repr__(self):
return "[%f %f]" % (self.value, self.threshold)
def __cmp__(self, other):
if hasattr(other, "value"):
if abs(self.value - other.value) < self.threshold:
return 0
else:
return cmp(self.value, other.value)
return cmp(self.value, other)
|
fbd185f06c1cd890fe5aa365a02a8ffd1de60405
|
course_discovery/apps/course_metadata/migrations/0064_auto_20171018_1528.py
|
course_discovery/apps/course_metadata/migrations/0064_auto_20171018_1528.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-18 15:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0063_auto_20171005_1931'),
]
operations = [
migrations.RemoveField(
model_name='subjecttranslation',
name='description_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='name_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='subtitle_t',
),
]
|
Remove unused fields for Subject via migration
|
Remove unused fields for Subject via migration
|
Python
|
agpl-3.0
|
edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery
|
Remove unused fields for Subject via migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-18 15:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0063_auto_20171005_1931'),
]
operations = [
migrations.RemoveField(
model_name='subjecttranslation',
name='description_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='name_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='subtitle_t',
),
]
|
<commit_before><commit_msg>Remove unused fields for Subject via migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-18 15:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0063_auto_20171005_1931'),
]
operations = [
migrations.RemoveField(
model_name='subjecttranslation',
name='description_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='name_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='subtitle_t',
),
]
|
Remove unused fields for Subject via migration# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-18 15:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0063_auto_20171005_1931'),
]
operations = [
migrations.RemoveField(
model_name='subjecttranslation',
name='description_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='name_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='subtitle_t',
),
]
|
<commit_before><commit_msg>Remove unused fields for Subject via migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-18 15:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0063_auto_20171005_1931'),
]
operations = [
migrations.RemoveField(
model_name='subjecttranslation',
name='description_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='name_t',
),
migrations.RemoveField(
model_name='subjecttranslation',
name='subtitle_t',
),
]
|
|
f3cfd7b980b1feda747b3142dd99a18e77a0e3cf
|
publisher/cms_toolbars.py
|
publisher/cms_toolbars.py
|
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from publisher.models import PublisherStateModel
log = logging.getLogger(__name__)
@toolbar_pool.register
class PublisherStateToolbar(CMSToolbar):
watch_models = [PublisherStateModel]
def populate(self):
user = self.request.user
has_ask_request_permission = PublisherStateModel.has_ask_request_permission(user, raise_exception=False)
has_reply_request_permission = PublisherStateModel.has_reply_request_permission(user, raise_exception=False)
if has_ask_request_permission or has_reply_request_permission:
menu = self.toolbar.get_or_create_menu(
key="publisher-state",
verbose_name=_("open requests"),
)
state_qs = PublisherStateModel.objects.all().filter_open() # All open entries
for state in state_qs:
publisher_instance = state.publisher_instance
try:
url = publisher_instance.get_absolute_url()
except AttributeError as err:
log.error("Can't add 'view on page' link: %s", err)
if settings.DEBUG:
url = "#%s" % err
else:
url = "#"
menu.add_link_item(
name="%s: %s" % (state.action_name, publisher_instance),
url=url,
)
menu.add_break()
menu.add_sideframe_item(
name=_("Publisher State list"),
url=admin_reverse("publisher_publisherstatemodel_changelist"),
)
|
Add django CMS toolbar entry
|
Add django CMS toolbar entry
|
Python
|
bsd-3-clause
|
wearehoods/django-model-publisher-ai,wearehoods/django-model-publisher-ai,wearehoods/django-model-publisher-ai
|
Add django CMS toolbar entry
|
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from publisher.models import PublisherStateModel
log = logging.getLogger(__name__)
@toolbar_pool.register
class PublisherStateToolbar(CMSToolbar):
watch_models = [PublisherStateModel]
def populate(self):
user = self.request.user
has_ask_request_permission = PublisherStateModel.has_ask_request_permission(user, raise_exception=False)
has_reply_request_permission = PublisherStateModel.has_reply_request_permission(user, raise_exception=False)
if has_ask_request_permission or has_reply_request_permission:
menu = self.toolbar.get_or_create_menu(
key="publisher-state",
verbose_name=_("open requests"),
)
state_qs = PublisherStateModel.objects.all().filter_open() # All open entries
for state in state_qs:
publisher_instance = state.publisher_instance
try:
url = publisher_instance.get_absolute_url()
except AttributeError as err:
log.error("Can't add 'view on page' link: %s", err)
if settings.DEBUG:
url = "#%s" % err
else:
url = "#"
menu.add_link_item(
name="%s: %s" % (state.action_name, publisher_instance),
url=url,
)
menu.add_break()
menu.add_sideframe_item(
name=_("Publisher State list"),
url=admin_reverse("publisher_publisherstatemodel_changelist"),
)
|
<commit_before><commit_msg>Add django CMS toolbar entry<commit_after>
|
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from publisher.models import PublisherStateModel
log = logging.getLogger(__name__)
@toolbar_pool.register
class PublisherStateToolbar(CMSToolbar):
watch_models = [PublisherStateModel]
def populate(self):
user = self.request.user
has_ask_request_permission = PublisherStateModel.has_ask_request_permission(user, raise_exception=False)
has_reply_request_permission = PublisherStateModel.has_reply_request_permission(user, raise_exception=False)
if has_ask_request_permission or has_reply_request_permission:
menu = self.toolbar.get_or_create_menu(
key="publisher-state",
verbose_name=_("open requests"),
)
state_qs = PublisherStateModel.objects.all().filter_open() # All open entries
for state in state_qs:
publisher_instance = state.publisher_instance
try:
url = publisher_instance.get_absolute_url()
except AttributeError as err:
log.error("Can't add 'view on page' link: %s", err)
if settings.DEBUG:
url = "#%s" % err
else:
url = "#"
menu.add_link_item(
name="%s: %s" % (state.action_name, publisher_instance),
url=url,
)
menu.add_break()
menu.add_sideframe_item(
name=_("Publisher State list"),
url=admin_reverse("publisher_publisherstatemodel_changelist"),
)
|
Add django CMS toolbar entry
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from publisher.models import PublisherStateModel
log = logging.getLogger(__name__)
@toolbar_pool.register
class PublisherStateToolbar(CMSToolbar):
watch_models = [PublisherStateModel]
def populate(self):
user = self.request.user
has_ask_request_permission = PublisherStateModel.has_ask_request_permission(user, raise_exception=False)
has_reply_request_permission = PublisherStateModel.has_reply_request_permission(user, raise_exception=False)
if has_ask_request_permission or has_reply_request_permission:
menu = self.toolbar.get_or_create_menu(
key="publisher-state",
verbose_name=_("open requests"),
)
state_qs = PublisherStateModel.objects.all().filter_open() # All open entries
for state in state_qs:
publisher_instance = state.publisher_instance
try:
url = publisher_instance.get_absolute_url()
except AttributeError as err:
log.error("Can't add 'view on page' link: %s", err)
if settings.DEBUG:
url = "#%s" % err
else:
url = "#"
menu.add_link_item(
name="%s: %s" % (state.action_name, publisher_instance),
url=url,
)
menu.add_break()
menu.add_sideframe_item(
name=_("Publisher State list"),
url=admin_reverse("publisher_publisherstatemodel_changelist"),
)
|
<commit_before><commit_msg>Add django CMS toolbar entry<commit_after>
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from publisher.models import PublisherStateModel
log = logging.getLogger(__name__)
@toolbar_pool.register
class PublisherStateToolbar(CMSToolbar):
watch_models = [PublisherStateModel]
def populate(self):
user = self.request.user
has_ask_request_permission = PublisherStateModel.has_ask_request_permission(user, raise_exception=False)
has_reply_request_permission = PublisherStateModel.has_reply_request_permission(user, raise_exception=False)
if has_ask_request_permission or has_reply_request_permission:
menu = self.toolbar.get_or_create_menu(
key="publisher-state",
verbose_name=_("open requests"),
)
state_qs = PublisherStateModel.objects.all().filter_open() # All open entries
for state in state_qs:
publisher_instance = state.publisher_instance
try:
url = publisher_instance.get_absolute_url()
except AttributeError as err:
log.error("Can't add 'view on page' link: %s", err)
if settings.DEBUG:
url = "#%s" % err
else:
url = "#"
menu.add_link_item(
name="%s: %s" % (state.action_name, publisher_instance),
url=url,
)
menu.add_break()
menu.add_sideframe_item(
name=_("Publisher State list"),
url=admin_reverse("publisher_publisherstatemodel_changelist"),
)
|
|
9397e9da1c22ce7bb461586a929f70e48c39a9a1
|
exercises/chapter_08/exercise_08_09/exercise_08_09.py
|
exercises/chapter_08/exercise_08_09/exercise_08_09.py
|
# 8-9. Magicians
def show_magicians(names):
"""Print each magician in the list"""
for name in names:
print (name)
magicians = [
"Harry Houdini",
"David Cooperfield",
"Criss Angel",
"David Blaine",
]
show_magicians(magicians)
|
Add solution to exercise 8.9.
|
Add solution to exercise 8.9.
|
Python
|
mit
|
HenrikSamuelsson/python-crash-course
|
Add solution to exercise 8.9.
|
# 8-9. Magicians
def show_magicians(names):
"""Print each magician in the list"""
for name in names:
print (name)
magicians = [
"Harry Houdini",
"David Cooperfield",
"Criss Angel",
"David Blaine",
]
show_magicians(magicians)
|
<commit_before><commit_msg>Add solution to exercise 8.9.<commit_after>
|
# 8-9. Magicians
def show_magicians(names):
"""Print each magician in the list"""
for name in names:
print (name)
magicians = [
"Harry Houdini",
"David Cooperfield",
"Criss Angel",
"David Blaine",
]
show_magicians(magicians)
|
Add solution to exercise 8.9.# 8-9. Magicians
def show_magicians(names):
"""Print each magician in the list"""
for name in names:
print (name)
magicians = [
"Harry Houdini",
"David Cooperfield",
"Criss Angel",
"David Blaine",
]
show_magicians(magicians)
|
<commit_before><commit_msg>Add solution to exercise 8.9.<commit_after># 8-9. Magicians
def show_magicians(names):
"""Print each magician in the list"""
for name in names:
print (name)
magicians = [
"Harry Houdini",
"David Cooperfield",
"Criss Angel",
"David Blaine",
]
show_magicians(magicians)
|