| Column | Type / length |
|---|---|
| commit | string, 40 characters |
| old_file | string, 4-118 characters |
| new_file | string, 4-118 characters |
| old_contents | string, 0-2.94k characters |
| new_contents | string, 1-4.43k characters |
| subject | string, 15-444 characters |
| message | string, 16-3.45k characters |
| lang | string, 1 distinct value |
| license | string, 13 distinct values |
| repos | string, 5-43.2k characters |
| prompt | string, 17-4.58k characters |
| response | string, 1-4.43k characters |
| prompt_tagged | string, 58-4.62k characters |
| response_tagged | string, 1-4.43k characters |
| text | string, 132-7.29k characters |
| text_tagged | string, 173-7.33k characters |
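Judging from the records below, the derived columns are mechanical recombinations of the raw ones: `prompt_tagged` wraps `old_contents` and `message` in the `<commit_before>`/`<commit_msg>`/`<commit_after>` markers, `response` and `response_tagged` repeat `new_contents`, and `text`/`text_tagged` concatenate prompt and response. A minimal sketch of that assembly, assuming plain string concatenation with no extra separators (the function name is illustrative, not part of the dataset):

```python
def assemble_derived_columns(old_contents: str, new_contents: str, message: str) -> dict:
    """Rebuild the derived columns from the raw ones, as inferred from the rows shown."""
    prompt = old_contents + message          # 'prompt': prior file contents followed by the commit message
    prompt_tagged = (                        # 'prompt_tagged': same content wrapped in markers
        "<commit_before>" + old_contents
        + "<commit_msg>" + message
        + "<commit_after>"
    )
    return {
        "prompt": prompt,
        "response": new_contents,            # 'response': the post-commit file contents
        "prompt_tagged": prompt_tagged,
        "response_tagged": new_contents,
        "text": prompt + new_contents,       # 'text': prompt and response concatenated
        "text_tagged": prompt_tagged + new_contents,
    }
```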
57c4bcff678d65262463f94ea7996367a1c1ec35
|
analytics/tshirt_size_distribution.py
|
analytics/tshirt_size_distribution.py
|
import csv
import plotly.plotly as py
import sys
from collections import defaultdict
from plotly.graph_objs import *

# Usage: python tshirt_size_distribution.py <city> <path_to_csv_file> <column_number>
city = sys.argv[1].lower()
path_to_csv_file = sys.argv[2]
column_number = int(sys.argv[3])

with open(path_to_csv_file) as csv_file:
    csv_data = csv.reader(csv_file)
    next(csv_data, None)  # skip the header row
    tshirt_distribution = defaultdict(int)
    for data in csv_data:
        tshirt_distribution[data[column_number]] += 1

tshirt_size = []
tshirt_size_quantity = []
for size, quantity in sorted(tshirt_distribution.items()):
    tshirt_size.append(size)
    tshirt_size_quantity.append(quantity)

bar = Bar(x=tshirt_size, y=tshirt_size_quantity)
data = Data([bar])
layout = Layout(
    title='WearHacks {0} T-shirt Distribution'.format(city.capitalize()),
    xaxis=XAxis(
        title='T-shirt Size',
        titlefont=Font(
            family='Courier New, monospace',
            size=18,
            color='#7f7f7f'
        )
    ),
    yaxis=YAxis(
        title='Number of T-shirts',
        titlefont=Font(
            family='Courier New, monospace',
            size=18,
            color='#7f7f7f'
        )
    )
)
figure = Figure(data=data, layout=layout)
plot_url = py.plot(figure, filename='WearHacks/wearhacks_{0}_tshirt_distribution'.format(city))
|
Add script to determine and plot tshirt size distribution
|
Add script to determine and plot tshirt size distribution
|
Python
|
mit
|
wearhacks/wearhacks_analytics
|
Add script to determine and plot tshirt size distribution
|
import csv
import plotly.plotly as py
import sys
from collections import defaultdict
from plotly.graph_objs import *
city = sys.argv[1].lower()
path_to_csv_file = sys.argv[2]
column_number = int(sys.argv[3])
with open(path_to_csv_file) as csv_file:
csv_data = csv.reader(csv_file)
next(csv_data, None)
tshirt_distribution = defaultdict(int)
for data in csv_data:
tshirt_distribution[data[column_number]] += 1
tshirt_size = []
tshirt_size_quantity = []
for size, quantity in sorted(tshirt_distribution.items()):
tshirt_size.append(size)
tshirt_size_quantity.append(quantity)
bar = Bar(x=tshirt_size, y=tshirt_size_quantity)
data = Data([bar])
layout = Layout(
title='WearHacks {0} T-shirt Distribution'.format(city.capitalize()),
xaxis=XAxis(
title='T-shirt Size',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='Number of T-shirts',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
figure = Figure(data=data, layout=layout)
plot_url = py.plot(figure, filename='WearHacks/wearhacks_{0}_tshirt_distribution'.format(city))
|
<commit_before><commit_msg>Add script to determine and plot tshirt size distribution<commit_after>
|
import csv
import plotly.plotly as py
import sys
from collections import defaultdict
from plotly.graph_objs import *
city = sys.argv[1].lower()
path_to_csv_file = sys.argv[2]
column_number = int(sys.argv[3])
with open(path_to_csv_file) as csv_file:
csv_data = csv.reader(csv_file)
next(csv_data, None)
tshirt_distribution = defaultdict(int)
for data in csv_data:
tshirt_distribution[data[column_number]] += 1
tshirt_size = []
tshirt_size_quantity = []
for size, quantity in sorted(tshirt_distribution.items()):
tshirt_size.append(size)
tshirt_size_quantity.append(quantity)
bar = Bar(x=tshirt_size, y=tshirt_size_quantity)
data = Data([bar])
layout = Layout(
title='WearHacks {0} T-shirt Distribution'.format(city.capitalize()),
xaxis=XAxis(
title='T-shirt Size',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='Number of T-shirts',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
figure = Figure(data=data, layout=layout)
plot_url = py.plot(figure, filename='WearHacks/wearhacks_{0}_tshirt_distribution'.format(city))
|
Add script to determine and plot tshirt size distributionimport csv
import plotly.plotly as py
import sys
from collections import defaultdict
from plotly.graph_objs import *
city = sys.argv[1].lower()
path_to_csv_file = sys.argv[2]
column_number = int(sys.argv[3])
with open(path_to_csv_file) as csv_file:
csv_data = csv.reader(csv_file)
next(csv_data, None)
tshirt_distribution = defaultdict(int)
for data in csv_data:
tshirt_distribution[data[column_number]] += 1
tshirt_size = []
tshirt_size_quantity = []
for size, quantity in sorted(tshirt_distribution.items()):
tshirt_size.append(size)
tshirt_size_quantity.append(quantity)
bar = Bar(x=tshirt_size, y=tshirt_size_quantity)
data = Data([bar])
layout = Layout(
title='WearHacks {0} T-shirt Distribution'.format(city.capitalize()),
xaxis=XAxis(
title='T-shirt Size',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='Number of T-shirts',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
figure = Figure(data=data, layout=layout)
plot_url = py.plot(figure, filename='WearHacks/wearhacks_{0}_tshirt_distribution'.format(city))
|
<commit_before><commit_msg>Add script to determine and plot tshirt size distribution<commit_after>import csv
import plotly.plotly as py
import sys
from collections import defaultdict
from plotly.graph_objs import *
city = sys.argv[1].lower()
path_to_csv_file = sys.argv[2]
column_number = int(sys.argv[3])
with open(path_to_csv_file) as csv_file:
csv_data = csv.reader(csv_file)
next(csv_data, None)
tshirt_distribution = defaultdict(int)
for data in csv_data:
tshirt_distribution[data[column_number]] += 1
tshirt_size = []
tshirt_size_quantity = []
for size, quantity in sorted(tshirt_distribution.items()):
tshirt_size.append(size)
tshirt_size_quantity.append(quantity)
bar = Bar(x=tshirt_size, y=tshirt_size_quantity)
data = Data([bar])
layout = Layout(
title='WearHacks {0} T-shirt Distribution'.format(city.capitalize()),
xaxis=XAxis(
title='T-shirt Size',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=YAxis(
title='Number of T-shirts',
titlefont=Font(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
figure = Figure(data=data, layout=layout)
plot_url = py.plot(figure, filename='WearHacks/wearhacks_{0}_tshirt_distribution'.format(city))
|
|
9b17918bc992481efb51d8df972b879499424e79
|
lib/ansible/utils/module_docs_fragments/openstack.py
|
lib/ansible/utils/module_docs_fragments/openstack.py
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):

    # Standard openstack documentation fragment
    DOCUMENTATION = '''
options:
  cloud:
    description:
      - Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
    required: false
  auth:
    description:
      - Dictionary containing auth information as needed by the cloud's auth
        plugin strategy. For the default I(password) plugin, this would contain
        I(auth_url), I(username), I(password), I(project_name) and any
        information about domains if the cloud supports them. For other plugins,
        this param will need to contain whatever parameters that auth plugin
        requires. This parameter is not needed if a named cloud is provided.
    required: false
  auth_plugin:
    description:
      - Name of the auth plugin to use. If the cloud uses something other than
        password authentication, the name of the plugin should be indicated here
        and the contents of the I(auth) parameter should be updated accordingly.
    required: false
    default: password
  auth_token:
    description:
      - An auth token obtained previously. If I(auth_token) is given,
        I(auth) and I(auth_plugin) are not needed.
  region_name:
    description:
      - Name of the region.
    required: false
  availability_zone:
    description:
      - Name of the availability zone.
    required: false
  state:
    description:
      - Should the resource be present or absent.
    choices: [present, absent]
    default: present
  wait:
    description:
      - Should ansible wait until the requested resource is complete.
    required: false
    default: "yes"
    choices: ["yes", "no"]
  timeout:
    description:
      - How long should ansible wait for the requested resource.
    required: false
    default: 180
  endpoint_type:
    description:
      - Endpoint URL type to fetch from the service catalog.
    choices: [publicURL, internalURL]
    required: false
    default: publicURL
requirements:
  - shade
notes:
  - The standard OpenStack environment variables, such as C(OS_USERNAME)
    may be used instead of providing explicit values.
  - Auth information is driven by os-client-config, which means that values
    can come from a yaml config file in /etc/ansible/openstack.yaml,
    /etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
    standard environment variables, then finally by explicit parameters in
    plays.
'''
|
Add doc fragment for new OpenStack modules
|
Add doc fragment for new OpenStack modules
|
Python
|
mit
|
thaim/ansible,thaim/ansible
|
Add doc fragment for new OpenStack modules
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided.
required: false
auth_plugin:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
auth_token:
description:
- An auth token obtained previously. If I(auth_token) is given,
I(auth) and I(auth_plugin) are not needed.
region_name:
description:
- Name of the region.
required: false
availability_zone:
description:
- Name of the availability zone.
required: false
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [publicURL, internalURL]
required: false
default: publicURL
requirements:
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays.
'''
|
<commit_before><commit_msg>Add doc fragment for new OpenStack modules<commit_after>
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided.
required: false
auth_plugin:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
auth_token:
description:
- An auth token obtained previously. If I(auth_token) is given,
I(auth) and I(auth_plugin) are not needed.
region_name:
description:
- Name of the region.
required: false
availability_zone:
description:
- Name of the availability zone.
required: false
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [publicURL, internalURL]
required: false
default: publicURL
requirements:
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays.
'''
|
Add doc fragment for new OpenStack modules# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided.
required: false
auth_plugin:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
auth_token:
description:
- An auth token obtained previously. If I(auth_token) is given,
I(auth) and I(auth_plugin) are not needed.
region_name:
description:
- Name of the region.
required: false
availability_zone:
description:
- Name of the availability zone.
required: false
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [publicURL, internalURL]
required: false
default: publicURL
requirements:
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays.
'''
|
<commit_before><commit_msg>Add doc fragment for new OpenStack modules<commit_after># Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard openstack documentation fragment
DOCUMENTATION = '''
options:
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
plugin strategy. For the default I(password) plugin, this would contain
I(auth_url), I(username), I(password), I(project_name) and any
information about domains if the cloud supports them. For other plugins,
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided.
required: false
auth_plugin:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
auth_token:
description:
- An auth token obtained previously. If I(auth_token) is given,
I(auth) and I(auth_plugin) are not needed.
region_name:
description:
- Name of the region.
required: false
availability_zone:
description:
- Name of the availability zone.
required: false
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
wait:
description:
- Should ansible wait until the requested resource is complete.
required: false
default: "yes"
choices: ["yes", "no"]
timeout:
description:
- How long should ansible wait for the requested resource.
required: false
default: 180
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
choices: [publicURL, internalURL]
required: false
default: publicURL
requirements:
- shade
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be used instead of providing explicit values.
- Auth information is driven by os-client-config, which means that values
can come from a yaml config file in /etc/ansible/openstack.yaml,
/etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
standard environment variables, then finally by explicit parameters in
plays.
'''
|
|
dc0a1b2777acc4b9b97de6c471eb8e35dcac2254
|
tools/telemetry/telemetry/page/actions/action_runner_unittest.py
|
tools/telemetry/telemetry/page/actions/action_runner_unittest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module


class ActionRunnerTest(tab_test_case.TabTestCase):
  def testIssuingInteractionRecord(self):
    self.Navigate('blank.html')
    action_runner = action_runner_module.ActionRunner(None, self._tab)
    self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
    action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
    action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
    trace_data = self._browser.StopTracing()

    timeline_model = model.TimelineModel(trace_data)
    records = []
    renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
    for event in renderer_thread.async_slices:
      if not tir_module.IsTimelineInteractionRecord(event.name):
        continue
      records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
    self.assertEqual(1, len(records),
                     'Fail to issue the interaction record on tracing timeline.'
                     ' Trace data:\n%s' % repr(trace_data.EventData()))
    self.assertEqual('TestInteraction', records[0].logical_name)
    self.assertTrue(records[0].is_smooth)
|
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
|
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,ltilve/chromium,chuan9/chromium-crosswalk,M4sse/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,jaruba/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,dednal/chromium.src,Fireblend/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,chuan9/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,dednal/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,Just-D/chromium-1,ondra-novak/chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,Jonekee/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,markYoungH/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,littlstar/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,littlstar/chromium.src,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,ltilve/chromium,Just-D/chromium-1,Just-D/chromium-1,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangInte
l/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,jaruba/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,jaruba/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,M4sse/chromium.src,Just-D/chromium-1,Chilledheart/chromium,jaruba/chromium.src,M4sse/chromium.src,markYoungH/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,ondra-novak/chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,jaruba/chromium.src,M4sse/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,littlstar/chromium.src,ltilve/chromium,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,chuan9/chromium-crosswalk,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,ondra-novak/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk
|
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
<commit_before><commit_msg>Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
<commit_before><commit_msg>Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
|
bec07279b0e14a9a0e3f11efc7ecbc6e908aa1ef
|
latex_utils.py
|
latex_utils.py
|
"""
Utilities to work with LaTeX files.
"""
import argparse
import re
def read_tex_file(tex_file, encoding="utf-8"):
with open(tex_file, 'r', encoding=encoding) as data:
tex_source = data.read()
return tex_source
def write_tex_file(tex_file, tex_source):
with open(tex_file, 'w') as open_file:
open_file.write(tex_source)
def get_relevant_warnings(log_file):
"""Extract relevant warnings from a LaTeX log file."""
overfull_lines = re.findall(r"Overfull \\hbox .*", log_file)
undefined_references = re.findall(r"LaTeX Warning: Citation `.*?' on page .*", log_file)
return overfull_lines + undefined_references
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('tex_file')
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = cli_parser()
log_file = read_tex_file(args.tex_file, encoding="latin-1")
relevant_warnings = get_relevant_warnings(log_file)
for warning in relevant_warnings:
print(warning)
|
Add utilities to work with LaTeX files
|
Add utilities to work with LaTeX files
|
Python
|
mit
|
teunzwart/latex-production-tools
|
Add utilities to work with LaTeX files
|
"""
Utilities to work with LaTeX files.
"""
import argparse
import re
def read_tex_file(tex_file, encoding="utf-8"):
with open(tex_file, 'r', encoding=encoding) as data:
tex_source = data.read()
return tex_source
def write_tex_file(tex_file, tex_source):
with open(tex_file, 'w') as open_file:
open_file.write(tex_source)
def get_relevant_warnings(log_file):
"""Extract relevant warnings from a LaTeX log file."""
overfull_lines = re.findall(r"Overfull \\hbox .*", log_file)
undefined_references = re.findall(r"LaTeX Warning: Citation `.*?' on page .*", log_file)
return overfull_lines + undefined_references
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('tex_file')
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = cli_parser()
log_file = read_tex_file(args.tex_file, encoding="latin-1")
relevant_warnings = get_relevant_warnings(log_file)
for warning in relevant_warnings:
print(warning)
|
<commit_before><commit_msg>Add utilities to work with LaTeX files<commit_after>
|
"""
Utilities to work with LaTeX files.
"""
import argparse
import re
def read_tex_file(tex_file, encoding="utf-8"):
with open(tex_file, 'r', encoding=encoding) as data:
tex_source = data.read()
return tex_source
def write_tex_file(tex_file, tex_source):
with open(tex_file, 'w') as open_file:
open_file.write(tex_source)
def get_relevant_warnings(log_file):
"""Extract relevant warnings from a LaTeX log file."""
overfull_lines = re.findall(r"Overfull \\hbox .*", log_file)
undefined_references = re.findall(r"LaTeX Warning: Citation `.*?' on page .*", log_file)
return overfull_lines + undefined_references
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('tex_file')
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = cli_parser()
log_file = read_tex_file(args.tex_file, encoding="latin-1")
relevant_warnings = get_relevant_warnings(log_file)
for warning in relevant_warnings:
print(warning)
|
Add utilities to work with LaTeX files"""
Utilities to work with LaTeX files.
"""
import argparse
import re
def read_tex_file(tex_file, encoding="utf-8"):
with open(tex_file, 'r', encoding=encoding) as data:
tex_source = data.read()
return tex_source
def write_tex_file(tex_file, tex_source):
with open(tex_file, 'w') as open_file:
open_file.write(tex_source)
def get_relevant_warnings(log_file):
"""Extract relevant warnings from a LaTeX log file."""
overfull_lines = re.findall(r"Overfull \\hbox .*", log_file)
undefined_references = re.findall(r"LaTeX Warning: Citation `.*?' on page .*", log_file)
return overfull_lines + undefined_references
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('tex_file')
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = cli_parser()
log_file = read_tex_file(args.tex_file, encoding="latin-1")
relevant_warnings = get_relevant_warnings(log_file)
for warning in relevant_warnings:
print(warning)
|
<commit_before><commit_msg>Add utilities to work with LaTeX files<commit_after>"""
Utilities to work with LaTeX files.
"""
import argparse
import re
def read_tex_file(tex_file, encoding="utf-8"):
with open(tex_file, 'r', encoding=encoding) as data:
tex_source = data.read()
return tex_source
def write_tex_file(tex_file, tex_source):
with open(tex_file, 'w') as open_file:
open_file.write(tex_source)
def get_relevant_warnings(log_file):
"""Extract relevant warnings from a LaTeX log file."""
overfull_lines = re.findall(r"Overfull \\hbox .*", log_file)
undefined_references = re.findall(r"LaTeX Warning: Citation `.*?' on page .*", log_file)
return overfull_lines + undefined_references
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument('tex_file')
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = cli_parser()
log_file = read_tex_file(args.tex_file, encoding="latin-1")
relevant_warnings = get_relevant_warnings(log_file)
for warning in relevant_warnings:
print(warning)
|
|
334695484d61693c41c8a30aea6cf753b9ed8267
|
lstm.py
|
lstm.py
|
import tensorflow as tf
import numpy as np

tf.set_random_seed(5)

n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10
learning_rate = 0.001

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])

# Three stacked LSTM layers; classification reads the hidden state of the top layer.
multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()

states  # no-op outside an interactive session
top_layer_h_state  # no-op outside an interactive session

n_epochs = 25
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for k in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
        print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
|
Add code for LSTM based MNIST
|
Add code for LSTM based MNIST
Training takes time but accuracy is great after few
iterations.
|
Python
|
mit
|
KT12/hands_on_machine_learning
|
Add code for LSTM based MNIST
Training takes time but accuracy is great after few
iterations.
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(5)
n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10
learning_rate = 0.001
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
states
top_layer_h_state
n_epochs = 25
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for k in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
|
<commit_before><commit_msg>Add code for LSTM based MNIST
Training takes time but accuracy is great after few
iterations.<commit_after>
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(5)
n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10
learning_rate = 0.001
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
states
top_layer_h_state
n_epochs = 25
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for k in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
|
Add code for LSTM based MNIST
Training takes time but accuracy is great after few
iterations.import tensorflow as tf
import numpy as np
tf.set_random_seed(5)
n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10
learning_rate = 0.001
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
states
top_layer_h_state
n_epochs = 25
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for k in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
|
<commit_before><commit_msg>Add code for LSTM based MNIST
Training takes time but accuracy is great after few
iterations.<commit_after>import tensorflow as tf
import numpy as np
tf.set_random_seed(5)
n_inputs = 28
n_neurons = 150
n_layers = 3
n_steps = 28
n_outputs = 10
learning_rate = 0.001
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
multi_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons) for _ in range(3)])
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name='softmax')
x_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(x_entropy, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
states
top_layer_h_state
n_epochs = 25
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for k in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print("Epoch", epoch, 'Train acc: ', acc_train, "Test acc: ", acc_test)
|
|
0da81aee8d1d1c1badee561c594e191dbbffdc9c
|
pyres/failure/base.py
|
pyres/failure/base.py
|
import sys
import traceback


class BaseBackend(object):
    """Provides a base class that custom backends can subclass. Also provides basic
    traceback and message parsing.

    The ``__init__`` takes these keyword arguments:

    ``exp`` -- The exception generated by your failure.
    ``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
    ``payload`` -- The payload that was passed to the ``Job``.
    ``worker`` -- The worker that was processing the ``Job`` when it failed.
    """
    def __init__(self, exp, queue, payload, worker=None):
        excc, _, tb = sys.exc_info()
        self._exception = excc
        self._traceback = traceback.format_exc()
        self._worker = worker
        self._queue = queue
        self._payload = payload

    def _parse_traceback(self, trace):
        """Return the given traceback string formatted for a notification."""
        return trace

    def _parse_message(self, exc):
        """Return a message for a notification from the given exception."""
        return '%s: %s' % (exc.__class__.__name__, str(exc))
|
import sys
import traceback


class BaseBackend(object):
    """Provides a base class that custom backends can subclass. Also provides basic
    traceback and message parsing.

    The ``__init__`` takes these keyword arguments:

    ``exp`` -- The exception generated by your failure.
    ``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
    ``payload`` -- The payload that was passed to the ``Job``.
    ``worker`` -- The worker that was processing the ``Job`` when it failed.
    """
    def __init__(self, exp, queue, payload, worker=None):
        excc, _, tb = sys.exc_info()
        self._exception = excc
        self._traceback = traceback.format_exc()
        self._worker = worker
        self._queue = queue
        self._payload = payload

    def _parse_traceback(self, trace):
        """Return the given traceback string formatted for a notification."""
        if not trace:
            return []
        return trace.split('\n')

    def _parse_message(self, exc):
        """Return a message for a notification from the given exception."""
        return '%s: %s' % (exc.__class__.__name__, str(exc))
|
Save our backtraces in a compatible manner with resque.
|
Save our backtraces in a compatible manner with resque.
|
Python
|
mit
|
binarydud/pyres,guaijiao/pyres,TylerLubeck/pyres,Affectiva/pyres
|
import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
return trace
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
Save our backtraces in a compatible manner with resque.
|
import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
if not trace:
return []
return trace.split('\n')
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
|
<commit_before>import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
return trace
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
<commit_msg>Save our backtraces in a compatible manner with resque.<commit_after>
|
import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
if not trace:
return []
return trace.split('\n')
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
|
import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
return trace
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
Save our backtraces in a compatible manner with resque.import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
if not trace:
return []
return trace.split('\n')
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
|
<commit_before>import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
return trace
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
<commit_msg>Save our backtraces in a compatible manner with resque.<commit_after>import sys
import traceback
class BaseBackend(object):
"""Provides a base class that custom backends can subclass. Also provides basic
traceback and message parsing.
The ``__init__`` takes these keyword arguments:
``exp`` -- The exception generated by your failure.
``queue`` -- The queue in which the ``Job`` was enqueued when it failed.
``payload`` -- The payload that was passed to the ``Job``.
``worker`` -- The worker that was processing the ``Job`` when it failed.
"""
def __init__(self, exp, queue, payload, worker=None):
excc, _, tb = sys.exc_info()
self._exception = excc
self._traceback = traceback.format_exc()
self._worker = worker
self._queue = queue
self._payload = payload
def _parse_traceback(self, trace):
"""Return the given traceback string formatted for a notification."""
if not trace:
return []
return trace.split('\n')
def _parse_message(self, exc):
"""Return a message for a notification from the given exception."""
return '%s: %s' % (exc.__class__.__name__, str(exc))
|
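For context, a minimal sketch (not from the pyres commit) of how a custom failure backend could build on the updated BaseBackend in this record, i.e. the version whose _parse_traceback returns a list of lines; the LoggingBackend name, its save() hook, and the printed output are assumptions for illustration only.
# Illustrative only: a hypothetical subclass of the BaseBackend defined above.
class LoggingBackend(BaseBackend):
    def __init__(self, exp, queue, payload, worker=None):
        super(LoggingBackend, self).__init__(exp, queue, payload, worker)
        self._exp = exp  # keep the exception instance for message formatting
    def save(self):
        print('failed job on queue %s' % self._queue)
        print(self._parse_message(self._exp))
        for line in self._parse_traceback(self._traceback):
            print(line)
# Must be constructed inside an except block, since __init__ reads sys.exc_info().
try:
    raise ValueError('boom')
except ValueError as exp:
    LoggingBackend(exp, 'default', {'class': 'MyJob', 'args': []}).save()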
a4845aff79b1efb89a210b3b2cc57739cc27bebf
|
chatterbot/ext/django_chatterbot/migrations/0018_text_max_length.py
|
chatterbot/ext/django_chatterbot/migrations/0018_text_max_length.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0017_tags_unique'),
]
operations = [
migrations.AlterField(
model_name='statement',
name='in_response_to',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='statement',
name='search_in_response_to',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='search_text',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='text',
field=models.CharField(max_length=255),
),
]
|
Create Django migrations for statement text max length
|
Create Django migrations for statement text max length
|
Python
|
bsd-3-clause
|
gunthercox/ChatterBot,vkosuri/ChatterBot
|
Create Django migrations for statement text max length
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0017_tags_unique'),
]
operations = [
migrations.AlterField(
model_name='statement',
name='in_response_to',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='statement',
name='search_in_response_to',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='search_text',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='text',
field=models.CharField(max_length=255),
),
]
|
<commit_before><commit_msg>Create Django migrations for statement text max length<commit_after>
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0017_tags_unique'),
]
operations = [
migrations.AlterField(
model_name='statement',
name='in_response_to',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='statement',
name='search_in_response_to',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='search_text',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='text',
field=models.CharField(max_length=255),
),
]
|
Create Django migrations for statement text max lengthfrom django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0017_tags_unique'),
]
operations = [
migrations.AlterField(
model_name='statement',
name='in_response_to',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='statement',
name='search_in_response_to',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='search_text',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='text',
field=models.CharField(max_length=255),
),
]
|
<commit_before><commit_msg>Create Django migrations for statement text max length<commit_after>from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_chatterbot', '0017_tags_unique'),
]
operations = [
migrations.AlterField(
model_name='statement',
name='in_response_to',
field=models.CharField(max_length=255, null=True),
),
migrations.AlterField(
model_name='statement',
name='search_in_response_to',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='search_text',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='statement',
name='text',
field=models.CharField(max_length=255),
),
]
|
|
5c3a54a79a763b3b761f053f7360ad2d670ae5cb
|
bssrdf_estimate/interface/bssrdf_redner_widget.py
|
bssrdf_estimate/interface/bssrdf_redner_widget.py
|
# -*- coding: utf-8 -*-
from .image_widget import ImageWidget
class BSSRDFRenderWidget(ImageWidget):
def __init__(self, parent=None):
super(BSSRDFRenderWidget, self).__init__(parent)
|
Add BSSRDF render widget file.
|
Add BSSRDF render widget file.
|
Python
|
mit
|
tatsy/bssrdf-estimate,tatsy/bssrdf-estimate
|
Add BSSRDF render widget file.
|
# -*- coding: utf-8 -*-
from .image_widget import ImageWidget
class BSSRDFRenderWidget(ImageWidget):
def __init__(self, parent=None):
super(BSSRDFRenderWidget, self).__init__(parent)
|
<commit_before><commit_msg>Add BSSRDF render widget file.<commit_after>
|
# -*- coding: utf-8 -*-
from .image_widget import ImageWidget
class BSSRDFRenderWidget(ImageWidget):
def __init__(self, parent=None):
super(BSSRDFRenderWidget, self).__init__(parent)
|
Add BSSRDF render widget file.# -*- coding: utf-8 -*-
from .image_widget import ImageWidget
class BSSRDFRenderWidget(ImageWidget):
def __init__(self, parent=None):
super(BSSRDFRenderWidget, self).__init__(parent)
|
<commit_before><commit_msg>Add BSSRDF render widget file.<commit_after># -*- coding: utf-8 -*-
from .image_widget import ImageWidget
class BSSRDFRenderWidget(ImageWidget):
def __init__(self, parent=None):
super(BSSRDFRenderWidget, self).__init__(parent)
|
|
6a3d0ae17efd09bbfc184a4c28115cb61b2006e7
|
bjcp/style_parser.py
|
bjcp/style_parser.py
|
#! /usr/bin/env python
import csv
import pprint
import string
"""
Parse the BJCP 2015 Styles CSV file
"""
def main():
styles = {}
filename = '2015_Styles.csv'
with open(filename, 'rb') as f:
reader = csv.reader(f)
stylename = ''
for row in reader:
if row[0] and row[1]:
if row[3]:
category = row[1]
subcategory = ''
style = row[2]
if '.' in style:
subcategory, style = style.split('.')
og = row[3].split('-')
fg = row[4].split('-')
abv = row[5].split('-')
ibu = row[6].split('-')
color = row[7].split('-')
if og[0] == 'Varies':
og = ''
fg = ''
abv = ''
ibu = ''
color = ''
else:
og[1] = str(1.0 + float(og[1]) / 1000.)
if float(fg[1]) > 1.015:
fg[1] = str(1.0 + float(fg[1]) / 1000.)
styles[stylename].append({
'category': category,
'subcategory': subcategory.strip(),
'style': style.strip(),
'og': og,
'fg': fg,
'abv': abv,
'ibu': ibu,
'color': color,
})
else:
stylename = string.capwords(row[2])
styles[stylename] = []
pprint.pprint(styles)
if __name__ == "__main__":
main()
|
Add a utility to parse the BJCP 2015 Styles csv
|
Add a utility to parse the BJCP 2015 Styles csv
|
Python
|
mit
|
chrisgilmerproj/brewdata,chrisgilmerproj/brewdata
|
Add a utility to parse the BJCP 2015 Styles csv
|
#! /usr/bin/env python
import csv
import pprint
import string
"""
Parse the BJCP 2015 Styles CSV file
"""
def main():
styles = {}
filename = '2015_Styles.csv'
with open(filename, 'rb') as f:
reader = csv.reader(f)
stylename = ''
for row in reader:
if row[0] and row[1]:
if row[3]:
category = row[1]
subcategory = ''
style = row[2]
if '.' in style:
subcategory, style = style.split('.')
og = row[3].split('-')
fg = row[4].split('-')
abv = row[5].split('-')
ibu = row[6].split('-')
color = row[7].split('-')
if og[0] == 'Varies':
og = ''
fg = ''
abv = ''
ibu = ''
color = ''
else:
og[1] = str(1.0 + float(og[1]) / 1000.)
if float(fg[1]) > 1.015:
fg[1] = str(1.0 + float(fg[1]) / 1000.)
styles[stylename].append({
'category': category,
'subcategory': subcategory.strip(),
'style': style.strip(),
'og': og,
'fg': fg,
'abv': abv,
'ibu': ibu,
'color': color,
})
else:
stylename = string.capwords(row[2])
styles[stylename] = []
pprint.pprint(styles)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a utility to parse the BJCP 2015 Styles csv<commit_after>
|
#! /usr/bin/env python
import csv
import pprint
import string
"""
Parse the BJCP 2015 Styles CSV file
"""
def main():
styles = {}
filename = '2015_Styles.csv'
with open(filename, 'rb') as f:
reader = csv.reader(f)
stylename = ''
for row in reader:
if row[0] and row[1]:
if row[3]:
category = row[1]
subcategory = ''
style = row[2]
if '.' in style:
subcategory, style = style.split('.')
og = row[3].split('-')
fg = row[4].split('-')
abv = row[5].split('-')
ibu = row[6].split('-')
color = row[7].split('-')
if og[0] == 'Varies':
og = ''
fg = ''
abv = ''
ibu = ''
color = ''
else:
og[1] = str(1.0 + float(og[1]) / 1000.)
if float(fg[1]) > 1.015:
fg[1] = str(1.0 + float(fg[1]) / 1000.)
styles[stylename].append({
'category': category,
'subcategory': subcategory.strip(),
'style': style.strip(),
'og': og,
'fg': fg,
'abv': abv,
'ibu': ibu,
'color': color,
})
else:
stylename = string.capwords(row[2])
styles[stylename] = []
pprint.pprint(styles)
if __name__ == "__main__":
main()
|
Add a utility to parse the BJCP 2015 Styles csv#! /usr/bin/env python
import csv
import pprint
import string
"""
Parse the BJCP 2015 Styles CSV file
"""
def main():
styles = {}
filename = '2015_Styles.csv'
with open(filename, 'rb') as f:
reader = csv.reader(f)
stylename = ''
for row in reader:
if row[0] and row[1]:
if row[3]:
category = row[1]
subcategory = ''
style = row[2]
if '.' in style:
subcategory, style = style.split('.')
og = row[3].split('-')
fg = row[4].split('-')
abv = row[5].split('-')
ibu = row[6].split('-')
color = row[7].split('-')
if og[0] == 'Varies':
og = ''
fg = ''
abv = ''
ibu = ''
color = ''
else:
og[1] = str(1.0 + float(og[1]) / 1000.)
if float(fg[1]) > 1.015:
fg[1] = str(1.0 + float(fg[1]) / 1000.)
styles[stylename].append({
'category': category,
'subcategory': subcategory.strip(),
'style': style.strip(),
'og': og,
'fg': fg,
'abv': abv,
'ibu': ibu,
'color': color,
})
else:
stylename = string.capwords(row[2])
styles[stylename] = []
pprint.pprint(styles)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a utility to parse the BJCP 2015 Styles csv<commit_after>#! /usr/bin/env python
import csv
import pprint
import string
"""
Parse the BJCP 2015 Styles CSV file
"""
def main():
styles = {}
filename = '2015_Styles.csv'
with open(filename, 'rb') as f:
reader = csv.reader(f)
stylename = ''
for row in reader:
if row[0] and row[1]:
if row[3]:
category = row[1]
subcategory = ''
style = row[2]
if '.' in style:
subcategory, style = style.split('.')
og = row[3].split('-')
fg = row[4].split('-')
abv = row[5].split('-')
ibu = row[6].split('-')
color = row[7].split('-')
if og[0] == 'Varies':
og = ''
fg = ''
abv = ''
ibu = ''
color = ''
else:
og[1] = str(1.0 + float(og[1]) / 1000.)
if float(fg[1]) > 1.015:
fg[1] = str(1.0 + float(fg[1]) / 1000.)
styles[stylename].append({
'category': category,
'subcategory': subcategory.strip(),
'style': style.strip(),
'og': og,
'fg': fg,
'abv': abv,
'ibu': ibu,
'color': color,
})
else:
stylename = string.capwords(row[2])
styles[stylename] = []
pprint.pprint(styles)
if __name__ == "__main__":
main()
|
|
3763078a5a9a1973b17fd1c11411c29e176a034f
|
py/baseball-game.py
|
py/baseball-game.py
|
class Solution(object):
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
s = 0
stack = []
for c in ops:
try:
v = int(c)
stack.append(v)
except ValueError:
if c == 'C':
stack.pop()
elif c == 'D':
stack.append(stack[-1] * 2)
elif c == '+':
stack.append(stack[-1] + stack[-2])
return sum(stack)
|
Add py solution for 682. Baseball Game
|
Add py solution for 682. Baseball Game
682. Baseball Game: https://leetcode.com/problems/baseball-game/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 682. Baseball Game
682. Baseball Game: https://leetcode.com/problems/baseball-game/
|
class Solution(object):
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
s = 0
stack = []
for c in ops:
try:
v = int(c)
stack.append(v)
except ValueError:
if c == 'C':
stack.pop()
elif c == 'D':
stack.append(stack[-1] * 2)
elif c == '+':
stack.append(stack[-1] + stack[-2])
return sum(stack)
|
<commit_before><commit_msg>Add py solution for 682. Baseball Game
682. Baseball Game: https://leetcode.com/problems/baseball-game/<commit_after>
|
class Solution(object):
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
s = 0
stack = []
for c in ops:
try:
v = int(c)
stack.append(v)
except ValueError:
if c == 'C':
stack.pop()
elif c == 'D':
stack.append(stack[-1] * 2)
elif c == '+':
stack.append(stack[-1] + stack[-2])
return sum(stack)
|
Add py solution for 682. Baseball Game
682. Baseball Game: https://leetcode.com/problems/baseball-game/class Solution(object):
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
s = 0
stack = []
for c in ops:
try:
v = int(c)
stack.append(v)
except ValueError:
if c == 'C':
stack.pop()
elif c == 'D':
stack.append(stack[-1] * 2)
elif c == '+':
stack.append(stack[-1] + stack[-2])
return sum(stack)
|
<commit_before><commit_msg>Add py solution for 682. Baseball Game
682. Baseball Game: https://leetcode.com/problems/baseball-game/<commit_after>class Solution(object):
def calPoints(self, ops):
"""
:type ops: List[str]
:rtype: int
"""
s = 0
stack = []
for c in ops:
try:
v = int(c)
stack.append(v)
except ValueError:
if c == 'C':
stack.pop()
elif c == 'D':
stack.append(stack[-1] * 2)
elif c == '+':
stack.append(stack[-1] + stack[-2])
return sum(stack)
|
|
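A quick worked example (added for illustration, input values chosen arbitrarily) of how the stack in calPoints evolves:
# ops = ["5", "2", "C", "D", "+"]
#   "5" -> [5]
#   "2" -> [5, 2]
#   "C" -> invalidate the last score -> [5]
#   "D" -> double the last score     -> [5, 10]
#   "+" -> sum of the last two       -> [5, 10, 15]
# total points = 30
print(Solution().calPoints(["5", "2", "C", "D", "+"]))  # prints 30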
f25364ba8dbbd7924e068146599f56e5bd797c4e
|
test/python_api/lldbutil/TestLLDBIterator.py
|
test/python_api/lldbutil/TestLLDBIterator.py
|
"""
Test lldbutil.lldb_iter() which returns an iterator object for lldb's aggregate
data structures.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
class LLDBIteratorTestCase(TestBase):
mydir = "python_api/lldbutil"
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// Set break point at this line.')
def test_lldb_iter(self):
"""Test lldb_iter works correctly."""
self.buildDefault()
self.lldb_iter_test()
def lldb_iter_test(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target.IsValid(), VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint.IsValid(), VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
rc = lldb.SBError()
self.process = target.Launch (self.dbg.GetListener(), None, None, os.ctermid(), os.ctermid(), os.ctermid(), None, 0, False, rc)
if not rc.Success() or not self.process.IsValid():
self.fail("SBTarget.LaunchProcess() failed")
from lldbutil import lldb_iter, get_description
yours = []
for i in range(target.GetNumModules()):
yours.append(target.GetModuleAtIndex(i))
mine = []
for m in lldb_iter(target, 'GetNumModules', 'GetModuleAtIndex'):
mine.append(m)
self.assertTrue(len(yours) == len(mine))
for i in range(len(yours)):
if self.TraceOn():
print "yours[%d]='%s'" % (i, get_description(yours[i]))
print "mine[%d]='%s'" % (i, get_description(mine[i]))
self.assertTrue(yours[i].GetUUIDString() == mine[i].GetUUIDString(),
"UUID of yours[%d] and mine[%d] matches" % (i, i))
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a test case for lldbutil.lldb_iter() which returns an iterator object for lldb objects which can contain other lldb objects. Examples are: SBTarget contains SBModule, SBModule contains SBSymbols, SBProcess contains SBThread, SBThread contains SBFrame, etc.
|
Add a test case for lldbutil.lldb_iter() which returns an iterator object
for lldb objects which can contain other lldb objects. Examples are:
SBTarget contains SBModule, SBModule contains SBSymbols, SBProcess contains
SBThread, SBThread contains SBFrame, etc.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130258 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb
|
Add a test case for lldbutil.lldb_iter() which returns an iterator object
for lldb objects which can contain other lldb objects. Examples are:
SBTarget contains SBModule, SBModule contains SBSymbols, SBProcess contains
SBThread, SBThread contains SBFrame, etc.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130258 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""
Test lldbutil.lldb_iter() which returns an iterator object for lldb's aggregate
data structures.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
class LLDBIteratorTestCase(TestBase):
mydir = "python_api/lldbutil"
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// Set break point at this line.')
def test_lldb_iter(self):
"""Test lldb_iter works correctly."""
self.buildDefault()
self.lldb_iter_test()
def lldb_iter_test(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target.IsValid(), VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint.IsValid(), VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
rc = lldb.SBError()
self.process = target.Launch (self.dbg.GetListener(), None, None, os.ctermid(), os.ctermid(), os.ctermid(), None, 0, False, rc)
if not rc.Success() or not self.process.IsValid():
self.fail("SBTarget.LaunchProcess() failed")
from lldbutil import lldb_iter, get_description
yours = []
for i in range(target.GetNumModules()):
yours.append(target.GetModuleAtIndex(i))
mine = []
for m in lldb_iter(target, 'GetNumModules', 'GetModuleAtIndex'):
mine.append(m)
self.assertTrue(len(yours) == len(mine))
for i in range(len(yours)):
if self.TraceOn():
print "yours[%d]='%s'" % (i, get_description(yours[i]))
print "mine[%d]='%s'" % (i, get_description(mine[i]))
self.assertTrue(yours[i].GetUUIDString() == mine[i].GetUUIDString(),
"UUID of yours[%d] and mine[%d] matches" % (i, i))
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a test case for lldbutil.lldb_iter() which returns an iterator object
for lldb objects which can contain other lldb objects. Examples are:
SBTarget contains SBModule, SBModule contains SBSymbols, SBProcess contains
SBThread, SBThread contains SBFrame, etc.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130258 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""
Test lldbutil.lldb_iter() which returns an iterator object for lldb's aggregate
data structures.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
class LLDBIteratorTestCase(TestBase):
mydir = "python_api/lldbutil"
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// Set break point at this line.')
def test_lldb_iter(self):
"""Test lldb_iter works correctly."""
self.buildDefault()
self.lldb_iter_test()
def lldb_iter_test(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target.IsValid(), VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint.IsValid(), VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
rc = lldb.SBError()
self.process = target.Launch (self.dbg.GetListener(), None, None, os.ctermid(), os.ctermid(), os.ctermid(), None, 0, False, rc)
if not rc.Success() or not self.process.IsValid():
self.fail("SBTarget.LaunchProcess() failed")
from lldbutil import lldb_iter, get_description
yours = []
for i in range(target.GetNumModules()):
yours.append(target.GetModuleAtIndex(i))
mine = []
for m in lldb_iter(target, 'GetNumModules', 'GetModuleAtIndex'):
mine.append(m)
self.assertTrue(len(yours) == len(mine))
for i in range(len(yours)):
if self.TraceOn():
print "yours[%d]='%s'" % (i, get_description(yours[i]))
print "mine[%d]='%s'" % (i, get_description(mine[i]))
self.assertTrue(yours[i].GetUUIDString() == mine[i].GetUUIDString(),
"UUID of yours[%d] and mine[%d] matches" % (i, i))
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a test case for lldbutil.lldb_iter() which returns an iterator object
for lldb objects which can contain other lldb objects. Examples are:
SBTarget contains SBModule, SBModule contains SBSymbols, SBProcess contains
SBThread, SBThread contains SBFrame, etc.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130258 91177308-0d34-0410-b5e6-96231b3b80d8"""
Test lldbutil.lldb_iter() which returns an iterator object for lldb's aggregate
data structures.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
class LLDBIteratorTestCase(TestBase):
mydir = "python_api/lldbutil"
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// Set break point at this line.')
def test_lldb_iter(self):
"""Test lldb_iter works correctly."""
self.buildDefault()
self.lldb_iter_test()
def lldb_iter_test(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target.IsValid(), VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint.IsValid(), VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
rc = lldb.SBError()
self.process = target.Launch (self.dbg.GetListener(), None, None, os.ctermid(), os.ctermid(), os.ctermid(), None, 0, False, rc)
if not rc.Success() or not self.process.IsValid():
self.fail("SBTarget.LaunchProcess() failed")
from lldbutil import lldb_iter, get_description
yours = []
for i in range(target.GetNumModules()):
yours.append(target.GetModuleAtIndex(i))
mine = []
for m in lldb_iter(target, 'GetNumModules', 'GetModuleAtIndex'):
mine.append(m)
self.assertTrue(len(yours) == len(mine))
for i in range(len(yours)):
if self.TraceOn():
print "yours[%d]='%s'" % (i, get_description(yours[i]))
print "mine[%d]='%s'" % (i, get_description(mine[i]))
self.assertTrue(yours[i].GetUUIDString() == mine[i].GetUUIDString(),
"UUID of yours[%d] and mine[%d] matches" % (i, i))
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a test case for lldbutil.lldb_iter() which returns an iterator object
for lldb objects which can contain other lldb objects. Examples are:
SBTarget contains SBModule, SBModule contains SBSymbols, SBProcess contains
SBThread, SBThread contains SBFrame, etc.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@130258 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""
Test lldbutil.lldb_iter() which returns an iterator object for lldb's aggregate
data structures.
"""
import os, time
import re
import unittest2
import lldb
from lldbtest import *
class LLDBIteratorTestCase(TestBase):
mydir = "python_api/lldbutil"
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// Set break point at this line.')
def test_lldb_iter(self):
"""Test lldb_iter works correctly."""
self.buildDefault()
self.lldb_iter_test()
def lldb_iter_test(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target.IsValid(), VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line)
self.assertTrue(breakpoint.IsValid(), VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
rc = lldb.SBError()
self.process = target.Launch (self.dbg.GetListener(), None, None, os.ctermid(), os.ctermid(), os.ctermid(), None, 0, False, rc)
if not rc.Success() or not self.process.IsValid():
self.fail("SBTarget.LaunchProcess() failed")
from lldbutil import lldb_iter, get_description
yours = []
for i in range(target.GetNumModules()):
yours.append(target.GetModuleAtIndex(i))
mine = []
for m in lldb_iter(target, 'GetNumModules', 'GetModuleAtIndex'):
mine.append(m)
self.assertTrue(len(yours) == len(mine))
for i in range(len(yours)):
if self.TraceOn():
print "yours[%d]='%s'" % (i, get_description(yours[i]))
print "mine[%d]='%s'" % (i, get_description(mine[i]))
self.assertTrue(yours[i].GetUUIDString() == mine[i].GetUUIDString(),
"UUID of yours[%d] and mine[%d] matches" % (i, i))
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
aa837386f4cce1ff23fbdf58a633d0e4bf1cc846
|
setup-utils/passhash_pbkdf2.py
|
setup-utils/passhash_pbkdf2.py
|
import sys
sys.path.append("..") # This needs to work from the setup-utils subdirectory
from txircd.modules.hash_pbkdf2 import HashPBKDF2
if len(sys.argv) < 2:
print("Usage: {} password".format(__file__))
else:
hasher = HashPBKDF2()
hashedPass = hasher.hash(sys.argv[1])
print(hashedPass)
|
Add a password hasher for opers
|
Add a password hasher for opers
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Add a password hasher for opers
|
import sys
sys.path.append("..") # This needs to work from the setup-utils subdirectory
from txircd.modules.hash_pbkdf2 import HashPBKDF2
if len(sys.argv) < 2:
print("Usage: {} password".format(__file__))
else:
hasher = HashPBKDF2()
hashedPass = hasher.hash(sys.argv[1])
print(hashedPass)
|
<commit_before><commit_msg>Add a password hasher for opers<commit_after>
|
import sys
sys.path.append("..") # This needs to work from the setup-utils subdirectory
from txircd.modules.hash_pbkdf2 import HashPBKDF2
if len(sys.argv) < 2:
print("Usage: {} password".format(__file__))
else:
hasher = HashPBKDF2()
hashedPass = hasher.hash(sys.argv[1])
print(hashedPass)
|
Add a password hasher for opersimport sys
sys.path.append("..") # This needs to work from the setup-utils subdirectory
from txircd.modules.hash_pbkdf2 import HashPBKDF2
if len(sys.argv) < 2:
print("Usage: {} password".format(__file__))
else:
hasher = HashPBKDF2()
hashedPass = hasher.hash(sys.argv[1])
print(hashedPass)
|
<commit_before><commit_msg>Add a password hasher for opers<commit_after>import sys
sys.path.append("..") # This needs to work from the setup-utils subdirectory
from txircd.modules.hash_pbkdf2 import HashPBKDF2
if len(sys.argv) < 2:
print("Usage: {} password".format(__file__))
else:
hasher = HashPBKDF2()
hashedPass = hasher.hash(sys.argv[1])
print(hashedPass)
|
|
bee9396a4a8bdef3a22b2010cf873637d5bf56db
|
numberutils.py
|
numberutils.py
|
##-*- coding: utf-8 -*-
#!/usr/bin/python
'''
Number to Hangul string util.
'''
__author__ = 'SeomGi, Han'
__credits__ = ['SeomGi, Han']
__copyright__ = 'Copyright 2015, Python Utils Project'
__license__ = 'MIT'
__version__ = '0.0.1'
__maintainer__ = 'SeomGi, Han'
__email__ = 'iandmyhand@gmail.com'
__status__ = 'Production'
HANGUL_NUMBER = [
'', '일', '이', '삼', '사', '오', '육', '칠', '팔', '구', '십'
]
HANGUL_NUMBER_UNIT = [
'', '십', '백', '천'
]
HANGUL_NUMBER_BIG_UNIT = [
'', '만', '억', '조', '경', '해'
]
class NumberUtils:
def __init__(self):
pass
def convert_number_to_hangul_string(self, number_value):
result = ''
string_value = str(number_value)
len_string_value = len(string_value)
if string_value and string_value.isdigit():
index = 0
while index < len_string_value:
single_result = ''
hangul_number = HANGUL_NUMBER[int(string_value[index])]
if hangul_number:
unitindex = ((len_string_value - index) % 4) - 1
single_result += hangul_number + HANGUL_NUMBER_UNIT[unitindex]
if (len_string_value - index - 1) % 4 == 0:
big_unitindex = (len(string_value) - index - 1) // 4
if len(HANGUL_NUMBER_BIG_UNIT) > big_unitindex:
single_result += HANGUL_NUMBER_BIG_UNIT[big_unitindex]
result += single_result
index += 1
return result
|
Add a function can convert number to Hangul string.
|
Add a function can convert number to Hangul string.
|
Python
|
mit
|
iandmyhand/python-utils
|
Add a function can convert number to Hangul string.
|
##-*- coding: utf-8 -*-
#!/usr/bin/python
'''
Number to Hangul string util.
'''
__author__ = 'SeomGi, Han'
__credits__ = ['SeomGi, Han']
__copyright__ = 'Copyright 2015, Python Utils Project'
__license__ = 'MIT'
__version__ = '0.0.1'
__maintainer__ = 'SeomGi, Han'
__email__ = 'iandmyhand@gmail.com'
__status__ = 'Production'
HANGUL_NUMBER = [
'', '일', '이', '삼', '사', '오', '육', '칠', '팔', '구', '십'
]
HANGUL_NUMBER_UNIT = [
'', '십', '백', '천'
]
HANGUL_NUMBER_BIG_UNIT = [
'', '만', '억', '조', '경', '해'
]
class NumberUtils:
def __init__(self):
pass
def convert_number_to_hangul_string(self, number_value):
result = ''
string_value = str(number_value)
len_string_value = len(string_value)
if string_value and string_value.isdigit():
index = 0
while index < len_string_value:
single_result = ''
hangul_number = HANGUL_NUMBER[int(string_value[index])]
if hangul_number:
unitindex = ((len_string_value - index) % 4) - 1
single_result += hangul_number + HANGUL_NUMBER_UNIT[unitindex]
if (len_string_value - index - 1) % 4 == 0:
big_unitindex = (len(string_value) - index - 1) // 4
if len(HANGUL_NUMBER_BIG_UNIT) > big_unitindex:
single_result += HANGUL_NUMBER_BIG_UNIT[big_unitindex]
result += single_result
index += 1
return result
|
<commit_before><commit_msg>Add a function can convert number to Hangul string.<commit_after>
|
##-*- coding: utf-8 -*-
#!/usr/bin/python
'''
Number to Hangul string util.
'''
__author__ = 'SeomGi, Han'
__credits__ = ['SeomGi, Han']
__copyright__ = 'Copyright 2015, Python Utils Project'
__license__ = 'MIT'
__version__ = '0.0.1'
__maintainer__ = 'SeomGi, Han'
__email__ = 'iandmyhand@gmail.com'
__status__ = 'Production'
HANGUL_NUMBER = [
'', '일', '이', '삼', '사', '오', '육', '칠', '팔', '구', '십'
]
HANGUL_NUMBER_UNIT = [
'', '십', '백', '천'
]
HANGUL_NUMBER_BIG_UNIT = [
'', '만', '억', '조', '경', '해'
]
class NumberUtils:
def __init__(self):
pass
def convert_number_to_hangul_string(self, number_value):
result = ''
string_value = str(number_value)
len_string_value = len(string_value)
if string_value and string_value.isdigit():
index = 0
while index < len_string_value:
single_result = ''
hangul_number = HANGUL_NUMBER[int(string_value[index])]
if hangul_number:
unitindex = ((len_string_value - index) % 4) - 1
single_result += hangul_number + HANGUL_NUMBER_UNIT[unitindex]
if (len_string_value - index - 1) % 4 == 0:
big_unitindex = (len(string_value) - index - 1) // 4
if len(HANGUL_NUMBER_BIG_UNIT) > big_unitindex:
single_result += HANGUL_NUMBER_BIG_UNIT[big_unitindex]
result += single_result
index += 1
return result
|
Add a function can convert number to Hangul string.##-*- coding: utf-8 -*-
#!/usr/bin/python
'''
Number to Hangul string util.
'''
__author__ = 'SeomGi, Han'
__credits__ = ['SeomGi, Han']
__copyright__ = 'Copyright 2015, Python Utils Project'
__license__ = 'MIT'
__version__ = '0.0.1'
__maintainer__ = 'SeomGi, Han'
__email__ = 'iandmyhand@gmail.com'
__status__ = 'Production'
HANGUL_NUMBER = [
'', '일', '이', '삼', '사', '오', '육', '칠', '팔', '구', '십'
]
HANGUL_NUMBER_UNIT = [
'', '십', '백', '천'
]
HANGUL_NUMBER_BIG_UNIT = [
'', '만', '억', '조', '경', '해'
]
class NumberUtils:
def __init__(self):
pass
def convert_number_to_hangul_string(self, number_value):
result = ''
string_value = str(number_value)
len_string_value = len(string_value)
if string_value and string_value.isdigit():
index = 0
while index < len_string_value:
single_result = ''
hangul_number = HANGUL_NUMBER[int(string_value[index])]
if hangul_number:
unitindex = ((len_string_value - index) % 4) - 1
single_result += hangul_number + HANGUL_NUMBER_UNIT[unitindex]
if (len_string_value - index - 1) % 4 == 0:
big_unitindex = (len(string_value) - index - 1) // 4
if len(HANGUL_NUMBER_BIG_UNIT) > big_unitindex:
single_result += HANGUL_NUMBER_BIG_UNIT[big_unitindex]
result += single_result
index += 1
return result
|
<commit_before><commit_msg>Add a function can convert number to Hangul string.<commit_after>##-*- coding: utf-8 -*-
#!/usr/bin/python
'''
Number to Hangul string util.
'''
__author__ = 'SeomGi, Han'
__credits__ = ['SeomGi, Han']
__copyright__ = 'Copyright 2015, Python Utils Project'
__license__ = 'MIT'
__version__ = '0.0.1'
__maintainer__ = 'SeomGi, Han'
__email__ = 'iandmyhand@gmail.com'
__status__ = 'Production'
HANGUL_NUMBER = [
'', '일', '이', '삼', '사', '오', '육', '칠', '팔', '구', '십'
]
HANGUL_NUMBER_UNIT = [
'', '십', '백', '천'
]
HANGUL_NUMBER_BIG_UNIT = [
'', '만', '억', '조', '경', '해'
]
class NumberUtils:
def __init__(self):
pass
def convert_number_to_hangul_string(self, number_value):
result = ''
string_value = str(number_value)
len_string_value = len(string_value)
if string_value and string_value.isdigit():
index = 0
while index < len_string_value:
single_result = ''
hangul_number = HANGUL_NUMBER[int(string_value[index])]
if hangul_number:
unitindex = ((len_string_value - index) % 4) - 1
single_result += hangul_number + HANGUL_NUMBER_UNIT[unitindex]
if (len_string_value - index - 1) % 4 == 0:
big_unitindex = (len(string_value) - index - 1) // 4
if len(HANGUL_NUMBER_BIG_UNIT) > big_unitindex:
single_result += HANGUL_NUMBER_BIG_UNIT[big_unitindex]
result += single_result
index += 1
return result
|
|
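A small usage sketch (not from the repository); the sample value is arbitrary and the expected string follows from the digit and unit tables above.
# -*- coding: utf-8 -*-
# Illustrative usage of the NumberUtils class defined above.
utils = NumberUtils()
print(utils.convert_number_to_hangul_string(2015))
# With the tables above this renders as '이천일십오': each non-zero digit is
# paired with its positional unit, so the tens digit 1 comes out as '일십'.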
d116c4013ebf8b5831d450ae12932446056e7a84
|
dump_model.py
|
dump_model.py
|
import pickle
import numpy as np
import random
import os
import struct
from config import *
def binary(num):
"""
Float to IEEE-754 single precision binary string.
"""
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
def test_manual_classification(model):
"""
Perform classification manually to see if we can replicate scikit learn logistic regression.
"""
print model
print model.intercept_
print model.classes_
c = model.coef_
print c.shape
for i in xrange(1000000):
# x = [float(r) / 100 for r in reversed(list(range(70)))]
x = [random.random() for i in xrange(70)]
true_value = model.predict(x)[0]
result = np.dot(c, x) + model.intercept_
my_value = model.classes_[result.argmax()]
if true_value != my_value:
print "Predicted %s and my prediction is %s" % (true_value, my_value)
break
def dump_model(model, out_dir = 'model_dump'):
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
with open(os.path.join(out_dir, 'intercept'), 'w') as f:
joiner = ''
for floaat in model.intercept_:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'coefficient'), 'w') as f:
row_joiner = ''
for row in model.coef_:
f.write(row_joiner)
row_joiner = '\n'
joiner = ''
for floaat in row:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'labels'), 'w') as f:
joiner = ''
for label in model.classes_:
f.write(joiner)
joiner = '\n'
f.write(LABEL_LIST[label])
if __name__ == "__main__":
model = None
with open('model.pickle', 'r') as f:
model = pickle.load(f)
test_manual_classification(model)
|
Add means to dump model for manual classification
|
Add means to dump model for manual classification
|
Python
|
mit
|
hptruong93/MouseGestureRecognition,hptruong93/MouseGestureRecognition
|
Add means to dump model for manual classification
|
import pickle
import numpy as np
import random
import os
import struct
from config import *
def binary(num):
"""
Float to IEEE-754 single precision binary string.
"""
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
def test_manual_classification(model):
"""
Perform classification manually to see if we can replicate scikit learn logistic regression.
"""
print model
print model.intercept_
print model.classes_
c = model.coef_
print c.shape
for i in xrange(1000000):
# x = [float(r) / 100 for r in reversed(list(range(70)))]
x = [random.random() for i in xrange(70)]
true_value = model.predict(x)[0]
result = np.dot(c, x) + model.intercept_
my_value = model.classes_[result.argmax()]
if true_value != my_value:
print "Predicted %s and my prediction is %s" % (true_value, my_value)
break
def dump_model(model, out_dir = 'model_dump'):
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
with open(os.path.join(out_dir, 'intercept'), 'w') as f:
joiner = ''
for floaat in model.intercept_:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'coefficient'), 'w') as f:
row_joiner = ''
for row in model.coef_:
f.write(row_joiner)
row_joiner = '\n'
joiner = ''
for floaat in row:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'labels'), 'w') as f:
joiner = ''
for label in model.classes_:
f.write(joiner)
joiner = '\n'
f.write(LABEL_LIST[label])
if __name__ == "__main__":
model = None
with open('model.pickle', 'r') as f:
model = pickle.load(f)
test_manual_classification(model)
|
<commit_before><commit_msg>Add means to dump model for manual classification<commit_after>
|
import pickle
import numpy as np
import random
import os
import struct
from config import *
def binary(num):
"""
Float to IEEE-754 single precision binary string.
"""
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
def test_manual_classification(model):
"""
Perform classification manually to see if we can replicate scikit learn logistic regression.
"""
print model
print model.intercept_
print model.classes_
c = model.coef_
print c.shape
for i in xrange(1000000):
# x = [float(r) / 100 for r in reversed(list(range(70)))]
x = [random.random() for i in xrange(70)]
true_value = model.predict(x)[0]
result = np.dot(c, x) + model.intercept_
my_value = model.classes_[result.argmax()]
if true_value != my_value:
print "Predicted %s and my prediction is %s" % (true_value, my_value)
break
def dump_model(model, out_dir = 'model_dump'):
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
with open(os.path.join(out_dir, 'intercept'), 'w') as f:
joiner = ''
for floaat in model.intercept_:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'coefficient'), 'w') as f:
row_joiner = ''
for row in model.coef_:
f.write(row_joiner)
row_joiner = '\n'
joiner = ''
for floaat in row:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'labels'), 'w') as f:
joiner = ''
for label in model.classes_:
f.write(joiner)
joiner = '\n'
f.write(LABEL_LIST[label])
if __name__ == "__main__":
model = None
with open('model.pickle', 'r') as f:
model = pickle.load(f)
test_manual_classification(model)
|
Add means to dump model for manual classification
import pickle
import numpy as np
import random
import os
import struct
from config import *
def binary(num):
"""
Float to IEEE-754 single precision binary string.
"""
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
def test_manual_classification(model):
"""
Perform classification manually to see if we can replicate scikit learn logistic regression.
"""
print model
print model.intercept_
print model.classes_
c = model.coef_
print c.shape
for i in xrange(1000000):
# x = [float(r) / 100 for r in reversed(list(range(70)))]
x = [random.random() for i in xrange(70)]
true_value = model.predict(x)[0]
result = np.dot(c, x) + model.intercept_
my_value = model.classes_[result.argmax()]
if true_value != my_value:
print "Predicted %s and my prediction is %s" % (true_value, my_value)
break
def dump_model(model, out_dir = 'model_dump'):
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
with open(os.path.join(out_dir, 'intercept'), 'w') as f:
joiner = ''
for floaat in model.intercept_:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'coefficient'), 'w') as f:
row_joiner = ''
for row in model.coef_:
f.write(row_joiner)
row_joiner = '\n'
joiner = ''
for floaat in row:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'labels'), 'w') as f:
joiner = ''
for label in model.classes_:
f.write(joiner)
joiner = '\n'
f.write(LABEL_LIST[label])
if __name__ == "__main__":
model = None
with open('model.pickle', 'r') as f:
model = pickle.load(f)
test_manual_classification(model)
|
<commit_before><commit_msg>Add means to dump model for manual classification<commit_after>
import pickle
import numpy as np
import random
import os
import struct
from config import *
def binary(num):
"""
Float to IEEE-754 single precision binary string.
"""
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
def test_manual_classification(model):
"""
Perform classification manually to see if we can replicate scikit learn logistic regression.
"""
print model
print model.intercept_
print model.classes_
c = model.coef_
print c.shape
for i in xrange(1000000):
# x = [float(r) / 100 for r in reversed(list(range(70)))]
x = [random.random() for i in xrange(70)]
true_value = model.predict(x)[0]
result = np.dot(c, x) + model.intercept_
my_value = model.classes_[result.argmax()]
if true_value != my_value:
print "Predicted %s and my prediction is %s" % (true_value, my_value)
break
def dump_model(model, out_dir = 'model_dump'):
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
with open(os.path.join(out_dir, 'intercept'), 'w') as f:
joiner = ''
for floaat in model.intercept_:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'coefficient'), 'w') as f:
row_joiner = ''
for row in model.coef_:
f.write(row_joiner)
row_joiner = '\n'
joiner = ''
for floaat in row:
f.write(joiner)
joiner = '\n'
f.write(binary(floaat))
with open(os.path.join(out_dir, 'labels'), 'w') as f:
joiner = ''
for label in model.classes_:
f.write(joiner)
joiner = '\n'
f.write(LABEL_LIST[label])
if __name__ == "__main__":
model = None
with open('model.pickle', 'r') as f:
model = pickle.load(f)
test_manual_classification(model)
|
|
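For orientation, a hypothetical driver for dump_model(), which the __main__ block above never calls; the toy data, the scikit-learn LogisticRegression, and the output directory name are assumptions for illustration.
# Illustrative only: train a throwaway model and dump it with dump_model().
import numpy as np
from sklearn.linear_model import LogisticRegression
X = np.random.rand(200, 70)             # 70 features, matching the test above
y = np.random.randint(0, 4, size=200)   # four made-up class ids
clf = LogisticRegression().fit(X, y)
# dump_model() only needs intercept_, coef_ and classes_, all present on clf,
# but it also writes LABEL_LIST[label] (imported from config), so LABEL_LIST
# must contain an entry for every class id used here.
dump_model(clf, out_dir='model_dump')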
a49adce8b7822d972bfef5046d69366a4b26e0bb
|
jjvm.py
|
jjvm.py
|
#!/usr/bin/python
import argparse
import sys
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()
with open(args.path, "rb") as c:
byte = c.read(1)
while byte:
print '{0:02x}'.format(ord(byte))
byte = c.read(1)
|
Print sequence of bytes that comprise the class file
|
Print sequence of bytes that comprise the class file
|
Python
|
apache-2.0
|
justinccdev/jjvm
|
Print sequence of bytes that comprise the class file
|
#!/usr/bin/python
import argparse
import sys
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()
with open(args.path, "rb") as c:
byte = c.read(1)
while byte:
print '{0:02x}'.format(ord(byte))
byte = c.read(1)
|
<commit_before><commit_msg>Print sequence of bytes that comprise the class file<commit_after>
|
#!/usr/bin/python
import argparse
import sys
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()
with open(args.path, "rb") as c:
byte = c.read(1)
while byte:
print '{0:02x}'.format(ord(byte))
byte = c.read(1)
|
Print sequence of bytes that comprise the class file#!/usr/bin/python
import argparse
import sys
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()
with open(args.path, "rb") as c:
byte = c.read(1)
while byte:
print '{0:02x}'.format(ord(byte))
byte = c.read(1)
|
<commit_before><commit_msg>Print sequence of bytes that comprise the class file<commit_after>#!/usr/bin/python
import argparse
import sys
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()
with open(args.path, "rb") as c:
byte = c.read(1)
while byte:
print '{0:02x}'.format(ord(byte))
byte = c.read(1)
|
|
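As a concrete illustration (not part of the commit): every compiled .class file begins with the magic number 0xCAFEBABE, so the first four lines printed for a real class are always ca, fe, ba, be; the fabricated file below only demonstrates the one-byte-per-line output format without needing a real class.
# Illustrative only: run the same loop over a fabricated 4-byte file.
with open('demo.bin', 'wb') as out:
    out.write(b'\xca\xfe\xba\xbe')  # the .class magic number
with open('demo.bin', 'rb') as c:
    byte = c.read(1)
    while byte:
        print '{0:02x}'.format(ord(byte))  # ca, fe, ba, be on separate lines
        byte = c.read(1)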
0c4d113b7379ac4eea6a51f1c510881095caf9e8
|
contrib/session_generator.py
|
contrib/session_generator.py
|
#!/usr/bin/env python
# Works with both python2 and python3; please preserve this property
# Copyright (C) 2016 mod_auth_gssapi contributors - See COPYING for (C) terms
# Simple script to generate GssapiSessionKey values
import base64
import os
bits = base64.b64encode(os.urandom(32))
print("GssapiSessionKey key:" + bits.decode('utf-8'))
|
Add simple script for generating session keys
|
Add simple script for generating session keys
Signed-off-by: Robbie Harwood <3f8631879978f4cea53bf1163760f1ac18f9cb08@redhat.com>
Reviewed-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>
Closes #105
|
Python
|
mit
|
frenche/mod_auth_gssapi,frenche/mod_auth_gssapi,frenche/mod_auth_gssapi,frenche/mod_auth_gssapi
|
Add simple script for generating session keys
Signed-off-by: Robbie Harwood <3f8631879978f4cea53bf1163760f1ac18f9cb08@redhat.com>
Reviewed-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>
Closes #105
|
#!/usr/bin/env python
# Works with both python2 and python3; please preserve this property
# Copyright (C) 2016 mod_auth_gssapi contributors - See COPYING for (C) terms
# Simple script to generate GssapiSessionKey values
import base64
import os
bits = base64.b64encode(os.urandom(32))
print("GssapiSessionKey key:" + bits.decode('utf-8'))
|
<commit_before><commit_msg>Add simple script for generating session keys
Signed-off-by: Robbie Harwood <3f8631879978f4cea53bf1163760f1ac18f9cb08@redhat.com>
Reviewed-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>
Closes #105<commit_after>
|
#!/usr/bin/env python
# Works with both python2 and python3; please preserve this property
# Copyright (C) 2016 mod_auth_gssapi contributors - See COPYING for (C) terms
# Simple script to generate GssapiSessionKey values
import base64
import os
bits = base64.b64encode(os.urandom(32))
print("GssapiSessionKey key:" + bits.decode('utf-8'))
|
Add simple script for generating session keys
Signed-off-by: Robbie Harwood <3f8631879978f4cea53bf1163760f1ac18f9cb08@redhat.com>
Reviewed-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>
Closes #105#!/usr/bin/env python
# Works with both python2 and python3; please preserve this property
# Copyright (C) 2016 mod_auth_gssapi contributors - See COPYING for (C) terms
# Simple script to generate GssapiSessionKey values
import base64
import os
bits = base64.b64encode(os.urandom(32))
print("GssapiSessionKey key:" + bits.decode('utf-8'))
|
<commit_before><commit_msg>Add simple script for generating session keys
Signed-off-by: Robbie Harwood <3f8631879978f4cea53bf1163760f1ac18f9cb08@redhat.com>
Reviewed-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>
Closes #105<commit_after>#!/usr/bin/env python
# Works with both python2 and python3; please preserve this property
# Copyright (C) 2016 mod_auth_gssapi contributors - See COPYING for (C) terms
# Simple script to generate GssapiSessionKey values
import base64
import os
bits = base64.b64encode(os.urandom(32))
print("GssapiSessionKey key:" + bits.decode('utf-8'))
|
|
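A small sanity-check sketch (added for illustration): 32 random bytes always base64-encode to a 44-character value ending in a single '=' pad, which is what follows "GssapiSessionKey key:" in the script's output.
# Illustrative check of the key shape; never reuse a key printed in an example.
import base64
import os
key = base64.b64encode(os.urandom(32)).decode('utf-8')
assert len(key) == 44 and key.endswith('=')
print("GssapiSessionKey key:" + key)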
7e0f1552ebdadb8f2023167afcd557bdc09b06f9
|
scripts/analysis/plot_velocity_based_position_controller_data.py
|
scripts/analysis/plot_velocity_based_position_controller_data.py
|
import numpy as np
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
|
Add plotting script for analyzing velocity based position controller
|
Add plotting script for analyzing velocity based position controller
|
Python
|
mpl-2.0
|
jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy,jhu-asco/aerial_autonomy
|
Add plotting script for analyzing velocity based position controller
|
import numpy as np
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
|
<commit_before><commit_msg>Add plotting script for analyzing velocity based position controller<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
|
Add plotting script for analyzing velocity based position controllerimport numpy as np
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
|
<commit_before><commit_msg>Add plotting script for analyzing velocity based position controller<commit_after>import numpy as np
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage ./plot_velocity_based_position_controller.py [File_name]"
sys.exit(-1)
data = np.genfromtxt(sys.argv[-1], delimiter=',')
# 0 for x, 1 for y 2 for z and 3 for yaw
plot_axis = 3;
ts = (data[:,0] - data[0, 0])/1e9
plt.figure(1+plot_axis)
plt.subplot(2,1,1)
plt.plot(ts, data[:, 1+plot_axis]);
plt.ylabel('Error')
plt.subplot(2,1,2)
plt.plot(ts, data[:, 5+plot_axis]);
plt.ylabel('Cumulative Error')
plt.xlabel('Time (seconds)')
plt.show()
plt.figure(2+plot_axis)
plt.plot(ts, data[:, 9+plot_axis])
plt.ylabel('Control')
plt.xlabel('Time (seconds)')
|
|
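Reading the column indices in the script, the log appears to be a 13-column CSV: a nanosecond timestamp, then x/y/z/yaw error (columns 1-4), cumulative error (5-8) and control (9-12); this layout is an inference, not documented in the commit. A synthetic file like the one below (values invented) is enough to exercise the plots.
# Illustrative only: write a fake 13-column controller log for the script above.
import numpy as np
n = 200
t = 1.5e18 + np.arange(n) * 1e7          # fabricated timestamps in nanoseconds
fake = np.column_stack([t] + [np.random.randn(n) for _ in range(12)])
np.savetxt('fake_controller_log.csv', fake, delimiter=',')
# then: python plot_velocity_based_position_controller_data.py fake_controller_log.csv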
26c413ca10550454c773829d4fa7fceafe2e9504
|
tests/test_data_checksums.py
|
tests/test_data_checksums.py
|
""" test data_checksums"""
from nose.tools import assert_equal
def test_data_checksums():
from pyne.data import data_checksums
assert_equal(len(data_checksums), 6)
assert_equal(data_checksums['/neutron/simple_xs'], '3d6e086977783dcdf07e5c6b0c2416be')
|
Add basic test of data_checksums
|
Add basic test of data_checksums
|
Python
|
bsd-3-clause
|
pyne/simplesim
|
Add basic test of data_checksums
|
""" test data_checksums"""
from nose.tools import assert_equal
def test_data_checksums():
from pyne.data import data_checksums
assert_equal(len(data_checksums), 6)
assert_equal(data_checksums['/neutron/simple_xs'], '3d6e086977783dcdf07e5c6b0c2416be')
|
<commit_before><commit_msg>Add basic test of data_checksums<commit_after>
|
""" test data_checksums"""
from nose.tools import assert_equal
def test_data_checksums():
from pyne.data import data_checksums
assert_equal(len(data_checksums), 6)
assert_equal(data_checksums['/neutron/simple_xs'], '3d6e086977783dcdf07e5c6b0c2416be')
|
Add basic test of data_checksums""" test data_checksums"""
from nose.tools import assert_equal
def test_data_checksums():
from pyne.data import data_checksums
assert_equal(len(data_checksums), 6)
assert_equal(data_checksums['/neutron/simple_xs'], '3d6e086977783dcdf07e5c6b0c2416be')
|
<commit_before><commit_msg>Add basic test of data_checksums<commit_after>""" test data_checksums"""
from nose.tools import assert_equal
def test_data_checksums():
from pyne.data import data_checksums
assert_equal(len(data_checksums), 6)
assert_equal(data_checksums['/neutron/simple_xs'], '3d6e086977783dcdf07e5c6b0c2416be')
|
|
4eb18bb737cd878fb684d7ce4a0718ff88a1238d
|
chrome/installer/tools/shortcut_properties.py
|
chrome/installer/tools/shortcut_properties.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a Windows shortcut's property bag to stdout.
This is required to confirm correctness of properties that aren't readily
available in Windows UI.
"""
import optparse
from pywintypes import IID
import sys
from win32com.propsys import propsys
from win32com.propsys import pscon
def PrintShortcutProperties(shortcut_path, dump_all):
properties = propsys.SHGetPropertyStoreFromParsingName(shortcut_path)
print 'Known properties (--dump-all for more):'
app_id = properties.GetValue(pscon.PKEY_AppUserModel_ID).GetValue()
print '\tAppUserModelId => "%s"' % app_id
# Hard code PKEY_AppUserModel_IsDualMode as pscon doesn't support it.
PKEY_AppUserModel_IsDualMode = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'),
11)
dual_mode = properties.GetValue(PKEY_AppUserModel_IsDualMode).GetValue()
print '\tDual Mode => "%s"' % dual_mode
# Dump all other properties with their raw ID if requested, add them above
# over time as we explicitly care about more properties, see propkey.h or
# pscon.py for a reference of existing PKEYs' meaning.
if dump_all:
print '\nOther properties:'
for i in range(0, properties.GetCount()):
property_key = properties.GetAt(i)
property_value = properties.GetValue(property_key).GetValue()
print '\t%s => "%s"' % (property_key, property_value)
def main():
usage = 'usage: %prog [options] "C:\\Path\\To\\My Shortcut.lnk"'
parser = optparse.OptionParser(usage,
description="Dumps a shortcut's properties.")
parser.add_option('-a', '--dump-all', action='store_true', dest='dump_all',
default=False)
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
PrintShortcutProperties(args[0], options.dump_all)
if __name__ == '__main__':
sys.exit(main())
|
Add a script to installer/tools to dump a shortcut's property bag.
|
Add a script to installer/tools to dump a shortcut's property bag.
This is required to verify DualMode and AppUserModelId properties which
are not otherwise visible in Windows shortcut properties UI.
BUG=501166
Review URL: https://codereview.chromium.org/1259213005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#341350}
|
Python
|
bsd-3-clause
|
ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend
|
Add a script to installer/tools to dump a shortcut's property bag.
This is required to verify DualMode and AppUserModelId properties which
are not otherwise visible in Windows shortcut properties UI.
BUG=501166
Review URL: https://codereview.chromium.org/1259213005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#341350}
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a Windows shortcut's property bag to stdout.
This is required to confirm correctness of properties that aren't readily
available in Windows UI.
"""
import optparse
from pywintypes import IID
import sys
from win32com.propsys import propsys
from win32com.propsys import pscon
def PrintShortcutProperties(shortcut_path, dump_all):
properties = propsys.SHGetPropertyStoreFromParsingName(shortcut_path)
print 'Known properties (--dump-all for more):'
app_id = properties.GetValue(pscon.PKEY_AppUserModel_ID).GetValue()
print '\tAppUserModelId => "%s"' % app_id
# Hard code PKEY_AppUserModel_IsDualMode as pscon doesn't support it.
PKEY_AppUserModel_IsDualMode = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'),
11)
dual_mode = properties.GetValue(PKEY_AppUserModel_IsDualMode).GetValue()
print '\tDual Mode => "%s"' % dual_mode
# Dump all other properties with their raw ID if requested, add them above
# over time as we explicitly care about more properties, see propkey.h or
# pscon.py for a reference of existing PKEYs' meaning.
if dump_all:
print '\nOther properties:'
for i in range(0, properties.GetCount()):
property_key = properties.GetAt(i)
property_value = properties.GetValue(property_key).GetValue()
print '\t%s => "%s"' % (property_key, property_value)
def main():
usage = 'usage: %prog [options] "C:\\Path\\To\\My Shortcut.lnk"'
parser = optparse.OptionParser(usage,
description="Dumps a shortcut's properties.")
parser.add_option('-a', '--dump-all', action='store_true', dest='dump_all',
default=False)
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
PrintShortcutProperties(args[0], options.dump_all)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script to installer/tools to dump a shortcut's property bag.
This is required to verify DualMode and AppUserModelId properties which
are not otherwise visible in Windows shortcut properties UI.
BUG=501166
Review URL: https://codereview.chromium.org/1259213005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#341350}<commit_after>
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a Windows shortcut's property bag to stdout.
This is required to confirm correctness of properties that aren't readily
available in Windows UI.
"""
import optparse
from pywintypes import IID
import sys
from win32com.propsys import propsys
from win32com.propsys import pscon
def PrintShortcutProperties(shortcut_path, dump_all):
properties = propsys.SHGetPropertyStoreFromParsingName(shortcut_path)
print 'Known properties (--dump-all for more):'
app_id = properties.GetValue(pscon.PKEY_AppUserModel_ID).GetValue()
print '\tAppUserModelId => "%s"' % app_id
# Hard code PKEY_AppUserModel_IsDualMode as pscon doesn't support it.
PKEY_AppUserModel_IsDualMode = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'),
11)
dual_mode = properties.GetValue(PKEY_AppUserModel_IsDualMode).GetValue()
print '\tDual Mode => "%s"' % dual_mode
# Dump all other properties with their raw ID if requested, add them above
# over time as we explicitly care about more properties, see propkey.h or
# pscon.py for a reference of existing PKEYs' meaning.
if dump_all:
print '\nOther properties:'
for i in range(0, properties.GetCount()):
property_key = properties.GetAt(i)
property_value = properties.GetValue(property_key).GetValue()
print '\t%s => "%s"' % (property_key, property_value)
def main():
usage = 'usage: %prog [options] "C:\\Path\\To\\My Shortcut.lnk"'
parser = optparse.OptionParser(usage,
description="Dumps a shortcut's properties.")
parser.add_option('-a', '--dump-all', action='store_true', dest='dump_all',
default=False)
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
PrintShortcutProperties(args[0], options.dump_all)
if __name__ == '__main__':
sys.exit(main())
|
Add a script to installer/tools to dump a shortcut's property bag.
This is required to verify DualMode and AppUserModelId properties which
are not otherwise visible in Windows shortcut properties UI.
BUG=501166
Review URL: https://codereview.chromium.org/1259213005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#341350}# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a Windows shortcut's property bag to stdout.
This is required to confirm correctness of properties that aren't readily
available in Windows UI.
"""
import optparse
from pywintypes import IID
import sys
from win32com.propsys import propsys
from win32com.propsys import pscon
def PrintShortcutProperties(shortcut_path, dump_all):
properties = propsys.SHGetPropertyStoreFromParsingName(shortcut_path)
print 'Known properties (--dump-all for more):'
app_id = properties.GetValue(pscon.PKEY_AppUserModel_ID).GetValue()
print '\tAppUserModelId => "%s"' % app_id
# Hard code PKEY_AppUserModel_IsDualMode as pscon doesn't support it.
PKEY_AppUserModel_IsDualMode = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'),
11)
dual_mode = properties.GetValue(PKEY_AppUserModel_IsDualMode).GetValue()
print '\tDual Mode => "%s"' % dual_mode
# Dump all other properties with their raw ID if requested, add them above
# over time as we explicitly care about more properties, see propkey.h or
# pscon.py for a reference of existing PKEYs' meaning.
if dump_all:
print '\nOther properties:'
for i in range(0, properties.GetCount()):
property_key = properties.GetAt(i)
property_value = properties.GetValue(property_key).GetValue()
print '\t%s => "%s"' % (property_key, property_value)
def main():
usage = 'usage: %prog [options] "C:\\Path\\To\\My Shortcut.lnk"'
parser = optparse.OptionParser(usage,
description="Dumps a shortcut's properties.")
parser.add_option('-a', '--dump-all', action='store_true', dest='dump_all',
default=False)
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
PrintShortcutProperties(args[0], options.dump_all)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script to installer/tools to dump a shortcut's property bag.
This is required to verify DualMode and AppUserModelId properties which
are not otherwise visible in Windows shortcut properties UI.
BUG=501166
Review URL: https://codereview.chromium.org/1259213005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#341350}<commit_after># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a Windows shortcut's property bag to stdout.
This is required to confirm correctness of properties that aren't readily
available in Windows UI.
"""
import optparse
from pywintypes import IID
import sys
from win32com.propsys import propsys
from win32com.propsys import pscon
def PrintShortcutProperties(shortcut_path, dump_all):
properties = propsys.SHGetPropertyStoreFromParsingName(shortcut_path)
print 'Known properties (--dump-all for more):'
app_id = properties.GetValue(pscon.PKEY_AppUserModel_ID).GetValue()
print '\tAppUserModelId => "%s"' % app_id
# Hard code PKEY_AppUserModel_IsDualMode as pscon doesn't support it.
PKEY_AppUserModel_IsDualMode = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'),
11)
dual_mode = properties.GetValue(PKEY_AppUserModel_IsDualMode).GetValue()
print '\tDual Mode => "%s"' % dual_mode
# Dump all other properties with their raw ID if requested, add them above
# over time as we explicitly care about more properties, see propkey.h or
# pscon.py for a reference of existing PKEYs' meaning.
if dump_all:
print '\nOther properties:'
for i in range(0, properties.GetCount()):
property_key = properties.GetAt(i)
property_value = properties.GetValue(property_key).GetValue()
print '\t%s => "%s"' % (property_key, property_value)
def main():
usage = 'usage: %prog [options] "C:\\Path\\To\\My Shortcut.lnk"'
parser = optparse.OptionParser(usage,
description="Dumps a shortcut's properties.")
parser.add_option('-a', '--dump-all', action='store_true', dest='dump_all',
default=False)
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
PrintShortcutProperties(args[0], options.dump_all)
if __name__ == '__main__':
sys.exit(main())
|
|
4ed4baae070cd6be5164e03369aa28c75e7684f2
|
spec/bottling_specs/factory_specs/BottleSingletonAppLoader_specs.py
|
spec/bottling_specs/factory_specs/BottleSingletonAppLoader_specs.py
|
import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
Add loader for singleton apps
|
Add loader for singleton apps
|
Python
|
mit
|
datamora/datamora,datamora/datamora
|
Add loader for singleton apps
|
import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
<commit_before><commit_msg>Add loader for singleton apps<commit_after>
|
import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
Add loader for singleton appsimport fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
<commit_before><commit_msg>Add loader for singleton apps<commit_after>import fudge
from bottling.factory import BottleSingletonAppLoader
class describe_init:
def it_initializes_with_given_options(self):
ref = 'my_module:app'
kind = None
loader = BottleSingletonAppLoader(ref, kind)
assert loader.ref == ref
assert loader.kind == None
class describe_load:
@fudge.patch('bottle.load_app')
def given_no_config_or_runtime_dependencies(self, bottle_load_app):
app_ref = 'my_module:app'
(bottle_load_app
.expects_call()
.with_args(app_ref)
.returns({}))
loader = BottleSingletonAppLoader(ref=app_ref)
app = loader.load()
assert app is not None
|
|
5496ccfc164a235521d2b6412b836c3ad84eebc1
|
alembic/versions/4fe474604dbb_add_stores_managers_.py
|
alembic/versions/4fe474604dbb_add_stores_managers_.py
|
"""Add `stores_managers` table
Revision ID: 4fe474604dbb
Revises: 5a0e003fafb2
Create Date: 2013-06-28 22:18:42.292040
"""
# revision identifiers, used by Alembic.
revision = '4fe474604dbb'
down_revision = '5a0e003fafb2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('stores_managers',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('store_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['store_id'], ['stores.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint()
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('stores_managers')
### end Alembic commands ###
|
Add database revision which adds `stores_managers` table
|
Add database revision which adds `stores_managers` table
|
Python
|
mit
|
beni55/overholt,mattupstate/overholt,RohithKP/overholt,beni55/overholt,jstacoder/overholt,alexmerser/overholt,jstacoder/overholt,bbuneci/overholt,jstacoder/overholt,qpxu007/overholt,mattupstate/overholt,qpxu007/overholt,qpxu007/overholt,alexmerser/overholt,RohithKP/overholt,jamesblunt/overholt,manhtuhtk/overholt,RohithKP/overholt,manhtuhtk/overholt,manhtuhtk/overholt,bbuneci/overholt,bbuneci/overholt,peterlada/backend,jamesblunt/overholt,alexmerser/overholt,jamesblunt/overholt,peterlada/backend,beni55/overholt,peterlada/backend,mattupstate/overholt,beni55/overholt,jamesblunt/overholt,alexmerser/overholt,bbuneci/overholt,qpxu007/overholt,RohithKP/overholt,mattupstate/overholt,manhtuhtk/overholt,jstacoder/overholt
|
Add database revision which adds `stores_managers` table
|
"""Add `stores_managers` table
Revision ID: 4fe474604dbb
Revises: 5a0e003fafb2
Create Date: 2013-06-28 22:18:42.292040
"""
# revision identifiers, used by Alembic.
revision = '4fe474604dbb'
down_revision = '5a0e003fafb2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('stores_managers',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('store_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['store_id'], ['stores.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint()
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('stores_managers')
### end Alembic commands ###
|
<commit_before><commit_msg>Add database revision which adds `stores_managers` table<commit_after>
|
"""Add `stores_managers` table
Revision ID: 4fe474604dbb
Revises: 5a0e003fafb2
Create Date: 2013-06-28 22:18:42.292040
"""
# revision identifiers, used by Alembic.
revision = '4fe474604dbb'
down_revision = '5a0e003fafb2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('stores_managers',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('store_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['store_id'], ['stores.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint()
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('stores_managers')
### end Alembic commands ###
|
Add database revision which adds `stores_managers` table"""Add `stores_managers` table
Revision ID: 4fe474604dbb
Revises: 5a0e003fafb2
Create Date: 2013-06-28 22:18:42.292040
"""
# revision identifiers, used by Alembic.
revision = '4fe474604dbb'
down_revision = '5a0e003fafb2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('stores_managers',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('store_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['store_id'], ['stores.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint()
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('stores_managers')
### end Alembic commands ###
|
<commit_before><commit_msg>Add database revision which adds `stores_managers` table<commit_after>"""Add `stores_managers` table
Revision ID: 4fe474604dbb
Revises: 5a0e003fafb2
Create Date: 2013-06-28 22:18:42.292040
"""
# revision identifiers, used by Alembic.
revision = '4fe474604dbb'
down_revision = '5a0e003fafb2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('stores_managers',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('store_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['store_id'], ['stores.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint()
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('stores_managers')
### end Alembic commands ###
|
|
b096b3aee871596a535b05266a63fe2655883e91
|
compare_rankings.py
|
compare_rankings.py
|
"""
Copyright 2017 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
import os
import matplotlib
matplotlib.use("PDF")
import matplotlib.pyplot as plt
def read_rankings(flname):
rankings = []
with open(flname) as fl:
reader = csv.reader(fl, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for rec in reader:
rankings.append(tuple(rec[:2]))
return rankings
def analyze_rankings(ranking_flname_1, ranking_flname_2):
rankings_1 = read_rankings(ranking_flname_1)
rankings_2 = read_rankings(ranking_flname_2)
thresholds = [0.01, 0.05, 0.1, 0.25, 0.5]
min_len = min(len(rankings_1), len(rankings_2))
for t in thresholds:
cutoff = int(t * min_len) + 1
intersection = set(rankings_1[:cutoff]).intersection(rankings_2[:cutoff])
percentage = 100.0 * float(len(intersection)) / float(cutoff)
print t, percentage
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument("--rankings-fl-1",
type=str,
required=True)
parser.add_argument("--rankings-fl-2",
type=str,
required=True)
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
analyze_rankings(args.rankings_fl_1,
args.rankings_fl_2)
|
Add script for comparing rankings
|
Add script for comparing rankings
|
Python
|
apache-2.0
|
rnowling/asaph,rnowling/asaph,rnowling/aranyani
|
Add script for comparing rankings
|
"""
Copyright 2017 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
import os
import matplotlib
matplotlib.use("PDF")
import matplotlib.pyplot as plt
def read_rankings(flname):
rankings = []
with open(flname) as fl:
reader = csv.reader(fl, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for rec in reader:
rankings.append(tuple(rec[:2]))
return rankings
def analyze_rankings(ranking_flname_1, ranking_flname_2):
rankings_1 = read_rankings(ranking_flname_1)
rankings_2 = read_rankings(ranking_flname_2)
thresholds = [0.01, 0.05, 0.1, 0.25, 0.5]
min_len = min(len(rankings_1), len(rankings_2))
for t in thresholds:
cutoff = int(t * min_len) + 1
intersection = set(rankings_1[:cutoff]).intersection(rankings_2[:cutoff])
percentage = 100.0 * float(len(intersection)) / float(cutoff)
print t, percentage
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument("--rankings-fl-1",
type=str,
required=True)
parser.add_argument("--rankings-fl-2",
type=str,
required=True)
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
analyze_rankings(args.rankings_fl_1,
args.rankings_fl_2)
|
<commit_before><commit_msg>Add script for comparing rankings<commit_after>
|
"""
Copyright 2017 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
import os
import matplotlib
matplotlib.use("PDF")
import matplotlib.pyplot as plt
def read_rankings(flname):
rankings = []
with open(flname) as fl:
reader = csv.reader(fl, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for rec in reader:
rankings.append(tuple(rec[:2]))
return rankings
def analyze_rankings(ranking_flname_1, ranking_flname_2):
rankings_1 = read_rankings(ranking_flname_1)
rankings_2 = read_rankings(ranking_flname_2)
thresholds = [0.01, 0.05, 0.1, 0.25, 0.5]
min_len = min(len(rankings_1), len(rankings_2))
for t in thresholds:
cutoff = int(t * min_len) + 1
intersection = set(rankings_1[:cutoff]).intersection(rankings_2[:cutoff])
percentage = 100.0 * float(len(intersection)) / float(cutoff)
print t, percentage
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument("--rankings-fl-1",
type=str,
required=True)
parser.add_argument("--rankings-fl-2",
type=str,
required=True)
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
analyze_rankings(args.rankings_fl_1,
args.rankings_fl_2)
|
Add script for comparing rankings"""
Copyright 2017 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
import os
import matplotlib
matplotlib.use("PDF")
import matplotlib.pyplot as plt
def read_rankings(flname):
rankings = []
with open(flname) as fl:
reader = csv.reader(fl, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for rec in reader:
rankings.append(tuple(rec[:2]))
return rankings
def analyze_rankings(ranking_flname_1, ranking_flname_2):
rankings_1 = read_rankings(ranking_flname_1)
rankings_2 = read_rankings(ranking_flname_2)
thresholds = [0.01, 0.05, 0.1, 0.25, 0.5]
min_len = min(len(rankings_1), len(rankings_2))
for t in thresholds:
cutoff = int(t * min_len) + 1
intersection = set(rankings_1[:cutoff]).intersection(rankings_2[:cutoff])
percentage = 100.0 * float(len(intersection)) / float(cutoff)
print t, percentage
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument("--rankings-fl-1",
type=str,
required=True)
parser.add_argument("--rankings-fl-2",
type=str,
required=True)
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
analyze_rankings(args.rankings_fl_1,
args.rankings_fl_2)
|
<commit_before><commit_msg>Add script for comparing rankings<commit_after>"""
Copyright 2017 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import csv
import os
import matplotlib
matplotlib.use("PDF")
import matplotlib.pyplot as plt
def read_rankings(flname):
rankings = []
with open(flname) as fl:
reader = csv.reader(fl, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for rec in reader:
rankings.append(tuple(rec[:2]))
return rankings
def analyze_rankings(ranking_flname_1, ranking_flname_2):
rankings_1 = read_rankings(ranking_flname_1)
rankings_2 = read_rankings(ranking_flname_2)
thresholds = [0.01, 0.05, 0.1, 0.25, 0.5]
min_len = min(len(rankings_1), len(rankings_2))
for t in thresholds:
cutoff = int(t * min_len) + 1
intersection = set(rankings_1[:cutoff]).intersection(rankings_2[:cutoff])
percentage = 100.0 * float(len(intersection)) / float(cutoff)
print t, percentage
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument("--rankings-fl-1",
type=str,
required=True)
parser.add_argument("--rankings-fl-2",
type=str,
required=True)
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
analyze_rankings(args.rankings_fl_1,
args.rankings_fl_2)
|
|
bc2a6a9bc692c48efd894eaa4dce87a1dbb1b712
|
journal/migrations/0009_auto_20190211_1515.py
|
journal/migrations/0009_auto_20190211_1515.py
|
# Generated by Django 2.1.6 on 2019-02-11 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('journal', '0008_remove_userinfo_deleted'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journal',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journalmember',
name='key',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='pubkey',
field=models.BinaryField(editable=True),
),
]
|
Add a migration for django 2.1.
|
Add a migration for django 2.1.
Before this version django itself ignored the editable directive,
so it now created a migration for it. Though it doesn't really
matter as we marked it as such for drf.
|
Python
|
agpl-3.0
|
etesync/journal-manager
|
Add a migration for django 2.1.
Before this version django itself ignored the editable directive,
so it now created a migration for it. Though it doesn't really
matter as we marked it as such for drf.
|
# Generated by Django 2.1.6 on 2019-02-11 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('journal', '0008_remove_userinfo_deleted'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journal',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journalmember',
name='key',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='pubkey',
field=models.BinaryField(editable=True),
),
]
|
<commit_before><commit_msg>Add a migration for django 2.1.
Before this version django itself ignored the editable directive,
so it now created a migration for it. Though it doesn't really
matter as we marked it as such for drf.<commit_after>
|
# Generated by Django 2.1.6 on 2019-02-11 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('journal', '0008_remove_userinfo_deleted'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journal',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journalmember',
name='key',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='pubkey',
field=models.BinaryField(editable=True),
),
]
|
Add a migration for django 2.1.
Before this version django itself ignored the editable directive,
so it now created a migration for it. Though it doesn't really
matter as we marked it as such for drf.# Generated by Django 2.1.6 on 2019-02-11 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('journal', '0008_remove_userinfo_deleted'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journal',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journalmember',
name='key',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='pubkey',
field=models.BinaryField(editable=True),
),
]
|
<commit_before><commit_msg>Add a migration for django 2.1.
Before this version django itself ignored the editable directive,
so it now created a migration for it. Though it doesn't really
matter as we marked it as such for drf.<commit_after># Generated by Django 2.1.6 on 2019-02-11 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('journal', '0008_remove_userinfo_deleted'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journal',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='journalmember',
name='key',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='content',
field=models.BinaryField(editable=True),
),
migrations.AlterField(
model_name='userinfo',
name='pubkey',
field=models.BinaryField(editable=True),
),
]
|
|
4f2f5c5e8bfda8a2ca383843e604c2faf6fcb817
|
tests/chainer_tests/dataset_tests/test_download.py
|
tests/chainer_tests/dataset_tests/test_download.py
|
import os
import unittest
from chainer import dataset
from chainer import testing
class TestGetSetDatasetRoot(unittest.TestCase):
def test_set_dataset_root(self):
orig_root = dataset.get_dataset_root()
new_root = '/tmp/dataset_root'
try:
dataset.set_dataset_root(new_root)
self.assertEqual(dataset.get_dataset_root(), new_root)
finally:
dataset.set_dataset_root(orig_root)
class TestGetDatasetDirectory(unittest.TestCase):
def test_get_dataset_directory(self):
root = dataset.get_dataset_root()
path = dataset.get_dataset_directory('test', False)
self.assertEqual(path, os.path.join(root, 'test'))
testing.run_module(__name__, __file__)
|
Add some tests on download.py
|
Add some tests on download.py
|
Python
|
mit
|
wkentaro/chainer,niboshi/chainer,jnishi/chainer,okuta/chainer,cupy/cupy,niboshi/chainer,ysekky/chainer,cupy/cupy,pfnet/chainer,kashif/chainer,cupy/cupy,delta2323/chainer,chainer/chainer,anaruse/chainer,chainer/chainer,cupy/cupy,keisuke-umezawa/chainer,wkentaro/chainer,kikusu/chainer,ktnyt/chainer,chainer/chainer,keisuke-umezawa/chainer,wkentaro/chainer,jnishi/chainer,keisuke-umezawa/chainer,jnishi/chainer,kiyukuta/chainer,aonotas/chainer,tkerola/chainer,keisuke-umezawa/chainer,okuta/chainer,okuta/chainer,wkentaro/chainer,hvy/chainer,niboshi/chainer,hvy/chainer,kikusu/chainer,ronekko/chainer,ktnyt/chainer,ktnyt/chainer,chainer/chainer,ktnyt/chainer,rezoo/chainer,hvy/chainer,niboshi/chainer,okuta/chainer,hvy/chainer,jnishi/chainer
|
Add some tests on download.py
|
import os
import unittest
from chainer import dataset
from chainer import testing
class TestGetSetDatasetRoot(unittest.TestCase):
def test_set_dataset_root(self):
orig_root = dataset.get_dataset_root()
new_root = '/tmp/dataset_root'
try:
dataset.set_dataset_root(new_root)
self.assertEqual(dataset.get_dataset_root(), new_root)
finally:
dataset.set_dataset_root(orig_root)
class TestGetDatasetDirectory(unittest.TestCase):
def test_get_dataset_directory(self):
root = dataset.get_dataset_root()
path = dataset.get_dataset_directory('test', False)
self.assertEqual(path, os.path.join(root, 'test'))
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add some tests on download.py<commit_after>
|
import os
import unittest
from chainer import dataset
from chainer import testing
class TestGetSetDatasetRoot(unittest.TestCase):
def test_set_dataset_root(self):
orig_root = dataset.get_dataset_root()
new_root = '/tmp/dataset_root'
try:
dataset.set_dataset_root(new_root)
self.assertEqual(dataset.get_dataset_root(), new_root)
finally:
dataset.set_dataset_root(orig_root)
class TestGetDatasetDirectory(unittest.TestCase):
def test_get_dataset_directory(self):
root = dataset.get_dataset_root()
path = dataset.get_dataset_directory('test', False)
self.assertEqual(path, os.path.join(root, 'test'))
testing.run_module(__name__, __file__)
|
Add some tests on download.pyimport os
import unittest
from chainer import dataset
from chainer import testing
class TestGetSetDatasetRoot(unittest.TestCase):
def test_set_dataset_root(self):
orig_root = dataset.get_dataset_root()
new_root = '/tmp/dataset_root'
try:
dataset.set_dataset_root(new_root)
self.assertEqual(dataset.get_dataset_root(), new_root)
finally:
dataset.set_dataset_root(orig_root)
class TestGetDatasetDirectory(unittest.TestCase):
def test_get_dataset_directory(self):
root = dataset.get_dataset_root()
path = dataset.get_dataset_directory('test', False)
self.assertEqual(path, os.path.join(root, 'test'))
testing.run_module(__name__, __file__)
|
<commit_before><commit_msg>Add some tests on download.py<commit_after>import os
import unittest
from chainer import dataset
from chainer import testing
class TestGetSetDatasetRoot(unittest.TestCase):
def test_set_dataset_root(self):
orig_root = dataset.get_dataset_root()
new_root = '/tmp/dataset_root'
try:
dataset.set_dataset_root(new_root)
self.assertEqual(dataset.get_dataset_root(), new_root)
finally:
dataset.set_dataset_root(orig_root)
class TestGetDatasetDirectory(unittest.TestCase):
def test_get_dataset_directory(self):
root = dataset.get_dataset_root()
path = dataset.get_dataset_directory('test', False)
self.assertEqual(path, os.path.join(root, 'test'))
testing.run_module(__name__, __file__)
|
|
717dc2de1c605190127067263bd8fb1581d7d57d
|
lib/python2.6/aquilon/server/locks.py
|
lib/python2.6/aquilon/server/locks.py
|
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
import logging
from threading import Condition, Lock
from aquilon.locks import LockQueue, LockKey
from aquilon.exceptions_ import InternalError
from aquilon.server.logger import CLIENT_INFO
LOGGER = logging.getLogger('aquilon.server.locks')
# Single instance of the LockQueue that should be used by any code
# in the broker.
lock_queue = LockQueue()
# The concept of a "compile" lock somewhat oversimplifies.
# Broadly speaking there are four phases:
# 1 - Read plenary templates and profile templates for comparison.
# 2 - Write changed plenary templates and profile templates.
# 3 - Read relevant plenary, profile, cached profile, and domain
# templates for compile.
# 4 - Copy (write) compiled profiles and cache profile templates.
#
# If in phase one it is determined that a template will not be
# changed and a certain lock is not needed, then that template
# must not be rewritten in phase two.
#
# The compile lock is described in terms of output profiles,
# which really only matter in phase four. This seems like a
# relatively sane approach as generally the profile is the thing
# we actually care about being in a good state.
class CompileKey(LockKey):
def __init__(self, domain=None, profile=None,
logger=LOGGER, loglevel=CLIENT_INFO):
"""Define the desired compile lock with a domain and a host.
A profile could be a host or a cluster.
"""
self.domain = domain
self.profile = profile
components = ["compile"]
if self.domain:
components.append(self.domain)
if self.profile:
components.append(self.profile)
elif self.profile:
raise InternalError("Compile lock request for %s missing domain." %
self.profile)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class DeleteKey(LockKey):
"""Use when a broad deletion lock is required."""
def __init__(self, group=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.group = group
components = ["delete"]
if self.group:
components.append(self.group)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class SyncKey(LockKey):
"""Locks used by the refresh commands."""
def __init__(self, data=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.data = data
components = ["sync"]
if self.data:
components.append(self.data)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
|
Introduce the LockQueue into the broker.
|
Introduce the LockQueue into the broker.
The aquilon.server.locks module instantiates a lock_queue for use
by the broker.
It also defines a number of keys that can be used by the broker.
Each of these keys carves out a swath of the namespace available
and defines the layers of the key.
|
Python
|
apache-2.0
|
quattor/aquilon,quattor/aquilon,guillaume-philippon/aquilon,guillaume-philippon/aquilon,stdweird/aquilon,guillaume-philippon/aquilon,stdweird/aquilon,stdweird/aquilon,quattor/aquilon
|
Introduce the LockQueue into the broker.
The aquilon.server.locks module instantiates a lock_queue for use
by the broker.
It also defines a number of keys that can be used by the broker.
Each of these keys carves out a swath of the namespace available
and defines the layers of the key.
|
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
import logging
from threading import Condition, Lock
from aquilon.locks import LockQueue, LockKey
from aquilon.exceptions_ import InternalError
from aquilon.server.logger import CLIENT_INFO
LOGGER = logging.getLogger('aquilon.server.locks')
# Single instance of the LockQueue that should be used by any code
# in the broker.
lock_queue = LockQueue()
# The concept of a "compile" lock somewhat oversimplifies.
# Broadly speaking there are four phases:
# 1 - Read plenary templates and profile templates for comparison.
# 2 - Write changed plenary templates and profile templates.
# 3 - Read relevant plenary, profile, cached profile, and domain
# templates for compile.
# 4 - Copy (write) compiled profiles and cache profile templates.
#
# If in phase one it is determined that a template will not be
# changed and a certain lock is not needed, then that template
# must not be rewritten in phase two.
#
# The compile lock is described in terms of output profiles,
# which really only matter in phase four. This seems like a
# relatively sane approach as generally the profile is the thing
# we actually care about being in a good state.
class CompileKey(LockKey):
def __init__(self, domain=None, profile=None,
logger=LOGGER, loglevel=CLIENT_INFO):
"""Define the desired compile lock with a domain and a host.
A profile could be a host or a cluster.
"""
self.domain = domain
self.profile = profile
components = ["compile"]
if self.domain:
components.append(self.domain)
if self.profile:
components.append(self.profile)
elif self.profile:
raise InternalError("Compile lock request for %s missing domain." %
self.profile)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class DeleteKey(LockKey):
"""Use when a broad deletion lock is required."""
def __init__(self, group=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.group = group
components = ["delete"]
if self.group:
components.append(self.group)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class SyncKey(LockKey):
"""Locks used by the refresh commands."""
def __init__(self, data=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.data = data
components = ["sync"]
if self.data:
components.append(self.data)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
|
<commit_before><commit_msg>Introduce the LockQueue into the broker.
The aquilon.server.locks module instantiates a lock_queue for use
by the broker.
It also defines a number of keys that can be used by the broker.
Each of these keys carves out a swath of the namespace available
and defines the layers of the key.<commit_after>
|
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
import logging
from threading import Condition, Lock
from aquilon.locks import LockQueue, LockKey
from aquilon.exceptions_ import InternalError
from aquilon.server.logger import CLIENT_INFO
LOGGER = logging.getLogger('aquilon.server.locks')
# Single instance of the LockQueue that should be used by any code
# in the broker.
lock_queue = LockQueue()
# The concept of a "compile" lock somewhat oversimplifies.
# Broadly speaking there are four phases:
# 1 - Read plenary templates and profile templates for comparison.
# 2 - Write changed plenary templates and profile templates.
# 3 - Read relevant plenary, profile, cached profile, and domain
# templates for compile.
# 4 - Copy (write) compiled profiles and cache profile templates.
#
# If in phase one it is determined that a template will not be
# changed and a certain lock is not needed, then that template
# must not be rewritten in phase two.
#
# The compile lock is described in terms of output profiles,
# which really only matter in phase four. This seems like a
# relatively sane approach as generally the profile is the thing
# we actually care about being in a good state.
class CompileKey(LockKey):
def __init__(self, domain=None, profile=None,
logger=LOGGER, loglevel=CLIENT_INFO):
"""Define the desired compile lock with a domain and a host.
A profile could be a host or a cluster.
"""
self.domain = domain
self.profile = profile
components = ["compile"]
if self.domain:
components.append(self.domain)
if self.profile:
components.append(self.profile)
elif self.profile:
raise InternalError("Compile lock request for %s missing domain." %
self.profile)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class DeleteKey(LockKey):
"""Use when a broad deletion lock is required."""
def __init__(self, group=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.group = group
components = ["delete"]
if self.group:
components.append(self.group)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class SyncKey(LockKey):
"""Locks used by the refresh commands."""
def __init__(self, data=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.data = data
components = ["sync"]
if self.data:
components.append(self.data)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
|
Introduce the LockQueue into the broker.
The aquilon.server.locks module instantiates a lock_queue for use
by the broker.
It also defines a number of keys that can be used by the broker.
Each of these keys carves out a swath of the namespace available
and defines the layers of the key.# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
import logging
from threading import Condition, Lock
from aquilon.locks import LockQueue, LockKey
from aquilon.exceptions_ import InternalError
from aquilon.server.logger import CLIENT_INFO
LOGGER = logging.getLogger('aquilon.server.locks')
# Single instance of the LockQueue that should be used by any code
# in the broker.
lock_queue = LockQueue()
# The concept of a "compile" lock somewhat oversimplifies.
# Broadly speaking there are four phases:
# 1 - Read plenary templates and profile templates for comparison.
# 2 - Write changed plenary templates and profile templates.
# 3 - Read relevant plenary, profile, cached profile, and domain
# templates for compile.
# 4 - Copy (write) compiled profiles and cache profile templates.
#
# If in phase one it is determined that a template will not be
# changed and a certain lock is not needed, then that template
# must not be rewritten in phase two.
#
# The compile lock is described in terms of output profiles,
# which really only matter in phase four. This seems like a
# relatively sane approach as generally the profile is the thing
# we actually care about being in a good state.
class CompileKey(LockKey):
def __init__(self, domain=None, profile=None,
logger=LOGGER, loglevel=CLIENT_INFO):
"""Define the desired compile lock with a domain and a host.
A profile could be a host or a cluster.
"""
self.domain = domain
self.profile = profile
components = ["compile"]
if self.domain:
components.append(self.domain)
if self.profile:
components.append(self.profile)
elif self.profile:
raise InternalError("Compile lock request for %s missing domain." %
self.profile)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class DeleteKey(LockKey):
"""Use when a broad deletion lock is required."""
def __init__(self, group=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.group = group
components = ["delete"]
if self.group:
components.append(self.group)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class SyncKey(LockKey):
"""Locks used by the refresh commands."""
def __init__(self, data=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.data = data
components = ["sync"]
if self.data:
components.append(self.data)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
|
<commit_before><commit_msg>Introduce the LockQueue into the broker.
The aquilon.server.locks module instantiates a lock_queue for use
by the broker.
It also defines a number of keys that can be used by the broker.
Each of these keys carves out a swath of the namespace available
and defines the layers of the key.<commit_after># ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
import logging
from threading import Condition, Lock
from aquilon.locks import LockQueue, LockKey
from aquilon.exceptions_ import InternalError
from aquilon.server.logger import CLIENT_INFO
LOGGER = logging.getLogger('aquilon.server.locks')
# Single instance of the LockQueue that should be used by any code
# in the broker.
lock_queue = LockQueue()
# The concept of a "compile" lock somewhat oversimplifies.
# Broadly speaking there are four phases:
# 1 - Read plenary templates and profile templates for comparison.
# 2 - Write changed plenary templates and profile templates.
# 3 - Read relevant plenary, profile, cached profile, and domain
# templates for compile.
# 4 - Copy (write) compiled profiles and cache profile templates.
#
# If in phase one it is determined that a template will not be
# changed and a certain lock is not needed, then that template
# must not be rewritten in phase two.
#
# The compile lock is described in terms of output profiles,
# which really only matter in phase four. This seems like a
# relatively sane approach as generally the profile is the thing
# we actually care about being in a good state.
class CompileKey(LockKey):
def __init__(self, domain=None, profile=None,
logger=LOGGER, loglevel=CLIENT_INFO):
"""Define the desired compile lock with a domain and a host.
A profile could be a host or a cluster.
"""
self.domain = domain
self.profile = profile
components = ["compile"]
if self.domain:
components.append(self.domain)
if self.profile:
components.append(self.profile)
elif self.profile:
raise InternalError("Compile lock request for %s missing domain." %
self.profile)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class DeleteKey(LockKey):
"""Use when a broad deletion lock is required."""
def __init__(self, group=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.group = group
components = ["delete"]
if self.group:
components.append(self.group)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
class SyncKey(LockKey):
"""Locks used by the refresh commands."""
def __init__(self, data=None, logger=LOGGER, loglevel=CLIENT_INFO):
self.data = data
components = ["sync"]
if self.data:
components.append(self.data)
LockKey.__init__(self, components, logger=logger, loglevel=loglevel)
|
|
5ca2e6cbb7056d0bc25ee52815e0742b4f13a703
|
tests/functional/test_six_imports.py
|
tests/functional/test_six_imports.py
|
import os
import botocore
import ast
ROOTDIR = os.path.dirname(botocore.__file__)
def test_no_bare_six_imports():
for rootdir, dirnames, filenames in os.walk(ROOTDIR):
if 'vendored' in dirnames:
# We don't need to lint our vendored packages.
dirnames.remove('vendored')
for filename in filenames:
if not filename.endswith('.py'):
continue
fullname = os.path.join(rootdir, filename)
yield _assert_no_bare_six_imports, fullname
def _assert_no_bare_six_imports(filename):
with open(filename) as f:
contents = f.read()
parsed = ast.parse(contents, filename)
checker = SixImportChecker(filename).visit(parsed)
class SixImportChecker(ast.NodeVisitor):
def __init__(self, filename):
self.filename = filename
def visit_Import(self, node):
for alias in node.names:
if getattr(alias, 'name', '') == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'import six' was found in %s:\n"
"\n%s: %s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def visit_ImportFrom(self, node):
if node.module == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'from six import ...' was found in %s:\n"
"\n%s:%s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def _get_line_content(self, filename, lineno):
with open(filename) as f:
contents = f.readlines()
return contents[lineno - 1]
|
Add linting for bare six imports
|
Add linting for bare six imports
|
Python
|
apache-2.0
|
pplu/botocore,boto/botocore
|
Add linting for bare six imports
|
import os
import botocore
import ast
ROOTDIR = os.path.dirname(botocore.__file__)
def test_no_bare_six_imports():
for rootdir, dirnames, filenames in os.walk(ROOTDIR):
if 'vendored' in dirnames:
# We don't need to lint our vendored packages.
dirnames.remove('vendored')
for filename in filenames:
if not filename.endswith('.py'):
continue
fullname = os.path.join(rootdir, filename)
yield _assert_no_bare_six_imports, fullname
def _assert_no_bare_six_imports(filename):
with open(filename) as f:
contents = f.read()
parsed = ast.parse(contents, filename)
checker = SixImportChecker(filename).visit(parsed)
class SixImportChecker(ast.NodeVisitor):
def __init__(self, filename):
self.filename = filename
def visit_Import(self, node):
for alias in node.names:
if getattr(alias, 'name', '') == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'import six' was found in %s:\n"
"\n%s: %s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def visit_ImportFrom(self, node):
if node.module == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'from six import ...' was found in %s:\n"
"\n%s:%s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def _get_line_content(self, filename, lineno):
with open(filename) as f:
contents = f.readlines()
return contents[lineno - 1]
|
<commit_before><commit_msg>Add linting for bare six imports<commit_after>
|
import os
import botocore
import ast
ROOTDIR = os.path.dirname(botocore.__file__)
def test_no_bare_six_imports():
for rootdir, dirnames, filenames in os.walk(ROOTDIR):
if 'vendored' in dirnames:
# We don't need to lint our vendored packages.
dirnames.remove('vendored')
for filename in filenames:
if not filename.endswith('.py'):
continue
fullname = os.path.join(rootdir, filename)
yield _assert_no_bare_six_imports, fullname
def _assert_no_bare_six_imports(filename):
with open(filename) as f:
contents = f.read()
parsed = ast.parse(contents, filename)
checker = SixImportChecker(filename).visit(parsed)
class SixImportChecker(ast.NodeVisitor):
def __init__(self, filename):
self.filename = filename
def visit_Import(self, node):
for alias in node.names:
if getattr(alias, 'name', '') == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'import six' was found in %s:\n"
"\n%s: %s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def visit_ImportFrom(self, node):
if node.module == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'from six import ...' was found in %s:\n"
"\n%s:%s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def _get_line_content(self, filename, lineno):
with open(filename) as f:
contents = f.readlines()
return contents[lineno - 1]
|
Add linting for bare six importsimport os
import botocore
import ast
ROOTDIR = os.path.dirname(botocore.__file__)
def test_no_bare_six_imports():
for rootdir, dirnames, filenames in os.walk(ROOTDIR):
if 'vendored' in dirnames:
# We don't need to lint our vendored packages.
dirnames.remove('vendored')
for filename in filenames:
if not filename.endswith('.py'):
continue
fullname = os.path.join(rootdir, filename)
yield _assert_no_bare_six_imports, fullname
def _assert_no_bare_six_imports(filename):
with open(filename) as f:
contents = f.read()
parsed = ast.parse(contents, filename)
checker = SixImportChecker(filename).visit(parsed)
class SixImportChecker(ast.NodeVisitor):
def __init__(self, filename):
self.filename = filename
def visit_Import(self, node):
for alias in node.names:
if getattr(alias, 'name', '') == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'import six' was found in %s:\n"
"\n%s: %s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def visit_ImportFrom(self, node):
if node.module == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'from six import ...' was found in %s:\n"
"\n%s:%s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def _get_line_content(self, filename, lineno):
with open(filename) as f:
contents = f.readlines()
return contents[lineno - 1]
|
<commit_before><commit_msg>Add linting for bare six imports<commit_after>import os
import botocore
import ast
ROOTDIR = os.path.dirname(botocore.__file__)
def test_no_bare_six_imports():
for rootdir, dirnames, filenames in os.walk(ROOTDIR):
if 'vendored' in dirnames:
# We don't need to lint our vendored packages.
dirnames.remove('vendored')
for filename in filenames:
if not filename.endswith('.py'):
continue
fullname = os.path.join(rootdir, filename)
yield _assert_no_bare_six_imports, fullname
def _assert_no_bare_six_imports(filename):
with open(filename) as f:
contents = f.read()
parsed = ast.parse(contents, filename)
checker = SixImportChecker(filename).visit(parsed)
class SixImportChecker(ast.NodeVisitor):
def __init__(self, filename):
self.filename = filename
def visit_Import(self, node):
for alias in node.names:
if getattr(alias, 'name', '') == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'import six' was found in %s:\n"
"\n%s: %s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def visit_ImportFrom(self, node):
if node.module == 'six':
line = self._get_line_content(self.filename, node.lineno)
raise AssertionError(
"A bare 'from six import ...' was found in %s:\n"
"\n%s:%s\n"
"Please use 'from botocore.compat import six' instead" %
(self.filename, node.lineno, line))
def _get_line_content(self, filename, lineno):
with open(filename) as f:
contents = f.readlines()
return contents[lineno - 1]
|
|
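The checker in this record fails a test as soon as it sees a bare six import. A minimal sketch of exercising SixImportChecker directly, outside the nose generator — the temporary-file setup is illustrative, not part of the commit:
import ast
import tempfile
# Write a tiny module containing the forbidden import, then visit it
# exactly the way _assert_no_bare_six_imports does.
with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as handle:
    handle.write("import six\n")
    path = handle.name
with open(path) as handle:
    tree = ast.parse(handle.read(), path)
try:
    SixImportChecker(path).visit(tree)  # class defined in the test module above
except AssertionError as error:
    print(error)  # message suggests 'from botocore.compat import six' instead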
c172d9a13e3abde902f743b2179bc396248d9fa9
|
magpy/server/_ejdb.py
|
magpy/server/_ejdb.py
|
"""EJDB driver for Magpy.
Currently does not do much except support certain command line scripts.
"""
import pyejdb
class Database(object):
"""Simple database connection for use in serverside scripts etc."""
def __init__(self,
database_name=None,
config_file=None):
self.config_file = config_file
self._database_name = database_name
self._filename = '%s.tct' % database_name
self.database = pyejdb.EJDB(self._filename,
pyejdb.DEFAULT_OPEN_MODE |
pyejdb.JBOTRUNC)
def get_collection(self, collection):
"""Get a collection by name."""
return Collection(collection, self)
def drop_collections(self, collections):
"""Drop a named tuple or list of collections."""
for collection in collections:
self.database.dropCollection(collection)
class Collection(object):
"""A mongodb style collection object."""
def __init__(self, name, database):
self.database = database
self.name = name
def find(self, collection):
"""Find instances from a collection."""
return self.database.database.find(self.name, collection)
def save(self, instance):
"""Save an instance."""
self.database.database.save(self.name, instance)
|
Add a little support for ejdb.
|
Add a little support for ejdb.
|
Python
|
bsd-3-clause
|
zeth/magpy,catsmith/magpy,zeth/magpy,catsmith/magpy
|
Add a little support for ejdb.
|
"""EJDB driver for Magpy.
Currently does not do much except support certain command line scripts.
"""
import pyejdb
class Database(object):
"""Simple database connection for use in serverside scripts etc."""
def __init__(self,
database_name=None,
config_file=None):
self.config_file = config_file
self._database_name = database_name
self._filename = '%s.tct' % database_name
self.database = pyejdb.EJDB(self._filename,
pyejdb.DEFAULT_OPEN_MODE |
pyejdb.JBOTRUNC)
def get_collection(self, collection):
"""Get a collection by name."""
return Collection(collection, self)
def drop_collections(self, collections):
"""Drop a named tuple or list of collections."""
for collection in collections:
self.database.dropCollection(collection)
class Collection(object):
"""A mongodb style collection object."""
def __init__(self, name, database):
self.database = database
self.name = name
def find(self, collection):
"""Find instances from a collection."""
return self.database.database.find(self.name, collection)
def save(self, instance):
"""Save an instance."""
self.database.database.save(self.name, instance)
|
<commit_before><commit_msg>Add a little support for ejdb.<commit_after>
|
"""EJDB driver for Magpy.
Currently does not do much except support certain command line scripts.
"""
import pyejdb
class Database(object):
"""Simple database connection for use in serverside scripts etc."""
def __init__(self,
database_name=None,
config_file=None):
self.config_file = config_file
self._database_name = database_name
self._filename = '%s.tct' % database_name
self.database = pyejdb.EJDB(self._filename,
pyejdb.DEFAULT_OPEN_MODE |
pyejdb.JBOTRUNC)
def get_collection(self, collection):
"""Get a collection by name."""
return Collection(collection, self)
def drop_collections(self, collections):
"""Drop a named tuple or list of collections."""
for collection in collections:
self.database.dropCollection(collection)
class Collection(object):
"""A mongodb style collection object."""
def __init__(self, name, database):
self.database = database
self.name = name
def find(self, collection):
"""Find instances from a collection."""
return self.database.database.find(self.name, collection)
def save(self, instance):
"""Save an instance."""
self.database.database.save(self.name, instance)
|
Add a little support for ejdb."""EJDB driver for Magpy.
Currently does not do much except support certain command line scripts.
"""
import pyejdb
class Database(object):
"""Simple database connection for use in serverside scripts etc."""
def __init__(self,
database_name=None,
config_file=None):
self.config_file = config_file
self._database_name = database_name
self._filename = '%s.tct' % database_name
self.database = pyejdb.EJDB(self._filename,
pyejdb.DEFAULT_OPEN_MODE |
pyejdb.JBOTRUNC)
def get_collection(self, collection):
"""Get a collection by name."""
return Collection(collection, self)
def drop_collections(self, collections):
"""Drop a named tuple or list of collections."""
for collection in collections:
self.database.dropCollection(collection)
class Collection(object):
"""A mongodb style collection object."""
def __init__(self, name, database):
self.database = database
self.name = name
def find(self, collection):
"""Find instances from a collection."""
return self.database.database.find(self.name, collection)
def save(self, instance):
"""Save an instance."""
self.database.database.save(self.name, instance)
|
<commit_before><commit_msg>Add a little support for ejdb.<commit_after>"""EJDB driver for Magpy.
Currently does not do much except support certain command line scripts.
"""
import pyejdb
class Database(object):
"""Simple database connection for use in serverside scripts etc."""
def __init__(self,
database_name=None,
config_file=None):
self.config_file = config_file
self._database_name = database_name
self._filename = '%s.tct' % database_name
self.database = pyejdb.EJDB(self._filename,
pyejdb.DEFAULT_OPEN_MODE |
pyejdb.JBOTRUNC)
def get_collection(self, collection):
"""Get a collection by name."""
return Collection(collection, self)
def drop_collections(self, collections):
"""Drop a named tuple or list of collections."""
for collection in collections:
self.database.dropCollection(collection)
class Collection(object):
"""A mongodb style collection object."""
def __init__(self, name, database):
self.database = database
self.name = name
def find(self, collection):
"""Find instances from a collection."""
return self.database.database.find(self.name, collection)
def save(self, instance):
"""Save an instance."""
self.database.database.save(self.name, instance)
|
|
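A hedged usage sketch for the wrapper above; it assumes pyejdb is installed, and the database and collection names are illustrative:
# Open (and truncate) an EJDB file, then work with one collection.
db = Database(database_name='magpy_example')
articles = db.get_collection('articles')
articles.save({'title': 'Hello', 'body': 'First post'})
for document in articles.find({}):  # the argument is the query, proxied to pyejdb's find()
    print(document)
db.drop_collections(['articles'])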
70c69291358a606a21d77d7ae9a9270e98438ddc
|
mapnik/conversions.py
|
mapnik/conversions.py
|
"""Unit conversion helpers."""
def m2pt(x, pt_size=0.0254/72.0):
"""Converts distance from meters to points. Default value is PDF point size."""
return x / pt_size
def pt2m(x, pt_size=0.0254/72.0):
"""Converts distance from points to meters. Default value is PDF point size."""
return x * pt_size
def m2in(x):
"""Converts distance from meters to inches."""
return x / 0.0254
def m2px(x, resolution):
"""Converts distance from meters to pixels at the given resolution in DPI/PPI."""
return m2in(x) * resolution
|
Move the conversion helpers into a separate module
|
Move the conversion helpers into a separate module
No change in behaviour except more flexibility for m2pt pt_size
|
Python
|
lgpl-2.1
|
tomhughes/python-mapnik,mapnik/python-mapnik,tomhughes/python-mapnik,tomhughes/python-mapnik,mapnik/python-mapnik,mapnik/python-mapnik
|
Move the conversion helpers into a separate module
No change in behaviour except more flexibility for m2pt pt_size
|
"""Unit conversion helpers."""
def m2pt(x, pt_size=0.0254/72.0):
"""Converts distance from meters to points. Default value is PDF point size."""
return x / pt_size
def pt2m(x, pt_size=0.0254/72.0):
"""Converts distance from points to meters. Default value is PDF point size."""
return x * pt_size
def m2in(x):
"""Converts distance from meters to inches."""
return x / 0.0254
def m2px(x, resolution):
"""Converts distance from meters to pixels at the given resolution in DPI/PPI."""
return m2in(x) * resolution
|
<commit_before><commit_msg>Move the conversion helpers into a separate module
No change in behaviour except more flexibility for m2pt pt_size<commit_after>
|
"""Unit conversion helpers."""
def m2pt(x, pt_size=0.0254/72.0):
"""Converts distance from meters to points. Default value is PDF point size."""
return x / pt_size
def pt2m(x, pt_size=0.0254/72.0):
"""Converts distance from points to meters. Default value is PDF point size."""
return x * pt_size
def m2in(x):
"""Converts distance from meters to inches."""
return x / 0.0254
def m2px(x, resolution):
"""Converts distance from meters to pixels at the given resolution in DPI/PPI."""
return m2in(x) * resolution
|
Move the conversion helpers into a separate module
No change in behaviour except more flexibility for m2pt pt_size"""Unit conversion helpers."""
def m2pt(x, pt_size=0.0254/72.0):
"""Converts distance from meters to points. Default value is PDF point size."""
return x / pt_size
def pt2m(x, pt_size=0.0254/72.0):
"""Converts distance from points to meters. Default value is PDF point size."""
return x * pt_size
def m2in(x):
"""Converts distance from meters to inches."""
return x / 0.0254
def m2px(x, resolution):
"""Converts distance from meters to pixels at the given resolution in DPI/PPI."""
return m2in(x) * resolution
|
<commit_before><commit_msg>Move the conversion helpers into a separate module
No change in behaviour except more flexibility for m2pt pt_size<commit_after>"""Unit conversion helpers."""
def m2pt(x, pt_size=0.0254/72.0):
"""Converts distance from meters to points. Default value is PDF point size."""
return x / pt_size
def pt2m(x, pt_size=0.0254/72.0):
"""Converts distance from points to meters. Default value is PDF point size."""
return x * pt_size
def m2in(x):
"""Converts distance from meters to inches."""
return x / 0.0254
def m2px(x, resolution):
"""Converts distance from meters to pixels at the given resolution in DPI/PPI."""
return m2in(x) * resolution
|
|
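Quick sanity checks for the helpers above, using one inch (0.0254 m) as the round number:
print(m2pt(0.0254))        # one inch -> 72.0 PDF points
print(pt2m(72.0))          # 72 points -> 0.0254 metres
print(m2in(0.0254))        # -> 1.0 inch
print(m2px(0.0254, 300))   # one inch at 300 DPI -> 300.0 pixels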
1757b2fef3d617338bfc07802eaa8ac6e75c14e9
|
raiden/tests/integration/cli/conftest.py
|
raiden/tests/integration/cli/conftest.py
|
import os
import pytest
from raiden.tests.utils.smoketest import (
load_smoketest_config,
start_ethereum
)
@pytest.fixture(scope='session')
def blockchain_provider():
print("fixture init")
smoketest_config = load_smoketest_config()
ethereum, ethereum_config = start_ethereum(smoketest_config['genesis'])
discovery_contract_address = smoketest_config['contracts']['discovery_address']
registry_contract_address = smoketest_config['contracts']['registry_address']
eth_rpc_endpoint = 'http://127.0.0.1:{}'.format(ethereum_config['rpc'])
keystore_path = ethereum_config['keystore']
datadir_path = os.path.split(keystore_path)[0]
network_id = '627'
password_file_path = os.path.join(keystore_path, 'password')
with open(password_file_path, 'w') as handler:
handler.write('password')
return {'ethereum': ethereum,
'ethereum_config': ethereum_config,
'discovery_contract_address': discovery_contract_address,
'registry_contract_address': registry_contract_address,
'eth_rpc_endpoint': eth_rpc_endpoint,
'keystore_path': keystore_path,
'datadir_path': datadir_path,
'password_file_path': password_file_path,
'network_id': network_id}
|
Add CLI tests session scoped fixture.
|
tests/integration/cli: Add CLI tests session scoped fixture.
The test fixture starts the blockchain with preconfigured
smoketest network, retrieves contract addresses and returns
an object to be accessed by test cases.
Refs: #1408
|
Python
|
mit
|
hackaugusto/raiden,hackaugusto/raiden
|
tests/integration/cli: Add CLI tests session scoped fixture.
The test fixture starts the blockchain with preconfigured
smoketest network, retrieves contract addresses and returns
an object to be accessed by test cases.
Refs: #1408
|
import os
import pytest
from raiden.tests.utils.smoketest import (
load_smoketest_config,
start_ethereum
)
@pytest.fixture(scope='session')
def blockchain_provider():
print("fixture init")
smoketest_config = load_smoketest_config()
ethereum, ethereum_config = start_ethereum(smoketest_config['genesis'])
discovery_contract_address = smoketest_config['contracts']['discovery_address']
registry_contract_address = smoketest_config['contracts']['registry_address']
eth_rpc_endpoint = 'http://127.0.0.1:{}'.format(ethereum_config['rpc'])
keystore_path = ethereum_config['keystore']
datadir_path = os.path.split(keystore_path)[0]
network_id = '627'
password_file_path = os.path.join(keystore_path, 'password')
with open(password_file_path, 'w') as handler:
handler.write('password')
return {'ethereum': ethereum,
'ethereum_config': ethereum_config,
'discovery_contract_address': discovery_contract_address,
'registry_contract_address': registry_contract_address,
'eth_rpc_endpoint': eth_rpc_endpoint,
'keystore_path': keystore_path,
'datadir_path': datadir_path,
'password_file_path': password_file_path,
'network_id': network_id}
|
<commit_before><commit_msg>tests/integration/cli: Add CLI tests session scoped fixture.
The test fixture starts the blockchain with preconfigured
smoketest network, retrieves contract addresses and returns
an object to be accessed by test cases.
Refs: #1408<commit_after>
|
import os
import pytest
from raiden.tests.utils.smoketest import (
load_smoketest_config,
start_ethereum
)
@pytest.fixture(scope='session')
def blockchain_provider():
print("fixture init")
smoketest_config = load_smoketest_config()
ethereum, ethereum_config = start_ethereum(smoketest_config['genesis'])
discovery_contract_address = smoketest_config['contracts']['discovery_address']
registry_contract_address = smoketest_config['contracts']['registry_address']
eth_rpc_endpoint = 'http://127.0.0.1:{}'.format(ethereum_config['rpc'])
keystore_path = ethereum_config['keystore']
datadir_path = os.path.split(keystore_path)[0]
network_id = '627'
password_file_path = os.path.join(keystore_path, 'password')
with open(password_file_path, 'w') as handler:
handler.write('password')
return {'ethereum': ethereum,
'ethereum_config': ethereum_config,
'discovery_contract_address': discovery_contract_address,
'registry_contract_address': registry_contract_address,
'eth_rpc_endpoint': eth_rpc_endpoint,
'keystore_path': keystore_path,
'datadir_path': datadir_path,
'password_file_path': password_file_path,
'network_id': network_id}
|
tests/integration/cli: Add CLI tests session scoped fixture.
The test fixture starts the blockchain with preconfigured
smoketest network, retrieves contract addresses and returns
an object to be accessed by test cases.
Refs: #1408import os
import pytest
from raiden.tests.utils.smoketest import (
load_smoketest_config,
start_ethereum
)
@pytest.fixture(scope='session')
def blockchain_provider():
print("fixture init")
smoketest_config = load_smoketest_config()
ethereum, ethereum_config = start_ethereum(smoketest_config['genesis'])
discovery_contract_address = smoketest_config['contracts']['discovery_address']
registry_contract_address = smoketest_config['contracts']['registry_address']
eth_rpc_endpoint = 'http://127.0.0.1:{}'.format(ethereum_config['rpc'])
keystore_path = ethereum_config['keystore']
datadir_path = os.path.split(keystore_path)[0]
network_id = '627'
password_file_path = os.path.join(keystore_path, 'password')
with open(password_file_path, 'w') as handler:
handler.write('password')
return {'ethereum': ethereum,
'ethereum_config': ethereum_config,
'discovery_contract_address': discovery_contract_address,
'registry_contract_address': registry_contract_address,
'eth_rpc_endpoint': eth_rpc_endpoint,
'keystore_path': keystore_path,
'datadir_path': datadir_path,
'password_file_path': password_file_path,
'network_id': network_id}
|
<commit_before><commit_msg>tests/integration/cli: Add CLI tests session scoped fixture.
The test fixture starts the blockchain with preconfigured
smoketest network, retrieves contract addresses and returns
an object to be accessed by test cases.
Refs: #1408<commit_after>import os
import pytest
from raiden.tests.utils.smoketest import (
load_smoketest_config,
start_ethereum
)
@pytest.fixture(scope='session')
def blockchain_provider():
print("fixture init")
smoketest_config = load_smoketest_config()
ethereum, ethereum_config = start_ethereum(smoketest_config['genesis'])
discovery_contract_address = smoketest_config['contracts']['discovery_address']
registry_contract_address = smoketest_config['contracts']['registry_address']
eth_rpc_endpoint = 'http://127.0.0.1:{}'.format(ethereum_config['rpc'])
keystore_path = ethereum_config['keystore']
datadir_path = os.path.split(keystore_path)[0]
network_id = '627'
password_file_path = os.path.join(keystore_path, 'password')
with open(password_file_path, 'w') as handler:
handler.write('password')
return {'ethereum': ethereum,
'ethereum_config': ethereum_config,
'discovery_contract_address': discovery_contract_address,
'registry_contract_address': registry_contract_address,
'eth_rpc_endpoint': eth_rpc_endpoint,
'keystore_path': keystore_path,
'datadir_path': datadir_path,
'password_file_path': password_file_path,
'network_id': network_id}
|
|
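A hedged sketch of a test consuming the session-scoped fixture above; the test name and assertion targets are illustrative, not from the commit:
import os
def test_provider_exposes_contracts(blockchain_provider):
    # pytest injects the dict returned by the fixture.
    assert blockchain_provider['eth_rpc_endpoint'].startswith('http://127.0.0.1:')
    assert blockchain_provider['network_id'] == '627'
    assert os.path.exists(blockchain_provider['password_file_path'])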
0eb9964f42f74596b38d9031b329f8ce2e0c1e9f
|
directory_lister.py
|
directory_lister.py
|
#Test to see recursive directory lister
import os
def lister(dir_name):
os.chdir(dir_name)
dirs = os.listdir(os.getcwd())
for file in dirs:
if not os.path.isdir(file):
print(file + "Location: "+os.getcwd())
else:
lister(file)
all_files = []
dir_name = input("Directory: ")
lister(dir_name)
print(all_files)
|
Test for a recursive directory lister.
|
Test for a recursive directory lister.
|
Python
|
mit
|
cvhariharan/Offline,cvhariharan/Offline,cvhariharan/Offline
|
Test for a recursive directory lister.
|
#Test to see recursive directory lister
import os
def lister(dir_name):
os.chdir(dir_name)
dirs = os.listdir(os.getcwd())
for file in dirs:
if not os.path.isdir(file):
print(file + "Location: "+os.getcwd())
else:
lister(file)
all_files = []
dir_name = input("Directory: ")
lister(dir_name)
print(all_files)
|
<commit_before><commit_msg>Test for a recursive directory lister.<commit_after>
|
#Test to see recursive directory lister
import os
def lister(dir_name):
os.chdir(dir_name)
dirs = os.listdir(os.getcwd())
for file in dirs:
if not os.path.isdir(file):
print(file + "Location: "+os.getcwd())
else:
lister(file)
all_files = []
dir_name = input("Directory: ")
lister(dir_name)
print(all_files)
|
Test for a recursive directory lister.#Test to see recursive directory lister
import os
def lister(dir_name):
os.chdir(dir_name)
dirs = os.listdir(os.getcwd())
for file in dirs:
if not os.path.isdir(file):
print(file + "Location: "+os.getcwd())
else:
lister(file)
all_files = []
dir_name = input("Directory: ")
lister(dir_name)
print(all_files)
|
<commit_before><commit_msg>Test for a recursive directory lister.<commit_after>#Test to see recursive directory lister
import os
def lister(dir_name):
os.chdir(dir_name)
dirs = os.listdir(os.getcwd())
for file in dirs:
if not os.path.isdir(file):
print(file + "Location: "+os.getcwd())
else:
lister(file)
all_files = []
dir_name = input("Directory: ")
lister(dir_name)
print(all_files)
|
|
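Note that all_files is never filled and the recursion above relies on chdir with relative names, so nested directories can trip it up. A hedged alternative sketch using os.walk collects the paths without changing directories:
import os
def list_files(dir_name):
    collected = []
    for root, _dirs, files in os.walk(dir_name):
        for name in files:
            print(name + " Location: " + root)
            collected.append(os.path.join(root, name))
    return collected
print(list_files(input("Directory: ")))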
7598bb04c2826f5f6bbcf8ff26f9a799ded7cbe3
|
tests/nose_plugins.py
|
tests/nose_plugins.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from nose.plugins import Plugin
from django.core import management
def flush_database():
"""Flushes the default test database.
"""
management.call_command('flush', verbosity=0, interactive=False)
class DatabaseFlushPlugin(Plugin):
"""Nose plugin to flush the database after every test.
The instances of models generated in one test may cause other tests to fail.
So it is necessary to clear the test database after every test.
"""
name = 'DatabaseFlushPlugin'
enabled = True
def options(self, parser, env):
return Plugin.options(self, parser, env)
def configure(self, parser, env):
Plugin.configure(self, parser, env)
self.enabled = True
def afterTest(self, test):
flush_database()
|
Add a nose plugin to clear the database after every test.
|
Add a nose plugin to clear the database after every test.
|
Python
|
apache-2.0
|
cidadania/e-cidadania,cidadania/e-cidadania
|
Add a nose plugin to clear the database after every test.
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from nose.plugins import Plugin
from django.core import management
def flush_database():
"""Flushes the default test database.
"""
management.call_command('flush', verbosity=0, interactive=False)
class DatabaseFlushPlugin(Plugin):
"""Nose plugin to flush the database after every test.
The instances of models generated in one test may cause other tests to fail.
So it is necessary to clear the test database after every test.
"""
name = 'DatabaseFlushPlugin'
enabled = True
def options(self, parser, env):
return Plugin.options(self, parser, env)
def configure(self, parser, env):
Plugin.configure(self, parser, env)
self.enabled = True
def afterTest(self, test):
flush_database()
|
<commit_before><commit_msg>Add a nose plugin to clear the database after every test.<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from nose.plugins import Plugin
from django.core import management
def flush_database():
"""Flushes the default test database.
"""
management.call_command('flush', verbosity=0, interactive=False)
class DatabaseFlushPlugin(Plugin):
"""Nose plugin to flush the database after every test.
The instances of models generated in one test may cause other tests to fail.
So it is necessary to clear the test database after every test.
"""
name = 'DatabaseFlushPlugin'
enabled = True
def options(self, parser, env):
return Plugin.options(self, parser, env)
def configure(self, parser, env):
Plugin.configure(self, parser, env)
self.enabled = True
def afterTest(self, test):
flush_database()
|
Add a nose plugin to clear the database after every test.# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from nose.plugins import Plugin
from django.core import management
def flush_database():
"""Flushes the default test database.
"""
management.call_command('flush', verbosity=0, interactive=False)
class DatabaseFlushPlugin(Plugin):
"""Nose plugin to flush the database after every test.
The instances of models generated in one test may cause other tests to fail.
So it is necessary to clear the test database after every test.
"""
name = 'DatabaseFlushPlugin'
enabled = True
def options(self, parser, env):
return Plugin.options(self, parser, env)
def configure(self, parser, env):
Plugin.configure(self, parser, env)
self.enabled = True
def afterTest(self, test):
flush_database()
|
<commit_before><commit_msg>Add a nose plugin to clear the database after every test.<commit_after># -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2012 Cidadania S. Coop. Galega
#
# This file is part of e-cidadania.
#
# e-cidadania is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# e-cidadania is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with e-cidadania. If not, see <http://www.gnu.org/licenses/>.
import os
from nose.plugins import Plugin
from django.core import management
def flush_database():
"""Flushes the default test database.
"""
management.call_command('flush', verbosity=0, interactive=False)
class DatabaseFlushPlugin(Plugin):
"""Nose plugin to flush the database after every test.
The instances of models generated in one test may cause other tests to fail.
So it is necessary to clear the test database after every test.
"""
name = 'DatabaseFlushPlugin'
enabled = True
def options(self, parser, env):
return Plugin.options(self, parser, env)
def configure(self, parser, env):
Plugin.configure(self, parser, env)
self.enabled = True
def afterTest(self, test):
flush_database()
|
|
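A hedged way to register the plugin without packaging an entry point; the module path is illustrative and mirrors the file location above:
import nose
from tests.nose_plugins import DatabaseFlushPlugin
if __name__ == '__main__':
    # afterTest() then flushes the database after every test that runs.
    nose.main(addplugins=[DatabaseFlushPlugin()])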
b45b6256b442942564ba447913ad4a1bec77db19
|
Problem063/Python/solution_1.py
|
Problem063/Python/solution_1.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2017
#
# @team: DestructHub
# @project: ProjectEuler
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
print(len([a ** n
for a in range(1, 10)
for n in range(1, 22)
if len(str(a ** n)) == n]))
|
Add solution for Problem063 written in Python
|
Add solution for Problem063 written in Python
|
Python
|
mit
|
DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler,DestructHub/ProjectEuler
|
Add solution for Problem063 written in Python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2017
#
# @team: DestructHub
# @project: ProjectEuler
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
print(len([a ** n
for a in range(1, 10)
for n in range(1, 22)
if len(str(a ** n)) == n]))
|
<commit_before><commit_msg>Add solution for Problem063 written in Python<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2017
#
# @team: DestructHub
# @project: ProjectEuler
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
print(len([a ** n
for a in range(1, 10)
for n in range(1, 22)
if len(str(a ** n)) == n]))
|
Add solution for Problem063 written in Python#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2017
#
# @team: DestructHub
# @project: ProjectEuler
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
print(len([a ** n
for a in range(1, 10)
for n in range(1, 22)
if len(str(a ** n)) == n]))
|
<commit_before><commit_msg>Add solution for Problem063 written in Python<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2017
#
# @team: DestructHub
# @project: ProjectEuler
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
print(len([a ** n
for a in range(1, 10)
for n in range(1, 22)
if len(str(a ** n)) == n]))
|
|
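A short side-check (not part of the solution) of why range(1, 22) suffices: a**n keeps n digits only while a**n >= 10**(n-1), i.e. n <= 1/(1 - log10(a)), and for a = 9 that bound is just under 22; a = 1 only qualifies for n = 1.
from math import log10
for a in range(2, 10):
    print(a, 1 / (1 - log10(a)))  # largest n that can still give an n-digit power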
5c91a2c8dda69d37fd3cd0989ff6c3883851eaef
|
saleor/product/templatetags/product_images.py
|
saleor/product/templatetags/product_images.py
|
import logging
import warnings
from django.template.context_processors import static
from django import template
from django.conf import settings
logger = logging.getLogger(__name__)
register = template.Library()
# cache available sizes at module level
def get_available_sizes():
all_sizes = set()
keys = settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
for size_group, sizes in keys.items():
for size_name, size in sizes:
all_sizes.add(size)
return all_sizes
AVAILABLE_SIZES = get_available_sizes()
@register.simple_tag()
def product_image(instance, size, method='crop'):
if instance:
size_name = '%s__%s' % (method, size)
if (size_name not in AVAILABLE_SIZES and not
settings.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand']):
msg = ('Thumbnail size %s is not defined in settings '
'and it won\'t be generated automatically' % size_name)
warnings.warn(msg)
try:
if method == 'crop':
thumbnail = instance.crop[size]
else:
thumbnail = instance.thumbnail[size]
except:
logger.exception('Thumbnail fetch failed',
extra={'instance': instance, 'size': size})
else:
return thumbnail.url
return static('dist/images/product-image-placeholder.png')
|
Introduce templatetag for fetching image thumbnails
|
Introduce templatetag for fetching image thumbnails
|
Python
|
bsd-3-clause
|
tfroehlich82/saleor,itbabu/saleor,UITools/saleor,maferelo/saleor,tfroehlich82/saleor,tfroehlich82/saleor,car3oon/saleor,KenMutemi/saleor,KenMutemi/saleor,jreigel/saleor,maferelo/saleor,car3oon/saleor,KenMutemi/saleor,mociepka/saleor,car3oon/saleor,HyperManTT/ECommerceSaleor,HyperManTT/ECommerceSaleor,maferelo/saleor,UITools/saleor,HyperManTT/ECommerceSaleor,jreigel/saleor,UITools/saleor,itbabu/saleor,mociepka/saleor,jreigel/saleor,UITools/saleor,UITools/saleor,mociepka/saleor,itbabu/saleor
|
Introduce templatetag for fetching image thumbnails
|
import logging
import warnings
from django.template.context_processors import static
from django import template
from django.conf import settings
logger = logging.getLogger(__name__)
register = template.Library()
# cache available sizes at module level
def get_available_sizes():
all_sizes = set()
keys = settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
for size_group, sizes in keys.items():
for size_name, size in sizes:
all_sizes.add(size)
return all_sizes
AVAILABLE_SIZES = get_available_sizes()
@register.simple_tag()
def product_image(instance, size, method='crop'):
if instance:
size_name = '%s__%s' % (method, size)
if (size_name not in AVAILABLE_SIZES and not
settings.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand']):
msg = ('Thumbnail size %s is not defined in settings '
'and it won\'t be generated automatically' % size_name)
warnings.warn(msg)
try:
if method == 'crop':
thumbnail = instance.crop[size]
else:
thumbnail = instance.thumbnail[size]
except:
logger.exception('Thumbnail fetch failed',
extra={'instance': instance, 'size': size})
else:
return thumbnail.url
return static('dist/images/product-image-placeholder.png')
|
<commit_before><commit_msg>Introduce templatetag for fetching image thumbnails<commit_after>
|
import logging
import warnings
from django.template.context_processors import static
from django import template
from django.conf import settings
logger = logging.getLogger(__name__)
register = template.Library()
# cache available sizes at module level
def get_available_sizes():
all_sizes = set()
keys = settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
for size_group, sizes in keys.items():
for size_name, size in sizes:
all_sizes.add(size)
return all_sizes
AVAILABLE_SIZES = get_available_sizes()
@register.simple_tag()
def product_image(instance, size, method='crop'):
if instance:
size_name = '%s__%s' % (method, size)
if (size_name not in AVAILABLE_SIZES and not
settings.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand']):
msg = ('Thumbnail size %s is not defined in settings '
'and it won\'t be generated automatically' % size_name)
warnings.warn(msg)
try:
if method == 'crop':
thumbnail = instance.crop[size]
else:
thumbnail = instance.thumbnail[size]
except:
logger.exception('Thumbnail fetch failed',
extra={'instance': instance, 'size': size})
else:
return thumbnail.url
return static('dist/images/product-image-placeholder.png')
|
Introduce templatetag for fetching image thumbnailsimport logging
import warnings
from django.template.context_processors import static
from django import template
from django.conf import settings
logger = logging.getLogger(__name__)
register = template.Library()
# cache available sizes at module level
def get_available_sizes():
all_sizes = set()
keys = settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
for size_group, sizes in keys.items():
for size_name, size in sizes:
all_sizes.add(size)
return all_sizes
AVAILABLE_SIZES = get_available_sizes()
@register.simple_tag()
def product_image(instance, size, method='crop'):
if instance:
size_name = '%s__%s' % (method, size)
if (size_name not in AVAILABLE_SIZES and not
settings.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand']):
msg = ('Thumbnail size %s is not defined in settings '
'and it won\'t be generated automatically' % size_name)
warnings.warn(msg)
try:
if method == 'crop':
thumbnail = instance.crop[size]
else:
thumbnail = instance.thumbnail[size]
except:
logger.exception('Thumbnail fetch failed',
extra={'instance': instance, 'size': size})
else:
return thumbnail.url
return static('dist/images/product-image-placeholder.png')
|
<commit_before><commit_msg>Introduce templatetag for fetching image thumbnails<commit_after>import logging
import warnings
from django.template.context_processors import static
from django import template
from django.conf import settings
logger = logging.getLogger(__name__)
register = template.Library()
# cache available sizes at module level
def get_available_sizes():
all_sizes = set()
keys = settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
for size_group, sizes in keys.items():
for size_name, size in sizes:
all_sizes.add(size)
return all_sizes
AVAILABLE_SIZES = get_available_sizes()
@register.simple_tag()
def product_image(instance, size, method='crop'):
if instance:
size_name = '%s__%s' % (method, size)
if (size_name not in AVAILABLE_SIZES and not
settings.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand']):
msg = ('Thumbnail size %s is not defined in settings '
'and it won\'t be generated automatically' % size_name)
warnings.warn(msg)
try:
if method == 'crop':
thumbnail = instance.crop[size]
else:
thumbnail = instance.thumbnail[size]
except:
logger.exception('Thumbnail fetch failed',
extra={'instance': instance, 'size': size})
else:
return thumbnail.url
return static('dist/images/product-image-placeholder.png')
|
|
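A hedged usage sketch, assuming a configured Django settings module and a '60x60' key in VERSATILEIMAGEFIELD_RENDITION_KEY_SETS; the template snippet and lookup names are illustrative:
# Template usage (illustrative):
#     {% load product_images %}
#     <img src="{% product_image product.images.first.image '60x60' 'crop' %}">
# Direct call; a falsy instance falls back to the placeholder image.
from saleor.product.templatetags.product_images import product_image
print(product_image(None, '60x60'))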
761ec2bd6492b041eb658ee836a63ffb877469d5
|
cbv/management/commands/load_all_django_versions.py
|
cbv/management/commands/load_all_django_versions.py
|
import os
import re
from django.conf import settings
from django.core.management import call_command, BaseCommand
class Command(BaseCommand):
"""Load the Django project fixtures and all version fixtures"""
def handle(self, **options):
fixtures_dir = os.path.join(settings.DIRNAME, 'cbv', 'fixtures')
self.stdout.write('Loading project.json')
call_command('loaddata', 'cbv/fixtures/project.json')
version_fixtures = [re.match(r'((?:\d+\.){2,3}json)', filename) for filename in os.listdir(fixtures_dir)]
for match in version_fixtures:
try:
fixture = match.group()
except AttributeError:
continue
self.stdout.write('Loading {}'.format(fixture))
call_command('loaddata', 'cbv/fixtures/{}'.format(fixture))
|
Add management command to load all version fixtures
|
Add management command to load all version fixtures
Because life's too short
|
Python
|
bsd-2-clause
|
refreshoxford/django-cbv-inspector,refreshoxford/django-cbv-inspector,refreshoxford/django-cbv-inspector,refreshoxford/django-cbv-inspector
|
Add management command to load all version fixtures
Because life's too short
|
import os
import re
from django.conf import settings
from django.core.management import call_command, BaseCommand
class Command(BaseCommand):
"""Load the Django project fixtures and all version fixtures"""
def handle(self, **options):
fixtures_dir = os.path.join(settings.DIRNAME, 'cbv', 'fixtures')
self.stdout.write('Loading project.json')
call_command('loaddata', 'cbv/fixtures/project.json')
version_fixtures = [re.match(r'((?:\d+\.){2,3}json)', filename) for filename in os.listdir(fixtures_dir)]
for match in version_fixtures:
try:
fixture = match.group()
except AttributeError:
continue
self.stdout.write('Loading {}'.format(fixture))
call_command('loaddata', 'cbv/fixtures/{}'.format(fixture))
|
<commit_before><commit_msg>Add management command to load all version fixtures
Because life's too short<commit_after>
|
import os
import re
from django.conf import settings
from django.core.management import call_command, BaseCommand
class Command(BaseCommand):
"""Load the Django project fixtures and all version fixtures"""
def handle(self, **options):
fixtures_dir = os.path.join(settings.DIRNAME, 'cbv', 'fixtures')
self.stdout.write('Loading project.json')
call_command('loaddata', 'cbv/fixtures/project.json')
version_fixtures = [re.match(r'((?:\d+\.){2,3}json)', filename) for filename in os.listdir(fixtures_dir)]
for match in version_fixtures:
try:
fixture = match.group()
except AttributeError:
continue
self.stdout.write('Loading {}'.format(fixture))
call_command('loaddata', 'cbv/fixtures/{}'.format(fixture))
|
Add management command to load all version fixtures
Because life's too shortimport os
import re
from django.conf import settings
from django.core.management import call_command, BaseCommand
class Command(BaseCommand):
"""Load the Django project fixtures and all version fixtures"""
def handle(self, **options):
fixtures_dir = os.path.join(settings.DIRNAME, 'cbv', 'fixtures')
self.stdout.write('Loading project.json')
call_command('loaddata', 'cbv/fixtures/project.json')
version_fixtures = [re.match(r'((?:\d+\.){2,3}json)', filename) for filename in os.listdir(fixtures_dir)]
for match in version_fixtures:
try:
fixture = match.group()
except AttributeError:
continue
self.stdout.write('Loading {}'.format(fixture))
call_command('loaddata', 'cbv/fixtures/{}'.format(fixture))
|
<commit_before><commit_msg>Add management command to load all version fixtures
Because life's too short<commit_after>import os
import re
from django.conf import settings
from django.core.management import call_command, BaseCommand
class Command(BaseCommand):
"""Load the Django project fixtures and all version fixtures"""
def handle(self, **options):
fixtures_dir = os.path.join(settings.DIRNAME, 'cbv', 'fixtures')
self.stdout.write('Loading project.json')
call_command('loaddata', 'cbv/fixtures/project.json')
version_fixtures = [re.match(r'((?:\d+\.){2,3}json)', filename) for filename in os.listdir(fixtures_dir)]
for match in version_fixtures:
try:
fixture = match.group()
except AttributeError:
continue
self.stdout.write('Loading {}'.format(fixture))
call_command('loaddata', 'cbv/fixtures/{}'.format(fixture))
|
|
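The command is normally run from the project root; a hedged sketch of both invocations (the shell line is illustrative):
#     python manage.py load_all_django_versions
# or programmatically, e.g. from a deployment script:
from django.core.management import call_command
call_command('load_all_django_versions')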
eb429be1fdc7335bec5ba036fcece309778b23f0
|
examples/rmg/heptane-filterReactions/input.py
|
examples/rmg/heptane-filterReactions/input.py
|
# Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# Constraints on generated species
generatedSpeciesConstraints(
maximumCarbonAtoms = 7,
)
# List of species
species(
label='n-heptane',
structure=SMILES("CCCCCCC"),
)
species(
label='Ar',
reactive=False,
structure=SMILES("[Ar]"),
)
simpleReactor(
temperature=(1600,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simpleReactor(
temperature=(2000,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceMoveToCore=0.01,
toleranceInterruptSimulation=0.01,
filterReactions=True,
)
pressureDependence(
method='modified strong collision',
maximumGrainSize=(0.5,'kcal/mol'),
minimumNumberOfGrains=250,
temperatures=(300,3000,'K',8),
pressures=(0.001,100,'bar',5),
interpolation=('Chebyshev', 6, 4),
)
|
Add an example that uses filterReactions AND pdep at the same time
|
Add an example that uses filterReactions AND pdep at the same time
|
Python
|
mit
|
pierrelb/RMG-Py,nyee/RMG-Py,nickvandewiele/RMG-Py,nyee/RMG-Py,chatelak/RMG-Py,chatelak/RMG-Py,pierrelb/RMG-Py,nickvandewiele/RMG-Py
|
Add an example that uses filterReactions AND pdep at the same time
|
# Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# Constraints on generated species
generatedSpeciesConstraints(
maximumCarbonAtoms = 7,
)
# List of species
species(
label='n-heptane',
structure=SMILES("CCCCCCC"),
)
species(
label='Ar',
reactive=False,
structure=SMILES("[Ar]"),
)
simpleReactor(
temperature=(1600,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simpleReactor(
temperature=(2000,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceMoveToCore=0.01,
toleranceInterruptSimulation=0.01,
filterReactions=True,
)
pressureDependence(
method='modified strong collision',
maximumGrainSize=(0.5,'kcal/mol'),
minimumNumberOfGrains=250,
temperatures=(300,3000,'K',8),
pressures=(0.001,100,'bar',5),
interpolation=('Chebyshev', 6, 4),
)
|
<commit_before><commit_msg>Add an example that uses filterReactions AND pdep at the same time<commit_after>
|
# Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# Constraints on generated species
generatedSpeciesConstraints(
maximumCarbonAtoms = 7,
)
# List of species
species(
label='n-heptane',
structure=SMILES("CCCCCCC"),
)
species(
label='Ar',
reactive=False,
structure=SMILES("[Ar]"),
)
simpleReactor(
temperature=(1600,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simpleReactor(
temperature=(2000,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceMoveToCore=0.01,
toleranceInterruptSimulation=0.01,
filterReactions=True,
)
pressureDependence(
method='modified strong collision',
maximumGrainSize=(0.5,'kcal/mol'),
minimumNumberOfGrains=250,
temperatures=(300,3000,'K',8),
pressures=(0.001,100,'bar',5),
interpolation=('Chebyshev', 6, 4),
)
|
Add an example that uses filterReactions AND pdep at the same time# Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# Constraints on generated species
generatedSpeciesConstraints(
maximumCarbonAtoms = 7,
)
# List of species
species(
label='n-heptane',
structure=SMILES("CCCCCCC"),
)
species(
label='Ar',
reactive=False,
structure=SMILES("[Ar]"),
)
simpleReactor(
temperature=(1600,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simpleReactor(
temperature=(2000,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceMoveToCore=0.01,
toleranceInterruptSimulation=0.01,
filterReactions=True,
)
pressureDependence(
method='modified strong collision',
maximumGrainSize=(0.5,'kcal/mol'),
minimumNumberOfGrains=250,
temperatures=(300,3000,'K',8),
pressures=(0.001,100,'bar',5),
interpolation=('Chebyshev', 6, 4),
)
|
<commit_before><commit_msg>Add an example that uses filterReactions AND pdep at the same time<commit_after># Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# Constraints on generated species
generatedSpeciesConstraints(
maximumCarbonAtoms = 7,
)
# List of species
species(
label='n-heptane',
structure=SMILES("CCCCCCC"),
)
species(
label='Ar',
reactive=False,
structure=SMILES("[Ar]"),
)
simpleReactor(
temperature=(1600,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simpleReactor(
temperature=(2000,'K'),
pressure=(400,'Pa'),
initialMoleFractions={
"n-heptane": 0.02,
"Ar": 0.98,
},
terminationConversion={
'n-heptane': 0.99,
},
terminationTime=(1e6,'s'),
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceMoveToCore=0.01,
toleranceInterruptSimulation=0.01,
filterReactions=True,
)
pressureDependence(
method='modified strong collision',
maximumGrainSize=(0.5,'kcal/mol'),
minimumNumberOfGrains=250,
temperatures=(300,3000,'K',8),
pressures=(0.001,100,'bar',5),
interpolation=('Chebyshev', 6, 4),
)
|
|
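A hedged usage note: the input file above is itself Python that RMG-Py executes, and the point of the example is that filterReactions=True and the pressureDependence block are enabled together. From an RMG-Py checkout it is typically run as below (path illustrative):
#     python rmg.py examples/rmg/heptane-filterReactions/input.py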
3a240005142da25aa49938a15d39ddf68dd7cead
|
nova/tests/functional/api/openstack/placement/test_verify_policy.py
|
nova/tests/functional/api/openstack/placement/test_verify_policy.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.api.openstack.placement import direct
from nova.api.openstack.placement import handler
from nova.tests.functional.api.openstack.placement import base
CONF = cfg.CONF
class TestVerifyPolicy(base.TestCase):
"""Verify that all defined placement routes have a policy."""
# Paths that don't need a policy check
EXCEPTIONS = ['/', '']
def _test_request_403(self, client, method, route):
headers = {
'x-auth-token': 'user',
'content-type': 'application/json'
}
request_method = getattr(client, method.lower())
# We send an empty request body on all requests. Because
# policy handling comes before other processing, the value
# of the body is irrelevant.
response = request_method(route, data='', headers=headers)
self.assertEqual(
403, response.status_code,
'method %s on route %s is open for user, status: %s' %
(method, route, response.status_code))
def test_verify_policy(self):
with direct.PlacementDirect(CONF, latest_microversion=True) as client:
for route, methods in handler.ROUTE_DECLARATIONS.items():
if route in self.EXCEPTIONS:
continue
for method in methods:
self._test_request_403(client, method, route)
|
Add functional test to verify presence of policy
|
[placement] Add functional test to verify presence of policy
Add a test that traverses all available placement URLs at the latest
microversion and tries to access them as non-admin. If something other
than a 403 response is given a failed test with a message like
method POST on route /resource_providers/{uuid}/inventories
is open for user, status: 404
is produced.
This works because we do authZ handling early in each handler, before
data processing and path parameter handling.
The method is pretty straightforward: traverse ROUTE_DECLARATIONS, visit
every url with each the declared methods, except the root version document,
and confirm a 403 response when the provided auth token is non-admin.
This has been created to avoid situations where a route is added without
policy like happened on https://review.openstack.org/#/c/576927/ . Until
recently we had a failover where any route not defined to have policy
would default to admin. That went away so now we need some test
automation to catch our forgetful humanness.
Change-Id: Id582886ec4b621b97d7cc7237b4670ad7bb12295
|
Python
|
apache-2.0
|
mahak/nova,rahulunair/nova,klmitch/nova,openstack/nova,mikalstill/nova,rahulunair/nova,gooddata/openstack-nova,mikalstill/nova,gooddata/openstack-nova,rahulunair/nova,klmitch/nova,mikalstill/nova,klmitch/nova,mahak/nova,openstack/nova,klmitch/nova,openstack/nova,mahak/nova,gooddata/openstack-nova,gooddata/openstack-nova
|
[placement] Add functional test to verify presence of policy
Add a test that traverses all available placement URLs at the latest
microversion and tries to access them as non-admin. If something other
than a 403 response is given a failed test with a message like
method POST on route /resource_providers/{uuid}/inventories
is open for user, status: 404
is produced.
This works because we do authZ handling early in each handler, before
data processing and path parameter handling.
The method is pretty straightforward: traverse ROUTE_DECLARATIONS, visit
every url with each the declared methods, except the root version document,
and confirm a 403 response when the provided auth token is non-admin.
This has been created to avoid situations where a route is added without
policy like happened on https://review.openstack.org/#/c/576927/ . Until
recently we had a failover where any route not defined to have policy
would default to admin. That went away so now we need some test
automation to catch our forgetful humanness.
Change-Id: Id582886ec4b621b97d7cc7237b4670ad7bb12295
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.api.openstack.placement import direct
from nova.api.openstack.placement import handler
from nova.tests.functional.api.openstack.placement import base
CONF = cfg.CONF
class TestVerifyPolicy(base.TestCase):
"""Verify that all defined placement routes have a policy."""
# Paths that don't need a policy check
EXCEPTIONS = ['/', '']
def _test_request_403(self, client, method, route):
headers = {
'x-auth-token': 'user',
'content-type': 'application/json'
}
request_method = getattr(client, method.lower())
# We send an empty request body on all requests. Because
# policy handling comes before other processing, the value
# of the body is irrelevant.
response = request_method(route, data='', headers=headers)
self.assertEqual(
403, response.status_code,
'method %s on route %s is open for user, status: %s' %
(method, route, response.status_code))
def test_verify_policy(self):
with direct.PlacementDirect(CONF, latest_microversion=True) as client:
for route, methods in handler.ROUTE_DECLARATIONS.items():
if route in self.EXCEPTIONS:
continue
for method in methods:
self._test_request_403(client, method, route)
|
<commit_before><commit_msg>[placement] Add functional test to verify presence of policy
Add a test that traverses all available placement URLs at the latest
microversion and tries to access them as non-admin. If something other
than a 403 response is given, a failed test with a message like
method POST on route /resource_providers/{uuid}/inventories
is open for user, status: 404
is produced.
This works because we do authZ handling early in each handler, before
data processing and path parameter handling.
The method is pretty straightforward: traverse ROUTE_DECLARATIONS, visit
every url with each of the declared methods, except the root version document,
and confirm a 403 response when the provided auth token is non-admin.
This has been created to avoid situations where a route is added without
policy like happened on https://review.openstack.org/#/c/576927/ . Until
recently we had a failover where any route not defined to have policy
would default to admin. That went away so now we need some test
automation to catch our forgetful humanness.
Change-Id: Id582886ec4b621b97d7cc7237b4670ad7bb12295<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.api.openstack.placement import direct
from nova.api.openstack.placement import handler
from nova.tests.functional.api.openstack.placement import base
CONF = cfg.CONF
class TestVerifyPolicy(base.TestCase):
"""Verify that all defined placement routes have a policy."""
# Paths that don't need a policy check
EXCEPTIONS = ['/', '']
def _test_request_403(self, client, method, route):
headers = {
'x-auth-token': 'user',
'content-type': 'application/json'
}
request_method = getattr(client, method.lower())
# We send an empty request body on all requests. Because
# policy handling comes before other processing, the value
# of the body is irrelevant.
response = request_method(route, data='', headers=headers)
self.assertEqual(
403, response.status_code,
'method %s on route %s is open for user, status: %s' %
(method, route, response.status_code))
def test_verify_policy(self):
with direct.PlacementDirect(CONF, latest_microversion=True) as client:
for route, methods in handler.ROUTE_DECLARATIONS.items():
if route in self.EXCEPTIONS:
continue
for method in methods:
self._test_request_403(client, method, route)
|
[placement] Add functional test to verify presence of policy
Add a test that traverses all available placement URLs at the latest
microversion and tries to access them as non-admin. If something other
than a 403 response is given, a failed test with a message like
method POST on route /resource_providers/{uuid}/inventories
is open for user, status: 404
is produced.
This works because we do authZ handling early in each handler, before
data processing and path parameter handling.
The method is pretty straightforward: traverse ROUTE_DECLARATIONS, visit
every url with each of the declared methods, except the root version document,
and confirm a 403 response when the provided auth token is non-admin.
This has been created to avoid situations where a route is added without
policy like happened on https://review.openstack.org/#/c/576927/ . Until
recently we had a failover where any route not defined to have policy
would default to admin. That went away so now we need some test
automation to catch our forgetful humanness.
Change-Id: Id582886ec4b621b97d7cc7237b4670ad7bb12295# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.api.openstack.placement import direct
from nova.api.openstack.placement import handler
from nova.tests.functional.api.openstack.placement import base
CONF = cfg.CONF
class TestVerifyPolicy(base.TestCase):
"""Verify that all defined placement routes have a policy."""
# Paths that don't need a policy check
EXCEPTIONS = ['/', '']
def _test_request_403(self, client, method, route):
headers = {
'x-auth-token': 'user',
'content-type': 'application/json'
}
request_method = getattr(client, method.lower())
# We send an empty request body on all requests. Because
# policy handling comes before other processing, the value
# of the body is irrelevant.
response = request_method(route, data='', headers=headers)
self.assertEqual(
403, response.status_code,
'method %s on route %s is open for user, status: %s' %
(method, route, response.status_code))
def test_verify_policy(self):
with direct.PlacementDirect(CONF, latest_microversion=True) as client:
for route, methods in handler.ROUTE_DECLARATIONS.items():
if route in self.EXCEPTIONS:
continue
for method in methods:
self._test_request_403(client, method, route)
|
<commit_before><commit_msg>[placement] Add functional test to verify presence of policy
Add a test that traverses all available placement URLs at the latest
microversion and tries to access them as non-admin. If something other
than a 403 response is given, a failed test with a message like
method POST on route /resource_providers/{uuid}/inventories
is open for user, status: 404
is produced.
This works because we do authZ handling early in each handler, before
data processing and path parameter handling.
The method is pretty straightforward: traverse ROUTE_DECLARATIONS, visit
every url with each of the declared methods, except the root version document,
and confirm a 403 response when the provided auth token is non-admin.
This has been created to avoid situations where a route is added without
policy like happened on https://review.openstack.org/#/c/576927/ . Until
recently we had a failover where any route not defined to have policy
would default to admin. That went away so now we need some test
automation to catch our forgetful humanness.
Change-Id: Id582886ec4b621b97d7cc7237b4670ad7bb12295<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.api.openstack.placement import direct
from nova.api.openstack.placement import handler
from nova.tests.functional.api.openstack.placement import base
CONF = cfg.CONF
class TestVerifyPolicy(base.TestCase):
"""Verify that all defined placement routes have a policy."""
# Paths that don't need a policy check
EXCEPTIONS = ['/', '']
def _test_request_403(self, client, method, route):
headers = {
'x-auth-token': 'user',
'content-type': 'application/json'
}
request_method = getattr(client, method.lower())
# We send an empty request body on all requests. Because
# policy handling comes before other processing, the value
# of the body is irrelevant.
response = request_method(route, data='', headers=headers)
self.assertEqual(
403, response.status_code,
'method %s on route %s is open for user, status: %s' %
(method, route, response.status_code))
def test_verify_policy(self):
with direct.PlacementDirect(CONF, latest_microversion=True) as client:
for route, methods in handler.ROUTE_DECLARATIONS.items():
if route in self.EXCEPTIONS:
continue
for method in methods:
self._test_request_403(client, method, route)
|
|
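The test above works because authorization is checked before any body or path handling; a self-contained sketch of that ordering follows, with illustrative names only (this is not the placement handler code).
# Hypothetical illustration of "authZ before data processing"; none of these
# names come from the placement codebase.
class Forbidden(Exception):
    """Stands in for a 403 response."""
def handle_request(is_admin, raw_body):
    # Policy is enforced first, so a non-admin caller never gets past here.
    if not is_admin:
        raise Forbidden("admin required")
    # Body parsing only happens for authorized callers, which is why the test
    # can send an empty body to every route and still expect a 403.
    return len(raw_body)
try:
    handle_request(is_admin=False, raw_body="")
except Forbidden as exc:
    print(exc)  # admin required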
8f6db5945348879a7340f8a4c7da6111a06cd062
|
python/smqtk/web/search_app/modules/static_host.py
|
python/smqtk/web/search_app/modules/static_host.py
|
import flask
__author__ = 'paul.tunison@kitware.com'
class StaticDirectoryHost (flask.Blueprint):
"""
Module that will host a given directory to the given URL prefix (relative to
the parent module's prefix).
Instances of this class will have nothing set to their static URL path, as a
blank string is used. Please reference the URL prefix value.
"""
def __init__(self, name, static_dir, url_prefix):
# make sure URL prefix starts with a slash
if not url_prefix.startswith('/'):
url_prefix = '/' + url_prefix
super(StaticDirectoryHost, self).__init__(name, __name__,
static_folder=static_dir,
static_url_path="",
url_prefix=url_prefix)
|
Add new module to statically host an arbitrary directory
|
Add new module to statically host an arbitrary directory
|
Python
|
bsd-3-clause
|
Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,kfieldho/SMQTK,kfieldho/SMQTK,kfieldho/SMQTK,Purg/SMQTK,Purg/SMQTK,kfieldho/SMQTK,kfieldho/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,Purg/SMQTK,kfieldho/SMQTK,kfieldho/SMQTK,kfieldho/SMQTK,kfieldho/SMQTK
|
Add new module to statically host an arbitrary directory
|
import flask
__author__ = 'paul.tunison@kitware.com'
class StaticDirectoryHost (flask.Blueprint):
"""
Module that will host a given directory to the given URL prefix (relative to
the parent module's prefix).
Instances of this class will have nothing set to their static URL path, as a
blank string is used. Please reference the URL prefix value.
"""
def __init__(self, name, static_dir, url_prefix):
# make sure URL prefix starts with a slash
if not url_prefix.startswith('/'):
url_prefix = '/' + url_prefix
super(StaticDirectoryHost, self).__init__(name, __name__,
static_folder=static_dir,
static_url_path="",
url_prefix=url_prefix)
|
<commit_before><commit_msg>Add new module to statically host an arbitrary directory<commit_after>
|
import flask
__author__ = 'paul.tunison@kitware.com'
class StaticDirectoryHost (flask.Blueprint):
"""
Module that will host a given directory to the given URL prefix (relative to
the parent module's prefix).
Instances of this class will have nothing set to their static URL path, as a
blank string is used. Please reference the URL prefix value.
"""
def __init__(self, name, static_dir, url_prefix):
# make sure URL prefix starts with a slash
if not url_prefix.startswith('/'):
url_prefix = '/' + url_prefix
super(StaticDirectoryHost, self).__init__(name, __name__,
static_folder=static_dir,
static_url_path="",
url_prefix=url_prefix)
|
Add new module to statically host an arbitrary directoryimport flask
__author__ = 'paul.tunison@kitware.com'
class StaticDirectoryHost (flask.Blueprint):
"""
Module that will host a given directory to the given URL prefix (relative to
the parent module's prefix).
Instances of this class will have nothing set to their static URL path, as a
blank string is used. Please reference the URL prefix value.
"""
def __init__(self, name, static_dir, url_prefix):
# make sure URL prefix starts with a slash
if not url_prefix.startswith('/'):
url_prefix = '/' + url_prefix
super(StaticDirectoryHost, self).__init__(name, __name__,
static_folder=static_dir,
static_url_path="",
url_prefix=url_prefix)
|
<commit_before><commit_msg>Add new module to statically host an arbitrary directory<commit_after>import flask
__author__ = 'paul.tunison@kitware.com'
class StaticDirectoryHost (flask.Blueprint):
"""
Module that will host a given directory to the given URL prefix (relative to
the parent module's prefix).
Instances of this class will have nothing set to their static URL path, as a
blank string is used. Please reference the URL prefix value.
"""
def __init__(self, name, static_dir, url_prefix):
# make sure URL prefix starts with a slash
if not url_prefix.startswith('/'):
url_prefix = '/' + url_prefix
super(StaticDirectoryHost, self).__init__(name, __name__,
static_folder=static_dir,
static_url_path="",
url_prefix=url_prefix)
|
|
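A minimal usage sketch for the StaticDirectoryHost blueprint above; the app, directory, and URL prefix are made up for illustration, and this is not how the SMQTK search app itself wires the module in.
import flask
# StaticDirectoryHost as defined in the module above
# (smqtk.web.search_app.modules.static_host in the commit).
app = flask.Flask(__name__)
static_host = StaticDirectoryHost(
    name='data_host',          # blueprint name
    static_dir='/tmp/hosted',  # directory to expose
    url_prefix='static/data',  # a leading slash is prepended automatically
)
app.register_blueprint(static_host)
# Files under /tmp/hosted are now served at /static/data/<filename>.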
fdb8f36fd4eed11d5d757d8477b3c2b8619aae8a
|
corehq/apps/commtrack/management/commands/product_program_last_modified.py
|
corehq/apps/commtrack/management/commands/product_program_last_modified.py
|
from django.core.management.base import BaseCommand
from corehq.apps.commtrack.models import Product, Program
from dimagi.utils.couch.database import iter_docs
from datetime import datetime
import json
class Command(BaseCommand):
help = 'Populate last_modified field for products and programs'
def handle(self, *args, **options):
self.stdout.write("Processing products...\n")
relevant_ids = set([r['id'] for r in Product.get_db().view(
'commtrack/products',
reduce=False,
).all()])
to_save = []
for product in iter_docs(Product.get_db(), relevant_ids):
if 'last_modified' not in product or not product['last_modified']:
print product['_id']
product['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(product)
if len(to_save) > 500:
Product.get_db().bulk_save(to_save)
to_save = []
if to_save:
Product.get_db().bulk_save(to_save)
self.stdout.write("Processing programs...\n")
relevant_ids = set([r['id'] for r in Program.get_db().view(
'commtrack/programs',
reduce=False,
).all()])
to_save = []
for program in iter_docs(Program.get_db(), relevant_ids):
if 'last_modified' not in program or not program['last_modified']:
print program['_id']
program['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(program)
if len(to_save) > 500:
Program.get_db().bulk_save(to_save)
to_save = []
if to_save:
Program.get_db().bulk_save(to_save)
|
Add management command to populate last_modified fields
|
Add management command to populate last_modified fields
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq
|
Add management command to populate last_modified fields
|
from django.core.management.base import BaseCommand
from corehq.apps.commtrack.models import Product, Program
from dimagi.utils.couch.database import iter_docs
from datetime import datetime
import json
class Command(BaseCommand):
help = 'Populate last_modified field for products and programs'
def handle(self, *args, **options):
self.stdout.write("Processing products...\n")
relevant_ids = set([r['id'] for r in Product.get_db().view(
'commtrack/products',
reduce=False,
).all()])
to_save = []
for product in iter_docs(Product.get_db(), relevant_ids):
if 'last_modified' not in product or not product['last_modified']:
print product['_id']
product['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(product)
if len(to_save) > 500:
Product.get_db().bulk_save(to_save)
to_save = []
if to_save:
Product.get_db().bulk_save(to_save)
self.stdout.write("Processing programs...\n")
relevant_ids = set([r['id'] for r in Program.get_db().view(
'commtrack/programs',
reduce=False,
).all()])
to_save = []
for program in iter_docs(Program.get_db(), relevant_ids):
if 'last_modified' not in program or not program['last_modified']:
print program['_id']
program['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(program)
if len(to_save) > 500:
Program.get_db().bulk_save(to_save)
to_save = []
if to_save:
Program.get_db().bulk_save(to_save)
|
<commit_before><commit_msg>Add management command to populate last_modified fields<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.commtrack.models import Product, Program
from dimagi.utils.couch.database import iter_docs
from datetime import datetime
import json
class Command(BaseCommand):
help = 'Populate last_modified field for products and programs'
def handle(self, *args, **options):
self.stdout.write("Processing products...\n")
relevant_ids = set([r['id'] for r in Product.get_db().view(
'commtrack/products',
reduce=False,
).all()])
to_save = []
for product in iter_docs(Product.get_db(), relevant_ids):
if 'last_modified' not in product or not product['last_modified']:
print product['_id']
product['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(product)
if len(to_save) > 500:
Product.get_db().bulk_save(to_save)
to_save = []
if to_save:
Product.get_db().bulk_save(to_save)
self.stdout.write("Processing programs...\n")
relevant_ids = set([r['id'] for r in Program.get_db().view(
'commtrack/programs',
reduce=False,
).all()])
to_save = []
for program in iter_docs(Program.get_db(), relevant_ids):
if 'last_modified' not in program or not program['last_modified']:
print program['_id']
program['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(program)
if len(to_save) > 500:
Program.get_db().bulk_save(to_save)
to_save = []
if to_save:
Program.get_db().bulk_save(to_save)
|
Add management command to populate last_modified fieldsfrom django.core.management.base import BaseCommand
from corehq.apps.commtrack.models import Product, Program
from dimagi.utils.couch.database import iter_docs
from datetime import datetime
import json
class Command(BaseCommand):
help = 'Populate last_modified field for products and programs'
def handle(self, *args, **options):
self.stdout.write("Processing products...\n")
relevant_ids = set([r['id'] for r in Product.get_db().view(
'commtrack/products',
reduce=False,
).all()])
to_save = []
for product in iter_docs(Product.get_db(), relevant_ids):
if 'last_modified' not in product or not product['last_modified']:
print product['_id']
product['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(product)
if len(to_save) > 500:
Product.get_db().bulk_save(to_save)
to_save = []
if to_save:
Product.get_db().bulk_save(to_save)
self.stdout.write("Processing programs...\n")
relevant_ids = set([r['id'] for r in Program.get_db().view(
'commtrack/programs',
reduce=False,
).all()])
to_save = []
for program in iter_docs(Program.get_db(), relevant_ids):
if 'last_modified' not in program or not program['last_modified']:
print program['_id']
program['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(program)
if len(to_save) > 500:
Program.get_db().bulk_save(to_save)
to_save = []
if to_save:
Program.get_db().bulk_save(to_save)
|
<commit_before><commit_msg>Add management command to populate last_modified fields<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.commtrack.models import Product, Program
from dimagi.utils.couch.database import iter_docs
from datetime import datetime
import json
class Command(BaseCommand):
help = 'Populate last_modified field for products and programs'
def handle(self, *args, **options):
self.stdout.write("Processing products...\n")
relevant_ids = set([r['id'] for r in Product.get_db().view(
'commtrack/products',
reduce=False,
).all()])
to_save = []
for product in iter_docs(Product.get_db(), relevant_ids):
if 'last_modified' not in product or not product['last_modified']:
print product['_id']
product['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(product)
if len(to_save) > 500:
Product.get_db().bulk_save(to_save)
to_save = []
if to_save:
Product.get_db().bulk_save(to_save)
self.stdout.write("Processing programs...\n")
relevant_ids = set([r['id'] for r in Program.get_db().view(
'commtrack/programs',
reduce=False,
).all()])
to_save = []
for program in iter_docs(Program.get_db(), relevant_ids):
if 'last_modified' not in program or not program['last_modified']:
print program['_id']
program['last_modified'] = json.dumps(datetime.now().isoformat())
to_save.append(program)
if len(to_save) > 500:
Program.get_db().bulk_save(to_save)
to_save = []
if to_save:
Program.get_db().bulk_save(to_save)
|
|
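One detail of the command above worth seeing in isolation: wrapping the ISO timestamp in json.dumps() stores the value with literal quote characters. A stand-alone illustration, not part of the commit:
import json
from datetime import datetime
ts = datetime(2014, 1, 1, 12, 30).isoformat()
print(ts)              # 2014-01-01T12:30:00
print(json.dumps(ts))  # "2014-01-01T12:30:00"  (the quotes are part of the stored string)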
bbc4351a5611a035bbee1f18cb55b74d9583cdcd
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Object.py
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Object.py
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Object(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added object.
># name string name of the object
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
#> id int ID on the BDD of the object
<= done data sent correctly
<= error error while reading data
'''
def __init__(self):
super(Wonderland_Add_Object, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/object/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response["entity"]:
return 'error'
# have an id to read
if 'id' not in data_response["entity"]:
# continue to Error
return 'error'
# return the ID
userdata.id = data_response["entity"]['id']
return 'done'
|
Create a state for add an object
|
Create a state for add an object
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Create a state for add an object
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Object(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added object.
># name string name of the object
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
#> id int ID on the BDD of the object
<= done data sent correctly
<= error error while reading data
'''
def __init__(self):
super(Wonderland_Add_Object, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/object/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response["entity"]:
return 'error'
# have an id to read
if 'id' not in data_response["entity"]:
# continue to Error
return 'error'
# return the ID
userdata.id = data_response["entity"]['id']
return 'done'
|
<commit_before><commit_msg>Create a state for add an object<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Object(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added object.
># name string name of the object
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
#> id int ID on the BDD of the object
<= done data sent correctly
<= error error while reading data
'''
def __init__(self):
super(Wonderland_Add_Object, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/object/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response["entity"]:
return 'error'
# have an id to read
if 'id' not in data_response["entity"]:
# continue to Error
return 'error'
# return the ID
userdata.id = data_response["entity"]['id']
return 'done'
|
Create a state for add an object#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Object(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added object.
># name string name of the object
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
#> id int ID on the BDD of the object
<= done data sent correctly
<= error error while reading data
'''
def __init__(self):
super(Wonderland_Add_Object, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/object/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response["entity"]:
return 'error'
# have an id to read
if 'id' not in data_response["entity"]:
# continue to Error
return 'error'
# return the ID
userdata.id = data_response["entity"]['id']
return 'done'
|
<commit_before><commit_msg>Create a state for add an object<commit_after>#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Object(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added object.
># name string name of the object
># roomID string ID on the BDD or name of the room
># x_pos int Position on X
># y_pos int Position on Y
># z_pos int Position on Z
#> id int ID on the BDD of the object
<= done data sent correctly
<= error error while reading data
'''
def __init__(self):
super(Wonderland_Add_Object, self).__init__(outcomes=['done', 'error'],
output_keys=['id'],
input_keys=['name', 'roomID', 'x_pos', 'y_pos', 'z_pos'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
if isinstance(userdata.roomID, (int, long)):
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomID': userdata.roomID}
else:
dataPost = {'name': userdata.name, 'x': userdata.x_pos, 'y': userdata.y_pos, 'z': userdata.z_pos,
'roomName': userdata.roomID}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/object/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response["entity"]:
return 'error'
# have an id to read
if 'id' not in data_response["entity"]:
# continue to Error
return 'error'
# return the ID
userdata.id = data_response["entity"]['id']
return 'done'
|
|
78bc96307fb52d95e36eab1da6fa57a66af736e8
|
corehq/apps/sms/management/commands/delete_messaging_couch_phone_numbers.py
|
corehq/apps/sms/management/commands/delete_messaging_couch_phone_numbers.py
|
from corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.sms.models import PhoneNumber
from dimagi.utils.couch.database import iter_docs_with_retry, iter_bulk_delete_with_doc_type_verification
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = ""
help = ("Deletes all messaging phone numbers stored in couch")
option_list = BaseCommand.option_list + (
make_option("--delete-interval",
action="store",
dest="delete_interval",
type="int",
default=5,
help="The number of seconds to wait between each bulk delete."),
)
def get_couch_ids(self):
result = VerifiedNumber.view(
'phone_numbers/verified_number_by_domain',
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def get_soft_deleted_couch_ids(self):
result = VerifiedNumber.view(
'all_docs/by_doc_type',
startkey=['VerifiedNumber-Deleted'],
endkey=['VerifiedNumber-Deleted', {}],
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def delete_models(self, delete_interval):
print 'Deleting VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_couch_ids(),
'VerifiedNumber',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
print 'Deleting Soft-Deleted VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_soft_deleted_couch_ids(),
'VerifiedNumber-Deleted',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
def handle(self, *args, **options):
self.delete_models(options['delete_interval'])
|
Add script to delete couch phone numbers
|
Add script to delete couch phone numbers
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add script to delete couch phone numbers
|
from corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.sms.models import PhoneNumber
from dimagi.utils.couch.database import iter_docs_with_retry, iter_bulk_delete_with_doc_type_verification
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = ""
help = ("Deletes all messaging phone numbers stored in couch")
option_list = BaseCommand.option_list + (
make_option("--delete-interval",
action="store",
dest="delete_interval",
type="int",
default=5,
help="The number of seconds to wait between each bulk delete."),
)
def get_couch_ids(self):
result = VerifiedNumber.view(
'phone_numbers/verified_number_by_domain',
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def get_soft_deleted_couch_ids(self):
result = VerifiedNumber.view(
'all_docs/by_doc_type',
startkey=['VerifiedNumber-Deleted'],
endkey=['VerifiedNumber-Deleted', {}],
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def delete_models(self, delete_interval):
print 'Deleting VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_couch_ids(),
'VerifiedNumber',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
print 'Deleting Soft-Deleted VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_soft_deleted_couch_ids(),
'VerifiedNumber-Deleted',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
def handle(self, *args, **options):
self.delete_models(options['delete_interval'])
|
<commit_before><commit_msg>Add script to delete couch phone numbers<commit_after>
|
from corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.sms.models import PhoneNumber
from dimagi.utils.couch.database import iter_docs_with_retry, iter_bulk_delete_with_doc_type_verification
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = ""
help = ("Deletes all messaging phone numbers stored in couch")
option_list = BaseCommand.option_list + (
make_option("--delete-interval",
action="store",
dest="delete_interval",
type="int",
default=5,
help="The number of seconds to wait between each bulk delete."),
)
def get_couch_ids(self):
result = VerifiedNumber.view(
'phone_numbers/verified_number_by_domain',
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def get_soft_deleted_couch_ids(self):
result = VerifiedNumber.view(
'all_docs/by_doc_type',
startkey=['VerifiedNumber-Deleted'],
endkey=['VerifiedNumber-Deleted', {}],
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def delete_models(self, delete_interval):
print 'Deleting VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_couch_ids(),
'VerifiedNumber',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
print 'Deleting Soft-Deleted VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_soft_deleted_couch_ids(),
'VerifiedNumber-Deleted',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
def handle(self, *args, **options):
self.delete_models(options['delete_interval'])
|
Add script to delete couch phone numbersfrom corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.sms.models import PhoneNumber
from dimagi.utils.couch.database import iter_docs_with_retry, iter_bulk_delete_with_doc_type_verification
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = ""
help = ("Deletes all messaging phone numbers stored in couch")
option_list = BaseCommand.option_list + (
make_option("--delete-interval",
action="store",
dest="delete_interval",
type="int",
default=5,
help="The number of seconds to wait between each bulk delete."),
)
def get_couch_ids(self):
result = VerifiedNumber.view(
'phone_numbers/verified_number_by_domain',
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def get_soft_deleted_couch_ids(self):
result = VerifiedNumber.view(
'all_docs/by_doc_type',
startkey=['VerifiedNumber-Deleted'],
endkey=['VerifiedNumber-Deleted', {}],
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def delete_models(self, delete_interval):
print 'Deleting VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_couch_ids(),
'VerifiedNumber',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
print 'Deleting Soft-Deleted VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_soft_deleted_couch_ids(),
'VerifiedNumber-Deleted',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
def handle(self, *args, **options):
self.delete_models(options['delete_interval'])
|
<commit_before><commit_msg>Add script to delete couch phone numbers<commit_after>from corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.sms.models import PhoneNumber
from dimagi.utils.couch.database import iter_docs_with_retry, iter_bulk_delete_with_doc_type_verification
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = ""
help = ("Deletes all messaging phone numbers stored in couch")
option_list = BaseCommand.option_list + (
make_option("--delete-interval",
action="store",
dest="delete_interval",
type="int",
default=5,
help="The number of seconds to wait between each bulk delete."),
)
def get_couch_ids(self):
result = VerifiedNumber.view(
'phone_numbers/verified_number_by_domain',
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def get_soft_deleted_couch_ids(self):
result = VerifiedNumber.view(
'all_docs/by_doc_type',
startkey=['VerifiedNumber-Deleted'],
endkey=['VerifiedNumber-Deleted', {}],
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result]
def delete_models(self, delete_interval):
print 'Deleting VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_couch_ids(),
'VerifiedNumber',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
print 'Deleting Soft-Deleted VerifiedNumbers...'
count = iter_bulk_delete_with_doc_type_verification(
VerifiedNumber.get_db(),
self.get_soft_deleted_couch_ids(),
'VerifiedNumber-Deleted',
wait_time=delete_interval,
max_fetch_attempts=5
)
print 'Deleted %s documents' % count
def handle(self, *args, **options):
self.delete_models(options['delete_interval'])
|
|
e0770c4a671c650f4569036350b4047fcf925506
|
AnalysisDemo.py
|
AnalysisDemo.py
|
import wx
#import matplotlib
class AnalysisDemo(wx.Frame):
def __init__(self, *args, **kw):
super(AnalysisDemo, self).__init__(*args, **kw)
self.initMain()
def initMain(self):
pn = wx.Panel(self)
self.showPackage = wx.RadioButton(pn, label='Organize in package')
self.showClass = wx.RadioButton(pn, label='Organize in class')
# self.canvas = matplotlib.figure.Figure()
self.canvas = wx.TextCtrl(pn, style=wx.TE_MULTILINE | wx.HSCROLL)
self.create = wx.Button(pn, label='Create Figure')
self.create.Bind(wx.EVT_BUTTON, self.createFigure)
optionBoxSizer = wx.BoxSizer(wx.VERTICAL)
optionBoxSizer.Add(self.showPackage, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.showClass, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.create, proportion=0, flag=wx.TOP, border=5)
mainBoxSizer = wx.BoxSizer()
mainBoxSizer.Add(self.canvas, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
mainBoxSizer.Add(optionBoxSizer, proportion=0, flag=wx.EXPAND | wx.TOP | wx.BOTTOM | wx.RIGHT, border=5)
pn.SetSizer(mainBoxSizer)
self.SetTitle('Analysis Demo')
self.SetSize((600,400))
self.Centre()
self.Show(True)
def createFigure(self, event):
pass
def main():
app = wx.App()
AnalysisDemo(None)
app.MainLoop()
if __name__ == '__main__':
main()
|
Add a demo app to illustrate the result
|
Add a demo app to illustrate the result
|
Python
|
mit
|
plumer/codana,plumer/codana
|
Add a demo app to illustrate the result
|
import wx
#import matplotlib
class AnalysisDemo(wx.Frame):
def __init__(self, *args, **kw):
super(AnalysisDemo, self).__init__(*args, **kw)
self.initMain()
def initMain(self):
pn = wx.Panel(self)
self.showPackage = wx.RadioButton(pn, label='Organize in package')
self.showClass = wx.RadioButton(pn, label='Organize in class')
# self.canvas = matplotlib.figure.Figure()
self.canvas = wx.TextCtrl(pn, style=wx.TE_MULTILINE | wx.HSCROLL)
self.create = wx.Button(pn, label='Create Figure')
self.create.Bind(wx.EVT_BUTTON, self.createFigure)
optionBoxSizer = wx.BoxSizer(wx.VERTICAL)
optionBoxSizer.Add(self.showPackage, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.showClass, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.create, proportion=0, flag=wx.TOP, border=5)
mainBoxSizer = wx.BoxSizer()
mainBoxSizer.Add(self.canvas, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
mainBoxSizer.Add(optionBoxSizer, proportion=0, flag=wx.EXPAND | wx.TOP | wx.BOTTOM | wx.RIGHT, border=5)
pn.SetSizer(mainBoxSizer)
self.SetTitle('Analysis Demo')
self.SetSize((600,400))
self.Centre()
self.Show(True)
def createFigure(self, event):
pass
def main():
app = wx.App()
AnalysisDemo(None)
app.MainLoop()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a demo app to illustrate the result<commit_after>
|
import wx
#import matplotlib
class AnalysisDemo(wx.Frame):
def __init__(self, *args, **kw):
super(AnalysisDemo, self).__init__(*args, **kw)
self.initMain()
def initMain(self):
pn = wx.Panel(self)
self.showPackage = wx.RadioButton(pn, label='Organize in package')
self.showClass = wx.RadioButton(pn, label='Organize in class')
# self.canvas = matplotlib.figure.Figure()
self.canvas = wx.TextCtrl(pn, style=wx.TE_MULTILINE | wx.HSCROLL)
self.create = wx.Button(pn, label='Create Figure')
self.create.Bind(wx.EVT_BUTTON, self.createFigure)
optionBoxSizer = wx.BoxSizer(wx.VERTICAL)
optionBoxSizer.Add(self.showPackage, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.showClass, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.create, proportion=0, flag=wx.TOP, border=5)
mainBoxSizer = wx.BoxSizer()
mainBoxSizer.Add(self.canvas, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
mainBoxSizer.Add(optionBoxSizer, proportion=0, flag=wx.EXPAND | wx.TOP | wx.BOTTOM | wx.RIGHT, border=5)
pn.SetSizer(mainBoxSizer)
self.SetTitle('Analysis Demo')
self.SetSize((600,400))
self.Centre()
self.Show(True)
def createFigure(self, event):
pass
def main():
app = wx.App()
AnalysisDemo(None)
app.MainLoop()
if __name__ == '__main__':
main()
|
Add a demo app to illustrate the resultimport wx
#import matplotlib
class AnalysisDemo(wx.Frame):
def __init__(self, *args, **kw):
super(AnalysisDemo, self).__init__(*args, **kw)
self.initMain()
def initMain(self):
pn = wx.Panel(self)
self.showPackage = wx.RadioButton(pn, label='Organize in package')
self.showClass = wx.RadioButton(pn, label='Organize in class')
# self.canvas = matplotlib.figure.Figure()
self.canvas = wx.TextCtrl(pn, style=wx.TE_MULTILINE | wx.HSCROLL)
self.create = wx.Button(pn, label='Create Figure')
self.create.Bind(wx.EVT_BUTTON, self.createFigure)
optionBoxSizer = wx.BoxSizer(wx.VERTICAL)
optionBoxSizer.Add(self.showPackage, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.showClass, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.create, proportion=0, flag=wx.TOP, border=5)
mainBoxSizer = wx.BoxSizer()
mainBoxSizer.Add(self.canvas, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
mainBoxSizer.Add(optionBoxSizer, proportion=0, flag=wx.EXPAND | wx.TOP | wx.BOTTOM | wx.RIGHT, border=5)
pn.SetSizer(mainBoxSizer)
self.SetTitle('Analysis Demo')
self.SetSize((600,400))
self.Centre()
self.Show(True)
def createFigure(self, event):
pass
def main():
app = wx.App()
AnalysisDemo(None)
app.MainLoop()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a demo app to illustrate the result<commit_after>import wx
#import matplotlib
class AnalysisDemo(wx.Frame):
def __init__(self, *args, **kw):
super(AnalysisDemo, self).__init__(*args, **kw)
self.initMain()
def initMain(self):
pn = wx.Panel(self)
self.showPackage = wx.RadioButton(pn, label='Organize in package')
self.showClass = wx.RadioButton(pn, label='Organize in class')
# self.canvas = matplotlib.figure.Figure()
self.canvas = wx.TextCtrl(pn, style=wx.TE_MULTILINE | wx.HSCROLL)
self.create = wx.Button(pn, label='Create Figure')
self.create.Bind(wx.EVT_BUTTON, self.createFigure)
optionBoxSizer = wx.BoxSizer(wx.VERTICAL)
optionBoxSizer.Add(self.showPackage, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.showClass, proportion=0, flag=wx.TOP, border=5)
optionBoxSizer.Add(self.create, proportion=0, flag=wx.TOP, border=5)
mainBoxSizer = wx.BoxSizer()
mainBoxSizer.Add(self.canvas, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)
mainBoxSizer.Add(optionBoxSizer, proportion=0, flag=wx.EXPAND | wx.TOP | wx.BOTTOM | wx.RIGHT, border=5)
pn.SetSizer(mainBoxSizer)
self.SetTitle('Analysis Demo')
self.SetSize((600,400))
self.Centre()
self.Show(True)
def createFigure(self, event):
pass
def main():
app = wx.App()
AnalysisDemo(None)
app.MainLoop()
if __name__ == '__main__':
main()
|
|
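The demo above keeps the matplotlib import commented out, uses a TextCtrl as a stand-in canvas, and leaves createFigure empty; the sketch below shows how a matplotlib canvas is typically embedded in wxPython, offered as a guess at the intended direction rather than the project's actual code.
import wx
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
class PlotPanel(wx.Panel):
    # Hypothetical panel that could replace the TextCtrl placeholder.
    def __init__(self, parent):
        super(PlotPanel, self).__init__(parent)
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        self.canvas = FigureCanvas(self, wx.ID_ANY, self.figure)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.canvas, proportion=1, flag=wx.EXPAND)
        self.SetSizer(sizer)
    def draw_demo(self):
        # Placeholder data; real package/class metrics would go here.
        self.axes.bar(['pkg.a', 'pkg.b'], [3, 5])
        self.canvas.draw()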
c3a83ed6158fcd9335f9253417ca4b24e9ab7934
|
tests/pytests/unit/modules/test_network.py
|
tests/pytests/unit/modules/test_network.py
|
import threading
import pytest
import salt.modules.network as networkmod
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
return {networkmod: {}}
@pytest.fixture
def socket_errors():
# Not sure what kind of errors could be returned by getfqdn or
# gethostbyaddr, but we have reports that thread leaks are happening
with patch("socket.getfqdn", autospec=True, side_effect=Exception), patch(
"socket.gethostbyaddr", autospec=True, side_effect=Exception
):
yield
@pytest.mark.xfail
def test_when_errors_happen_looking_up_fqdns_threads_should_not_leak(socket_errors):
before_threads = threading.active_count()
networkmod.fqdns()
after_threads = threading.active_count()
assert (
before_threads == after_threads
), "Difference in thread count means the thread pool is not correctly cleaning up."
|
Add test for fqdn thread leak
|
Add test for fqdn thread leak
I wasn't positive what other errors could be raised here, but we did
have a user reporting thread leaks, and the existing code *could* have
thread leaks if unhandled exceptions are raised on the ThreadPool.
This test *should* work - there might be some weird circumstances where
other threads are magically closed between here? But that seems less
likely.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add test for fqdn thread leak
I wasn't positive what other errors could be raised here, but we did
have a user reporting thread leaks, and the existing code *could* have
thread leaks if unhandled exceptions are raised on the ThreadPool.
This test *should* work - there might be some weird circumstances where
other threads are magically closed between here? But that seems less
likely.
|
import threading
import pytest
import salt.modules.network as networkmod
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
return {networkmod: {}}
@pytest.fixture
def socket_errors():
# Not sure what kind of errors could be returned by getfqdn or
# gethostbyaddr, but we have reports that thread leaks are happening
with patch("socket.getfqdn", autospec=True, side_effect=Exception), patch(
"socket.gethostbyaddr", autospec=True, side_effect=Exception
):
yield
@pytest.mark.xfail
def test_when_errors_happen_looking_up_fqdns_threads_should_not_leak(socket_errors):
before_threads = threading.active_count()
networkmod.fqdns()
after_threads = threading.active_count()
assert (
before_threads == after_threads
), "Difference in thread count means the thread pool is not correctly cleaning up."
|
<commit_before><commit_msg>Add test for fqdn thread leak
I wasn't positive what other errors could be raised here, but we did
have a user reporting thread leaks, and the existing code *could* have
thread leaks if unhandled exceptions are raised on the ThreadPool.
This test *should* work - there might be some weird circumstances where
other threads are magically closed between here? But that seems less
likely.<commit_after>
|
import threading
import pytest
import salt.modules.network as networkmod
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
return {networkmod: {}}
@pytest.fixture
def socket_errors():
# Not sure what kind of errors could be returned by getfqdn or
# gethostbyaddr, but we have reports that thread leaks are happening
with patch("socket.getfqdn", autospec=True, side_effect=Exception), patch(
"socket.gethostbyaddr", autospec=True, side_effect=Exception
):
yield
@pytest.mark.xfail
def test_when_errors_happen_looking_up_fqdns_threads_should_not_leak(socket_errors):
before_threads = threading.active_count()
networkmod.fqdns()
after_threads = threading.active_count()
assert (
before_threads == after_threads
), "Difference in thread count means the thread pool is not correctly cleaning up."
|
Add test for fqdn thread leak
I wasn't positive what other errors could be raised here, but we did
have a user reporting thread leaks, and the existing code *could* have
thread leaks if unhandled exceptions are raised on the ThreadPool.
This test *should* work - there might be some weird circumstances where
other threads are magically closed between here? But that seems less
likely.import threading
import pytest
import salt.modules.network as networkmod
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
return {networkmod: {}}
@pytest.fixture
def socket_errors():
# Not sure what kind of errors could be returned by getfqdn or
# gethostbyaddr, but we have reports that thread leaks are happening
with patch("socket.getfqdn", autospec=True, side_effect=Exception), patch(
"socket.gethostbyaddr", autospec=True, side_effect=Exception
):
yield
@pytest.mark.xfail
def test_when_errors_happen_looking_up_fqdns_threads_should_not_leak(socket_errors):
before_threads = threading.active_count()
networkmod.fqdns()
after_threads = threading.active_count()
assert (
before_threads == after_threads
), "Difference in thread count means the thread pool is not correctly cleaning up."
|
<commit_before><commit_msg>Add test for fqdn thread leak
I wasn't positive what other errors could be raised here, but we did
have a user reporting thread leaks, and the existing code *could* have
thread leaks if unhandled exceptions are raised on the ThreadPool.
This test *should* work - there might be some weird circumstances where
other threads are magically closed between here? But that seems less
likely.<commit_after>import threading
import pytest
import salt.modules.network as networkmod
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
return {networkmod: {}}
@pytest.fixture
def socket_errors():
# Not sure what kind of errors could be returned by getfqdn or
# gethostbyaddr, but we have reports that thread leaks are happening
with patch("socket.getfqdn", autospec=True, side_effect=Exception), patch(
"socket.gethostbyaddr", autospec=True, side_effect=Exception
):
yield
@pytest.mark.xfail
def test_when_errors_happen_looking_up_fqdns_threads_should_not_leak(socket_errors):
before_threads = threading.active_count()
networkmod.fqdns()
after_threads = threading.active_count()
assert (
before_threads == after_threads
), "Difference in thread count means the thread pool is not correctly cleaning up."
|
|
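The commit message above reasons that unhandled exceptions around a thread pool can leave worker threads behind; below is a generic standard-library sketch of the defensive pattern, not the salt fqdns() implementation.
import threading
from multiprocessing.pool import ThreadPool
def flaky_lookup(host):
    raise Exception("lookup failed for %s" % host)
def lookup_all(hosts):
    pool = ThreadPool(4)
    try:
        return list(pool.imap(flaky_lookup, hosts))
    except Exception:
        return []
    finally:
        # Without close()/join() the worker threads could outlive the call.
        pool.close()
        pool.join()
before = threading.active_count()
lookup_all(["a", "b"])
print(threading.active_count() == before)  # True: no leaked workers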
9925f3a677b7a855a2242176139bde4ab9d62ba0
|
labonneboite/scripts/nb_hirings/rome_nb_bonne_boite.py
|
labonneboite/scripts/nb_hirings/rome_nb_bonne_boite.py
|
import pandas as pd
if __name__ == '__main__':
df = pd.read_csv('prediction_per_company_per_rome2019-11-08.csv')
df_rome_nb_bonne_boite = df.groupby(['rome'])['is a bonne boite ?'].sum()
df_rome_nb_bonne_boite.to_csv('nb_bonne_boite_per_rome2019-11-089.csv')
|
Add script which will compute the number of 'bonnes boites' per rome
|
Add script which will compute the number of 'bonnes boites' per rome
|
Python
|
agpl-3.0
|
StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite
|
Add script which will compute the number of 'bonnes boites' per rome
|
import pandas as pd
if __name__ == '__main__':
df = pd.read_csv('prediction_per_company_per_rome2019-11-08.csv')
df_rome_nb_bonne_boite = df.groupby(['rome'])['is a bonne boite ?'].sum()
df_rome_nb_bonne_boite.to_csv('nb_bonne_boite_per_rome2019-11-089.csv')
|
<commit_before><commit_msg>Add script which will compute the number of 'bonnes boites' per rome<commit_after>
|
import pandas as pd
if __name__ == '__main__':
df = pd.read_csv('prediction_per_company_per_rome2019-11-08.csv')
df_rome_nb_bonne_boite = df.groupby(['rome'])['is a bonne boite ?'].sum()
df_rome_nb_bonne_boite.to_csv('nb_bonne_boite_per_rome2019-11-089.csv')
|
Add script which will compute the number of 'bonnes boites' per romeimport pandas as pd
if __name__ == '__main__':
df = pd.read_csv('prediction_per_company_per_rome2019-11-08.csv')
df_rome_nb_bonne_boite = df.groupby(['rome'])['is a bonne boite ?'].sum()
df_rome_nb_bonne_boite.to_csv('nb_bonne_boite_per_rome2019-11-089.csv')
|
<commit_before><commit_msg>Add script which will compute the number of 'bonnes boites' per rome<commit_after>import pandas as pd
if __name__ == '__main__':
df = pd.read_csv('prediction_per_company_per_rome2019-11-08.csv')
df_rome_nb_bonne_boite = df.groupby(['rome'])['is a bonne boite ?'].sum()
df_rome_nb_bonne_boite.to_csv('nb_bonne_boite_per_rome2019-11-089.csv')
|
|
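Because the script above sums a boolean flag per ROME code, the result is effectively a count; a tiny stand-alone example with made-up data (column names taken from the script) shows this.
import pandas as pd
df = pd.DataFrame({
    'rome': ['A1101', 'A1101', 'B1201'],
    'is a bonne boite ?': [True, False, True],
})
counts = df.groupby(['rome'])['is a bonne boite ?'].sum()
print(counts)  # A1101 -> 1, B1201 -> 1 (True values counted per rome)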
a6c8176e3f4602e846888293093fc64b7b20233b
|
corehq/motech/repeaters/management/commands/send_cancelled_records.py
|
corehq/motech/repeaters/management/commands/send_cancelled_records.py
|
import csv
import datetime
import re
import time
from django.core.management.base import BaseCommand
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
Send cancelled repeat records. You may optionally specify a regex to
filter records using --include or --exclude, and a sleep time with --sleep
"""
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('repeater_id')
parser.add_argument(
'--include',
dest='include_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to include it."),
)
parser.add_argument(
'--exclude',
dest='exclude_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to exclude it."),
)
parser.add_argument(
'--sleep',
dest='sleep_time',
help="Time in seconds to sleep between each request.",
)
def handle(self, domain, repeater_id, *args, **options):
sleep_time = options.get('sleep_time')
include_regex = options.get('include_regex')
exclude_regex = options.get('exclude_regex')
if include_regex and exclude_regex:
print "You may not specify both include and exclude"
def meets_filter(record):
if include_regex:
if not record.failure_reason:
return False
return bool(re.search(include_regex, record.failure_reason))
elif exclude_regex:
if not record.failure_reason:
return True
return not bool(re.search(exclude_regex, record.failure_reason))
return True # No filter applied
records = filter(
meets_filter,
iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
)
total_records = len(records)
print "Found {} matching records. Requeue them?".format(total_records)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = [('record_id', 'payload_id', 'state', 'failure_reason')]
for i, record in enumerate(records):
try:
record.fire(force_send=True)
except Exception as e:
print "{}/{}: {} {}".format(i, total_records, 'EXCEPTION', repr(e))
log.append((record._id, record.payload_id, record.state, repr(e)))
else:
print "{}/{}: {}".format(i, total_records, record.state)
log.append((record._id, record.payload_id, record.state, record.failure_reason))
if sleep_time:
time.sleep(float(sleep_time))
filename = "sent_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
|
Add cmd to force send of cancelled repeat records
|
Add cmd to force send of cancelled repeat records
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add cmd to force send of cancelled repeat records
|
import csv
import datetime
import re
import time
from django.core.management.base import BaseCommand
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
Send cancelled repeat records. You may optionally specify a regex to
filter records using --include or --exclude, and a sleep time with --sleep
"""
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('repeater_id')
parser.add_argument(
'--include',
dest='include_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to include it."),
)
parser.add_argument(
'--exclude',
dest='exclude_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to exclude it."),
)
parser.add_argument(
'--sleep',
dest='sleep_time',
help="Time in seconds to sleep between each request.",
)
def handle(self, domain, repeater_id, *args, **options):
sleep_time = options.get('sleep_time')
include_regex = options.get('include_regex')
exclude_regex = options.get('exclude_regex')
if include_regex and exclude_regex:
print "You may not specify both include and exclude"
def meets_filter(record):
if include_regex:
if not record.failure_reason:
return False
return bool(re.search(include_regex, record.failure_reason))
elif exclude_regex:
if not record.failure_reason:
return True
return not bool(re.search(exclude_regex, record.failure_reason))
return True # No filter applied
records = filter(
meets_filter,
iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
)
total_records = len(records)
print "Found {} matching records. Requeue them?".format(total_records)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = [('record_id', 'payload_id', 'state', 'failure_reason')]
for i, record in enumerate(records):
try:
record.fire(force_send=True)
except Exception as e:
print "{}/{}: {} {}".format(i, total_records, 'EXCEPTION', repr(e))
log.append((record._id, record.payload_id, record.state, repr(e)))
else:
print "{}/{}: {}".format(i, total_records, record.state)
log.append((record._id, record.payload_id, record.state, record.failure_reason))
if sleep_time:
time.sleep(float(sleep_time))
filename = "sent_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
|
<commit_before><commit_msg>Add cmd to force send of cancelled repeat records<commit_after>
|
import csv
import datetime
import re
import time
from django.core.management.base import BaseCommand
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
Send cancelled repeat records. You may optionally specify a regex to
    filter records using --include or --exclude, and a sleep time with --sleep
"""
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('repeater_id')
parser.add_argument(
'--include',
dest='include_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to include it."),
)
parser.add_argument(
'--exclude',
dest='exclude_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to exclude it."),
)
parser.add_argument(
'--sleep',
dest='sleep_time',
help="Time in seconds to sleep between each request.",
)
def handle(self, domain, repeater_id, *args, **options):
sleep_time = options.get('sleep_time')
include_regex = options.get('include_regex')
exclude_regex = options.get('exclude_regex')
if include_regex and exclude_regex:
print "You may not specify both include and exclude"
def meets_filter(record):
if include_regex:
if not record.failure_reason:
return False
return bool(re.search(include_regex, record.failure_reason))
elif exclude_regex:
if not record.failure_reason:
return True
return not bool(re.search(exclude_regex, record.failure_reason))
return True # No filter applied
records = filter(
meets_filter,
iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
)
total_records = len(records)
print "Found {} matching records. Requeue them?".format(total_records)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = [('record_id', 'payload_id', 'state', 'failure_reason')]
for i, record in enumerate(records):
try:
record.fire(force_send=True)
except Exception as e:
print "{}/{}: {} {}".format(i, total_records, 'EXCEPTION', repr(e))
log.append((record._id, record.payload_id, record.state, repr(e)))
else:
print "{}/{}: {}".format(i, total_records, record.state)
log.append((record._id, record.payload_id, record.state, record.failure_reason))
if sleep_time:
time.sleep(float(sleep_time))
filename = "sent_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
|
Add cmd to force send of cancelled repeat recordsimport csv
import datetime
import re
import time
from django.core.management.base import BaseCommand
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
Send cancelled repeat records. You may optionally specify a regex to
    filter records using --include or --exclude, and a sleep time with --sleep
"""
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('repeater_id')
parser.add_argument(
'--include',
dest='include_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to include it."),
)
parser.add_argument(
'--exclude',
dest='exclude_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to exclude it."),
)
parser.add_argument(
'--sleep',
dest='sleep_time',
help="Time in seconds to sleep between each request.",
)
def handle(self, domain, repeater_id, *args, **options):
sleep_time = options.get('sleep_time')
include_regex = options.get('include_regex')
exclude_regex = options.get('exclude_regex')
if include_regex and exclude_regex:
print "You may not specify both include and exclude"
def meets_filter(record):
if include_regex:
if not record.failure_reason:
return False
return bool(re.search(include_regex, record.failure_reason))
elif exclude_regex:
if not record.failure_reason:
return True
return not bool(re.search(exclude_regex, record.failure_reason))
return True # No filter applied
records = filter(
meets_filter,
iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
)
total_records = len(records)
print "Found {} matching records. Requeue them?".format(total_records)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = [('record_id', 'payload_id', 'state', 'failure_reason')]
for i, record in enumerate(records):
try:
record.fire(force_send=True)
except Exception as e:
print "{}/{}: {} {}".format(i, total_records, 'EXCEPTION', repr(e))
log.append((record._id, record.payload_id, record.state, repr(e)))
else:
print "{}/{}: {}".format(i, total_records, record.state)
log.append((record._id, record.payload_id, record.state, record.failure_reason))
if sleep_time:
time.sleep(float(sleep_time))
filename = "sent_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
|
<commit_before><commit_msg>Add cmd to force send of cancelled repeat records<commit_after>import csv
import datetime
import re
import time
from django.core.management.base import BaseCommand
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
Send cancelled repeat records. You may optionally specify a regex to
    filter records using --include or --exclude, and a sleep time with --sleep
"""
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('repeater_id')
parser.add_argument(
'--include',
dest='include_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to include it."),
)
parser.add_argument(
'--exclude',
dest='exclude_regex',
help=("Regex that will be applied to a record's 'failure_reason' to "
"determine whether to exclude it."),
)
parser.add_argument(
'--sleep',
dest='sleep_time',
help="Time in seconds to sleep between each request.",
)
def handle(self, domain, repeater_id, *args, **options):
sleep_time = options.get('sleep_time')
include_regex = options.get('include_regex')
exclude_regex = options.get('exclude_regex')
if include_regex and exclude_regex:
print "You may not specify both include and exclude"
def meets_filter(record):
if include_regex:
if not record.failure_reason:
return False
return bool(re.search(include_regex, record.failure_reason))
elif exclude_regex:
if not record.failure_reason:
return True
return not bool(re.search(exclude_regex, record.failure_reason))
return True # No filter applied
records = filter(
meets_filter,
iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
)
total_records = len(records)
print "Found {} matching records. Requeue them?".format(total_records)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = [('record_id', 'payload_id', 'state', 'failure_reason')]
for i, record in enumerate(records):
try:
record.fire(force_send=True)
except Exception as e:
print "{}/{}: {} {}".format(i, total_records, 'EXCEPTION', repr(e))
log.append((record._id, record.payload_id, record.state, repr(e)))
else:
print "{}/{}: {}".format(i, total_records, record.state)
log.append((record._id, record.payload_id, record.state, record.failure_reason))
if sleep_time:
time.sleep(float(sleep_time))
filename = "sent_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
|
|
bf66372b2b5b49ba4a93d8ac4f573ceb7857f5b8
|
python/helpers/pydev/pydevd_attach_to_process/linux/lldb_threads_settrace.py
|
python/helpers/pydev/pydevd_attach_to_process/linux/lldb_threads_settrace.py
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
try:
show_debug_info = 0
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for t in process:
# Get the first frame
frame = t.GetFrameAtIndex (t.GetNumFrames()-1)
if frame:
print('Will settrace in: %s' % (frame,))
frame.EvaluateExpression("expr (int) SetSysTraceFunc(%s, %s);" % (
show_debug_info, is_debug))
except:
import traceback;traceback.print_exc()
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
import lldb
try:
show_debug_info = 1
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for thread in process:
# Get the first frame
print('Thread %s, suspended %s\n'%(thread, thread.IsStopped()))
process.SetSelectedThread(thread)
if not thread.IsStopped():
error = process.Stop()
print(error)
if thread:
frame = thread.GetSelectedFrame()
if frame:
print('Will settrace in: %s' % (frame,))
res = frame.EvaluateExpression("(int) SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug), lldb.eDynamicCanRunTarget)
error = res.GetError()
if error:
print(error)
thread.Resume()
except:
import traceback;traceback.print_exc()
|
Fix attach in case of multiple threads.
|
Fix attach in case of multiple threads.
|
Python
|
apache-2.0
|
kool79/intellij-community,da1z/intellij-community,retomerz/intellij-community,blademainer/intellij-community,SerCeMan/intellij-community,retomerz/intellij-community,ol-loginov/intellij-community,holmes/intellij-community,kool79/intellij-community,lucafavatella/intellij-community,xfournet/intellij-community,adedayo/intellij-community,alphafoobar/intellij-community,fitermay/intellij-community,petteyg/intellij-community,adedayo/intellij-community,lucafavatella/intellij-community,lucafavatella/intellij-community,youdonghai/intellij-community,caot/intellij-community,asedunov/intellij-community,fnouama/intellij-community,youdonghai/intellij-community,TangHao1987/intellij-community,asedunov/intellij-community,clumsy/intellij-community,FHannes/intellij-community,da1z/intellij-community,samthor/intellij-community,pwoodworth/intellij-community,TangHao1987/intellij-community,idea4bsd/idea4bsd,retomerz/intellij-community,xfournet/intellij-community,clumsy/intellij-community,pwoodworth/intellij-community,youdonghai/intellij-community,alphafoobar/intellij-community,mglukhikh/intellij-community,ol-loginov/intellij-community,da1z/intellij-community,wreckJ/intellij-community,caot/intellij-community,allotria/intellij-community,clumsy/intellij-community,vladmm/intellij-community,adedayo/intellij-community,suncycheng/intellij-community,xfournet/intellij-community,orekyuu/intellij-community,kdwink/intellij-community,hurricup/intellij-community,tmpgit/intellij-community,youdonghai/intellij-community,blademainer/intellij-community,MER-GROUP/intellij-community,tmpgit/intellij-community,petteyg/intellij-community,fitermay/intellij-community,diorcety/intellij-community,MER-GROUP/intellij-community,apixandru/intellij-community,apixandru/intellij-community,semonte/intellij-community,mglukhikh/intellij-community,nicolargo/intellij-community,suncycheng/intellij-community,signed/intellij-community,supersven/intellij-community,blademainer/intellij-community,FHannes/intellij-community,amith01994/intellij-community,gnuhub/intellij-community,lucafavatella/intellij-community,semonte/intellij-community,ThiagoGarciaAlves/intellij-community,SerCeMan/intellij-community,blademainer/intellij-community,wreckJ/intellij-community,Lekanich/intellij-community,muntasirsyed/intellij-community,ivan-fedorov/intellij-community,xfournet/intellij-community,TangHao1987/intellij-community,ivan-fedorov/intellij-community,kool79/intellij-community,lucafavatella/intellij-community,clumsy/intellij-community,ThiagoGarciaAlves/intellij-community,suncycheng/intellij-community,TangHao1987/intellij-community,vvv1559/intellij-community,apixandru/intellij-community,ryano144/intellij-community,SerCeMan/intellij-community,dslomov/intellij-community,FHannes/intellij-community,Lekanich/intellij-community,tmpgit/intellij-community,alphafoobar/intellij-community,retomerz/intellij-community,petteyg/intellij-community,da1z/intellij-community,gnuhub/intellij-community,salguarnieri/intellij-community,mglukhikh/intellij-community,akosyakov/intellij-community,blademainer/intellij-community,vladmm/intellij-community,MichaelNedzelsky/intellij-community,allotria/intellij-community,supersven/intellij-community,amith01994/intellij-community,gnuhub/intellij-community,Lekanich/intellij-community,diorcety/intellij-community,dslomov/intellij-community,fnouama/intellij-community,fitermay/intellij-community,supersven/intellij-community,kool79/intellij-community,idea4bsd/idea4bsd,samthor/intellij-community,holmes/intellij-community,MichaelNedzelsky/intellij-community,signed/intel
lij-community,youdonghai/intellij-community,salguarnieri/intellij-community,da1z/intellij-community,robovm/robovm-studio,fengbaicanhe/intellij-community,tmpgit/intellij-community,Lekanich/intellij-community,holmes/intellij-community,diorcety/intellij-community,adedayo/intellij-community,alphafoobar/intellij-community,caot/intellij-community,semonte/intellij-community,hurricup/intellij-community,amith01994/intellij-community,clumsy/intellij-community,supersven/intellij-community,mglukhikh/intellij-community,kool79/intellij-community,ThiagoGarciaAlves/intellij-community,alphafoobar/intellij-community,ibinti/intellij-community,idea4bsd/idea4bsd,samthor/intellij-community,gnuhub/intellij-community,adedayo/intellij-community,amith01994/intellij-community,ftomassetti/intellij-community,nicolargo/intellij-community,robovm/robovm-studio,tmpgit/intellij-community,samthor/intellij-community,xfournet/intellij-community,ol-loginov/intellij-community,michaelgallacher/intellij-community,suncycheng/intellij-community,robovm/robovm-studio,Lekanich/intellij-community,dslomov/intellij-community,SerCeMan/intellij-community,lucafavatella/intellij-community,akosyakov/intellij-community,da1z/intellij-community,michaelgallacher/intellij-community,hurricup/intellij-community,MER-GROUP/intellij-community,ol-loginov/intellij-community,izonder/intellij-community,nicolargo/intellij-community,ThiagoGarciaAlves/intellij-community,suncycheng/intellij-community,holmes/intellij-community,robovm/robovm-studio,pwoodworth/intellij-community,asedunov/intellij-community,orekyuu/intellij-community,ol-loginov/intellij-community,kool79/intellij-community,fnouama/intellij-community,holmes/intellij-community,muntasirsyed/intellij-community,semonte/intellij-community,xfournet/intellij-community,retomerz/intellij-community,ibinti/intellij-community,samthor/intellij-community,izonder/intellij-community,xfournet/intellij-community,wreckJ/intellij-community,caot/intellij-community,muntasirsyed/intellij-community,Distrotech/intellij-community,ahb0327/intellij-community,wreckJ/intellij-community,slisson/intellij-community,FHannes/intellij-community,ryano144/intellij-community,xfournet/intellij-community,ThiagoGarciaAlves/intellij-community,ryano144/intellij-community,TangHao1987/intellij-community,fengbaicanhe/intellij-community,fitermay/intellij-community,fnouama/intellij-community,Distrotech/intellij-community,supersven/intellij-community,ivan-fedorov/intellij-community,ahb0327/intellij-community,dslomov/intellij-community,ftomassetti/intellij-community,fitermay/intellij-community,samthor/intellij-community,semonte/intellij-community,allotria/intellij-community,muntasirsyed/intellij-community,ryano144/intellij-community,samthor/intellij-community,ryano144/intellij-community,MER-GROUP/intellij-community,vvv1559/intellij-community,pwoodworth/intellij-community,Lekanich/intellij-community,supersven/intellij-community,MER-GROUP/intellij-community,da1z/intellij-community,da1z/intellij-community,akosyakov/intellij-community,vvv1559/intellij-community,ivan-fedorov/intellij-community,petteyg/intellij-community,TangHao1987/intellij-community,Lekanich/intellij-community,samthor/intellij-community,wreckJ/intellij-community,signed/intellij-community,akosyakov/intellij-community,mglukhikh/intellij-community,allotria/intellij-community,izonder/intellij-community,michaelgallacher/intellij-community,akosyakov/intellij-community,izonder/intellij-community,MichaelNedzelsky/intellij-community,Distrotech/intellij-community,ftomassetti/intellij-community,mun
tasirsyed/intellij-community,FHannes/intellij-community,ftomassetti/intellij-community,MER-GROUP/intellij-community,samthor/intellij-community,Distrotech/intellij-community,adedayo/intellij-community,michaelgallacher/intellij-community,mglukhikh/intellij-community,ThiagoGarciaAlves/intellij-community,allotria/intellij-community,allotria/intellij-community,nicolargo/intellij-community,vvv1559/intellij-community,retomerz/intellij-community,orekyuu/intellij-community,michaelgallacher/intellij-community,tmpgit/intellij-community,amith01994/intellij-community,wreckJ/intellij-community,diorcety/intellij-community,fitermay/intellij-community,youdonghai/intellij-community,Lekanich/intellij-community,asedunov/intellij-community,ThiagoGarciaAlves/intellij-community,petteyg/intellij-community,fnouama/intellij-community,ThiagoGarciaAlves/intellij-community,amith01994/intellij-community,nicolargo/intellij-community,dslomov/intellij-community,ol-loginov/intellij-community,SerCeMan/intellij-community,hurricup/intellij-community,suncycheng/intellij-community,allotria/intellij-community,slisson/intellij-community,mglukhikh/intellij-community,michaelgallacher/intellij-community,ivan-fedorov/intellij-community,adedayo/intellij-community,pwoodworth/intellij-community,asedunov/intellij-community,ivan-fedorov/intellij-community,kool79/intellij-community,FHannes/intellij-community,MichaelNedzelsky/intellij-community,fitermay/intellij-community,ryano144/intellij-community,retomerz/intellij-community,amith01994/intellij-community,suncycheng/intellij-community,akosyakov/intellij-community,tmpgit/intellij-community,robovm/robovm-studio,blademainer/intellij-community,jagguli/intellij-community,signed/intellij-community,MichaelNedzelsky/intellij-community,apixandru/intellij-community,allotria/intellij-community,clumsy/intellij-community,blademainer/intellij-community,akosyakov/intellij-community,youdonghai/intellij-community,jagguli/intellij-community,ftomassetti/intellij-community,petteyg/intellij-community,apixandru/intellij-community,idea4bsd/idea4bsd,ivan-fedorov/intellij-community,robovm/robovm-studio,FHannes/intellij-community,wreckJ/intellij-community,vladmm/intellij-community,mglukhikh/intellij-community,TangHao1987/intellij-community,hurricup/intellij-community,alphafoobar/intellij-community,diorcety/intellij-community,dslomov/intellij-community,hurricup/intellij-community,mglukhikh/intellij-community,nicolargo/intellij-community,vvv1559/intellij-community,ibinti/intellij-community,holmes/intellij-community,salguarnieri/intellij-community,MichaelNedzelsky/intellij-community,youdonghai/intellij-community,MER-GROUP/intellij-community,FHannes/intellij-community,MER-GROUP/intellij-community,ryano144/intellij-community,SerCeMan/intellij-community,allotria/intellij-community,alphafoobar/intellij-community,pwoodworth/intellij-community,michaelgallacher/intellij-community,izonder/intellij-community,petteyg/intellij-community,ahb0327/intellij-community,jagguli/intellij-community,da1z/intellij-community,hurricup/intellij-community,vvv1559/intellij-community,diorcety/intellij-community,kool79/intellij-community,wreckJ/intellij-community,dslomov/intellij-community,dslomov/intellij-community,Distrotech/intellij-community,Distrotech/intellij-community,vladmm/intellij-community,michaelgallacher/intellij-community,slisson/intellij-community,idea4bsd/idea4bsd,signed/intellij-community,TangHao1987/intellij-community,pwoodworth/intellij-community,idea4bsd/idea4bsd,clumsy/intellij-community,retomerz/intellij-community,xfournet/in
tellij-community,alphafoobar/intellij-community,robovm/robovm-studio,vladmm/intellij-community,ftomassetti/intellij-community,slisson/intellij-community,ivan-fedorov/intellij-community,blademainer/intellij-community,SerCeMan/intellij-community,ahb0327/intellij-community,jagguli/intellij-community,robovm/robovm-studio,MER-GROUP/intellij-community,orekyuu/intellij-community,ryano144/intellij-community,suncycheng/intellij-community,TangHao1987/intellij-community,retomerz/intellij-community,ryano144/intellij-community,blademainer/intellij-community,ibinti/intellij-community,Distrotech/intellij-community,clumsy/intellij-community,vladmm/intellij-community,gnuhub/intellij-community,asedunov/intellij-community,fnouama/intellij-community,ibinti/intellij-community,ibinti/intellij-community,fengbaicanhe/intellij-community,TangHao1987/intellij-community,fengbaicanhe/intellij-community,amith01994/intellij-community,kdwink/intellij-community,ol-loginov/intellij-community,samthor/intellij-community,pwoodworth/intellij-community,retomerz/intellij-community,ThiagoGarciaAlves/intellij-community,fitermay/intellij-community,amith01994/intellij-community,amith01994/intellij-community,da1z/intellij-community,fengbaicanhe/intellij-community,slisson/intellij-community,kool79/intellij-community,jagguli/intellij-community,supersven/intellij-community,vvv1559/intellij-community,semonte/intellij-community,mglukhikh/intellij-community,signed/intellij-community,salguarnieri/intellij-community,ahb0327/intellij-community,fnouama/intellij-community,allotria/intellij-community,mglukhikh/intellij-community,Lekanich/intellij-community,apixandru/intellij-community,kdwink/intellij-community,suncycheng/intellij-community,gnuhub/intellij-community,fnouama/intellij-community,ol-loginov/intellij-community,signed/intellij-community,diorcety/intellij-community,Lekanich/intellij-community,ahb0327/intellij-community,akosyakov/intellij-community,asedunov/intellij-community,fnouama/intellij-community,lucafavatella/intellij-community,semonte/intellij-community,gnuhub/intellij-community,izonder/intellij-community,SerCeMan/intellij-community,tmpgit/intellij-community,kdwink/intellij-community,orekyuu/intellij-community,slisson/intellij-community,ftomassetti/intellij-community,salguarnieri/intellij-community,pwoodworth/intellij-community,fengbaicanhe/intellij-community,ibinti/intellij-community,diorcety/intellij-community,youdonghai/intellij-community,hurricup/intellij-community,samthor/intellij-community,hurricup/intellij-community,apixandru/intellij-community,kdwink/intellij-community,FHannes/intellij-community,akosyakov/intellij-community,SerCeMan/intellij-community,kdwink/intellij-community,asedunov/intellij-community,holmes/intellij-community,kdwink/intellij-community,Distrotech/intellij-community,mglukhikh/intellij-community,xfournet/intellij-community,MichaelNedzelsky/intellij-community,robovm/robovm-studio,youdonghai/intellij-community,ol-loginov/intellij-community,semonte/intellij-community,diorcety/intellij-community,nicolargo/intellij-community,idea4bsd/idea4bsd,michaelgallacher/intellij-community,jagguli/intellij-community,orekyuu/intellij-community,slisson/intellij-community,ahb0327/intellij-community,alphafoobar/intellij-community,blademainer/intellij-community,orekyuu/intellij-community,Lekanich/intellij-community,orekyuu/intellij-community,retomerz/intellij-community,mglukhikh/intellij-community,jagguli/intellij-community,signed/intellij-community,pwoodworth/intellij-community,semonte/intellij-community,salguarnieri/intellij
-community,adedayo/intellij-community,hurricup/intellij-community,kdwink/intellij-community,youdonghai/intellij-community,holmes/intellij-community,allotria/intellij-community,fnouama/intellij-community,MichaelNedzelsky/intellij-community,vvv1559/intellij-community,supersven/intellij-community,xfournet/intellij-community,ibinti/intellij-community,vladmm/intellij-community,idea4bsd/idea4bsd,robovm/robovm-studio,idea4bsd/idea4bsd,MER-GROUP/intellij-community,vvv1559/intellij-community,suncycheng/intellij-community,supersven/intellij-community,kool79/intellij-community,pwoodworth/intellij-community,ahb0327/intellij-community,slisson/intellij-community,slisson/intellij-community,MichaelNedzelsky/intellij-community,ahb0327/intellij-community,semonte/intellij-community,caot/intellij-community,apixandru/intellij-community,signed/intellij-community,Distrotech/intellij-community,salguarnieri/intellij-community,FHannes/intellij-community,nicolargo/intellij-community,vladmm/intellij-community,vladmm/intellij-community,fengbaicanhe/intellij-community,vladmm/intellij-community,retomerz/intellij-community,fitermay/intellij-community,muntasirsyed/intellij-community,xfournet/intellij-community,petteyg/intellij-community,dslomov/intellij-community,hurricup/intellij-community,fengbaicanhe/intellij-community,vladmm/intellij-community,kool79/intellij-community,suncycheng/intellij-community,fengbaicanhe/intellij-community,vvv1559/intellij-community,apixandru/intellij-community,nicolargo/intellij-community,robovm/robovm-studio,amith01994/intellij-community,caot/intellij-community,fitermay/intellij-community,tmpgit/intellij-community,FHannes/intellij-community,akosyakov/intellij-community,blademainer/intellij-community,signed/intellij-community,muntasirsyed/intellij-community,xfournet/intellij-community,semonte/intellij-community,apixandru/intellij-community,asedunov/intellij-community,ol-loginov/intellij-community,signed/intellij-community,hurricup/intellij-community,ftomassetti/intellij-community,muntasirsyed/intellij-community,salguarnieri/intellij-community,jagguli/intellij-community,blademainer/intellij-community,supersven/intellij-community,izonder/intellij-community,SerCeMan/intellij-community,Lekanich/intellij-community,petteyg/intellij-community,ryano144/intellij-community,alphafoobar/intellij-community,caot/intellij-community,orekyuu/intellij-community,MichaelNedzelsky/intellij-community,Distrotech/intellij-community,salguarnieri/intellij-community,jagguli/intellij-community,ivan-fedorov/intellij-community,ivan-fedorov/intellij-community,jagguli/intellij-community,MichaelNedzelsky/intellij-community,muntasirsyed/intellij-community,ThiagoGarciaAlves/intellij-community,da1z/intellij-community,diorcety/intellij-community,lucafavatella/intellij-community,ahb0327/intellij-community,fnouama/intellij-community,michaelgallacher/intellij-community,alphafoobar/intellij-community,adedayo/intellij-community,diorcety/intellij-community,petteyg/intellij-community,lucafavatella/intellij-community,idea4bsd/idea4bsd,dslomov/intellij-community,akosyakov/intellij-community,ftomassetti/intellij-community,wreckJ/intellij-community,gnuhub/intellij-community,orekyuu/intellij-community,gnuhub/intellij-community,dslomov/intellij-community,tmpgit/intellij-community,pwoodworth/intellij-community,idea4bsd/idea4bsd,nicolargo/intellij-community,clumsy/intellij-community,ThiagoGarciaAlves/intellij-community,ftomassetti/intellij-community,supersven/intellij-community,izonder/intellij-community,adedayo/intellij-community,Distrotech/int
ellij-community,wreckJ/intellij-community,michaelgallacher/intellij-community,apixandru/intellij-community,izonder/intellij-community,idea4bsd/idea4bsd,da1z/intellij-community,izonder/intellij-community,allotria/intellij-community,fitermay/intellij-community,michaelgallacher/intellij-community,FHannes/intellij-community,ol-loginov/intellij-community,kdwink/intellij-community,holmes/intellij-community,lucafavatella/intellij-community,caot/intellij-community,holmes/intellij-community,izonder/intellij-community,nicolargo/intellij-community,semonte/intellij-community,dslomov/intellij-community,ivan-fedorov/intellij-community,ol-loginov/intellij-community,wreckJ/intellij-community,supersven/intellij-community,alphafoobar/intellij-community,caot/intellij-community,semonte/intellij-community,muntasirsyed/intellij-community,gnuhub/intellij-community,tmpgit/intellij-community,adedayo/intellij-community,SerCeMan/intellij-community,ftomassetti/intellij-community,hurricup/intellij-community,caot/intellij-community,vvv1559/intellij-community,FHannes/intellij-community,orekyuu/intellij-community,ibinti/intellij-community,slisson/intellij-community,lucafavatella/intellij-community,TangHao1987/intellij-community,MichaelNedzelsky/intellij-community,MER-GROUP/intellij-community,jagguli/intellij-community,slisson/intellij-community,akosyakov/intellij-community,wreckJ/intellij-community,fitermay/intellij-community,ryano144/intellij-community,jagguli/intellij-community,diorcety/intellij-community,gnuhub/intellij-community,muntasirsyed/intellij-community,apixandru/intellij-community,ibinti/intellij-community,fitermay/intellij-community,amith01994/intellij-community,retomerz/intellij-community,allotria/intellij-community,asedunov/intellij-community,holmes/intellij-community,vladmm/intellij-community,muntasirsyed/intellij-community,asedunov/intellij-community,vvv1559/intellij-community,nicolargo/intellij-community,lucafavatella/intellij-community,kool79/intellij-community,salguarnieri/intellij-community,apixandru/intellij-community,kdwink/intellij-community,fnouama/intellij-community,lucafavatella/intellij-community,idea4bsd/idea4bsd,kdwink/intellij-community,ibinti/intellij-community,signed/intellij-community,orekyuu/intellij-community,holmes/intellij-community,MER-GROUP/intellij-community,ftomassetti/intellij-community,clumsy/intellij-community,asedunov/intellij-community,clumsy/intellij-community,ryano144/intellij-community,ahb0327/intellij-community,tmpgit/intellij-community,ivan-fedorov/intellij-community,samthor/intellij-community,asedunov/intellij-community,youdonghai/intellij-community,apixandru/intellij-community,ibinti/intellij-community,da1z/intellij-community,Distrotech/intellij-community,petteyg/intellij-community,TangHao1987/intellij-community,youdonghai/intellij-community,slisson/intellij-community,ahb0327/intellij-community,suncycheng/intellij-community,gnuhub/intellij-community,signed/intellij-community,ThiagoGarciaAlves/intellij-community,salguarnieri/intellij-community,salguarnieri/intellij-community,vvv1559/intellij-community,fengbaicanhe/intellij-community,fengbaicanhe/intellij-community,robovm/robovm-studio,fengbaicanhe/intellij-community,clumsy/intellij-community,ibinti/intellij-community,caot/intellij-community,kdwink/intellij-community,adedayo/intellij-community,caot/intellij-community,petteyg/intellij-community,izonder/intellij-community,SerCeMan/intellij-community
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
try:
show_debug_info = 0
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for t in process:
# Get the first frame
frame = t.GetFrameAtIndex (t.GetNumFrames()-1)
if frame:
print('Will settrace in: %s' % (frame,))
frame.EvaluateExpression("expr (int) SetSysTraceFunc(%s, %s);" % (
show_debug_info, is_debug))
except:
import traceback;traceback.print_exc()
Fix attach in case of multiple threads.
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
import lldb
try:
show_debug_info = 1
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for thread in process:
# Get the first frame
print('Thread %s, suspended %s\n'%(thread, thread.IsStopped()))
process.SetSelectedThread(thread)
if not thread.IsStopped():
error = process.Stop()
print(error)
if thread:
frame = thread.GetSelectedFrame()
if frame:
print('Will settrace in: %s' % (frame,))
res = frame.EvaluateExpression("(int) SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug), lldb.eDynamicCanRunTarget)
error = res.GetError()
if error:
print(error)
thread.Resume()
except:
import traceback;traceback.print_exc()
|
<commit_before># This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
try:
show_debug_info = 0
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for t in process:
# Get the first frame
frame = t.GetFrameAtIndex (t.GetNumFrames()-1)
if frame:
print('Will settrace in: %s' % (frame,))
frame.EvaluateExpression("expr (int) SetSysTraceFunc(%s, %s);" % (
show_debug_info, is_debug))
except:
import traceback;traceback.print_exc()
<commit_msg>Fix attach in case of multiple threads.<commit_after>
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
import lldb
try:
show_debug_info = 1
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for thread in process:
# Get the first frame
print('Thread %s, suspended %s\n'%(thread, thread.IsStopped()))
process.SetSelectedThread(thread)
if not thread.IsStopped():
error = process.Stop()
print(error)
if thread:
frame = thread.GetSelectedFrame()
if frame:
print('Will settrace in: %s' % (frame,))
res = frame.EvaluateExpression("(int) SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug), lldb.eDynamicCanRunTarget)
error = res.GetError()
if error:
print(error)
thread.Resume()
except:
import traceback;traceback.print_exc()
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
try:
show_debug_info = 0
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for t in process:
# Get the first frame
frame = t.GetFrameAtIndex (t.GetNumFrames()-1)
if frame:
print('Will settrace in: %s' % (frame,))
frame.EvaluateExpression("expr (int) SetSysTraceFunc(%s, %s);" % (
show_debug_info, is_debug))
except:
import traceback;traceback.print_exc()
Fix attach in case of multiple threads.# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
import lldb
try:
show_debug_info = 1
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for thread in process:
# Get the first frame
print('Thread %s, suspended %s\n'%(thread, thread.IsStopped()))
process.SetSelectedThread(thread)
if not thread.IsStopped():
error = process.Stop()
print(error)
if thread:
frame = thread.GetSelectedFrame()
if frame:
print('Will settrace in: %s' % (frame,))
res = frame.EvaluateExpression("(int) SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug), lldb.eDynamicCanRunTarget)
error = res.GetError()
if error:
print(error)
thread.Resume()
except:
import traceback;traceback.print_exc()
|
<commit_before># This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
try:
show_debug_info = 0
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for t in process:
# Get the first frame
frame = t.GetFrameAtIndex (t.GetNumFrames()-1)
if frame:
print('Will settrace in: %s' % (frame,))
frame.EvaluateExpression("expr (int) SetSysTraceFunc(%s, %s);" % (
show_debug_info, is_debug))
except:
import traceback;traceback.print_exc()
<commit_msg>Fix attach in case of multiple threads.<commit_after># This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
print('Startup LLDB in Python!')
import lldb
try:
show_debug_info = 1
is_debug = 0
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for thread in process:
# Get the first frame
print('Thread %s, suspended %s\n'%(thread, thread.IsStopped()))
process.SetSelectedThread(thread)
if not thread.IsStopped():
error = process.Stop()
print(error)
if thread:
frame = thread.GetSelectedFrame()
if frame:
print('Will settrace in: %s' % (frame,))
res = frame.EvaluateExpression("(int) SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug), lldb.eDynamicCanRunTarget)
error = res.GetError()
if error:
print(error)
thread.Resume()
except:
import traceback;traceback.print_exc()
|
efb6072e097a816bb46fdd83541c763e222816c9
|
clic/dickens/test_concordance.py
|
clic/dickens/test_concordance.py
|
import unittest
from concordance_new import Concordancer_New
class TestConcordancerNewChapterIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is a very naive test to run whilst reviewing the create
        concordance code. Its goal is simply to evaluate whether that
        function is still up and running.
For that purpose it uses a hard-coded example
"""
concordance = Concordancer_New()
fog = concordance.create_concordance(terms="fog",
idxName="chapter-idx",
Materials=["dickens"],
selectWords="whole")
assert len(fog) == 95 # 94 hits + one variable total_count in the list
class TestConcordancerNewQuoteIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is another naive test focusing on searching in quotes
It also uses a hard-coded example
"""
concordance = Concordancer_New()
maybe = concordance.create_concordance(terms="maybe",
idxName="quote-idx",
Materials=["dickens"],
selectWords="whole")
assert len(maybe) == 46 # 45 hits + one variable total_count in the list
if __name__ == '__main__':
unittest.main()
|
Add initial tests for the concordance view
|
Add initial tests for the concordance view
These tests can be run simply using `python test_concordance.py`.
The test suite is currently very limited. It is a tool for refactoring
the current code.
|
Python
|
mit
|
CentreForResearchInAppliedLinguistics/clic,CentreForResearchInAppliedLinguistics/clic,CentreForCorpusResearch/clic,CentreForCorpusResearch/clic,CentreForResearchInAppliedLinguistics/clic,CentreForCorpusResearch/clic
|
Add initial tests for the concordance view
These tests can be run simply using `python test_concordance.py`.
The test suite is currently very limited. It is a tool for refactoring
the current code.
|
import unittest
from concordance_new import Concordancer_New
class TestConcordancerNewChapterIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is a very naive test to run whilst reviewing the create
        concordance code. Its goal is simply to evaluate whether that
        function is still up and running.
For that purpose it uses a hard-coded example
"""
concordance = Concordancer_New()
fog = concordance.create_concordance(terms="fog",
idxName="chapter-idx",
Materials=["dickens"],
selectWords="whole")
assert len(fog) == 95 # 94 hits + one variable total_count in the list
class TestConcordancerNewQuoteIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is another naive test focusing on searching in quotes
It also uses a hard-coded example
"""
concordance = Concordancer_New()
maybe = concordance.create_concordance(terms="maybe",
idxName="quote-idx",
Materials=["dickens"],
selectWords="whole")
assert len(maybe) == 46 # 45 hits + one variable total_count in the list
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add initial tests for the concordance view
These tests can be run simply using `python test_concordance.py`.
The test suite is currently very limited. It is a tool for refactoring
the current code.<commit_after>
|
import unittest
from concordance_new import Concordancer_New
class TestConcordancerNewChapterIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is a very naive test to run whilst reviewing the create
        concordance code. Its goal is simply to evaluate whether that
        function is still up and running.
For that purpose it uses a hard-coded example
"""
concordance = Concordancer_New()
fog = concordance.create_concordance(terms="fog",
idxName="chapter-idx",
Materials=["dickens"],
selectWords="whole")
assert len(fog) == 95 # 94 hits + one variable total_count in the list
class TestConcordancerNewQuoteIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is another naive test focusing on searching in quotes
It also uses a hard-coded example
"""
concordance = Concordancer_New()
maybe = concordance.create_concordance(terms="maybe",
idxName="quote-idx",
Materials=["dickens"],
selectWords="whole")
assert len(maybe) == 46 # 45 hits + one variable total_count in the list
if __name__ == '__main__':
unittest.main()
|
Add initial tests for the concordance view
These tests can be run simply using `python test_concordance.py`.
The test suite is currently very limited. It is a tool for refactoring
the current code.import unittest
from concordance_new import Concordancer_New
class TestConcordancerNewChapterIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is a very naive test to run whilst reviewing the create
        concordance code. Its goal is simply to evaluate whether that
        function is still up and running.
For that purpose it uses a hard-coded example
"""
concordance = Concordancer_New()
fog = concordance.create_concordance(terms="fog",
idxName="chapter-idx",
Materials=["dickens"],
selectWords="whole")
assert len(fog) == 95 # 94 hits + one variable total_count in the list
class TestConcordancerNewQuoteIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is another naive test focusing on searching in quotes
It also uses a hard-coded example
"""
concordance = Concordancer_New()
maybe = concordance.create_concordance(terms="maybe",
idxName="quote-idx",
Materials=["dickens"],
selectWords="whole")
assert len(maybe) == 46 # 45 hits + one variable total_count in the list
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add initial tests for the concordance view
These tests can be run simply using `python test_concordance.py`.
The test suite is currently very limited. It is a tool for refactoring
the current code.<commit_after>import unittest
from concordance_new import Concordancer_New
class TestConcordancerNewChapterIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is a very naive test to run whilst reviewing the create
        concordance code. Its goal is simply to evaluate whether that
        function is still up and running.
For that purpose it uses a hard-coded example
"""
concordance = Concordancer_New()
fog = concordance.create_concordance(terms="fog",
idxName="chapter-idx",
Materials=["dickens"],
selectWords="whole")
assert len(fog) == 95 # 94 hits + one variable total_count in the list
class TestConcordancerNewQuoteIndex(unittest.TestCase):
def test_create_concordance(self):
"""
This is another naive test focusing on searching in quotes
It also uses a hard-coded example
"""
concordance = Concordancer_New()
maybe = concordance.create_concordance(terms="maybe",
idxName="quote-idx",
Materials=["dickens"],
selectWords="whole")
assert len(maybe) == 46 # 45 hits + one variable total_count in the list
if __name__ == '__main__':
unittest.main()
|
|
c131e6108a72c57af4d3bdbe67d182d6c0ddb1eb
|
geotrek/feedback/migrations/0008_auto_20200326_1252.py
|
geotrek/feedback/migrations/0008_auto_20200326_1252.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-26 12:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('feedback', '0007_auto_20200324_1412'),
]
operations = [
migrations.AlterField(
model_name='report',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportActivity', verbose_name='Activity'),
),
migrations.AlterField(
model_name='report',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='report',
name='problem_magnitude',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportProblemMagnitude', verbose_name='Problem magnitude'),
),
]
|
Add migration to modify on_delete
|
Add migration to modify on_delete
|
Python
|
bsd-2-clause
|
GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin
|
Add migration to modify on_delete
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-26 12:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('feedback', '0007_auto_20200324_1412'),
]
operations = [
migrations.AlterField(
model_name='report',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportActivity', verbose_name='Activity'),
),
migrations.AlterField(
model_name='report',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='report',
name='problem_magnitude',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportProblemMagnitude', verbose_name='Problem magnitude'),
),
]
|
<commit_before><commit_msg>Add migration to modify on_delete<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-26 12:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('feedback', '0007_auto_20200324_1412'),
]
operations = [
migrations.AlterField(
model_name='report',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportActivity', verbose_name='Activity'),
),
migrations.AlterField(
model_name='report',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='report',
name='problem_magnitude',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportProblemMagnitude', verbose_name='Problem magnitude'),
),
]
|
Add migration to modify on_delete# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-26 12:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('feedback', '0007_auto_20200324_1412'),
]
operations = [
migrations.AlterField(
model_name='report',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportActivity', verbose_name='Activity'),
),
migrations.AlterField(
model_name='report',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='report',
name='problem_magnitude',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportProblemMagnitude', verbose_name='Problem magnitude'),
),
]
|
<commit_before><commit_msg>Add migration to modify on_delete<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-26 12:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('feedback', '0007_auto_20200324_1412'),
]
operations = [
migrations.AlterField(
model_name='report',
name='activity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportActivity', verbose_name='Activity'),
),
migrations.AlterField(
model_name='report',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='report',
name='problem_magnitude',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportProblemMagnitude', verbose_name='Problem magnitude'),
),
]
|
|
b42c13de01a49e7fe3fb7caa22089ea1cd87f7bf
|
ironicclient/tests/functional/osc/v1/test_baremetal_node_power_states.py
|
ironicclient/tests/functional/osc/v1/test_baremetal_node_power_states.py
|
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional.osc.v1 import base
class PowerStateTests(base.TestCase):
"""Functional tests for baremetal node power state commands."""
def setUp(self):
super(PowerStateTests, self).setUp()
self.node = self.node_create()
def test_off_reboot_on(self):
"""Reboot node from Power OFF state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State OFF as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power off {0}'
.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power off', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
def test_on_reboot_on(self):
"""Reboot node from Power ON state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State ON as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power on {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
|
Add sanity tests for baremetal power state commands
|
Add sanity tests for baremetal power state commands
Add sanity test cases for commands:
openstack baremetal node reboot,
openstack baremetal node power on,
openstack baremetal node power off.
Change-Id: I24bc2dcd1ef27d918b072ea686d53c0c8aa8b7ab
Partial-Bug: #1642597
|
Python
|
apache-2.0
|
openstack/python-ironicclient,openstack/python-ironicclient
|
Add sanity tests for baremetal power state commands
Add sanity test cases for commands:
openstack baremetal node reboot,
openstack baremetal node power on,
openstack baremetal node power off.
Change-Id: I24bc2dcd1ef27d918b072ea686d53c0c8aa8b7ab
Partial-Bug: #1642597
|
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional.osc.v1 import base
class PowerStateTests(base.TestCase):
"""Functional tests for baremetal node power state commands."""
def setUp(self):
super(PowerStateTests, self).setUp()
self.node = self.node_create()
def test_off_reboot_on(self):
"""Reboot node from Power OFF state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State OFF as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power off {0}'
.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power off', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
def test_on_reboot_on(self):
"""Reboot node from Power ON state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State ON as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power on {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
|
<commit_before><commit_msg>Add sanity tests for baremetal power state commands
Add sanity test cases for commands:
openstack baremetal node reboot,
openstack baremetal node power on,
openstack baremetal node power off.
Change-Id: I24bc2dcd1ef27d918b072ea686d53c0c8aa8b7ab
Partial-Bug: #1642597<commit_after>
|
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional.osc.v1 import base
class PowerStateTests(base.TestCase):
"""Functional tests for baremetal node power state commands."""
def setUp(self):
super(PowerStateTests, self).setUp()
self.node = self.node_create()
def test_off_reboot_on(self):
"""Reboot node from Power OFF state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State OFF as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power off {0}'
.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power off', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
def test_on_reboot_on(self):
"""Reboot node from Power ON state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State ON as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power on {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
|
Add sanity tests for baremetal power state commands
Add sanity testcases for commands:
openstack baremetal node reboot,
openstack baremetal node power on,
openstack baremetal node power off.
Change-Id: I24bc2dcd1ef27d918b072ea686d53c0c8aa8b7ab
Partial-Bug: #1642597
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional.osc.v1 import base
class PowerStateTests(base.TestCase):
"""Functional tests for baremetal node power state commands."""
def setUp(self):
super(PowerStateTests, self).setUp()
self.node = self.node_create()
def test_off_reboot_on(self):
"""Reboot node from Power OFF state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State OFF as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power off {0}'
.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power off', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
def test_on_reboot_on(self):
"""Reboot node from Power ON state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State ON as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power on {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
|
<commit_before><commit_msg>Add sanity tests for baremetal power state commands
Add sanity testcases for commands:
openstack baremetal node reboot,
openstack baremetal node power on,
openstack baremetal node power off.
Change-Id: I24bc2dcd1ef27d918b072ea686d53c0c8aa8b7ab
Partial-Bug: #1642597<commit_after># Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional.osc.v1 import base
class PowerStateTests(base.TestCase):
"""Functional tests for baremetal node power state commands."""
def setUp(self):
super(PowerStateTests, self).setUp()
self.node = self.node_create()
def test_off_reboot_on(self):
"""Reboot node from Power OFF state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State OFF as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power off {0}'
.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power off', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
def test_on_reboot_on(self):
"""Reboot node from Power ON state.
Test steps:
1) Create baremetal node in setUp.
2) Set node Power State ON as precondition.
3) Call reboot command for baremetal node.
4) Check node Power State ON in node properties.
"""
self.openstack('baremetal node power on {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))
show_prop = self.node_show(self.node['uuid'], ['power_state'])
self.assertEqual('power on', show_prop['power_state'])
|
|
5dde9f6aca671440253729c29530e93974921ea0
|
moderation_queue/migrations/0007_auto_20150303_1420.py
|
moderation_queue/migrations/0007_auto_20150303_1420.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0006_auto_20150303_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='why_allowed',
field=models.CharField(default=b'other', max_length=64, choices=[(b'public-domain', b'This photograph is free of any copyright restrictions'), (b'copyright-assigned', b'I own copyright of this photo and I assign the copyright to Democracy Club Limited in return for it being displayed on YourNextMP'), (b'profile-photo', b"This is the candidate's public profile photo from social media (e.g. Twitter, Facebook) or their official campaign page"), (b'other', b'Other')]),
preserve_default=True,
),
]
|
Add a migration to add the 'Other' field to QueuedImage.why_allowed
|
Add a migration to add the 'Other' field to QueuedImage.why_allowed
|
Python
|
agpl-3.0
|
mysociety/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextmp-popit,openstate/yournextrepresentative,openstate/yournextrepresentative,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,DemocracyClub/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Add a migration to add the 'Other' field to QueuedImage.why_allowed
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0006_auto_20150303_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='why_allowed',
field=models.CharField(default=b'other', max_length=64, choices=[(b'public-domain', b'This photograph is free of any copyright restrictions'), (b'copyright-assigned', b'I own copyright of this photo and I assign the copyright to Democracy Club Limited in return for it being displayed on YourNextMP'), (b'profile-photo', b"This is the candidate's public profile photo from social media (e.g. Twitter, Facebook) or their official campaign page"), (b'other', b'Other')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration to add the 'Other' field to QueuedImage.why_allowed<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0006_auto_20150303_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='why_allowed',
field=models.CharField(default=b'other', max_length=64, choices=[(b'public-domain', b'This photograph is free of any copyright restrictions'), (b'copyright-assigned', b'I own copyright of this photo and I assign the copyright to Democracy Club Limited in return for it being displayed on YourNextMP'), (b'profile-photo', b"This is the candidate's public profile photo from social media (e.g. Twitter, Facebook) or their official campaign page"), (b'other', b'Other')]),
preserve_default=True,
),
]
|
Add a migration to add the 'Other' field to QueuedImage.why_allowed
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0006_auto_20150303_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='why_allowed',
field=models.CharField(default=b'other', max_length=64, choices=[(b'public-domain', b'This photograph is free of any copyright restrictions'), (b'copyright-assigned', b'I own copyright of this photo and I assign the copyright to Democracy Club Limited in return for it being displayed on YourNextMP'), (b'profile-photo', b"This is the candidate's public profile photo from social media (e.g. Twitter, Facebook) or their official campaign page"), (b'other', b'Other')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration to add the 'Other' field to QueuedImage.why_allowed<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('moderation_queue', '0006_auto_20150303_0838'),
]
operations = [
migrations.AlterField(
model_name='queuedimage',
name='why_allowed',
field=models.CharField(default=b'other', max_length=64, choices=[(b'public-domain', b'This photograph is free of any copyright restrictions'), (b'copyright-assigned', b'I own copyright of this photo and I assign the copyright to Democracy Club Limited in return for it being displayed on YourNextMP'), (b'profile-photo', b"This is the candidate's public profile photo from social media (e.g. Twitter, Facebook) or their official campaign page"), (b'other', b'Other')]),
preserve_default=True,
),
]
|
|
f627f04ebe0186b19d58619cab8b7098f5ca2e4c
|
plugins/openstack/nova/nova-server-state-metrics.py
|
plugins/openstack/nova/nova-server-state-metrics.py
|
#!/usr/bin/env python
from argparse import ArgumentParser
import socket
import time
from novaclient.v3 import Client
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
print '{}\t{}\t{}'.format(name, value, int(time.time()))
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--user', default='admin')
parser.add_argument('-p', '--password', default='admin')
parser.add_argument('-t', '--tenant', default='admin')
parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')
parser.add_argument('-S', '--service-type', default='compute')
parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
args = parser.parse_args()
client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)
servers = client.servers.list()
# http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
states = {
'ACTIVE': 0,
'BUILD': 0,
'DELETED': 0,
'ERROR': 0,
'HARD_REBOOT': 0,
'PASSWORD': 0,
'REBOOT': 0,
'REBUILD': 0,
'RESCUE': 0,
'RESIZE': 0,
'REVERT_RESIZE': 0,
'SHUTOFF': 0,
'SUSPENDED': 0,
'UNKNOWN': 0,
'VERIFY_RESIZE': 0,
}
for server in servers:
if server.status not in states:
states[server.status] = 0
states[server.status] += 1
for state, count in states.iteritems():
output_metric('{}.{}'.format(args.scheme, state.lower()), count)
if __name__ == '__main__':
main()
|
Add plugin for Nova server state metrics
|
Add plugin for Nova server state metrics
|
Python
|
mit
|
giorgiosironi/sensu-community-plugins,Squarespace/sensu-community-plugins,zerOnepal/sensu-community-plugins,lfdesousa/sensu-community-plugins,maoe/sensu-community-plugins,aryeguy/sensu-community-plugins,royalj/sensu-community-plugins,lenfree/sensu-community-plugins,new23d/sensu-community-plugins,tuenti/sensu-community-plugins,FlorinAndrei/sensu-community-plugins,Seraf/sensu-community-plugins,jennytoo/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,loveholidays/sensu-plugins,Squarespace/sensu-community-plugins,cmattoon/sensu-community-plugins,julienba/sensu-community-plugins,gferguson-gd/sensu-community-plugins,Seraf/sensu-community-plugins,Seraf/sensu-community-plugins,cotocisternas/sensu-community-plugins,klangrud/sensu-community-plugins,reevoo/sensu-community-plugins,lfdesousa/sensu-community-plugins,pkaeding/sensu-community-plugins,tuenti/sensu-community-plugins,Squarespace/sensu-community-plugins,thehyve/sensu-community-plugins,khuongdp/sensu-community-plugins,himyouten/sensu-community-plugins,warmfusion/sensu-community-plugins,PerfectMemory/sensu-community-plugins,petere/sensu-community-plugins,aryeguy/sensu-community-plugins,shnmorimoto/sensu-community-plugins,nilroy/sensu-community-plugins,alertlogic/sensu-community-plugins,alexhjlee/sensu-community-plugins,JonathanHuot/sensu-community-plugins,ideais/sensu-community-plugins,aryeguy/sensu-community-plugins,pkaeding/sensu-community-plugins,thehyve/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,lenfree/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,mecavity/sensu-community-plugins,nilroy/sensu-community-plugins,himyouten/sensu-community-plugins,plasticbrain/sensu-community-plugins,jbehrends/sensu-community-plugins,new23d/sensu-community-plugins,nagas/sensu-community-plugins,royalj/sensu-community-plugins,JonathanHuot/sensu-community-plugins,tuenti/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,alexhjlee/sensu-community-plugins,klangrud/sensu-community-plugins,madAndroid/sensu-community-plugins,justanshulsharma/sensu-community-plugins,cotocisternas/sensu-community-plugins,nagas/sensu-community-plugins,cotocisternas/sensu-community-plugins,klangrud/sensu-community-plugins,PerfectMemory/sensu-community-plugins,jennytoo/sensu-community-plugins,cmattoon/sensu-community-plugins,PerfectMemory/sensu-community-plugins,cread/sensu-community-plugins,leedm777/sensu-community-plugins,leedm777/sensu-community-plugins,warmfusion/sensu-community-plugins,gferguson-gd/sensu-community-plugins,pkaeding/sensu-community-plugins,estately/sensu-community-plugins,shnmorimoto/sensu-community-plugins,himyouten/sensu-community-plugins,gferguson-gd/sensu-community-plugins,estately/sensu-community-plugins,circleback/sensu-community-plugins,ideais/sensu-community-plugins,julienba/sensu-community-plugins,petere/sensu-community-plugins,maoe/sensu-community-plugins,Squarespace/sensu-community-plugins,alertlogic/sensu-community-plugins,cmattoon/sensu-community-plugins,emillion/sensu-community-plugins,madAndroid/sensu-community-plugins,luisdalves/sensu-community-plugins,intoximeters/sensu-community-plugins,cread/sensu-community-plugins,intoximeters/sensu-community-plugins,luisdalves/sensu-community-plugins,nilroy/sensu-community-plugins,loveholidays/sensu-plugins,FlorinAndrei/sensu-community-plugins,reevoo/sensu-community-plugins,julienba/sensu-community-plugins,royalj/sensu-community-plugins,nagas/sensu-community-plugins,warmfusion/sensu-community-plugins,FlorinAndrei/sensu-community-plugins,jbehrends/sen
su-community-plugins,madAndroid/sensu-community-plugins,maoe/sensu-community-plugins,ideais/sensu-community-plugins,loveholidays/sensu-plugins,maoe/sensu-community-plugins,lfdesousa/sensu-community-plugins,tuenti/sensu-community-plugins,royalj/sensu-community-plugins,JonathanHuot/sensu-community-plugins,pkaeding/sensu-community-plugins,nagas/sensu-community-plugins,justanshulsharma/sensu-community-plugins,emillion/sensu-community-plugins,tuenti/sensu-community-plugins,reevoo/sensu-community-plugins,leedm777/sensu-community-plugins,thehyve/sensu-community-plugins,khuongdp/sensu-community-plugins,alertlogic/sensu-community-plugins,jennytoo/sensu-community-plugins,new23d/sensu-community-plugins,klangrud/sensu-community-plugins,intoximeters/sensu-community-plugins,ideais/sensu-community-plugins,new23d/sensu-community-plugins,petere/sensu-community-plugins,emillion/sensu-community-plugins,luisdalves/sensu-community-plugins,plasticbrain/sensu-community-plugins,estately/sensu-community-plugins,leedm777/sensu-community-plugins,nilroy/sensu-community-plugins,zerOnepal/sensu-community-plugins,julienba/sensu-community-plugins,JonathanHuot/sensu-community-plugins,plasticbrain/sensu-community-plugins,Seraf/sensu-community-plugins,justanshulsharma/sensu-community-plugins,giorgiosironi/sensu-community-plugins,cmattoon/sensu-community-plugins,luisdalves/sensu-community-plugins,gferguson-gd/sensu-community-plugins,lfdesousa/sensu-community-plugins,cread/sensu-community-plugins,cread/sensu-community-plugins,giorgiosironi/sensu-community-plugins,estately/sensu-community-plugins,khuongdp/sensu-community-plugins,jbehrends/sensu-community-plugins,PerfectMemory/sensu-community-plugins,mecavity/sensu-community-plugins,emillion/sensu-community-plugins,intoximeters/sensu-community-plugins,circleback/sensu-community-plugins,alertlogic/sensu-community-plugins,thehyve/sensu-community-plugins,zerOnepal/sensu-community-plugins,loveholidays/sensu-plugins,zerOnepal/sensu-community-plugins,cotocisternas/sensu-community-plugins,shnmorimoto/sensu-community-plugins,circleback/sensu-community-plugins,shnmorimoto/sensu-community-plugins,plasticbrain/sensu-community-plugins,madAndroid/sensu-community-plugins,warmfusion/sensu-community-plugins,alexhjlee/sensu-community-plugins,reevoo/sensu-community-plugins,circleback/sensu-community-plugins,himyouten/sensu-community-plugins,aryeguy/sensu-community-plugins,giorgiosironi/sensu-community-plugins,lenfree/sensu-community-plugins,jennytoo/sensu-community-plugins,jbehrends/sensu-community-plugins,alexhjlee/sensu-community-plugins,mecavity/sensu-community-plugins,lenfree/sensu-community-plugins,mecavity/sensu-community-plugins,petere/sensu-community-plugins,justanshulsharma/sensu-community-plugins,khuongdp/sensu-community-plugins
|
Add plugin for Nova server state metrics
|
#!/usr/bin/env python
from argparse import ArgumentParser
import socket
import time
from novaclient.v3 import Client
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
print '{}\t{}\t{}'.format(name, value, int(time.time()))
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--user', default='admin')
parser.add_argument('-p', '--password', default='admin')
parser.add_argument('-t', '--tenant', default='admin')
parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')
parser.add_argument('-S', '--service-type', default='compute')
parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
args = parser.parse_args()
client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)
servers = client.servers.list()
# http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
states = {
'ACTIVE': 0,
'BUILD': 0,
'DELETED': 0,
'ERROR': 0,
'HARD_REBOOT': 0,
'PASSWORD': 0,
'REBOOT': 0,
'REBUILD': 0,
'RESCUE': 0,
'RESIZE': 0,
'REVERT_RESIZE': 0,
'SHUTOFF': 0,
'SUSPENDED': 0,
'UNKNOWN': 0,
'VERIFY_RESIZE': 0,
}
for server in servers:
if server.status not in states:
states[server.status] = 0
states[server.status] += 1
for state, count in states.iteritems():
output_metric('{}.{}'.format(args.scheme, state.lower()), count)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add plugin for Nova server state metrics<commit_after>
|
#!/usr/bin/env python
from argparse import ArgumentParser
import socket
import time
from novaclient.v3 import Client
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
print '{}\t{}\t{}'.format(name, value, int(time.time()))
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--user', default='admin')
parser.add_argument('-p', '--password', default='admin')
parser.add_argument('-t', '--tenant', default='admin')
parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')
parser.add_argument('-S', '--service-type', default='compute')
parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
args = parser.parse_args()
client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)
servers = client.servers.list()
# http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
states = {
'ACTIVE': 0,
'BUILD': 0,
'DELETED': 0,
'ERROR': 0,
'HARD_REBOOT': 0,
'PASSWORD': 0,
'REBOOT': 0,
'REBUILD': 0,
'RESCUE': 0,
'RESIZE': 0,
'REVERT_RESIZE': 0,
'SHUTOFF': 0,
'SUSPENDED': 0,
'UNKNOWN': 0,
'VERIFY_RESIZE': 0,
}
for server in servers:
if server.status not in states:
states[server.status] = 0
states[server.status] += 1
for state, count in states.iteritems():
output_metric('{}.{}'.format(args.scheme, state.lower()), count)
if __name__ == '__main__':
main()
|
Add plugin for Nova server state metrics
#!/usr/bin/env python
from argparse import ArgumentParser
import socket
import time
from novaclient.v3 import Client
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
print '{}\t{}\t{}'.format(name, value, int(time.time()))
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--user', default='admin')
parser.add_argument('-p', '--password', default='admin')
parser.add_argument('-t', '--tenant', default='admin')
parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')
parser.add_argument('-S', '--service-type', default='compute')
parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
args = parser.parse_args()
client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)
servers = client.servers.list()
# http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
states = {
'ACTIVE': 0,
'BUILD': 0,
'DELETED': 0,
'ERROR': 0,
'HARD_REBOOT': 0,
'PASSWORD': 0,
'REBOOT': 0,
'REBUILD': 0,
'RESCUE': 0,
'RESIZE': 0,
'REVERT_RESIZE': 0,
'SHUTOFF': 0,
'SUSPENDED': 0,
'UNKNOWN': 0,
'VERIFY_RESIZE': 0,
}
for server in servers:
if server.status not in states:
states[server.status] = 0
states[server.status] += 1
for state, count in states.iteritems():
output_metric('{}.{}'.format(args.scheme, state.lower()), count)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add plugin for Nova server state metrics<commit_after>#!/usr/bin/env python
from argparse import ArgumentParser
import socket
import time
from novaclient.v3 import Client
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
print '{}\t{}\t{}'.format(name, value, int(time.time()))
def main():
parser = ArgumentParser()
parser.add_argument('-u', '--user', default='admin')
parser.add_argument('-p', '--password', default='admin')
parser.add_argument('-t', '--tenant', default='admin')
parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')
parser.add_argument('-S', '--service-type', default='compute')
parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
args = parser.parse_args()
client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)
servers = client.servers.list()
# http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
states = {
'ACTIVE': 0,
'BUILD': 0,
'DELETED': 0,
'ERROR': 0,
'HARD_REBOOT': 0,
'PASSWORD': 0,
'REBOOT': 0,
'REBUILD': 0,
'RESCUE': 0,
'RESIZE': 0,
'REVERT_RESIZE': 0,
'SHUTOFF': 0,
'SUSPENDED': 0,
'UNKNOWN': 0,
'VERIFY_RESIZE': 0,
}
for server in servers:
if server.status not in states:
states[server.status] = 0
states[server.status] += 1
for state, count in states.iteritems():
output_metric('{}.{}'.format(args.scheme, state.lower()), count)
if __name__ == '__main__':
main()
|
|
9aeb9d35cd49ccd7ab1ede87d70666e34b80320c
|
readthedocs/rtd_tests/tests/test_core_management.py
|
readthedocs/rtd_tests/tests/test_core_management.py
|
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
Add tests for run docker mgmt command
|
Add tests for run docker mgmt command
|
Python
|
mit
|
Tazer/readthedocs.org,rtfd/readthedocs.org,soulshake/readthedocs.org,davidfischer/readthedocs.org,takluyver/readthedocs.org,sid-kap/readthedocs.org,sunnyzwh/readthedocs.org,pombredanne/readthedocs.org,laplaceliu/readthedocs.org,takluyver/readthedocs.org,espdev/readthedocs.org,sils1297/readthedocs.org,wijerasa/readthedocs.org,KamranMackey/readthedocs.org,sils1297/readthedocs.org,agjohnson/readthedocs.org,fujita-shintaro/readthedocs.org,asampat3090/readthedocs.org,sunnyzwh/readthedocs.org,VishvajitP/readthedocs.org,istresearch/readthedocs.org,soulshake/readthedocs.org,fujita-shintaro/readthedocs.org,mrshoki/readthedocs.org,atsuyim/readthedocs.org,attakei/readthedocs-oauth,istresearch/readthedocs.org,nikolas/readthedocs.org,CedarLogic/readthedocs.org,Tazer/readthedocs.org,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,attakei/readthedocs-oauth,nikolas/readthedocs.org,stevepiercy/readthedocs.org,techtonik/readthedocs.org,kenshinthebattosai/readthedocs.org,mrshoki/readthedocs.org,LukasBoersma/readthedocs.org,singingwolfboy/readthedocs.org,kenshinthebattosai/readthedocs.org,d0ugal/readthedocs.org,agjohnson/readthedocs.org,dirn/readthedocs.org,clarkperkins/readthedocs.org,CedarLogic/readthedocs.org,wijerasa/readthedocs.org,clarkperkins/readthedocs.org,titiushko/readthedocs.org,GovReady/readthedocs.org,soulshake/readthedocs.org,asampat3090/readthedocs.org,wanghaven/readthedocs.org,techtonik/readthedocs.org,kenwang76/readthedocs.org,tddv/readthedocs.org,hach-que/readthedocs.org,Carreau/readthedocs.org,jerel/readthedocs.org,espdev/readthedocs.org,LukasBoersma/readthedocs.org,cgourlay/readthedocs.org,Carreau/readthedocs.org,safwanrahman/readthedocs.org,VishvajitP/readthedocs.org,raven47git/readthedocs.org,laplaceliu/readthedocs.org,sid-kap/readthedocs.org,titiushko/readthedocs.org,rtfd/readthedocs.org,CedarLogic/readthedocs.org,fujita-shintaro/readthedocs.org,kdkeyser/readthedocs.org,tddv/readthedocs.org,gjtorikian/readthedocs.org,raven47git/readthedocs.org,d0ugal/readthedocs.org,fujita-shintaro/readthedocs.org,kdkeyser/readthedocs.org,stevepiercy/readthedocs.org,Tazer/readthedocs.org,GovReady/readthedocs.org,espdev/readthedocs.org,takluyver/readthedocs.org,Carreau/readthedocs.org,d0ugal/readthedocs.org,KamranMackey/readthedocs.org,jerel/readthedocs.org,KamranMackey/readthedocs.org,mhils/readthedocs.org,GovReady/readthedocs.org,stevepiercy/readthedocs.org,cgourlay/readthedocs.org,jerel/readthedocs.org,singingwolfboy/readthedocs.org,SteveViss/readthedocs.org,mrshoki/readthedocs.org,Carreau/readthedocs.org,CedarLogic/readthedocs.org,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,royalwang/readthedocs.org,SteveViss/readthedocs.org,kdkeyser/readthedocs.org,michaelmcandrew/readthedocs.org,kenwang76/readthedocs.org,d0ugal/readthedocs.org,emawind84/readthedocs.org,wijerasa/readthedocs.org,royalwang/readthedocs.org,sunnyzwh/readthedocs.org,sid-kap/readthedocs.org,davidfischer/readthedocs.org,emawind84/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,kenshinthebattosai/readthedocs.org,hach-que/readthedocs.org,emawind84/readthedocs.org,atsuyim/readthedocs.org,LukasBoersma/readthedocs.org,wijerasa/readthedocs.org,wanghaven/readthedocs.org,royalwang/readthedocs.org,tddv/readthedocs.org,singingwolfboy/readthedocs.org,jerel/readthedocs.org,espdev/readthedocs.org,gjtorikian/readthedocs.org,mhils/readthedocs.org,SteveViss/readthedocs.org,safwanrahman/readthedocs.org,emawind84/readthedocs.org,mhils/readthedocs.org,sid-kap/readthedocs.org,VishvajitP/readthedocs.org,SteveViss/readthe
docs.org,kenshinthebattosai/readthedocs.org,agjohnson/readthedocs.org,raven47git/readthedocs.org,mrshoki/readthedocs.org,kenwang76/readthedocs.org,singingwolfboy/readthedocs.org,hach-que/readthedocs.org,clarkperkins/readthedocs.org,agjohnson/readthedocs.org,attakei/readthedocs-oauth,dirn/readthedocs.org,wanghaven/readthedocs.org,VishvajitP/readthedocs.org,pombredanne/readthedocs.org,laplaceliu/readthedocs.org,gjtorikian/readthedocs.org,royalwang/readthedocs.org,clarkperkins/readthedocs.org,istresearch/readthedocs.org,titiushko/readthedocs.org,mhils/readthedocs.org,sils1297/readthedocs.org,takluyver/readthedocs.org,sunnyzwh/readthedocs.org,dirn/readthedocs.org,techtonik/readthedocs.org,kenwang76/readthedocs.org,michaelmcandrew/readthedocs.org,asampat3090/readthedocs.org,soulshake/readthedocs.org,techtonik/readthedocs.org,hach-que/readthedocs.org,atsuyim/readthedocs.org,titiushko/readthedocs.org,nikolas/readthedocs.org,attakei/readthedocs-oauth,asampat3090/readthedocs.org,GovReady/readthedocs.org,stevepiercy/readthedocs.org,gjtorikian/readthedocs.org,kdkeyser/readthedocs.org,cgourlay/readthedocs.org,nikolas/readthedocs.org,safwanrahman/readthedocs.org,KamranMackey/readthedocs.org,atsuyim/readthedocs.org,cgourlay/readthedocs.org,istresearch/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,sils1297/readthedocs.org,pombredanne/readthedocs.org,laplaceliu/readthedocs.org,wanghaven/readthedocs.org,Tazer/readthedocs.org,LukasBoersma/readthedocs.org,raven47git/readthedocs.org,dirn/readthedocs.org
|
Add tests for run docker mgmt command
|
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
<commit_before><commit_msg>Add tests for run docker mgmt command<commit_after>
|
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
Add tests for run docker mgmt command
from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
<commit_before><commit_msg>Add tests for run docker mgmt command<commit_after>from StringIO import StringIO
from django.test import TestCase
from mock import patch
from core.management.commands import run_docker
from projects.models import Project
from builds.models import Version
class TestRunDocker(TestCase):
'''Test run_docker command with good input and output'''
fixtures = ['test_data']
def setUp(self):
self.project = Project.objects.get(slug='pip')
self.version = Version(slug='foo', verbose_name='foobar')
self.project.versions.add(self.version)
def _get_input(self, files=None):
return ('{"project": {"id": 6, "name": "Pip", "slug": "pip"},'
'"id": 71, "type": "tag", "identifier": "437fb316fbbdba1acdd22e07dbe7c4809ffd97e6",'
'"verbose_name": "stable", "slug": "stable"}')
def _docker_build(data):
if isinstance(data, Version):
return {'html': (0, 'DOCKER PASS', '')}
else:
return {'html': (1, '', 'DOCKER FAIL')}
def test_stdin(self):
'''Test docker build command'''
def _input(_, files=None):
return '{"test": "foobar"}'
with patch.object(run_docker.Command, '_get_input', _input):
cmd = run_docker.Command()
assert cmd._get_input() == '{"test": "foobar"}'
@patch.object(run_docker.Command, '_get_input', _get_input)
@patch('projects.tasks.docker_build', _docker_build)
@patch('sys.stdout', new_callable=StringIO)
def test_good_input(self, mock_output):
'''Test docker build command'''
cmd = run_docker.Command()
self.assertEqual(cmd._get_input(), self._get_input())
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
'{"html": [0, "DOCKER PASS", ""]}\n'
)
@patch('projects.tasks.docker_build', _docker_build)
def test_bad_input(self):
'''Test docker build command'''
with patch.object(run_docker.Command, '_get_input') as mock_input:
with patch('sys.stdout', new_callable=StringIO) as mock_output:
mock_input.return_value = 'BAD JSON'
cmd = run_docker.Command()
cmd.handle()
self.assertEqual(
mock_output.getvalue(),
('{"doc_builder": '
'[-1, "", "ValueError: No JSON object could be decoded"]}'
'\n')
)
|
|
cbaf4e86c4409735a8f011f5a8f801a34278c21c
|
src/ggrc/migrations/versions/20170112013716_421b2179c02e_update_fulltext_index.py
|
src/ggrc/migrations/versions/20170112013716_421b2179c02e_update_fulltext_index.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Update fulltext index.
Create Date: 2017-01-12 01:37:16.801973
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '421b2179c02e'
down_revision = '177a979b230a'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=64),
type_=sa.String(length=250),
nullable=False
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=250),
type_=sa.String(length=64),
nullable=False
)
|
Increase text index property size
|
Increase text index property size
The index property size should support strings of the same length as
custom attribute definition titles.
|
Python
|
apache-2.0
|
VinnieJohns/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,plamut/ggrc-core
|
Increase text index property size
The index property size should support strings of the same length as
custom attribute definition titles.
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Update fulltext index.
Create Date: 2017-01-12 01:37:16.801973
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '421b2179c02e'
down_revision = '177a979b230a'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=64),
type_=sa.String(length=250),
nullable=False
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=250),
type_=sa.String(length=64),
nullable=False
)
|
<commit_before><commit_msg>Increase text index property size
The index property size should support strings of the same length as
custom attribute definition titles.<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Update fulltext index.
Create Date: 2017-01-12 01:37:16.801973
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '421b2179c02e'
down_revision = '177a979b230a'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=64),
type_=sa.String(length=250),
nullable=False
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=250),
type_=sa.String(length=64),
nullable=False
)
|
Increase text index property size
The index property size should support strings of the same length as
custom attribute definition titles.
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Update fulltext index.
Create Date: 2017-01-12 01:37:16.801973
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '421b2179c02e'
down_revision = '177a979b230a'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=64),
type_=sa.String(length=250),
nullable=False
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=250),
type_=sa.String(length=64),
nullable=False
)
|
<commit_before><commit_msg>Increase text index property size
The index property size should support strings of the same length as
custom attribute definition titles.<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Update fulltext index.
Create Date: 2017-01-12 01:37:16.801973
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '421b2179c02e'
down_revision = '177a979b230a'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=64),
type_=sa.String(length=250),
nullable=False
)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column(
"fulltext_record_properties",
"property",
existing_type=sa.String(length=250),
type_=sa.String(length=64),
nullable=False
)
|
|
241ac6d844febf829f6442897ebf547a291e5db4
|
summarization/summarization.py
|
summarization/summarization.py
|
import indicoio
import csv
indicoio.config.api_key = 'YOUR_API_KEY'
def clean_article(article):
return article.replace("\n", " ").decode('cp1252').encode('utf-8', 'replace')
def clean_articles(article_list):
# data processing: clean up new lines and convert strings into utf-8 so the indico API can read the data
# put all articles into a list for easy batch processing
cleaned_articles = [clean_article(text) for row in article_list for text in row]
print "Articles cleaned and ready for batch processing!"
return cleaned_articles
def get_summary(cleaned_articles):
# get article summaries
summary = [indicoio.summarization(item) for item in cleaned_articles]
# clean up result for easy readability
print "Here are the summaries for all %d articles:" % (len(summary))
for line in summary:
print "\n" + " ".join(line)
if __name__ == "__main__":
with open('articles.csv', 'rU') as f:
article_list = csv.reader(f)
cleaned_articles = clean_articles(article_list)
get_summary(cleaned_articles)
|
Add Summarization API code for blog post
|
Add Summarization API code for blog post
|
Python
|
mit
|
IndicoDataSolutions/SuperCell
|
Add Summarization API code for blog post
|
import indicoio
import csv
indicoio.config.api_key = 'YOUR_API_KEY'
def clean_article(article):
return article.replace("\n", " ").decode('cp1252').encode('utf-8', 'replace')
def clean_articles(article_list):
# data processing: clean up new lines and convert strings into utf-8 so the indico API can read the data
# put all articles into a list for easy batch processing
cleaned_articles = [clean_article(text) for row in article_list for text in row]
print "Articles cleaned and ready for batch processing!"
return cleaned_articles
def get_summary(cleaned_articles):
# get article summaries
summary = [indicoio.summarization(item) for item in cleaned_articles]
# clean up result for easy readability
print "Here are the summaries for all %d articles:" % (len(summary))
for line in summary:
print "\n" + " ".join(line)
if __name__ == "__main__":
with open('articles.csv', 'rU') as f:
article_list = csv.reader(f)
cleaned_articles = clean_articles(article_list)
get_summary(cleaned_articles)
|
<commit_before><commit_msg>Add Summarization API code for blog post<commit_after>
|
import indicoio
import csv
indicoio.config.api_key = 'YOUR_API_KEY'
def clean_article(article):
return article.replace("\n", " ").decode('cp1252').encode('utf-8', 'replace')
def clean_articles(article_list):
# data processing: clean up new lines and convert strings into utf-8 so the indico API can read the data
# put all articles into a list for easy batch processing
cleaned_articles = [clean_article(text) for row in article_list for text in row]
print "Articles cleaned and ready for batch processing!"
return cleaned_articles
def get_summary(cleaned_articles):
# get article summaries
summary = [indicoio.summarization(item) for item in cleaned_articles]
# clean up result for easy readability
print "Here are the summaries for all %d articles:" % (len(summary))
for line in summary:
print "\n" + " ".join(line)
if __name__ == "__main__":
with open('articles.csv', 'rU') as f:
article_list = csv.reader(f)
cleaned_articles = clean_articles(article_list)
get_summary(cleaned_articles)
|
Add Summarization API code for blog post
import indicoio
import csv
indicoio.config.api_key = 'YOUR_API_KEY'
def clean_article(article):
return article.replace("\n", " ").decode('cp1252').encode('utf-8', 'replace')
def clean_articles(article_list):
# data processing: clean up new lines and convert strings into utf-8 so the indico API can read the data
# put all articles into a list for easy batch processing
cleaned_articles = [clean_article(text) for row in article_list for text in row]
print "Articles cleaned and ready for batch processing!"
return cleaned_articles
def get_summary(cleaned_articles):
# get article summaries
summary = [indicoio.summarization(item) for item in cleaned_articles]
# clean up result for easy readability
print "Here are the summaries for all %d articles:" % (len(summary))
for line in summary:
print "\n" + " ".join(line)
if __name__ == "__main__":
with open('articles.csv', 'rU') as f:
article_list = csv.reader(f)
cleaned_articles = clean_articles(article_list)
get_summary(cleaned_articles)
|
<commit_before><commit_msg>Add Summarization API code for blog post<commit_after>import indicoio
import csv
indicoio.config.api_key = 'YOUR_API_KEY'
def clean_article(article):
return article.replace("\n", " ").decode('cp1252').encode('utf-8', 'replace')
def clean_articles(article_list):
# data processing: clean up new lines and convert strings into utf-8 so the indico API can read the data
# put all articles into a list for easy batch processing
cleaned_articles = [clean_article(text) for row in article_list for text in row]
print "Articles cleaned and ready for batch processing!"
return cleaned_articles
def get_summary(cleaned_articles):
# get article summaries
summary = [indicoio.summarization(item) for item in cleaned_articles]
# clean up result for easy readability
print "Here are the summaries for all %d articles:" % (len(summary))
for line in summary:
print "\n" + " ".join(line)
if __name__ == "__main__":
with open('articles.csv', 'rU') as f:
article_list = csv.reader(f)
cleaned_articles = clean_articles(article_list)
get_summary(cleaned_articles)
|
|
bb43a2e63f7f7c337b01ef855d426a84b73eeee5
|
telemeta/management/commands/telemeta-export-items-from-user-playlists.py
|
telemeta/management/commands/telemeta-export-items-from-user-playlists.py
|
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import Playlist, MediaCollection, MediaItem
class Command(BaseCommand):
help = "Export media files from playlists of a given user"
args = "username"
def handle(self, *args, **options):
username = args[0]
user = User.objects.get(username=username)
playlists = user.playlists.all()
items = []
for playlist in playlists:
resources = playlist.resources.all()
for resource in resources:
if resource.resource_type == 'collection':
collection = MediaCollection.objects.get(id=resource.resource_id)
for item in collection.items.all():
items.append(item)
elif resource.resource_type == 'item':
item = MediaItem.objects.get(id=resource.resource_id)
items.append(item)
print(items)
|
Add a command prototype to list all items from a playlist
|
Add a command prototype to list all items from a playlist
|
Python
|
agpl-3.0
|
Parisson/Telemeta,Parisson/Telemeta,Parisson/Telemeta,Parisson/Telemeta
|
Add a command prototype to list all items from a playlist
|
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import Playlist, MediaCollection, MediaItem
class Command(BaseCommand):
help = "Export media files from playlists of a given user"
args = "username"
def handle(self, *args, **options):
username = args[0]
user = User.objects.get(username=username)
playlists = user.playlists.all()
items = []
for playlist in playlists:
resources = playlist.resources.all()
for resource in resources:
if resource.resource_type == 'collection':
collection = MediaCollection.objects.get(id=resource.resource_id)
for item in collection.items.all():
items.append(item)
elif resource.resource_type == 'item':
item = MediaItem.objects.get(id=resource.resource_id)
items.append(item)
print(items)
|
<commit_before><commit_msg>Add a command prototype to list all items from a playlist<commit_after>
|
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import Playlist, MediaCollection, MediaItem
class Command(BaseCommand):
help = "Export media files from playlists of a given user"
args = "username"
def handle(self, *args, **options):
username = args[0]
user = User.objects.get(username=username)
playlists = user.playlists.all()
items = []
for playlist in playlists:
resources = playlist.resources.all()
for resource in resources:
if resource.resource_type == 'collection':
collection = MediaCollection.objects.get(id=resource.resource_id)
for item in collection.items.all():
items.append(item)
elif resource.resource_type == 'item':
item = MediaItem.objects.get(id=resource.resource_id)
items.append(item)
print(items)
|
Add a command prototype to list all items from a playlist
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import Playlist, MediaCollection, MediaItem
class Command(BaseCommand):
help = "Export media files from playlists of a given user"
args = "username"
def handle(self, *args, **options):
username = args[0]
user = User.objects.get(username=username)
playlists = user.playlists.all()
items = []
for playlist in playlists:
resources = playlist.resources.all()
for resource in resources:
if resource.resource_type == 'collection':
collection = MediaCollection.objects.get(id=resource.resource_id)
for item in collection.items.all():
items.append(item)
elif resource.resource_type == 'item':
item = MediaItem.objects.get(id=resource.resource_id)
items.append(item)
print(items)
|
<commit_before><commit_msg>Add a command prototype to list all items from a playlist<commit_after>from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.utils import translation
from telemeta.models import Playlist, MediaCollection, MediaItem
class Command(BaseCommand):
help = "Export media files from playlists of a given user"
args = "username"
def handle(self, *args, **options):
username = args[0]
user = User.objects.get(username=username)
playlists = user.playlists.all()
items = []
for playlist in playlists:
resources = playlist.resources.all()
for resource in resources:
if resource.resource_type == 'collection':
collection = MediaCollection.objects.get(id=resource.resource_id)
for item in collection.items.all():
items.append(item)
elif resource.resource_type == 'item':
item = MediaItem.objects.get(id=resource.resource_id)
items.append(item)
print(items)
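A minimal follow-up sketch for the prototype above, not part of the original commit: the loop collects an item once per playlist occurrence, so a deduplicating variant is a natural next step. The model names and the resources / resource_type / resource_id fields are taken from the command itself; the helper name is made up.

from telemeta.models import MediaCollection, MediaItem

def unique_items_for_user(user):
    # Hypothetical helper: same traversal as the command above, but each
    # MediaItem is kept only once regardless of how many playlists reference it.
    seen_ids = set()
    items = []
    for playlist in user.playlists.all():
        for resource in playlist.resources.all():
            if resource.resource_type == 'collection':
                collection = MediaCollection.objects.get(id=resource.resource_id)
                candidates = list(collection.items.all())
            elif resource.resource_type == 'item':
                candidates = [MediaItem.objects.get(id=resource.resource_id)]
            else:
                candidates = []
            for item in candidates:
                if item.id not in seen_ids:
                    seen_ids.add(item.id)
                    items.append(item)
    return items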
|
|
a62dc18745f952b3fcb05ddf4768758e25883698
|
accelerator/migrations/0058_grant_staff_clearance_for_existing_staff_members.py
|
accelerator/migrations/0058_grant_staff_clearance_for_existing_staff_members.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-06-12 19:38
from __future__ import unicode_literals
from django.db import migrations
STAFF = "Staff" # don't import from models in migrations.
def grant_staff_clearances_for_role_grantees(apps, program_role):
Clearance = apps.get_model('accelerator', 'Clearance')
program_family = program_role.program.program_family
user_ids = program_role.programrolegrant_set.values_list(
"person_id", flat=True)
for user_id in user_ids:
Clearance.objects.get_or_create(
user_id=user_id,
program_family=program_family,
defaults={"level": STAFF})
def grant_clearances_for_mc_staff_users(apps, schema_editor):
ProgramRole = apps.get_model('accelerator', "ProgramRole")
for program_role in ProgramRole.objects.filter(
user_role__name=STAFF):
grant_staff_clearances_for_role_grantees(apps, program_role)
def revoke_staff_clearances(apps, schema_editor):
Clearance = apps.get_model("accelerator", "Clearance")
Clearance.objects.filter(level=STAFF).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0057_add_clearance_level_staff'),
]
operations = [
migrations.RunPython(
grant_clearances_for_mc_staff_users,
revoke_staff_clearances)
]
|
Add datamigration to create staff clearances
|
[AC-6516] Add datamigration to create staff clearances
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-6516] Add datamigration to create staff clearances
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-06-12 19:38
from __future__ import unicode_literals
from django.db import migrations
STAFF = "Staff" # don't import from models in migrations.
def grant_staff_clearances_for_role_grantees(apps, program_role):
Clearance = apps.get_model('accelerator', 'Clearance')
program_family = program_role.program.program_family
user_ids = program_role.programrolegrant_set.values_list(
"person_id", flat=True)
for user_id in user_ids:
Clearance.objects.get_or_create(
user_id=user_id,
program_family=program_family,
defaults={"level": STAFF})
def grant_clearances_for_mc_staff_users(apps, schema_editor):
ProgramRole = apps.get_model('accelerator', "ProgramRole")
for program_role in ProgramRole.objects.filter(
user_role__name=STAFF):
grant_staff_clearances_for_role_grantees(apps, program_role)
def revoke_staff_clearances(apps, schema_editor):
Clearance = apps.get_model("accelerator", "Clearance")
Clearance.objects.filter(level=STAFF).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0057_add_clearance_level_staff'),
]
operations = [
migrations.RunPython(
grant_clearances_for_mc_staff_users,
revoke_staff_clearances)
]
|
<commit_before><commit_msg>[AC-6516] Add datamigration to create staff clearances<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-06-12 19:38
from __future__ import unicode_literals
from django.db import migrations
STAFF = "Staff" # don't import from models in migrations.
def grant_staff_clearances_for_role_grantees(apps, program_role):
Clearance = apps.get_model('accelerator', 'Clearance')
program_family = program_role.program.program_family
user_ids = program_role.programrolegrant_set.values_list(
"person_id", flat=True)
for user_id in user_ids:
Clearance.objects.get_or_create(
user_id=user_id,
program_family=program_family,
defaults={"level": STAFF})
def grant_clearances_for_mc_staff_users(apps, schema_editor):
ProgramRole = apps.get_model('accelerator', "ProgramRole")
for program_role in ProgramRole.objects.filter(
user_role__name=STAFF):
grant_staff_clearances_for_role_grantees(apps, program_role)
def revoke_staff_clearances(apps, schema_editor):
Clearance = apps.get_model("accelerator", "Clearance")
Clearance.objects.filter(level=STAFF).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0057_add_clearance_level_staff'),
]
operations = [
migrations.RunPython(
grant_clearances_for_mc_staff_users,
revoke_staff_clearances)
]
|
[AC-6516] Add datamigration to create staff clearances# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-06-12 19:38
from __future__ import unicode_literals
from django.db import migrations
STAFF = "Staff" # don't import from models in migrations.
def grant_staff_clearances_for_role_grantees(apps, program_role):
Clearance = apps.get_model('accelerator', 'Clearance')
program_family = program_role.program.program_family
user_ids = program_role.programrolegrant_set.values_list(
"person_id", flat=True)
for user_id in user_ids:
Clearance.objects.get_or_create(
user_id=user_id,
program_family=program_family,
defaults={"level": STAFF})
def grant_clearances_for_mc_staff_users(apps, schema_editor):
ProgramRole = apps.get_model('accelerator', "ProgramRole")
for program_role in ProgramRole.objects.filter(
user_role__name=STAFF):
grant_staff_clearances_for_role_grantees(apps, program_role)
def revoke_staff_clearances(apps, schema_editor):
Clearance = apps.get_model("accelerator", "Clearance")
Clearance.objects.filter(level=STAFF).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0057_add_clearance_level_staff'),
]
operations = [
migrations.RunPython(
grant_clearances_for_mc_staff_users,
revoke_staff_clearances)
]
|
<commit_before><commit_msg>[AC-6516] Add datamigration to create staff clearances<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-06-12 19:38
from __future__ import unicode_literals
from django.db import migrations
STAFF = "Staff" # don't import from models in migrations.
def grant_staff_clearances_for_role_grantees(apps, program_role):
Clearance = apps.get_model('accelerator', 'Clearance')
program_family = program_role.program.program_family
user_ids = program_role.programrolegrant_set.values_list(
"person_id", flat=True)
for user_id in user_ids:
Clearance.objects.get_or_create(
user_id=user_id,
program_family=program_family,
defaults={"level": STAFF})
def grant_clearances_for_mc_staff_users(apps, schema_editor):
ProgramRole = apps.get_model('accelerator', "ProgramRole")
for program_role in ProgramRole.objects.filter(
user_role__name=STAFF):
grant_staff_clearances_for_role_grantees(apps, program_role)
def revoke_staff_clearances(apps, schema_editor):
Clearance = apps.get_model("accelerator", "Clearance")
Clearance.objects.filter(level=STAFF).delete()
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0057_add_clearance_level_staff'),
]
operations = [
migrations.RunPython(
grant_clearances_for_mc_staff_users,
revoke_staff_clearances)
]
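A hypothetical spot-check to run in a Django shell after applying the migration above; the Clearance import path and the "Staff" level string are assumptions read off the migration code. Because the forward step uses get_or_create, rerunning it should leave this count unchanged.

from accelerator.models import Clearance  # assumed import path

def staff_clearance_count():
    # Number of staff-level clearances created by the data migration.
    return Clearance.objects.filter(level="Staff").count()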
|
|
2d18583309a189e263bda13e19f7a05ba832c14d
|
backend/scripts/templates/templates2file.py
|
backend/scripts/templates/templates2file.py
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import json
import os
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
templates = list(r.table('templates').run(conn))
try:
os.mkdir('/tmp/templates')
except:
pass
for template in templates:
try:
with open("/tmp/templates/{}".format(template['name']), 'w') as out:
json.dump(template, out, indent=4)
except:
pass
|
Add file to write templates to json
|
Add file to write templates to json
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add file to write templates to json
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import json
import os
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
templates = list(r.table('templates').run(conn))
try:
os.mkdir('/tmp/templates')
except:
pass
for template in templates:
try:
with open("/tmp/templates/{}".format(template['name']), 'w') as out:
json.dump(template, out, indent=4)
except:
pass
|
<commit_before><commit_msg>Add file to write templates to json<commit_after>
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import json
import os
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
templates = list(r.table('templates').run(conn))
try:
os.mkdir('/tmp/templates')
except:
pass
for template in templates:
try:
with open("/tmp/templates/{}".format(template['name']), 'w') as out:
json.dump(template, out, indent=4)
except:
pass
|
Add file to write templates to json#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import json
import os
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
templates = list(r.table('templates').run(conn))
try:
os.mkdir('/tmp/templates')
except:
pass
for template in templates:
try:
with open("/tmp/templates/{}".format(template['name']), 'w') as out:
json.dump(template, out, indent=4)
except:
pass
|
<commit_before><commit_msg>Add file to write templates to json<commit_after>#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import json
import os
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
templates = list(r.table('templates').run(conn))
try:
os.mkdir('/tmp/templates')
except:
pass
for template in templates:
try:
with open("/tmp/templates/{}".format(template['name']), 'w') as out:
json.dump(template, out, indent=4)
except:
pass
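The script above would be invoked along the lines of python templates2file.py -P 30815. A companion sketch, not part of the commit, for reading the dump back; it assumes each file under /tmp/templates holds exactly one JSON document, as written above.

import json
import os

def load_dumped_templates(path="/tmp/templates"):
    # Load every template the dump script wrote, keyed by template name.
    templates = {}
    for name in os.listdir(path):
        with open(os.path.join(path, name)) as template_file:
            templates[name] = json.load(template_file)
    return templates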
|
|
6df873a26ff71b07e68dcb2e9fa9c4b1725a70ce
|
src/nodeconductor_assembly_waldur/experts/migrations/0003_expertbid.py
|
src/nodeconductor_assembly_waldur/experts/migrations/0003_expertbid.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-07 15:09
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import nodeconductor.core.fields
class Migration(migrations.Migration):
dependencies = [
('structure', '0052_customer_subnets'),
('experts', '0002_expertrequest'),
]
operations = [
migrations.CreateModel(
name='ExpertBid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', nodeconductor.core.fields.UUIDField()),
('price', models.DecimalField(decimal_places=7, default=0, max_digits=22, validators=[django.core.validators.MinValueValidator(Decimal('0'))])),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experts.ExpertRequest')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Project')),
],
options={
'abstract': False,
},
),
]
|
Add migration for expert bid
|
Add migration for expert bid [WAL-976]
|
Python
|
mit
|
opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur
|
Add migration for expert bid [WAL-976]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-07 15:09
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import nodeconductor.core.fields
class Migration(migrations.Migration):
dependencies = [
('structure', '0052_customer_subnets'),
('experts', '0002_expertrequest'),
]
operations = [
migrations.CreateModel(
name='ExpertBid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', nodeconductor.core.fields.UUIDField()),
('price', models.DecimalField(decimal_places=7, default=0, max_digits=22, validators=[django.core.validators.MinValueValidator(Decimal('0'))])),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experts.ExpertRequest')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Project')),
],
options={
'abstract': False,
},
),
]
|
<commit_before><commit_msg>Add migration for expert bid [WAL-976]<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-07 15:09
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import nodeconductor.core.fields
class Migration(migrations.Migration):
dependencies = [
('structure', '0052_customer_subnets'),
('experts', '0002_expertrequest'),
]
operations = [
migrations.CreateModel(
name='ExpertBid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', nodeconductor.core.fields.UUIDField()),
('price', models.DecimalField(decimal_places=7, default=0, max_digits=22, validators=[django.core.validators.MinValueValidator(Decimal('0'))])),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experts.ExpertRequest')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Project')),
],
options={
'abstract': False,
},
),
]
|
Add migration for expert bid [WAL-976]# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-07 15:09
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import nodeconductor.core.fields
class Migration(migrations.Migration):
dependencies = [
('structure', '0052_customer_subnets'),
('experts', '0002_expertrequest'),
]
operations = [
migrations.CreateModel(
name='ExpertBid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', nodeconductor.core.fields.UUIDField()),
('price', models.DecimalField(decimal_places=7, default=0, max_digits=22, validators=[django.core.validators.MinValueValidator(Decimal('0'))])),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experts.ExpertRequest')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Project')),
],
options={
'abstract': False,
},
),
]
|
<commit_before><commit_msg>Add migration for expert bid [WAL-976]<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-07 15:09
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import nodeconductor.core.fields
class Migration(migrations.Migration):
dependencies = [
('structure', '0052_customer_subnets'),
('experts', '0002_expertrequest'),
]
operations = [
migrations.CreateModel(
name='ExpertBid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', nodeconductor.core.fields.UUIDField()),
('price', models.DecimalField(decimal_places=7, default=0, max_digits=22, validators=[django.core.validators.MinValueValidator(Decimal('0'))])),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experts.ExpertRequest')),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Project')),
],
options={
'abstract': False,
},
),
]
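Once the table exists, a bid could be created roughly as below. The ExpertBid import path is an assumption, and some_request / some_project are placeholders for existing ExpertRequest and structure.Project rows; none of this is part of the migration itself.

from decimal import Decimal
from nodeconductor_assembly_waldur.experts.models import ExpertBid  # assumed path

bid = ExpertBid.objects.create(
    request=some_request,     # placeholder: an existing ExpertRequest
    team=some_project,        # placeholder: an existing structure.Project
    price=Decimal('100.00'),  # must be non-negative per the field validator
)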
|
|
8e983472134817c1312e3713ca45c7359300dedf
|
academics/management/commands/set_student_current.py
|
academics/management/commands/set_student_current.py
|
#!/usr/bin/python
import logging
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from academics.models import Student, Enrollment, AcademicYear
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Import reset student's current status"
def handle(self, *args, **kwargs):
logger.info("Beginning student status reset routing")
with transaction.atomic():
Student.objects.update(current=False)
current_enrollments = Enrollment.objects.filter(academic_year=AcademicYear.objects.current(), status_enrollment="Enrolled", status_attending="Attending")
current_students = Student.objects.filter(enrollment__in=current_enrollments)
current_students.update(current=True)
|
Set students current flag based on enrolled and attending
|
Set students current flag based on enrolled and attending
|
Python
|
mit
|
rectory-school/rectory-apps,rectory-school/rectory-apps,rectory-school/rectory-apps,rectory-school/rectory-apps,rectory-school/rectory-apps
|
Set students current flag based on enrolled and attending
|
#!/usr/bin/python
import logging
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from academics.models import Student, Enrollment, AcademicYear
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Import reset student's current status"
def handle(self, *args, **kwargs):
logger.info("Beginning student status reset routing")
with transaction.atomic():
Student.objects.update(current=False)
current_enrollments = Enrollment.objects.filter(academic_year=AcademicYear.objects.current(), status_enrollment="Enrolled", status_attending="Attending")
current_students = Student.objects.filter(enrollment__in=current_enrollments)
current_students.update(current=True)
|
<commit_before><commit_msg>Set students current flag based on enrolled and attending<commit_after>
|
#!/usr/bin/python
import logging
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from academics.models import Student, Enrollment, AcademicYear
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Import reset student's current status"
def handle(self, *args, **kwargs):
logger.info("Beginning student status reset routing")
with transaction.atomic():
Student.objects.update(current=False)
current_enrollments = Enrollment.objects.filter(academic_year=AcademicYear.objects.current(), status_enrollment="Enrolled", status_attending="Attending")
current_students = Student.objects.filter(enrollment__in=current_enrollments)
current_students.update(current=True)
|
Set students current flag based on enrolled and attending#!/usr/bin/python
import logging
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from academics.models import Student, Enrollment, AcademicYear
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Import reset student's current status"
def handle(self, *args, **kwargs):
logger.info("Beginning student status reset routing")
with transaction.atomic():
Student.objects.update(current=False)
current_enrollments = Enrollment.objects.filter(academic_year=AcademicYear.objects.current(), status_enrollment="Enrolled", status_attending="Attending")
current_students = Student.objects.filter(enrollment__in=current_enrollments)
current_students.update(current=True)
|
<commit_before><commit_msg>Set students current flag based on enrolled and attending<commit_after>#!/usr/bin/python
import logging
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from academics.models import Student, Enrollment, AcademicYear
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Import reset student's current status"
def handle(self, *args, **kwargs):
logger.info("Beginning student status reset routing")
with transaction.atomic():
Student.objects.update(current=False)
current_enrollments = Enrollment.objects.filter(academic_year=AcademicYear.objects.current(), status_enrollment="Enrolled", status_attending="Attending")
current_students = Student.objects.filter(enrollment__in=current_enrollments)
current_students.update(current=True)
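The command would normally be run as python manage.py set_student_current. Below is a hypothetical shell check, reusing the exact filters from the command, to confirm afterwards that the current flags line up with enrollment; the function name is illustrative.

from academics.models import Student, Enrollment, AcademicYear

def current_flags_match_enrollment():
    # Recompute the set of students the command should have flagged and
    # compare it with what is actually flagged in the database.
    enrollments = Enrollment.objects.filter(
        academic_year=AcademicYear.objects.current(),
        status_enrollment="Enrolled",
        status_attending="Attending")
    expected = set(Student.objects.filter(enrollment__in=enrollments)
                   .values_list("pk", flat=True))
    flagged = set(Student.objects.filter(current=True)
                  .values_list("pk", flat=True))
    return expected == flagged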
|
|
b0d50f52f45d8f1c7de261c7fe8d15e621d0e641
|
scripts/theanets-untie.py
|
scripts/theanets-untie.py
|
#!/usr/bin/env python
import climate
import cPickle as pickle
import gzip
import numpy as np
logging = climate.get_logger('theanets-untie')
@climate.annotate(
source='load a saved network from FILE',
target='save untied network weights to FILE',
)
def main(source, target):
opener = gzip.open if source.endswith('.gz') else open
p = pickle.load(opener(source))
logging.info('read from %s:', source)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
p['weights'].extend(0 + w.T for w in p['weights'][::-1])
p['biases'].extend(-b for b in p['biases'][-2::-1])
p['biases'].append(np.zeros(
(len(p['weights'][0]), ), p['biases'][0].dtype))
logging.info('writing to %s:', target)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
opener = gzip.open if target.endswith('.gz') else open
pickle.dump(p, opener(target, 'wb'), -1)
if __name__ == '__main__':
climate.call(main)
|
Add a script to "untie" tied model weights.
|
Add a script to "untie" tied model weights.
Might need some more work in updating "layers" parameter.
Could help address issue #39.
|
Python
|
mit
|
lmjohns3/theanets,devdoer/theanets,chrinide/theanets
|
Add a script to "untie" tied model weights.
Might need some more work in updating "layers" parameter.
Could help address issue #39.
|
#!/usr/bin/env python
import climate
import cPickle as pickle
import gzip
import numpy as np
logging = climate.get_logger('theanets-untie')
@climate.annotate(
source='load a saved network from FILE',
target='save untied network weights to FILE',
)
def main(source, target):
opener = gzip.open if source.endswith('.gz') else open
p = pickle.load(opener(source))
logging.info('read from %s:', source)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
p['weights'].extend(0 + w.T for w in p['weights'][::-1])
p['biases'].extend(-b for b in p['biases'][-2::-1])
p['biases'].append(np.zeros(
(len(p['weights'][0]), ), p['biases'][0].dtype))
logging.info('writing to %s:', target)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
opener = gzip.open if target.endswith('.gz') else open
pickle.dump(p, opener(target, 'wb'), -1)
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add a script to "untie" tied model weights.
Might need some more work in updating "layers" parameter.
Could help address issue #39.<commit_after>
|
#!/usr/bin/env python
import climate
import cPickle as pickle
import gzip
import numpy as np
logging = climate.get_logger('theanets-untie')
@climate.annotate(
source='load a saved network from FILE',
target='save untied network weights to FILE',
)
def main(source, target):
opener = gzip.open if source.endswith('.gz') else open
p = pickle.load(opener(source))
logging.info('read from %s:', source)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
p['weights'].extend(0 + w.T for w in p['weights'][::-1])
p['biases'].extend(-b for b in p['biases'][-2::-1])
p['biases'].append(np.zeros(
(len(p['weights'][0]), ), p['biases'][0].dtype))
logging.info('writing to %s:', target)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
opener = gzip.open if target.endswith('.gz') else open
pickle.dump(p, opener(target, 'wb'), -1)
if __name__ == '__main__':
climate.call(main)
|
Add a script to "untie" tied model weights.
Might need some more work in updating "layers" parameter.
Could help address issue #39.#!/usr/bin/env python
import climate
import cPickle as pickle
import gzip
import numpy as np
logging = climate.get_logger('theanets-untie')
@climate.annotate(
source='load a saved network from FILE',
target='save untied network weights to FILE',
)
def main(source, target):
opener = gzip.open if source.endswith('.gz') else open
p = pickle.load(opener(source))
logging.info('read from %s:', source)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
p['weights'].extend(0 + w.T for w in p['weights'][::-1])
p['biases'].extend(-b for b in p['biases'][-2::-1])
p['biases'].append(np.zeros(
(len(p['weights'][0]), ), p['biases'][0].dtype))
logging.info('writing to %s:', target)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
opener = gzip.open if target.endswith('.gz') else open
pickle.dump(p, opener(target, 'wb'), -1)
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add a script to "untie" tied model weights.
Might need some more work in updating "layers" parameter.
Could help address issue #39.<commit_after>#!/usr/bin/env python
import climate
import cPickle as pickle
import gzip
import numpy as np
logging = climate.get_logger('theanets-untie')
@climate.annotate(
source='load a saved network from FILE',
target='save untied network weights to FILE',
)
def main(source, target):
opener = gzip.open if source.endswith('.gz') else open
p = pickle.load(opener(source))
logging.info('read from %s:', source)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
p['weights'].extend(0 + w.T for w in p['weights'][::-1])
p['biases'].extend(-b for b in p['biases'][-2::-1])
p['biases'].append(np.zeros(
(len(p['weights'][0]), ), p['biases'][0].dtype))
logging.info('writing to %s:', target)
for w, b in zip(p['weights'], p['biases']):
logging.info('weights %s bias %s %s', w.shape, b.shape, b.dtype)
opener = gzip.open if target.endswith('.gz') else open
pickle.dump(p, opener(target, 'wb'), -1)
if __name__ == '__main__':
climate.call(main)
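Typical usage would be something like theanets-untie.py tied.pkl.gz untied.pkl.gz, with climate building the command line from the annotations. A small, assumption-laden way to inspect the result, relying on the same pickle layout of parallel 'weights' and 'biases' lists:

import gzip
import cPickle as pickle

def layer_shapes(path):
    # Report (weight, bias) shapes per layer; after untying, the lists should
    # be roughly twice as long as in the original tied model.
    opener = gzip.open if path.endswith('.gz') else open
    params = pickle.load(opener(path))
    return [(w.shape, b.shape) for w, b in zip(params['weights'], params['biases'])]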
|
|
339fdd927f9da0f7e15726d087c9916301aef935
|
softMarginSVMwithKernels/howItWorksSoftMarginSVM.py
|
softMarginSVMwithKernels/howItWorksSoftMarginSVM.py
|
# -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
I basically just do the 'from scratch' in this part because all this can easily
be done by just adding some parameters to sklearn's svm.SVC().
Example:
    $ python howItWorksSoftMarginSVM.py
Todo:
*
"""
import numpy as np
from numpy import linalg
# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will
# just use a library for it now because it's simpler.
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
"""Linear kernel function.
if this kernel is used then the decision boundary hyperplane will have a
linear form.
"""
return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
"""Polynomial kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Polynomial form.
"""
return (1 + np.dot(x, y))**p
def gaussian_kernel(x, y, sigma=5.0):
"""Gaussian kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Gaussian form.
"""
return np.exp(-linalg.norm(x - y)**2 / (2 * (sigma**2)))
class SVM(object):
"""Support Vector Machine (SVM) class.
This class is for creating an instance of a SVM. To avoid retraining or
refitting (as it's also called) every time it is used.
"""
def __init__(self, kernel=linear_kernel, C=None):
"""The __init__ method of the SVM class.
Args:
kernel (function name): The kernel that will be used.
Default linear kernel.
C: the max sum of all the distances of the features that are
wrongly classified during fitting/training. Default is 'None', if C is
None then it's a hard margin SVM with no slack.
"""
self.kernel = kernel
self.C = C
if self.C is not None:
self.C = float(self.C)
|
Add soft margin SVM and added kernels and class
|
Add soft margin SVM and added kernels and class
|
Python
|
mit
|
a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms
|
Add soft margin SVM and added kernels and class
|
# -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
I basically just do the 'from scratch' in this part because all this can easily
be done by just adding some parameters to sklearn's svm.SVC().
Example:
    $ python howItWorksSoftMarginSVM.py
Todo:
*
"""
import numpy as np
from numpy import linalg
# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will
# just use a library for it now because it's simpler.
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
"""Linear kernel function.
if this kernel is used then the decision boundary hyperplane will have a
linear form.
"""
return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
"""Polynomial kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Polynomial form.
"""
return (1 + np.dot(x, y))**p
def gaussian_kernel(x, y, sigma=5.0):
"""Gaussian kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Gaussian form.
"""
return np.exp(-linalg.norm(x - y)**2 / (2 * (sigma**2)))
class SVM(object):
"""Support Vector Machine (SVM) class.
This class is for creating an instance of a SVM. To avoid retraining or
refitting (as it's also called) every time it is used.
"""
def __init__(self, kernel=linear_kernel, C=None):
"""The __init__ method of the SVM class.
Args:
kernel (function name): The kernel that will be used.
Default linear kernel.
C: the max sum of all the distances of the features that are
wrongly classified during fitting/training. Default is 'None', if C is
None then it's a hard margin SVM with no slack.
"""
self.kernel = kernel
self.C = C
if self.C is not None:
self.C = float(self.C)
|
<commit_before><commit_msg>Add soft margin SVM and added kernels and class<commit_after>
|
# -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
I basically just do the 'from scratch' in this part because all this can easily
be done by just adding some parameters to sklearn's svm.SVC().
Example:
    $ python howItWorksSoftMarginSVM.py
Todo:
*
"""
import numpy as np
from numpy import linalg
# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will
# just use a library for it now because it's simpler.
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
"""Linear kernel function.
if this kernel is used then the decision boundary hyperplane will have a
linear form.
"""
return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
"""Polynomial kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Polynomial form.
"""
return (1 + np.dot(x, y))**p
def gaussian_kernel(x, y, sigma=5.0):
"""Gaussian kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Gaussian form.
"""
return np.exp(-linalg.norm(x - y)**2 / (2 * (sigma**2)))
class SVM(object):
"""Support Vector Machine (SVM) class.
This class is for creating an instance of a SVM. To avoid retraining or
refitting (as it's also called) every time it is used.
"""
def __init__(self, kernel=linear_kernel, C=None):
"""The __init__ method of the SVM class.
Args:
kernel (function name): The kernel that will be used.
Default linear kernel.
C: the max sum of all the distances of the features that are
wrongly classified during fitting/training. Default is 'None', if C is
None then it's a hard margin SVM with no slack.
"""
self.kernel = kernel
self.C = C
if self.C is not None:
self.C = float(self.C)
|
Add soft margin SVM and added kernels and class# -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
I basically just do the 'from scratch' in this part because all this can easily
be done by just adding some parameters to sklearn's svm.SVC().
Example:
    $ python howItWorksSoftMarginSVM.py
Todo:
*
"""
import numpy as np
from numpy import linalg
# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will
# just use a library for it now because it's simpler.
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
"""Linear kernel function.
if this kernel is used then the decision boundary hyperplane will have a
linear form.
"""
return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
"""Polynomial kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Polynomial form.
"""
return (1 + np.dot(x, y))**p
def gaussian_kernel(x, y, sigma=5.0):
"""Gaussian kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Gaussian form.
"""
return np.exp(-linalg.norm(x - y)**2 / (2 * (sigma**2)))
class SVM(object):
"""Support Vector Machine (SVM) class.
This class is for creating an instance of a SVM. To avoid retraining or
refitting (as it's also called) every time it is used.
"""
def __init__(self, kernel=linear_kernel, C=None):
"""The __init__ method of the SVM class.
Args:
kernel (function name): The kernel that will be used.
Default linear kernel.
C: the max sum of all the distances of the features that are
wrongly classified during fitting/training. Default is 'None', if C is
None then it's a hard margin SVM with no slack.
"""
self.kernel = kernel
self.C = C
if self.C is not None:
self.C = float(self.C)
|
<commit_before><commit_msg>Add soft margin SVM and added kernels and class<commit_after># -*- coding: utf-8 -*-
"""Soft Margin SVM classification with kernels for machine learning.
Soft margin SVM is basically an SVM (see folder **supportVectorMachine**) which
has some 'slack' and allows features to be 'wrongly' classified to avoid
overfitting the classifier. This also includes kernels. Kernels use the inner
product to help us transform the feature space to make it possible for Support
Vector Machines to create a good hyperplane with non-linear feature sets.
I basically just do the 'from scratch' in this part because all this can easily
be done by just adding some parameters to sklearn's svm.SVC().
Example:
    $ python howItWorksSoftMarginSVM.py
Todo:
*
"""
import numpy as np
from numpy import linalg
# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will
# just use a library for it now because it's simpler.
import cvxopt
import cvxopt.solvers
def linear_kernel(x1, x2):
"""Linear kernel function.
if this kernel is used then the decision boundary hyperplane will have a
linear form.
"""
return np.dot(x1, x2)
def polynomial_kernel(x, y, p=3):
"""Polynomial kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Polynomial form.
"""
return (1 + np.dot(x, y))**p
def gaussian_kernel(x, y, sigma=5.0):
"""Gaussian kernel function.
if this kernel is used then the decision boundary hyperplane will have a
Gaussian form.
"""
return np.exp(-linalg.norm(x - y)**2 / (2 * (sigma**2)))
class SVM(object):
"""Support Vector Machine (SVM) class.
This class is for creating an instance of a SVM. To avoid retraining or
refitting (as it's also called) every time it is used.
"""
def __init__(self, kernel=linear_kernel, C=None):
"""The __init__ method of the SVM class.
Args:
kernel (function name): The kernel that will be used.
Default linear kernel.
C: the max sum of all the distances of the features that are
wrongly classified during fitting/training. Default is 'None', if C is
None then it's a hard margin SVM with no slack.
"""
self.kernel = kernel
self.C = C
if self.C is not None:
self.C = float(self.C)
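The excerpt above stops inside SVM.__init__, so only the kernel functions can be exercised directly here. A minimal sketch of doing so; the printed values are simply whatever the formulas return for two arbitrary vectors.

import numpy as np

# Assumes the kernel functions defined above are in scope (same module).
x = np.array([1.0, 2.0])
y = np.array([0.5, -1.0])
print(linear_kernel(x, y))      # plain inner product
print(polynomial_kernel(x, y))  # (1 + x.y)**p with the default p=3
print(gaussian_kernel(x, y))    # RBF value with the default sigma=5.0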
|
|
8601790648a17dd1794be4f88d61e4af01349a80
|
tests/test_pipeline_chipseq.py
|
tests/test_pipeline_chipseq.py
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = [
resource_path + 'macs2.Human.GCA_000001405.22.fasta',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.ann',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.amb',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwt',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.pac',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.sa',
resource_path + 'macs2.Human.DRR000150.22.fastq',
None
]
metadata = {
'assembly' : 'GRCh38',
'expt_name' : 'macs.Human.SRR1658573'
}
chipseq_handle = process_chipseq()
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, [])
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test for the chipseq pipeline code
|
Test for the chipseq pipeline code
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq
|
Test for the chipseq pipeline code
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = [
resource_path + 'macs2.Human.GCA_000001405.22.fasta',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.ann',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.amb',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwt',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.pac',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.sa',
resource_path + 'macs2.Human.DRR000150.22.fastq',
None
]
metadata = {
'assembly' : 'GRCh38',
'expt_name' : 'macs.Human.SRR1658573'
}
chipseq_handle = process_chipseq()
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, [])
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test for the chipseq pipeline code<commit_after>
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = [
resource_path + 'macs2.Human.GCA_000001405.22.fasta',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.ann',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.amb',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwt',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.pac',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.sa',
resource_path + 'macs2.Human.DRR000150.22.fastq',
None
]
metadata = {
'assembly' : 'GRCh38',
'expt_name' : 'macs.Human.SRR1658573'
}
chipseq_handle = process_chipseq()
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, [])
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test for the chipseq pipeline code"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = [
resource_path + 'macs2.Human.GCA_000001405.22.fasta',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.ann',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.amb',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwt',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.pac',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.sa',
resource_path + 'macs2.Human.DRR000150.22.fastq',
None
]
metadata = {
'assembly' : 'GRCh38',
'expt_name' : 'macs.Human.SRR1658573'
}
chipseq_handle = process_chipseq()
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, [])
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test for the chipseq pipeline code<commit_after>"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_tb_pipeline():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_chipseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = [
resource_path + 'macs2.Human.GCA_000001405.22.fasta',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.ann',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.amb',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwt',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.pac',
resource_path + 'macs2.Human.GCA_000001405.22.fasta.sa',
resource_path + 'macs2.Human.DRR000150.22.fastq',
None
]
metadata = {
'assembly' : 'GRCh38',
'expt_name' : 'macs.Human.SRR1658573'
}
chipseq_handle = process_chipseq()
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, [])
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
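Outside of a COMPSs run, the test itself would normally be driven by pytest. A minimal hypothetical invocation, selecting it through the markers declared above; the path depends on the checkout layout.

import pytest

# Run only tests marked both 'chipseq' and 'pipeline' from this module.
pytest.main(["-m", "chipseq and pipeline", "tests/test_pipeline_chipseq.py", "-v"])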
|
|
4155d6ca5db149d8b213cc4078580fc2e85d7f4d
|
vinotes/apps/api/migrations/0002_auto_20150325_1104.py
|
vinotes/apps/api/migrations/0002_auto_20150325_1104.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wine',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='winery',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
Migrate database for model changes.
|
Migrate database for model changes.
|
Python
|
unlicense
|
rcutmore/vinotes-api,rcutmore/vinotes-api
|
Migrate database for model changes.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wine',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='winery',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Migrate database for model changes.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wine',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='winery',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
Migrate database for model changes.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wine',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='winery',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Migrate database for model changes.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='wine',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='winery',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
]
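Once applied, the new blank-able description fields can be used roughly as below; the api.models import path is an assumption based on the migration's app label, and the description text is made up.

from api.models import Wine  # assumed module path

wine = Wine.objects.first()
if wine is not None:
    wine.description = "Bright acidity with a long cherry finish."  # illustrative text
    wine.save()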
|
|
7ab37e931a836faa78a78f5d8358d845f72cdf49
|
point/gemini_cmd.py
|
point/gemini_cmd.py
|
#!/usr/bin/env python3
"""
A simple script for sending raw serial commands to Gemini.
"""
import time
import serial
import readline
def main():
ser = serial.Serial('/dev/ttyACM0', baudrate=9600)
while True:
cmd = input('> ')
if len(cmd) == 0:
continue
# losmandy native commands -- add checksum
if cmd[0] == '<' or cmd[0] == '>':
if ':' not in cmd:
print("Rejected: Native command must contain a ':' character")
continue
checksum = 0
for c in cmd:
checksum = checksum ^ ord(c)
checksum %= 128
checksum += 64
cmd = cmd + chr(checksum) + '#'
print('Native command: ' + cmd)
# LX200 command format
elif cmd[0] == ':':
print('LX200 command: ' + cmd)
pass
else:
print("Rejected: Must start with ':', '<', or '>'")
continue
ser.write(cmd.encode())
time.sleep(0.1)
reply = ser.read(ser.in_waiting).decode()
if len(reply) > 0:
print('reply: ' + reply)
if __name__ == "__main__":
main()
|
Add low level Gemini serial command script
|
Add low level Gemini serial command script
|
Python
|
mit
|
bgottula/point
|
Add low level Gemini serial command script
|
#!/usr/bin/env python3
"""
A simple script for sending raw serial commands to Gemini.
"""
import time
import serial
import readline
def main():
ser = serial.Serial('/dev/ttyACM0', baudrate=9600)
while True:
cmd = input('> ')
if len(cmd) == 0:
continue
# losmandy native commands -- add checksum
if cmd[0] == '<' or cmd[0] == '>':
if ':' not in cmd:
print("Rejected: Native command must contain a ':' character")
continue
checksum = 0
for c in cmd:
checksum = checksum ^ ord(c)
checksum %= 128
checksum += 64
cmd = cmd + chr(checksum) + '#'
print('Native command: ' + cmd)
# LX200 command format
elif cmd[0] == ':':
print('LX200 command: ' + cmd)
pass
else:
print("Rejected: Must start with ':', '<', or '>'")
continue
ser.write(cmd.encode())
time.sleep(0.1)
reply = ser.read(ser.in_waiting).decode()
if len(reply) > 0:
print('reply: ' + reply)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add low level Gemini serial command script<commit_after>
|
#!/usr/bin/env python3
"""
A simple script for sending raw serial commands to Gemini.
"""
import time
import serial
import readline
def main():
ser = serial.Serial('/dev/ttyACM0', baudrate=9600)
while True:
cmd = input('> ')
if len(cmd) == 0:
continue
# losmandy native commands -- add checksum
if cmd[0] == '<' or cmd[0] == '>':
if ':' not in cmd:
print("Rejected: Native command must contain a ':' character")
continue
checksum = 0
for c in cmd:
checksum = checksum ^ ord(c)
checksum %= 128
checksum += 64
cmd = cmd + chr(checksum) + '#'
print('Native command: ' + cmd)
# LX200 command format
elif cmd[0] == ':':
print('LX200 command: ' + cmd)
pass
else:
print("Rejected: Must start with ':', '<', or '>'")
continue
ser.write(cmd.encode())
time.sleep(0.1)
reply = ser.read(ser.in_waiting).decode()
if len(reply) > 0:
print('reply: ' + reply)
if __name__ == "__main__":
main()
|
Add low level Gemini serial command script#!/usr/bin/env python3
"""
A simple script for sending raw serial commands to Gemini.
"""
import time
import serial
import readline
def main():
ser = serial.Serial('/dev/ttyACM0', baudrate=9600)
while True:
cmd = input('> ')
if len(cmd) == 0:
continue
# losmandy native commands -- add checksum
if cmd[0] == '<' or cmd[0] == '>':
if ':' not in cmd:
print("Rejected: Native command must contain a ':' character")
continue
checksum = 0
for c in cmd:
checksum = checksum ^ ord(c)
checksum %= 128
checksum += 64
cmd = cmd + chr(checksum) + '#'
print('Native command: ' + cmd)
# LX200 command format
elif cmd[0] == ':':
print('LX200 command: ' + cmd)
pass
else:
print("Rejected: Must start with ':', '<', or '>'")
continue
ser.write(cmd.encode())
time.sleep(0.1)
reply = ser.read(ser.in_waiting).decode()
if len(reply) > 0:
print('reply: ' + reply)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add low level Gemini serial command script<commit_after>#!/usr/bin/env python3
"""
A simple script for sending raw serial commands to Gemini.
"""
import time
import serial
import readline
def main():
ser = serial.Serial('/dev/ttyACM0', baudrate=9600)
while True:
cmd = input('> ')
if len(cmd) == 0:
continue
# losmandy native commands -- add checksum
if cmd[0] == '<' or cmd[0] == '>':
if ':' not in cmd:
print("Rejected: Native command must contain a ':' character")
continue
checksum = 0
for c in cmd:
checksum = checksum ^ ord(c)
checksum %= 128
checksum += 64
cmd = cmd + chr(checksum) + '#'
print('Native command: ' + cmd)
# LX200 command format
elif cmd[0] == ':':
print('LX200 command: ' + cmd)
pass
else:
print("Rejected: Must start with ':', '<', or '>'")
continue
ser.write(cmd.encode())
time.sleep(0.1)
reply = ser.read(ser.in_waiting).decode()
if len(reply) > 0:
print('reply: ' + reply)
if __name__ == "__main__":
main()
|
|
55fa30c236095006e6f9c970ef668598c4348a96
|
src/satosa/micro_service/attribute_modifications.py
|
src/satosa/micro_service/attribute_modifications.py
|
import os
import yaml
from satosa.internal_data import DataConverter
from satosa.micro_service.service_base import ResponseMicroService
class AddStaticAttributes(ResponseMicroService):
"""
Add static attributes to the responses.
The path to the file describing the mapping (as YAML) of static attributes must be specified
with the environment variable 'SATOSA_STATIC_ATTRIBUTES'.
"""
def __init__(self, internal_attributes):
super(AddStaticAttributes, self).__init__()
self.data_converter = DataConverter(internal_attributes)
mapping_file = os.environ.get("SATOSA_STATIC_ATTRIBUTES")
if not mapping_file:
raise ValueError("Could not find file containing mapping of static attributes.")
with open(mapping_file) as f:
self.static_attributes = yaml.safe_load(f)
def process(self, context, data):
all_attributes = data.get_attributes()
all_attributes.update(self.data_converter.to_internal("saml", self.static_attributes))
data.add_attributes(all_attributes)
return data
|
Add microservice plugin for adding static attributes to responses.
|
Add microservice plugin for adding static attributes to responses.
|
Python
|
apache-2.0
|
irtnog/SATOSA,irtnog/SATOSA,SUNET/SATOSA,its-dirg/SATOSA,SUNET/SATOSA
|
Add microservice plugin for adding static attributes to responses.
|
import os
import yaml
from satosa.internal_data import DataConverter
from satosa.micro_service.service_base import ResponseMicroService
class AddStaticAttributes(ResponseMicroService):
"""
Add static attributes to the responses.
The path to the file describing the mapping (as YAML) of static attributes must be specified
with the environment variable 'SATOSA_STATIC_ATTRIBUTES'.
"""
def __init__(self, internal_attributes):
super(AddStaticAttributes, self).__init__()
self.data_converter = DataConverter(internal_attributes)
mapping_file = os.environ.get("SATOSA_STATIC_ATTRIBUTES")
if not mapping_file:
raise ValueError("Could not find file containing mapping of static attributes.")
with open(mapping_file) as f:
self.static_attributes = yaml.safe_load(f)
def process(self, context, data):
all_attributes = data.get_attributes()
all_attributes.update(self.data_converter.to_internal("saml", self.static_attributes))
data.add_attributes(all_attributes)
return data
|
<commit_before><commit_msg>Add microservice plugin for adding static attributes to responses.<commit_after>
|
import os
import yaml
from satosa.internal_data import DataConverter
from satosa.micro_service.service_base import ResponseMicroService
class AddStaticAttributes(ResponseMicroService):
"""
Add static attributes to the responses.
The path to the file describing the mapping (as YAML) of static attributes must be specified
with the environment variable 'SATOSA_STATIC_ATTRIBUTES'.
"""
def __init__(self, internal_attributes):
super(AddStaticAttributes, self).__init__()
self.data_converter = DataConverter(internal_attributes)
mapping_file = os.environ.get("SATOSA_STATIC_ATTRIBUTES")
if not mapping_file:
raise ValueError("Could not find file containing mapping of static attributes.")
with open(mapping_file) as f:
self.static_attributes = yaml.safe_load(f)
def process(self, context, data):
all_attributes = data.get_attributes()
all_attributes.update(self.data_converter.to_internal("saml", self.static_attributes))
data.add_attributes(all_attributes)
return data
|
Add microservice plugin for adding static attributes to responses.import os
import yaml
from satosa.internal_data import DataConverter
from satosa.micro_service.service_base import ResponseMicroService
class AddStaticAttributes(ResponseMicroService):
"""
Add static attributes to the responses.
The path to the file describing the mapping (as YAML) of static attributes must be specified
with the environment variable 'SATOSA_STATIC_ATTRIBUTES'.
"""
def __init__(self, internal_attributes):
super(AddStaticAttributes, self).__init__()
self.data_converter = DataConverter(internal_attributes)
mapping_file = os.environ.get("SATOSA_STATIC_ATTRIBUTES")
if not mapping_file:
raise ValueError("Could not find file containing mapping of static attributes.")
with open(mapping_file) as f:
self.static_attributes = yaml.safe_load(f)
def process(self, context, data):
all_attributes = data.get_attributes()
all_attributes.update(self.data_converter.to_internal("saml", self.static_attributes))
data.add_attributes(all_attributes)
return data
|
<commit_before><commit_msg>Add microservice plugin for adding static attributes to responses.<commit_after>import os
import yaml
from satosa.internal_data import DataConverter
from satosa.micro_service.service_base import ResponseMicroService
class AddStaticAttributes(ResponseMicroService):
"""
Add static attributes to the responses.
The path to the file describing the mapping (as YAML) of static attributes must be specified
with the environment variable 'SATOSA_STATIC_ATTRIBUTES'.
"""
def __init__(self, internal_attributes):
super(AddStaticAttributes, self).__init__()
self.data_converter = DataConverter(internal_attributes)
mapping_file = os.environ.get("SATOSA_STATIC_ATTRIBUTES")
if not mapping_file:
raise ValueError("Could not find file containing mapping of static attributes.")
with open(mapping_file) as f:
self.static_attributes = yaml.safe_load(f)
def process(self, context, data):
all_attributes = data.get_attributes()
all_attributes.update(self.data_converter.to_internal("saml", self.static_attributes))
data.add_attributes(all_attributes)
return data
|
|
94f5f630c315bc6951c98cd2a9f4908ce05d59a4
|
fedmsg/tests/test_encoding.py
|
fedmsg/tests/test_encoding.py
|
import unittest
import fedmsg.encoding
from nose.tools import eq_
class TestEncoding(unittest.TestCase):
def test_float_precision(self):
""" Ensure that float precision is limited to 3 decimal places. """
msg = dict(some_number=1234.123456)
json_str = fedmsg.encoding.dumps(msg)
print json_str
output = fedmsg.encoding.loads(json_str)
eq_(str(output['some_number']), '1234.123')
|
Test float precision in json encoding.
|
Test float precision in json encoding.
|
Python
|
lgpl-2.1
|
cicku/fedmsg,mathstuf/fedmsg,fedora-infra/fedmsg,maxamillion/fedmsg,fedora-infra/fedmsg,chaiku/fedmsg,vivekanand1101/fedmsg,pombredanne/fedmsg,chaiku/fedmsg,pombredanne/fedmsg,vivekanand1101/fedmsg,maxamillion/fedmsg,mathstuf/fedmsg,fedora-infra/fedmsg,cicku/fedmsg,mathstuf/fedmsg,chaiku/fedmsg,cicku/fedmsg,pombredanne/fedmsg,vivekanand1101/fedmsg,maxamillion/fedmsg
|
Test float precision in json encoding.
|
import unittest
import fedmsg.encoding
from nose.tools import eq_
class TestEncoding(unittest.TestCase):
def test_float_precision(self):
""" Ensure that float precision is limited to 3 decimal places. """
msg = dict(some_number=1234.123456)
json_str = fedmsg.encoding.dumps(msg)
print json_str
output = fedmsg.encoding.loads(json_str)
eq_(str(output['some_number']), '1234.123')
|
<commit_before><commit_msg>Test float precision in json encoding.<commit_after>
|
import unittest
import fedmsg.encoding
from nose.tools import eq_
class TestEncoding(unittest.TestCase):
def test_float_precision(self):
""" Ensure that float precision is limited to 3 decimal places. """
msg = dict(some_number=1234.123456)
json_str = fedmsg.encoding.dumps(msg)
print json_str
output = fedmsg.encoding.loads(json_str)
eq_(str(output['some_number']), '1234.123')
|
Test float precision in json encoding.import unittest
import fedmsg.encoding
from nose.tools import eq_
class TestEncoding(unittest.TestCase):
def test_float_precision(self):
""" Ensure that float precision is limited to 3 decimal places. """
msg = dict(some_number=1234.123456)
json_str = fedmsg.encoding.dumps(msg)
print json_str
output = fedmsg.encoding.loads(json_str)
eq_(str(output['some_number']), '1234.123')
|
<commit_before><commit_msg>Test float precision in json encoding.<commit_after>import unittest
import fedmsg.encoding
from nose.tools import eq_
class TestEncoding(unittest.TestCase):
def test_float_precision(self):
""" Ensure that float precision is limited to 3 decimal places. """
msg = dict(some_number=1234.123456)
json_str = fedmsg.encoding.dumps(msg)
print json_str
output = fedmsg.encoding.loads(json_str)
eq_(str(output['some_number']), '1234.123')
|
|
dfb4c5422c79fcd413d0d9a028cb5548e2678454
|
generate_test_certificates.py
|
generate_test_certificates.py
|
import trustme
# Create a CA
ca = trustme.CA()
# Issue a cert signed by this CA
server_cert = ca.issue_cert(u"www.good.com")
# Save the PEM-encoded data to a file
ca.cert_pem.write_to_path("GoodRootCA.pem")
server_cert.private_key_and_cert_chain_pem.write_to_path("www.good.com.pem")
|
Add script for generating test certificates
|
Add script for generating test certificates
|
Python
|
mit
|
datatheorem/TrustKit,datatheorem/TrustKit,datatheorem/TrustKit,datatheorem/TrustKit
|
Add script for generating test certificates
|
import trustme
# Create a CA
ca = trustme.CA()
# Issue a cert signed by this CA
server_cert = ca.issue_cert(u"www.good.com")
# Save the PEM-encoded data to a file
ca.cert_pem.write_to_path("GoodRootCA.pem")
server_cert.private_key_and_cert_chain_pem.write_to_path("www.good.com.pem")
|
<commit_before><commit_msg>Add script for generating test certificates<commit_after>
|
import trustme
# Create a CA
ca = trustme.CA()
# Issue a cert signed by this CA
server_cert = ca.issue_cert(u"www.good.com")
# Save the PEM-encoded data to a file
ca.cert_pem.write_to_path("GoodRootCA.pem")
server_cert.private_key_and_cert_chain_pem.write_to_path("www.good.com.pem")
|
Add script for generating test certificatesimport trustme
# Create a CA
ca = trustme.CA()
# Issue a cert signed by this CA
server_cert = ca.issue_cert(u"www.good.com")
# Save the PEM-encoded data to a file
ca.cert_pem.write_to_path("GoodRootCA.pem")
server_cert.private_key_and_cert_chain_pem.write_to_path("www.good.com.pem")
|
<commit_before><commit_msg>Add script for generating test certificates<commit_after>import trustme
# Create a CA
ca = trustme.CA()
# Issue a cert signed by this CA
server_cert = ca.issue_cert(u"www.good.com")
# Save the PEM-encoded data to a file
ca.cert_pem.write_to_path("GoodRootCA.pem")
server_cert.private_key_and_cert_chain_pem.write_to_path("www.good.com.pem")
|
|
00cdcceb131814b24546c36810682ed78ba866c6
|
pyfwk/struc/dbcol.py
|
pyfwk/struc/dbcol.py
|
#!/usr/bin/env python
"""
dbcol.py: DBCol is a struct describing an sqlite database table column
"""
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBCol:
name = None
datatype = None
def __init__(self, name, datatype):
self.name = name
self.datatype = datatype
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBCol']
# ----------------------------------MAIN----------------------------------#
def main():
pass
if __name__ == '__main__':
main()
|
Create database column class (DBCol)
|
Create database column class (DBCol)
|
Python
|
mit
|
rlinguri/pyfwk
|
Create database column class (DBCol)
|
#!/usr/bin/env python
"""
dbcol.py: DBCol is a struct describing an sqlite database table column
"""
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBCol:
name = None
datatype = None
def __init__(self, name, datatype):
self.name = name
self.datatype = datatype
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBCol']
# ----------------------------------MAIN----------------------------------#
def main():
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create database column class (DBCol)<commit_after>
|
#!/usr/bin/env python
"""
dbcol.py: DBCol is a struct describing an sqlite database table column
"""
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBCol:
name = None
datatype = None
def __init__(self, name, datatype):
self.name = name
self.datatype = datatype
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBCol']
# ----------------------------------MAIN----------------------------------#
def main():
pass
if __name__ == '__main__':
main()
|
Create database column class (DBCol)#!/usr/bin/env python
"""
dbcol.py: DBCol is a struct describing an sqlite database table column
"""
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBCol:
name = None
datatype = None
def __init__(self, name, datatype):
self.name = name
self.datatype = datatype
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBCol']
# ----------------------------------MAIN----------------------------------#
def main():
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create database column class (DBCol)<commit_after>#!/usr/bin/env python
"""
dbcol.py: DBCol is a struct describing an sqlite database table column
"""
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBCol:
name = None
datatype = None
def __init__(self, name, datatype):
self.name = name
self.datatype = datatype
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBCol']
# ----------------------------------MAIN----------------------------------#
def main():
pass
if __name__ == '__main__':
main()
|
|
51466e360320267afab41704caecebac0dff1dc2
|
src/example/bench_wsh.py
|
src/example/bench_wsh.py
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et
|
Add a handler for performing client load testing.
|
Add a handler for performing client load testing.
Review URL: http://codereview.appspot.com/3844044
git-svn-id: a751b5b3dcfba0ee4592a85c40d2fdd063ca0d53@379 4ff78f4a-9131-11de-b045-6380ec9940d4
|
Python
|
bsd-3-clause
|
XiaonuoGantan/pywebsocket,XiaonuoGantan/pywebsocket
|
Add a handler for performing client load testing.
Review URL: http://codereview.appspot.com/3844044
git-svn-id: a751b5b3dcfba0ee4592a85c40d2fdd063ca0d53@379 4ff78f4a-9131-11de-b045-6380ec9940d4
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et
|
<commit_before><commit_msg>Add a handler for performing client load testing.
Review URL: http://codereview.appspot.com/3844044
git-svn-id: a751b5b3dcfba0ee4592a85c40d2fdd063ca0d53@379 4ff78f4a-9131-11de-b045-6380ec9940d4<commit_after>
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et
|
Add a handler for performing client load testing.
Review URL: http://codereview.appspot.com/3844044
git-svn-id: a751b5b3dcfba0ee4592a85c40d2fdd063ca0d53@379 4ff78f4a-9131-11de-b045-6380ec9940d4# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et
|
<commit_before><commit_msg>Add a handler for performing client load testing.
Review URL: http://codereview.appspot.com/3844044
git-svn-id: a751b5b3dcfba0ee4592a85c40d2fdd063ca0d53@379 4ff78f4a-9131-11de-b045-6380ec9940d4<commit_after># Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et
|
|
f531eb7d1734d6d715893356a50d11eee6bc009a
|
corehq/apps/users/tests/forms.py
|
corehq/apps/users/tests/forms.py
|
from collections import namedtuple
from django.contrib.auth import get_user_model
from django.test import TestCase
from corehq.apps.users.forms import SetUserPasswordForm
Project = namedtuple('Project', ['name', 'strong_mobile_passwords'])
class TestSetUserPasswordForm(TestCase):
def setUp(self):
super(TestSetUserPasswordForm, self).setUp()
self.project = Project('mydomain', True)
self.user = get_user_model().objects.create_user('tswift')
def tearDown(self):
self.user.delete()
super(TestSetUserPasswordForm, self).tearDown()
def form(self, password):
return SetUserPasswordForm(self.project, user_id=self.user.id, user=self.user, data={
"new_password1": password,
"new_password2": password,
})
def test_weak_password(self):
form = self.form("Taylor")
self.assertFalse(form.is_valid())
def test_strong_password(self):
form = self.form("TaylorSwift89!")
self.assertTrue(form.is_valid())
|
Test mobile set password form
|
Test mobile set password form
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq
|
Test mobile set password form
|
from collections import namedtuple
from django.contrib.auth import get_user_model
from django.test import TestCase
from corehq.apps.users.forms import SetUserPasswordForm
Project = namedtuple('Project', ['name', 'strong_mobile_passwords'])
class TestSetUserPasswordForm(TestCase):
def setUp(self):
super(TestSetUserPasswordForm, self).setUp()
self.project = Project('mydomain', True)
self.user = get_user_model().objects.create_user('tswift')
def tearDown(self):
self.user.delete()
super(TestSetUserPasswordForm, self).tearDown()
def form(self, password):
return SetUserPasswordForm(self.project, user_id=self.user.id, user=self.user, data={
"new_password1": password,
"new_password2": password,
})
def test_weak_password(self):
form = self.form("Taylor")
self.assertFalse(form.is_valid())
def test_strong_password(self):
form = self.form("TaylorSwift89!")
self.assertTrue(form.is_valid())
|
<commit_before><commit_msg>Test mobile set password form<commit_after>
|
from collections import namedtuple
from django.contrib.auth import get_user_model
from django.test import TestCase
from corehq.apps.users.forms import SetUserPasswordForm
Project = namedtuple('Project', ['name', 'strong_mobile_passwords'])
class TestSetUserPasswordForm(TestCase):
def setUp(self):
super(TestSetUserPasswordForm, self).setUp()
self.project = Project('mydomain', True)
self.user = get_user_model().objects.create_user('tswift')
def tearDown(self):
self.user.delete()
super(TestSetUserPasswordForm, self).tearDown()
def form(self, password):
return SetUserPasswordForm(self.project, user_id=self.user.id, user=self.user, data={
"new_password1": password,
"new_password2": password,
})
def test_weak_password(self):
form = self.form("Taylor")
self.assertFalse(form.is_valid())
def test_strong_password(self):
form = self.form("TaylorSwift89!")
self.assertTrue(form.is_valid())
|
Test mobile set password formfrom collections import namedtuple
from django.contrib.auth import get_user_model
from django.test import TestCase
from corehq.apps.users.forms import SetUserPasswordForm
Project = namedtuple('Project', ['name', 'strong_mobile_passwords'])
class TestSetUserPasswordForm(TestCase):
def setUp(self):
super(TestSetUserPasswordForm, self).setUp()
self.project = Project('mydomain', True)
self.user = get_user_model().objects.create_user('tswift')
def tearDown(self):
self.user.delete()
super(TestSetUserPasswordForm, self).tearDown()
def form(self, password):
return SetUserPasswordForm(self.project, user_id=self.user.id, user=self.user, data={
"new_password1": password,
"new_password2": password,
})
def test_weak_password(self):
form = self.form("Taylor")
self.assertFalse(form.is_valid())
def test_strong_password(self):
form = self.form("TaylorSwift89!")
self.assertTrue(form.is_valid())
|
<commit_before><commit_msg>Test mobile set password form<commit_after>from collections import namedtuple
from django.contrib.auth import get_user_model
from django.test import TestCase
from corehq.apps.users.forms import SetUserPasswordForm
Project = namedtuple('Project', ['name', 'strong_mobile_passwords'])
class TestSetUserPasswordForm(TestCase):
def setUp(self):
super(TestSetUserPasswordForm, self).setUp()
self.project = Project('mydomain', True)
self.user = get_user_model().objects.create_user('tswift')
def tearDown(self):
self.user.delete()
super(TestSetUserPasswordForm, self).tearDown()
def form(self, password):
return SetUserPasswordForm(self.project, user_id=self.user.id, user=self.user, data={
"new_password1": password,
"new_password2": password,
})
def test_weak_password(self):
form = self.form("Taylor")
self.assertFalse(form.is_valid())
def test_strong_password(self):
form = self.form("TaylorSwift89!")
self.assertTrue(form.is_valid())
|
|
86c2441be14dbc3303b0bc65356372728a62fd4a
|
test/performance.py
|
test/performance.py
|
from contextlib import contextmanager
import json
import os
import re
import sys
from django.conf import settings
from django.db import connection, reset_queries
count = {}
@contextmanager
def count_queries(k):
q = 0
debug = settings.DEBUG
try:
settings.DEBUG = True
reset_queries()
yield
q = len(connection.queries)
finally:
settings.DEBUG = debug
count.setdefault(k, 0)
count[k] += q
return q
def export(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(f):
diff(f)
with open(f, 'w') as output:
output.write(json.dumps(count))
def diff(previous_file):
previous = json.loads(open(previous_file).read())
improvements = []
regressions = []
for k, v in count.items():
if k in previous:
v0 = previous[k]
if v > v0:
regressions.append((k, v0, v))
elif v < v0:
improvements.append((k, v0, v))
if improvements:
list_changes(improvements, 'DATABASE PERFORMANCE IMPROVEMENTS')
if regressions:
list_changes(regressions, 'DATABASE PERFORMANCE REGRESSIONS')
print('')
print('If there are good reasons for the increase(s) above (e.g. new features), just remove `%s` and carry on. You will not be bothered again.' % previous_file)
sys.exit(1)
def list_changes(data, title):
print('')
print(title)
print(re.sub('.', '-', title))
print('Unit: number of database queries')
print('')
for k, v0, v in data:
print("%s: %d -> %d" % (k, v0, v))
|
Add infrastructure for counting database queries
|
Add infrastructure for counting database queries
|
Python
|
agpl-3.0
|
terceiro/squad,terceiro/squad,terceiro/squad,terceiro/squad
|
Add infrastructure for counting database queries
|
from contextlib import contextmanager
import json
import os
import re
import sys
from django.conf import settings
from django.db import connection, reset_queries
count = {}
@contextmanager
def count_queries(k):
q = 0
debug = settings.DEBUG
try:
settings.DEBUG = True
reset_queries()
yield
q = len(connection.queries)
finally:
settings.DEBUG = debug
count.setdefault(k, 0)
count[k] += q
return q
def export(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(f):
diff(f)
with open(f, 'w') as output:
output.write(json.dumps(count))
def diff(previous_file):
previous = json.loads(open(previous_file).read())
improvements = []
regressions = []
for k, v in count.items():
if k in previous:
v0 = previous[k]
if v > v0:
regressions.append((k, v0, v))
elif v < v0:
improvements.append((k, v0, v))
if improvements:
list_changes(improvements, 'DATABASE PERFORMANCE IMPROVEMENTS')
if regressions:
list_changes(regressions, 'DATABASE PERFORMANCE REGRESSIONS')
print('')
print('If there are good reasons for the increase(s) above (e.g. new features), just remove `%s` and carry on. You will not be bothered again.' % previous_file)
sys.exit(1)
def list_changes(data, title):
print('')
print(title)
print(re.sub('.', '-', title))
print('Unit: number of database queries')
print('')
for k, v0, v in data:
print("%s: %d -> %d" % (k, v0, v))
|
<commit_before><commit_msg>Add infrastructure for counting database queries<commit_after>
|
from contextlib import contextmanager
import json
import os
import re
import sys
from django.conf import settings
from django.db import connection, reset_queries
count = {}
@contextmanager
def count_queries(k):
q = 0
debug = settings.DEBUG
try:
settings.DEBUG = True
reset_queries()
yield
q = len(connection.queries)
finally:
settings.DEBUG = debug
count.setdefault(k, 0)
count[k] += q
return q
def export(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(f):
diff(f)
with open(f, 'w') as output:
output.write(json.dumps(count))
def diff(previous_file):
previous = json.loads(open(previous_file).read())
improvements = []
regressions = []
for k, v in count.items():
if k in previous:
v0 = previous[k]
if v > v0:
regressions.append((k, v0, v))
elif v < v0:
improvements.append((k, v0, v))
if improvements:
list_changes(improvements, 'DATABASE PERFORMANCE IMPROVEMENTS')
if regressions:
list_changes(regressions, 'DATABASE PERFORMANCE REGRESSIONS')
print('')
print('If there are good reasons for the increase(s) above (e.g. new features), just remove `%s` and carry on. You will not be bothered again.' % previous_file)
sys.exit(1)
def list_changes(data, title):
print('')
print(title)
print(re.sub('.', '-', title))
print('Unit: number of database queries')
print('')
for k, v0, v in data:
print("%s: %d -> %d" % (k, v0, v))
|
Add infrastructure for counting database queriesfrom contextlib import contextmanager
import json
import os
import re
import sys
from django.conf import settings
from django.db import connection, reset_queries
count = {}
@contextmanager
def count_queries(k):
q = 0
debug = settings.DEBUG
try:
settings.DEBUG = True
reset_queries()
yield
q = len(connection.queries)
finally:
settings.DEBUG = debug
count.setdefault(k, 0)
count[k] += q
return q
def export(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(f):
diff(f)
with open(f, 'w') as output:
output.write(json.dumps(count))
def diff(previous_file):
previous = json.loads(open(previous_file).read())
improvements = []
regressions = []
for k, v in count.items():
if k in previous:
v0 = previous[k]
if v > v0:
regressions.append((k, v0, v))
elif v < v0:
improvements.append((k, v0, v))
if improvements:
list_changes(improvements, 'DATABASE PERFORMANCE IMPROVEMENTS')
if regressions:
list_changes(regressions, 'DATABASE PERFORMANCE REGRESSIONS')
print('')
print('If there are good reasons for the increase(s) above (e.g. new features), just remove `%s` and carry on. You will not be bothered again.' % previous_file)
sys.exit(1)
def list_changes(data, title):
print('')
print(title)
print(re.sub('.', '-', title))
print('Unit: number of database queries')
print('')
for k, v0, v in data:
print("%s: %d -> %d" % (k, v0, v))
|
<commit_before><commit_msg>Add infrastructure for counting database queries<commit_after>from contextlib import contextmanager
import json
import os
import re
import sys
from django.conf import settings
from django.db import connection, reset_queries
count = {}
@contextmanager
def count_queries(k):
q = 0
debug = settings.DEBUG
try:
settings.DEBUG = True
reset_queries()
yield
q = len(connection.queries)
finally:
settings.DEBUG = debug
count.setdefault(k, 0)
count[k] += q
return q
def export(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(f):
diff(f)
with open(f, 'w') as output:
output.write(json.dumps(count))
def diff(previous_file):
previous = json.loads(open(previous_file).read())
improvements = []
regressions = []
for k, v in count.items():
if k in previous:
v0 = previous[k]
if v > v0:
regressions.append((k, v0, v))
elif v < v0:
improvements.append((k, v0, v))
if improvements:
list_changes(improvements, 'DATABASE PERFORMANCE IMPROVEMENTS')
if regressions:
list_changes(regressions, 'DATABASE PERFORMANCE REGRESSIONS')
print('')
print('If there are good reasons for the increase(s) above (e.g. new features), just remove `%s` and carry on. You will not be bothered again.' % previous_file)
sys.exit(1)
def list_changes(data, title):
print('')
print(title)
print(re.sub('.', '-', title))
print('Unit: number of database queries')
print('')
for k, v0, v in data:
print("%s: %d -> %d" % (k, v0, v))
|
|
b5568053325bd78c277d4bc0adff59cd12e10f48
|
build-plugin.py
|
build-plugin.py
|
import os
UnrealEnginePath='/home/qiuwch/workspace/UnrealEngine'
UATScript = os.path.join(UnrealEnginePath, 'Engine/Build/BatchFiles/RunUAT.sh')
FullPluginFile = os.path.abspath('UnrealCV.uplugin')
os.system('%s BuildPlugin -plugin=%s' % (UATScript, FullPluginFile))
|
Add a script to build plugin.
|
Add a script to build plugin.
|
Python
|
mit
|
qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv
|
Add a script to build plugin.
|
import os
UnrealEnginePath='/home/qiuwch/workspace/UnrealEngine'
UATScript = os.path.join(UnrealEnginePath, 'Engine/Build/BatchFiles/RunUAT.sh')
FullPluginFile = os.path.abspath('UnrealCV.uplugin')
os.system('%s BuildPlugin -plugin=%s' % (UATScript, FullPluginFile))
|
<commit_before><commit_msg>Add a script to build plugin.<commit_after>
|
import os
UnrealEnginePath='/home/qiuwch/workspace/UnrealEngine'
UATScript = os.path.join(UnrealEnginePath, 'Engine/Build/BatchFiles/RunUAT.sh')
FullPluginFile = os.path.abspath('UnrealCV.uplugin')
os.system('%s BuildPlugin -plugin=%s' % (UATScript, FullPluginFile))
|
Add a script to build plugin.import os
UnrealEnginePath='/home/qiuwch/workspace/UnrealEngine'
UATScript = os.path.join(UnrealEnginePath, 'Engine/Build/BatchFiles/RunUAT.sh')
FullPluginFile = os.path.abspath('UnrealCV.uplugin')
os.system('%s BuildPlugin -plugin=%s' % (UATScript, FullPluginFile))
|
<commit_before><commit_msg>Add a script to build plugin.<commit_after>import os
UnrealEnginePath='/home/qiuwch/workspace/UnrealEngine'
UATScript = os.path.join(UnrealEnginePath, 'Engine/Build/BatchFiles/RunUAT.sh')
FullPluginFile = os.path.abspath('UnrealCV.uplugin')
os.system('%s BuildPlugin -plugin=%s' % (UATScript, FullPluginFile))
|
|
21d931e35d9e0b32415a408f28e45894f0c3e800
|
django_backend_test/noras_menu/tasks.py
|
django_backend_test/noras_menu/tasks.py
|
# -*- encoding: utf-8 -*-
#app_mail/tasks.py
import requests
import simplejson as json
from django_backend_test.celery import app
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import EmailMultiAlternatives
from .models import Subscribers, MenuItems
@app.task
def mail_remainder(menu,link):
items_menu = MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
list_mail = Subscribers.objects.values_list('email', flat=True)
subject,from_email,to = 'Menu of the Day','alertas@electroquimica.cl',list_mail
html_content = render_to_string('menu_day.html',{'menu':items_menu,'link':str(link)})
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(subject,text_content,from_email,to)
msg.attach_alternative(html_content,"text/html")
msg.send()
@app.task
def slack_remainder(menu,link):
msg = u"Hola!\nDejo el menú de hoy :)\n {0} <http://{1}>"
items_menu= MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
text="".join([x+"\n" for x in items_menu])
data = {"text":msg.format(text,link), "username":"Nora", "icon_emoji": ":knife_fork_plate:",}
headers = {'Content-type': 'application/json'}
response = requests.post("https://hooks.slack.com/services/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", data=json.dumps(data), headers=headers)
|
Add task files for celery async process
|
Add task files for celery async process
|
Python
|
mit
|
semorale/backend-test,semorale/backend-test,semorale/backend-test
|
Add task files for celery async process
|
# -*- encoding: utf-8 -*-
#app_mail/tasks.py
import requests
import simplejson as json
from django_backend_test.celery import app
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import EmailMultiAlternatives
from .models import Subscribers, MenuItems
@app.task
def mail_remainder(menu,link):
items_menu = MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
list_mail = Subscribers.objects.values_list('email', flat=True)
subject,from_email,to = 'Menu of the Day','alertas@electroquimica.cl',list_mail
html_content = render_to_string('menu_day.html',{'menu':items_menu,'link':str(link)})
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(subject,text_content,from_email,to)
msg.attach_alternative(html_content,"text/html")
msg.send()
@app.task
def slack_remainder(menu,link):
msg = u"Hola!\nDejo el menú de hoy :)\n {0} <http://{1}>"
items_menu= MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
text="".join([x+"\n" for x in items_menu])
data = {"text":msg.format(text,link), "username":"Nora", "icon_emoji": ":knife_fork_plate:",}
headers = {'Content-type': 'application/json'}
response = requests.post("https://hooks.slack.com/services/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", data=json.dumps(data), headers=headers)
|
<commit_before><commit_msg>Add task files for celery async process<commit_after>
|
# -*- encoding: utf-8 -*-
#app_mail/tasks.py
import requests
import simplejson as json
from django_backend_test.celery import app
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import EmailMultiAlternatives
from .models import Subscribers, MenuItems
@app.task
def mail_remainder(menu,link):
items_menu = MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
list_mail = Subscribers.objects.values_list('email', flat=True)
subject,from_email,to = 'Menu of the Day','alertas@electroquimica.cl',list_mail
html_content = render_to_string('menu_day.html',{'menu':items_menu,'link':str(link)})
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(subject,text_content,from_email,to)
msg.attach_alternative(html_content,"text/html")
msg.send()
@app.task
def slack_remainder(menu,link):
msg = u"Hola!\nDejo el menú de hoy :)\n {0} <http://{1}>"
items_menu= MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
text="".join([x+"\n" for x in items_menu])
data = {"text":msg.format(text,link), "username":"Nora", "icon_emoji": ":knife_fork_plate:",}
headers = {'Content-type': 'application/json'}
response = requests.post("https://hooks.slack.com/services/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", data=json.dumps(data), headers=headers)
|
Add task files for celery async process# -*- encoding: utf-8 -*-
#app_mail/tasks.py
import requests
import simplejson as json
from django_backend_test.celery import app
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import EmailMultiAlternatives
from .models import Subscribers, MenuItems
@app.task
def mail_remainder(menu,link):
items_menu = MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
list_mail = Subscribers.objects.values_list('email', flat=True)
subject,from_email,to = 'Menu of the Day','alertas@electroquimica.cl',list_mail
html_content = render_to_string('menu_day.html',{'menu':items_menu,'link':str(link)})
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(subject,text_content,from_email,to)
msg.attach_alternative(html_content,"text/html")
msg.send()
@app.task
def slack_remainder(menu,link):
msg = u"Hola!\nDejo el menú de hoy :)\n {0} <http://{1}>"
items_menu= MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
text="".join([x+"\n" for x in items_menu])
data = {"text":msg.format(text,link), "username":"Nora", "icon_emoji": ":knife_fork_plate:",}
headers = {'Content-type': 'application/json'}
response = requests.post("https://hooks.slack.com/services/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", data=json.dumps(data), headers=headers)
|
<commit_before><commit_msg>Add task files for celery async process<commit_after># -*- encoding: utf-8 -*-
#app_mail/tasks.py
import requests
import simplejson as json
from django_backend_test.celery import app
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import EmailMultiAlternatives
from .models import Subscribers, MenuItems
@app.task
def mail_remainder(menu,link):
items_menu = MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
list_mail = Subscribers.objects.values_list('email', flat=True)
subject,from_email,to = 'Menu of the Day','alertas@electroquimica.cl',list_mail
html_content = render_to_string('menu_day.html',{'menu':items_menu,'link':str(link)})
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(subject,text_content,from_email,to)
msg.attach_alternative(html_content,"text/html")
msg.send()
@app.task
def slack_remainder(menu,link):
msg = u"Hola!\nDejo el menú de hoy :)\n {0} <http://{1}>"
items_menu= MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
text="".join([x+"\n" for x in items_menu])
data = {"text":msg.format(text,link), "username":"Nora", "icon_emoji": ":knife_fork_plate:",}
headers = {'Content-type': 'application/json'}
response = requests.post("https://hooks.slack.com/services/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", data=json.dumps(data), headers=headers)
|
|
849a29b22d656c8079b4ccaf922848fb057c80c5
|
forms/migrations/0023_assign_sheets_to_transnational.py
|
forms/migrations/0023_assign_sheets_to_transnational.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def assign_transnational_region_to_sheets(apps, schema_editor):
from forms.models import sheet_models
CountryRegion = apps.get_model("forms", "CountryRegion")
Monitor = apps.get_model("gmmp", "Monitor")
db_alias = schema_editor.connection.alias
try:
trans_country_region = CountryRegion.objects.using(db_alias).get(country='T1', region='Transnational')
except ObjectDoesNotExist:
trans_country_region = CountryRegion(country='T1', region='Transnational')
trans_country_region.save()
monitor = Monitor.objects.get(user__last_name='Macharia', user__first_name='Sarah')
monitor.country = trans_country_region.country
monitor.save()
for name, model in sheet_models.iteritems():
sheets_model = apps.get_model("forms", model._meta.object_name)
sheets = sheets_model.objects.using(db_alias).filter(monitor=monitor)
for sheet in sheets:
sheet.country_region = trans_country_region
sheet.country = trans_country_region.country
sheet.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('forms', '0022_assign_country_region_to_sheet_models'),
]
operations = [
migrations.RunPython(
assign_transnational_region_to_sheets,
backwards,
),
]
|
Add migration to assign appropriate sheets to Transnational CountryRegion
|
Add migration to assign appropriate sheets to Transnational CountryRegion
|
Python
|
apache-2.0
|
Code4SA/gmmp,Code4SA/gmmp,Code4SA/gmmp
|
Add migration to assign appropriate sheets to Transnational CountryRegion
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def assign_transnational_region_to_sheets(apps, schema_editor):
from forms.models import sheet_models
CountryRegion = apps.get_model("forms", "CountryRegion")
Monitor = apps.get_model("gmmp", "Monitor")
db_alias = schema_editor.connection.alias
try:
trans_country_region = CountryRegion.objects.using(db_alias).get(country='T1', region='Transnational')
except ObjectDoesNotExist:
trans_country_region = CountryRegion(country='T1', region='Transnational')
trans_country_region.save()
monitor = Monitor.objects.get(user__last_name='Macharia', user__first_name='Sarah')
monitor.country = trans_country_region.country
monitor.save()
for name, model in sheet_models.iteritems():
sheets_model = apps.get_model("forms", model._meta.object_name)
sheets = sheets_model.objects.using(db_alias).filter(monitor=monitor)
for sheet in sheets:
sheet.country_region = trans_country_region
sheet.country = trans_country_region.country
sheet.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('forms', '0022_assign_country_region_to_sheet_models'),
]
operations = [
migrations.RunPython(
assign_transnational_region_to_sheets,
backwards,
),
]
|
<commit_before><commit_msg>Add migration to assign appropriate sheets to Transnational CountryRegion<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def assign_transnational_region_to_sheets(apps, schema_editor):
from forms.models import sheet_models
CountryRegion = apps.get_model("forms", "CountryRegion")
Monitor = apps.get_model("gmmp", "Monitor")
db_alias = schema_editor.connection.alias
try:
trans_country_region = CountryRegion.objects.using(db_alias).get(country='T1', region='Transnational')
except ObjectDoesNotExist:
trans_country_region = CountryRegion(country='T1', region='Transnational')
trans_country_region.save()
monitor = Monitor.objects.get(user__last_name='Macharia', user__first_name='Sarah')
monitor.country = trans_country_region.country
monitor.save()
for name, model in sheet_models.iteritems():
sheets_model = apps.get_model("forms", model._meta.object_name)
sheets = sheets_model.objects.using(db_alias).filter(monitor=monitor)
for sheet in sheets:
sheet.country_region = trans_country_region
sheet.country = trans_country_region.country
sheet.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('forms', '0022_assign_country_region_to_sheet_models'),
]
operations = [
migrations.RunPython(
assign_transnational_region_to_sheets,
backwards,
),
]
|
Add migration to assign appropriate sheets to Transnational CountryRegion# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def assign_transnational_region_to_sheets(apps, schema_editor):
from forms.models import sheet_models
CountryRegion = apps.get_model("forms", "CountryRegion")
Monitor = apps.get_model("gmmp", "Monitor")
db_alias = schema_editor.connection.alias
try:
trans_country_region = CountryRegion.objects.using(db_alias).get(country='T1', region='Transnational')
except ObjectDoesNotExist:
trans_country_region = CountryRegion(country='T1', region='Transnational')
trans_country_region.save()
monitor = Monitor.objects.get(user__last_name='Macharia', user__first_name='Sarah')
monitor.country = trans_country_region.country
monitor.save()
for name, model in sheet_models.iteritems():
sheets_model = apps.get_model("forms", model._meta.object_name)
sheets = sheets_model.objects.using(db_alias).filter(monitor=monitor)
for sheet in sheets:
sheet.country_region = trans_country_region
sheet.country = trans_country_region.country
sheet.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('forms', '0022_assign_country_region_to_sheet_models'),
]
operations = [
migrations.RunPython(
assign_transnational_region_to_sheets,
backwards,
),
]
|
<commit_before><commit_msg>Add migration to assign appropriate sheets to Transnational CountryRegion<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def assign_transnational_region_to_sheets(apps, schema_editor):
from forms.models import sheet_models
CountryRegion = apps.get_model("forms", "CountryRegion")
Monitor = apps.get_model("gmmp", "Monitor")
db_alias = schema_editor.connection.alias
try:
trans_country_region = CountryRegion.objects.using(db_alias).get(country='T1', region='Transnational')
except ObjectDoesNotExist:
trans_country_region = CountryRegion(country='T1', region='Transnational')
trans_country_region.save()
monitor = Monitor.objects.get(user__last_name='Macharia', user__first_name='Sarah')
monitor.country = trans_country_region.country
monitor.save()
for name, model in sheet_models.iteritems():
sheets_model = apps.get_model("forms", model._meta.object_name)
sheets = sheets_model.objects.using(db_alias).filter(monitor=monitor)
for sheet in sheets:
sheet.country_region = trans_country_region
sheet.country = trans_country_region.country
sheet.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('forms', '0022_assign_country_region_to_sheet_models'),
]
operations = [
migrations.RunPython(
assign_transnational_region_to_sheets,
backwards,
),
]
|
|
eeb9b9877f1aa5bc1f22ac4883fe58a57ee0474a
|
scripts/test_hots.py
|
scripts/test_hots.py
|
import numpy as np
events = [
(1162704874, -5547),
(1179727586, -5548),
(1209562198, -5547),
(1224960594, -5548),
]
t, x = zip(*events)
t = np.array(t)
x = np.array(x)
t = t - t[0] # redefine zero time
alpha = 1/t[-1]
t = alpha*t # scale time values
A = np.ones((4, 4))
A[:, -2] = np.array(t)
for i in reversed(range(0, A.shape[1] - 2)):
A[:, i] = A[:, i + 1] * A[:, -2]
B = np.array(x)
print(A)
print(B)
P = np.linalg.lstsq(A, B)[0]
print(P)
tc = alpha*(events[-1][0] + 1000)
print(tc)
T = np.ones(4)
for i in reversed(range(0, T.shape[0] - 1)):
T[i] = tc * T[i + 1]
print(T)
print(np.dot(P, T))
|
Add script to test HOTS
|
Add script to test HOTS
Add Python script to test Higher-Order Encoder Time Stamping with
sampled data.
|
Python
|
bsd-2-clause
|
oliverlee/phobos,oliverlee/phobos,oliverlee/phobos,oliverlee/phobos
|
Add script to test HOTS
Add Python script to test Higher-Order Encoder Time Stamping with
sampled data.
|
import numpy as np
events = [
(1162704874, -5547),
(1179727586, -5548),
(1209562198, -5547),
(1224960594, -5548),
]
t, x = zip(*events)
t = np.array(t)
x = np.array(x)
t = t - t[0] # redefine zero time
alpha = 1/t[-1]
t = alpha*t # scale time values
A = np.ones((4, 4))
A[:, -2] = np.array(t)
for i in reversed(range(0, A.shape[1] - 2)):
A[:, i] = A[:, i + 1] * A[:, -2]
B = np.array(x)
print(A)
print(B)
P = np.linalg.lstsq(A, B)[0]
print(P)
tc = alpha*(events[-1][0] + 1000 - events[0][0])  # shift by the first event time (zero time) before scaling
print(tc)
T = np.ones(4)
for i in reversed(range(0, T.shape[0] - 1)):
T[i] = tc * T[i + 1]
print(T)
print(np.dot(P, T))
|
<commit_before><commit_msg>Add script to test HOTS
Add Python script to test Higher-Order Encoder Time Stamping with
sampled data.<commit_after>
|
import numpy as np
events = [
(1162704874, -5547),
(1179727586, -5548),
(1209562198, -5547),
(1224960594, -5548),
]
t, x = zip(*events)
t = np.array(t)
x = np.array(x)
t = t - t[0] # redefine zero time
alpha = 1/t[-1]
t = alpha*t # scale time values
A = np.ones((4, 4))
A[:, -2] = np.array(t)
for i in reversed(range(0, A.shape[1] - 2)):
A[:, i] = A[:, i + 1] * A[:, -2]
B = np.array(x)
print(A)
print(B)
P = np.linalg.lstsq(A, B)[0]
print(P)
tc = alpha*(events[-1][0] + 1000 - events[0][0])  # shift by the first event time (zero time) before scaling
print(tc)
T = np.ones(4)
for i in reversed(range(0, T.shape[0] - 1)):
T[i] = tc * T[i + 1]
print(T)
print(np.dot(P, T))
|
Add script to test HOTS
Add Python script to test Higher-Order Encoder Time Stamping with
sampled data.import numpy as np
events = [
(1162704874, -5547),
(1179727586, -5548),
(1209562198, -5547),
(1224960594, -5548),
]
t, x = zip(*events)
t = np.array(t)
x = np.array(x)
t = t - t[0] # redefine zero time
alpha = 1/t[-1]
t = alpha*t # scale time values
A = np.ones((4, 4))
A[:, -2] = np.array(t)
for i in reversed(range(0, A.shape[1] - 2)):
A[:, i] = A[:, i + 1] * A[:, -2]
B = np.array(x)
print(A)
print(B)
P = np.linalg.lstsq(A, B)[0]
print(P)
tc = alpha*(events[-1][0] + 1000 - events[0][0])  # shift by the first event time (zero time) before scaling
print(tc)
T = np.ones(4)
for i in reversed(range(0, T.shape[0] - 1)):
T[i] = tc * T[i + 1]
print(T)
print(np.dot(P, T))
|
<commit_before><commit_msg>Add script to test HOTS
Add Python script to test Higher-Order Encoder Time Stamping with
sampled data.<commit_after>import numpy as np
events = [
(1162704874, -5547),
(1179727586, -5548),
(1209562198, -5547),
(1224960594, -5548),
]
t, x = zip(*events)
t = np.array(t)
x = np.array(x)
t = t - t[0] # redefine zero time
alpha = 1/t[-1]
t = alpha*t # scale time values
A = np.ones((4, 4))
A[:, -2] = np.array(t)
for i in reversed(range(0, A.shape[1] - 2)):
A[:, i] = A[:, i + 1] * A[:, -2]
B = np.array(x)
print(A)
print(B)
P = np.linalg.lstsq(A, B)[0]
print(P)
tc = alpha*(events[-1][0] + 1000 - events[0][0])  # shift by the first event time (zero time) before scaling
print(tc)
T = np.ones(4)
for i in reversed(range(0, T.shape[0] - 1)):
T[i] = tc * T[i + 1]
print(T)
print(np.dot(P, T))
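A minimal equivalent sketch using numpy's polynomial helpers, assuming the same event data and the same 1000-tick extrapolation horizon as the script above:
import numpy as np

events = [(1162704874, -5547), (1179727586, -5548),
          (1209562198, -5547), (1224960594, -5548)]
t, x = zip(*events)
t = np.array(t, dtype=float)
x = np.array(x, dtype=float)
span = t[-1] - t[0]
ts = (t - t[0]) / span             # shift to zero time and scale to [0, 1]
coeffs = np.polyfit(ts, x, 3)      # cubic least-squares fit, same model as the lstsq above
tc = (t[-1] + 1000 - t[0]) / span  # 1000 ticks past the last event, in scaled time
print(np.polyval(coeffs, tc))      # extrapolated encoder count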
|
|
ee169acf82eff08daa40c461263712f2af2a1131
|
scripts/simulate.py
|
scripts/simulate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs stand-alone simulation on an RMG job. This is effectively the
same script as sensitivity.py
"""
import os.path
import argparse
from rmgpy.tools.sensitivity import runSensitivity
################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar='INPUT', type=str, nargs=1,
help='RMG input file')
parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1,
help='Chemkin file')
parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1,
help='RMG dictionary file')
args = parser.parse_args()
inputFile = os.path.abspath(args.input[0])
chemkinFile = os.path.abspath(args.chemkin[0])
dictFile = os.path.abspath(args.dictionary[0])
return inputFile, chemkinFile, dictFile
def main():
# This might not work anymore because functions were modified for use with webserver
inputFile, chemkinFile, dictFile = parse_arguments()
runSensitivity(inputFile, chemkinFile, dictFile)
################################################################################
if __name__ == '__main__':
main()
|
Add a standalone simulation script (really a duplicate of sensitivity.py)
|
Add a standalone simulation script (really a duplicate of sensitivity.py)
|
Python
|
mit
|
chatelak/RMG-Py,pierrelb/RMG-Py,chatelak/RMG-Py,nyee/RMG-Py,pierrelb/RMG-Py,nickvandewiele/RMG-Py,nyee/RMG-Py,nickvandewiele/RMG-Py
|
Add a standalone simulation script (really a duplicate of sensitivity.py)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs stand-alone simulation on an RMG job. This is effectively the
same script as sensitivity.py
"""
import os.path
import argparse
from rmgpy.tools.sensitivity import runSensitivity
################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar='INPUT', type=str, nargs=1,
help='RMG input file')
parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1,
help='Chemkin file')
parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1,
help='RMG dictionary file')
args = parser.parse_args()
inputFile = os.path.abspath(args.input[0])
chemkinFile = os.path.abspath(args.chemkin[0])
dictFile = os.path.abspath(args.dictionary[0])
return inputFile, chemkinFile, dictFile
def main():
# This might not work anymore because functions were modified for use with webserver
inputFile, chemkinFile, dictFile = parse_arguments()
runSensitivity(inputFile, chemkinFile, dictFile)
################################################################################
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a standalone simulation script (really a duplicate of sensitivity.py)<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs stand-alone simulation on an RMG job. This is effectively the
same script as sensitivity.py
"""
import os.path
import argparse
from rmgpy.tools.sensitivity import runSensitivity
################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar='INPUT', type=str, nargs=1,
help='RMG input file')
parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1,
help='Chemkin file')
parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1,
help='RMG dictionary file')
args = parser.parse_args()
inputFile = os.path.abspath(args.input[0])
chemkinFile = os.path.abspath(args.chemkin[0])
dictFile = os.path.abspath(args.dictionary[0])
return inputFile, chemkinFile, dictFile
def main():
# This might not work anymore because functions were modified for use with webserver
inputFile, chemkinFile, dictFile = parse_arguments()
runSensitivity(inputFile, chemkinFile, dictFile)
################################################################################
if __name__ == '__main__':
main()
|
Add a standalone simulation script (really a duplicate of sensitivity.py)#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs stand-alone simulation on an RMG job. This is effectively the
same script as sensitivity.py
"""
import os.path
import argparse
from rmgpy.tools.sensitivity import runSensitivity
################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar='INPUT', type=str, nargs=1,
help='RMG input file')
parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1,
help='Chemkin file')
parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1,
help='RMG dictionary file')
args = parser.parse_args()
inputFile = os.path.abspath(args.input[0])
chemkinFile = os.path.abspath(args.chemkin[0])
dictFile = os.path.abspath(args.dictionary[0])
return inputFile, chemkinFile, dictFile
def main():
# This might not work anymore because functions were modified for use with webserver
inputFile, chemkinFile, dictFile = parse_arguments()
runSensitivity(inputFile, chemkinFile, dictFile)
################################################################################
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a standalone simulation script (really a duplicate of sensitivity.py)<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs stand-alone simulation on an RMG job. This is effectively the
same script as sensitivity.py
"""
import os.path
import argparse
from rmgpy.tools.sensitivity import runSensitivity
################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar='INPUT', type=str, nargs=1,
help='RMG input file')
parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1,
help='Chemkin file')
parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1,
help='RMG dictionary file')
args = parser.parse_args()
inputFile = os.path.abspath(args.input[0])
chemkinFile = os.path.abspath(args.chemkin[0])
dictFile = os.path.abspath(args.dictionary[0])
return inputFile, chemkinFile, dictFile
def main():
# This might not work anymore because functions were modified for use with webserver
inputFile, chemkinFile, dictFile = parse_arguments()
runSensitivity(inputFile, chemkinFile, dictFile)
################################################################################
if __name__ == '__main__':
main()
|
|
eeb0187b9d474b9b5d1710e8f45f8116894eb15c
|
temp-sensor02/main.py
|
temp-sensor02/main.py
|
from machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import machine
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
url = keys['inputUrl'] + "?private_key=" + keys['privateKey'] + "&temp=" + str(temperature)
#data = {'temp':temperature}
#data['private_key'] = keys['privateKey']
#print (keys['inputUrl'])
#print(keys['privateKey'])
#datajson = ujson.dumps(data)
#print (datajson)
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
|
Read Temperature from DS18B20. Post the data to data.sparkfun.com
|
Read Temperature from DS18B20. Post the data to data.sparkfun.com
|
Python
|
mit
|
fuzzyhandle/esp8266hangout,fuzzyhandle/esp8266hangout,fuzzyhandle/esp8266hangout
|
Read Temperature from DS18B20. Post the data to data.sparkfun.com
|
from machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import machine
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
url = keys['inputUrl'] + "?private_key=" + keys['privateKey'] + "&temp=" + str(temperature)
#data = {'temp':temperature}
#data['private_key'] = keys['privateKey']
#print (keys['inputUrl'])
#print(keys['privateKey'])
#datajson = ujson.dumps(data)
#print (datajson)
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
|
<commit_before><commit_msg>Read Temperature from DS18B20. Post the data to data.sparkfun.com<commit_after>
|
from machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import machine
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
url = keys['inputUrl'] + "?private_key=" + keys['privateKey'] + "&temp=" + str(temperature)
#data = {'temp':temperature}
#data['private_key'] = keys['privateKey']
#print (keys['inputUrl'])
#print(keys['privateKey'])
#datajson = ujson.dumps(data)
#print (datajson)
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
|
Read Temperature from DS18B20. Post the data to data.sparkfun.comfrom machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import machine
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
url = keys['inputUrl'] + "?private_key=" + keys['privateKey'] + "&temp=" + str(temperature)
#data = {'temp':temperature}
#data['private_key'] = keys['privateKey']
#print (keys['inputUrl'])
#print(keys['privateKey'])
#datajson = ujson.dumps(data)
#print (datajson)
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
|
<commit_before><commit_msg>Read Temperature from DS18B20. Post the data to data.sparkfun.com<commit_after>from machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import machine
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
url = keys['inputUrl'] + "?private_key=" + keys['privateKey'] + "&temp=" + str(temperature)
#data = {'temp':temperature}
#data['private_key'] = keys['privateKey']
#print (keys['inputUrl'])
#print(keys['privateKey'])
#datajson = ujson.dumps(data)
#print (datajson)
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
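The loop above assumes a single DS18X20 on the bus (rom = lstrom[0]). If several sensors share the data line, the same calls can be iterated over every ROM returned by scan(); a minimal sketch, reusing only the imports and methods already used above:
from machine import Pin
from ds18x20 import DS18X20
import onewire
import time

ow = onewire.OneWire(Pin(2))   # same data line, GPIO2 aka D4
ds = DS18X20(ow)
roms = ds.scan()               # every DS18X20 found on the bus
ds.convert_temp()              # one conversion command covers all sensors
time.sleep_ms(750)             # 12-bit conversion time
for rom in roms:
    temperature = round(float(ds.read_temp(rom)), 1)
    print(rom, temperature)    # or post each reading, e.g. with posttocloud()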
|
|
632f71651864517cc977f79dcdac7f3b0f516b49
|
scripts/post_data.py
|
scripts/post_data.py
|
#!/usr/bin/env python3
import requests
domain = 'http://dakis.gimbutas.lt/api/'
exp_data = {
"description": "First successful post through API",
"algorithm": "TestTasks",
"neighbours": "Nearest",
"stopping_criteria": "x_dist",
"stopping_accuracy": "0.01",
"subregion": "simplex",
"inner_problem_accuracy": None,
"inner_problem_iters": 10,
"inner_problem_division": "LongesEdge",
"lipschitz_estimation": "min_allowed",
"simplex_division": "LongestEdge",
"valid": True,
"mistakes": "",
}
resp = requests.post(domain + 'experiments/', data=exp_data)
exp_url = resp.json()['url']
task_data = {
"func_name": "GKLS",
"func_cls": 1,
"func_id": 1,
"calls": 123,
"subregions": 1041,
"duration": "0.12",
"f_min": None,
"x_min": None,
"experiment": exp_url,
}
requests.post(domain + 'tasks/', data=task_data)
task_data['func_id'] = 2
task_data['calls'] = 213
requests.post(domain + 'tasks/', data=task_data)
|
Add example script to post experiment and task data
|
Add example script to post experiment and task data
|
Python
|
agpl-3.0
|
niekas/dakis,niekas/dakis,niekas/dakis
|
Add example script to post experiment and task data
|
#!/usr/bin/env python3
import requests
domain = 'http://dakis.gimbutas.lt/api/'
exp_data = {
"description": "First successful post through API",
"algorithm": "TestTasks",
"neighbours": "Nearest",
"stopping_criteria": "x_dist",
"stopping_accuracy": "0.01",
"subregion": "simplex",
"inner_problem_accuracy": None,
"inner_problem_iters": 10,
"inner_problem_division": "LongesEdge",
"lipschitz_estimation": "min_allowed",
"simplex_division": "LongestEdge",
"valid": True,
"mistakes": "",
}
resp = requests.post(domain + 'experiments/', data=exp_data)
exp_url = resp.json()['url']
task_data = {
"func_name": "GKLS",
"func_cls": 1,
"func_id": 1,
"calls": 123,
"subregions": 1041,
"duration": "0.12",
"f_min": None,
"x_min": None,
"experiment": exp_url,
}
requests.post(domain + 'tasks/', data=task_data)
task_data['func_id'] = 2
task_data['calls'] = 213
requests.post(domain + 'tasks/', data=task_data)
|
<commit_before><commit_msg>Add example script to post experiment and task data<commit_after>
|
#!/usr/bin/env python3
import requests
domain = 'http://dakis.gimbutas.lt/api/'
exp_data = {
"description": "First successful post through API",
"algorithm": "TestTasks",
"neighbours": "Nearest",
"stopping_criteria": "x_dist",
"stopping_accuracy": "0.01",
"subregion": "simplex",
"inner_problem_accuracy": None,
"inner_problem_iters": 10,
"inner_problem_division": "LongesEdge",
"lipschitz_estimation": "min_allowed",
"simplex_division": "LongestEdge",
"valid": True,
"mistakes": "",
}
resp = requests.post(domain + 'experiments/', data=exp_data)
exp_url = resp.json()['url']
task_data = {
"func_name": "GKLS",
"func_cls": 1,
"func_id": 1,
"calls": 123,
"subregions": 1041,
"duration": "0.12",
"f_min": None,
"x_min": None,
"experiment": exp_url,
}
requests.post(domain + 'tasks/', data=task_data)
task_data['func_id'] = 2
task_data['calls'] = 213
requests.post(domain + 'tasks/', data=task_data)
|
Add example script to post experiment and task data#!/usr/bin/env python3
import requests
domain = 'http://dakis.gimbutas.lt/api/'
exp_data = {
"description": "First successful post through API",
"algorithm": "TestTasks",
"neighbours": "Nearest",
"stopping_criteria": "x_dist",
"stopping_accuracy": "0.01",
"subregion": "simplex",
"inner_problem_accuracy": None,
"inner_problem_iters": 10,
"inner_problem_division": "LongesEdge",
"lipschitz_estimation": "min_allowed",
"simplex_division": "LongestEdge",
"valid": True,
"mistakes": "",
}
resp = requests.post(domain + 'experiments/', data=exp_data)
exp_url = resp.json()['url']
task_data = {
"func_name": "GKLS",
"func_cls": 1,
"func_id": 1,
"calls": 123,
"subregions": 1041,
"duration": "0.12",
"f_min": None,
"x_min": None,
"experiment": exp_url,
}
requests.post(domain + 'tasks/', data=task_data)
task_data['func_id'] = 2
task_data['calls'] = 213
requests.post(domain + 'tasks/', data=task_data)
|
<commit_before><commit_msg>Add example script to post experiment and task data<commit_after>#!/usr/bin/env python3
import requests
domain = 'http://dakis.gimbutas.lt/api/'
exp_data = {
"description": "First successful post through API",
"algorithm": "TestTasks",
"neighbours": "Nearest",
"stopping_criteria": "x_dist",
"stopping_accuracy": "0.01",
"subregion": "simplex",
"inner_problem_accuracy": None,
"inner_problem_iters": 10,
"inner_problem_division": "LongesEdge",
"lipschitz_estimation": "min_allowed",
"simplex_division": "LongestEdge",
"valid": True,
"mistakes": "",
}
resp = requests.post(domain + 'experiments/', data=exp_data)
exp_url = resp.json()['url']
task_data = {
"func_name": "GKLS",
"func_cls": 1,
"func_id": 1,
"calls": 123,
"subregions": 1041,
"duration": "0.12",
"f_min": None,
"x_min": None,
"experiment": exp_url,
}
requests.post(domain + 'tasks/', data=task_data)
task_data['func_id'] = 2
task_data['calls'] = 213
requests.post(domain + 'tasks/', data=task_data)
|
|
e04bf5dd12a1f5e28258541dcf9d2eb8c5567ad0
|
tests/lead_price_tests.py
|
tests/lead_price_tests.py
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicLeadPrice(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
prices = self.sds.lead_price('YTO', 'SFO', [3,4])
self.assertIsNotNone(prices)
def test_basic_request(self):
opts = {
'origin': 'YTO',
'destination': 'SFO',
'lengthofstay': [3,4]
}
prices = self.sds.lead_price_opts(opts)
self.assertIsNotNone(prices)
if __name__ == '__main__':
unittest.main()
|
Add tests for lead price
|
Add tests for lead price
|
Python
|
mit
|
Jamil/sabre_dev_studio
|
Add tests for lead price
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicLeadPrice(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
prices = self.sds.lead_price('YTO', 'SFO', [3,4])
self.assertIsNotNone(prices)
def test_basic_request(self):
opts = {
'origin': 'YTO',
'destination': 'SFO',
'lengthofstay': [3,4]
}
prices = self.sds.lead_price_opts(opts)
self.assertIsNotNone(prices)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for lead price<commit_after>
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicLeadPrice(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
prices = self.sds.lead_price('YTO', 'SFO', [3,4])
self.assertIsNotNone(prices)
def test_basic_request(self):
opts = {
'origin': 'YTO',
'destination': 'SFO',
'lengthofstay': [3,4]
}
prices = self.sds.lead_price_opts(opts)
self.assertIsNotNone(prices)
if __name__ == '__main__':
unittest.main()
|
Add tests for lead priceimport unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicLeadPrice(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
prices = self.sds.lead_price('YTO', 'SFO', [3,4])
self.assertIsNotNone(prices)
def test_basic_request(self):
opts = {
'origin': 'YTO',
'destination': 'SFO',
'lengthofstay': [3,4]
}
prices = self.sds.lead_price_opts(opts)
self.assertIsNotNone(prices)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for lead price<commit_after>import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicLeadPrice(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
prices = self.sds.lead_price('YTO', 'SFO', [3,4])
self.assertIsNotNone(prices)
def test_basic_request(self):
opts = {
'origin': 'YTO',
'destination': 'SFO',
'lengthofstay': [3,4]
}
prices = self.sds.lead_price_opts(opts)
self.assertIsNotNone(prices)
if __name__ == '__main__':
unittest.main()
|
|
b7c22cddecb743e9597c92160e3aa0100e149e19
|
tests/model/test_hades.py
|
tests/model/test_hades.py
|
from datetime import datetime, timedelta
from pycroft.model import session
from pycroft.model.hades import radgroup_property_mappings, radcheck
from tests import FactoryDataTestBase
from tests.factories import PropertyGroupFactory, MembershipFactory, UserWithHostFactory, \
SwitchFactory, PatchPortFactory
class HadesViewTest(FactoryDataTestBase):
def create_factories(self):
self.user = UserWithHostFactory.create()
self.network_access_group = PropertyGroupFactory.create(
name="Member",
granted={'network_access'},
)
self.blocked_by_finance_group = PropertyGroupFactory.create(
name="Blocked (finance)",
granted={'blocked_by_finance'},
denied={'network_access'},
)
self.blocked_by_traffic_group = PropertyGroupFactory.create(
name="Blocked (traffic)",
granted={'blocked_by_traffic'},
denied={'network_access'},
)
# the user's room needs to be connected to provide `nasipaddress` and `nasportid`
# TODO: remove owner and see if things still work
self.switch = SwitchFactory.create(host__owner=self.user)
PatchPortFactory.create_batch(2, patched=True, switch_port__switch=self.switch,
# This needs to be the HOSTS room!
room=self.user.hosts[0].room)
# TODO: create this membership in each test, not here
MembershipFactory.create(user=self.user, group=self.network_access_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
MembershipFactory.create(user=self.user, group=self.blocked_by_finance_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
session.session.execute(radgroup_property_mappings.insert(values=[
{'property': 'blocked_by_finance', 'radgroup': 'finance'},
{'property': 'blocked_by_traffic', 'radgroup': 'traffic'},
]))
def test_radcheck(self):
# <mac> - <nasip> - <nasport> - "Cleartext-Password" - := - <mac> - 10
# We have one interface with a MAC whose room has two ports on the same switch
rows = session.session.query(radcheck.table).all()
host = self.user.hosts[0]
mac = host.interfaces[0].mac
for row in rows:
self.assertEqual(row.username, mac)
self.assertEqual(row.nasipaddress, self.switch.management_ip)
self.assertEqual(row.attribute, "Cleartext-Password")
self.assertEqual(row.op, ":=")
self.assertEqual(row.value, mac)
self.assertEqual(row.priority, 10)
self.assertEqual({row.nasportid for row in rows},
{port.switch_port.name for port in host.room.patch_ports})
# TODO: Put Entries in some basetable to test tagged vlans (separate test)
# TODO: test radreply, radgroupreply (with base, see above), radgroupcheck
|
Introduce hades test fixtures and first tests.
|
Introduce hades test fixtures and first tests.
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft
|
Introduce hades test fixtures and first tests.
|
from datetime import datetime, timedelta
from pycroft.model import session
from pycroft.model.hades import radgroup_property_mappings, radcheck
from tests import FactoryDataTestBase
from tests.factories import PropertyGroupFactory, MembershipFactory, UserWithHostFactory, \
SwitchFactory, PatchPortFactory
class HadesViewTest(FactoryDataTestBase):
def create_factories(self):
self.user = UserWithHostFactory.create()
self.network_access_group = PropertyGroupFactory.create(
name="Member",
granted={'network_access'},
)
self.blocked_by_finance_group = PropertyGroupFactory.create(
name="Blocked (finance)",
granted={'blocked_by_finance'},
denied={'network_access'},
)
self.blocked_by_traffic_group = PropertyGroupFactory.create(
name="Blocked (traffic)",
granted={'blocked_by_traffic'},
denied={'network_access'},
)
# the user's room needs to be connected to provide `nasipaddress` and `nasportid`
# TODO: remove owner and see if things still work
self.switch = SwitchFactory.create(host__owner=self.user)
PatchPortFactory.create_batch(2, patched=True, switch_port__switch=self.switch,
# This needs to be the HOSTS room!
room=self.user.hosts[0].room)
# TODO: create this membership in each test, not here
MembershipFactory.create(user=self.user, group=self.network_access_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
MembershipFactory.create(user=self.user, group=self.blocked_by_finance_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
session.session.execute(radgroup_property_mappings.insert(values=[
{'property': 'blocked_by_finance', 'radgroup': 'finance'},
{'property': 'blocked_by_traffic', 'radgroup': 'traffic'},
]))
def test_radcheck(self):
# <mac> - <nasip> - <nasport> - "Cleartext-Password" - := - <mac> - 10
# We have one interface with a MAC whose room has two ports on the same switch
rows = session.session.query(radcheck.table).all()
host = self.user.hosts[0]
mac = host.interfaces[0].mac
for row in rows:
self.assertEqual(row.username, mac)
self.assertEqual(row.nasipaddress, self.switch.management_ip)
self.assertEqual(row.attribute, "Cleartext-Password")
self.assertEqual(row.op, ":=")
self.assertEqual(row.value, mac)
self.assertEqual(row.priority, 10)
self.assertEqual({row.nasportid for row in rows},
{port.switch_port.name for port in host.room.patch_ports})
# TODO: Put Entries in some basetable to test tagged vlans (separate test)
# TODO: test radreply, radgroupreply (with base, see above), radgroupcheck
|
<commit_before><commit_msg>Introduce hades test fixtures and first tests.<commit_after>
|
from datetime import datetime, timedelta
from pycroft.model import session
from pycroft.model.hades import radgroup_property_mappings, radcheck
from tests import FactoryDataTestBase
from tests.factories import PropertyGroupFactory, MembershipFactory, UserWithHostFactory, \
SwitchFactory, PatchPortFactory
class HadesViewTest(FactoryDataTestBase):
def create_factories(self):
self.user = UserWithHostFactory.create()
self.network_access_group = PropertyGroupFactory.create(
name="Member",
granted={'network_access'},
)
self.blocked_by_finance_group = PropertyGroupFactory.create(
name="Blocked (finance)",
granted={'blocked_by_finance'},
denied={'network_access'},
)
self.blocked_by_traffic_group = PropertyGroupFactory.create(
name="Blocked (traffic)",
granted={'blocked_by_traffic'},
denied={'network_access'},
)
# the user's room needs to be connected to provide `nasipaddress` and `nasportid`
# TODO: remove owner and see if things still work
self.switch = SwitchFactory.create(host__owner=self.user)
PatchPortFactory.create_batch(2, patched=True, switch_port__switch=self.switch,
# This needs to be the HOSTS room!
room=self.user.hosts[0].room)
# TODO: create this membership in each test, not here
MembershipFactory.create(user=self.user, group=self.network_access_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
MembershipFactory.create(user=self.user, group=self.blocked_by_finance_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
session.session.execute(radgroup_property_mappings.insert(values=[
{'property': 'blocked_by_finance', 'radgroup': 'finance'},
{'property': 'blocked_by_traffic', 'radgroup': 'traffic'},
]))
def test_radcheck(self):
# <mac> - <nasip> - <nasport> - "Cleartext-Password" - := - <mac> - 10
# We have one interface with a MAC whose room has two ports on the same switch
rows = session.session.query(radcheck.table).all()
host = self.user.hosts[0]
mac = host.interfaces[0].mac
for row in rows:
self.assertEqual(row.username, mac)
self.assertEqual(row.nasipaddress, self.switch.management_ip)
self.assertEqual(row.attribute, "Cleartext-Password")
self.assertEqual(row.op, ":=")
self.assertEqual(row.value, mac)
self.assertEqual(row.priority, 10)
self.assertEqual({row.nasportid for row in rows},
{port.switch_port.name for port in host.room.patch_ports})
# TODO: Put Entries in some basetable to test tagged vlans (separate test)
# TODO: test radreply, radgroupreply (with base, see above), radgroupcheck
|
Introduce hades test fixtures and first tests.from datetime import datetime, timedelta
from pycroft.model import session
from pycroft.model.hades import radgroup_property_mappings, radcheck
from tests import FactoryDataTestBase
from tests.factories import PropertyGroupFactory, MembershipFactory, UserWithHostFactory, \
SwitchFactory, PatchPortFactory
class HadesViewTest(FactoryDataTestBase):
def create_factories(self):
self.user = UserWithHostFactory.create()
self.network_access_group = PropertyGroupFactory.create(
name="Member",
granted={'network_access'},
)
self.blocked_by_finance_group = PropertyGroupFactory.create(
name="Blocked (finance)",
granted={'blocked_by_finance'},
denied={'network_access'},
)
self.blocked_by_traffic_group = PropertyGroupFactory.create(
name="Blocked (traffic)",
granted={'blocked_by_traffic'},
denied={'network_access'},
)
# the user's room needs to be connected to provide `nasipaddress` and `nasportid`
# TODO: remove owner and see if things still work
self.switch = SwitchFactory.create(host__owner=self.user)
PatchPortFactory.create_batch(2, patched=True, switch_port__switch=self.switch,
# This needs to be the HOSTS room!
room=self.user.hosts[0].room)
# TODO: create this membership in each test, not here
MembershipFactory.create(user=self.user, group=self.network_access_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
MembershipFactory.create(user=self.user, group=self.blocked_by_finance_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
session.session.execute(radgroup_property_mappings.insert(values=[
{'property': 'blocked_by_finance', 'radgroup': 'finance'},
{'property': 'blocked_by_traffic', 'radgroup': 'traffic'},
]))
def test_radcheck(self):
# <mac> - <nasip> - <nasport> - "Cleartext-Password" - := - <mac> - 10
# We have one interface with a MAC whose room has two ports on the same switch
rows = session.session.query(radcheck.table).all()
host = self.user.hosts[0]
mac = host.interfaces[0].mac
for row in rows:
self.assertEqual(row.username, mac)
self.assertEqual(row.nasipaddress, self.switch.management_ip)
self.assertEqual(row.attribute, "Cleartext-Password")
self.assertEqual(row.op, ":=")
self.assertEqual(row.value, mac)
self.assertEqual(row.priority, 10)
self.assertEqual({row.nasportid for row in rows},
{port.switch_port.name for port in host.room.patch_ports})
# TODO: Put Entries in some basetable to test tagged vlans (separate test)
# TODO: test radreply, radgroupreply (with base, see above), radgroupcheck
|
<commit_before><commit_msg>Introduce hades test fixtures and first tests.<commit_after>from datetime import datetime, timedelta
from pycroft.model import session
from pycroft.model.hades import radgroup_property_mappings, radcheck
from tests import FactoryDataTestBase
from tests.factories import PropertyGroupFactory, MembershipFactory, UserWithHostFactory, \
SwitchFactory, PatchPortFactory
class HadesViewTest(FactoryDataTestBase):
def create_factories(self):
self.user = UserWithHostFactory.create()
self.network_access_group = PropertyGroupFactory.create(
name="Member",
granted={'network_access'},
)
self.blocked_by_finance_group = PropertyGroupFactory.create(
name="Blocked (finance)",
granted={'blocked_by_finance'},
denied={'network_access'},
)
self.blocked_by_traffic_group = PropertyGroupFactory.create(
name="Blocked (traffic)",
granted={'blocked_by_traffic'},
denied={'network_access'},
)
# the user's room needs to be connected to provide `nasipaddress` and `nasportid`
# TODO: remove owner and see if things still work
self.switch = SwitchFactory.create(host__owner=self.user)
PatchPortFactory.create_batch(2, patched=True, switch_port__switch=self.switch,
# This needs to be the HOSTS room!
room=self.user.hosts[0].room)
# TODO: create this membership in each test, not here
MembershipFactory.create(user=self.user, group=self.network_access_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
MembershipFactory.create(user=self.user, group=self.blocked_by_finance_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
session.session.execute(radgroup_property_mappings.insert(values=[
{'property': 'blocked_by_finance', 'radgroup': 'finance'},
{'property': 'blocked_by_traffic', 'radgroup': 'traffic'},
]))
def test_radcheck(self):
# <mac> - <nasip> - <nasport> - "Cleartext-Password" - := - <mac> - 10
# We have one interface with a MAC whose room has two ports on the same switch
rows = session.session.query(radcheck.table).all()
host = self.user.hosts[0]
mac = host.interfaces[0].mac
for row in rows:
self.assertEqual(row.username, mac)
self.assertEqual(row.nasipaddress, self.switch.management_ip)
self.assertEqual(row.attribute, "Cleartext-Password")
self.assertEqual(row.op, ":=")
self.assertEqual(row.value, mac)
self.assertEqual(row.priority, 10)
self.assertEqual({row.nasportid for row in rows},
{port.switch_port.name for port in host.room.patch_ports})
# TODO: Put Entries in some basetable to test tagged vlans (separate test)
# TODO: test radreply, radgroupreply (with base, see above), radgroupcheck
|
|
d9b4b0d913304b19365854b0ffceab179237d8f8
|
numba/tests/test_floatsyms.py
|
numba/tests/test_floatsyms.py
|
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
class TestFloatSymbols(unittest.TestCase):
"""
Test ftol symbols on windows
"""
def _test_template(self, realty, intty):
def cast(x):
y = x
return y
cres = compile_isolated(cast, args=[realty], return_type=intty)
self.assertAlmostEqual(cres.entry_point(1.), 1)
def test_float64_to_int64(self):
self._test_template(types.float64, types.int64)
def test_float64_to_uint64(self):
self._test_template(types.float64, types.uint64)
def test_float64_to_int32(self):
self._test_template(types.float64, types.int32)
def test_float64_to_uint32(self):
self._test_template(types.float64, types.uint32)
def test_float32_to_int64(self):
self._test_template(types.float32, types.int64)
def test_float32_to_uint64(self):
self._test_template(types.float32, types.uint64)
def test_float32_to_int32(self):
self._test_template(types.float32, types.int32)
def test_float32_to_uint32(self):
self._test_template(types.float32, types.uint32)
if __name__ == '__main__':
unittest.main()
|
Add tests for float->int symbols (esp for 32-bit windows and linux)
|
Add tests for float->int symbols (esp for 32-bit windows and linux)
|
Python
|
bsd-2-clause
|
numba/numba,stonebig/numba,stefanseefeld/numba,sklam/numba,seibert/numba,pitrou/numba,GaZ3ll3/numba,IntelLabs/numba,pombredanne/numba,stonebig/numba,gdementen/numba,pitrou/numba,pombredanne/numba,pitrou/numba,sklam/numba,ssarangi/numba,numba/numba,cpcloud/numba,stuartarchibald/numba,pombredanne/numba,seibert/numba,jriehl/numba,jriehl/numba,GaZ3ll3/numba,pombredanne/numba,stefanseefeld/numba,jriehl/numba,pombredanne/numba,pitrou/numba,stefanseefeld/numba,numba/numba,GaZ3ll3/numba,cpcloud/numba,gmarkall/numba,gmarkall/numba,cpcloud/numba,IntelLabs/numba,cpcloud/numba,jriehl/numba,ssarangi/numba,seibert/numba,IntelLabs/numba,GaZ3ll3/numba,stuartarchibald/numba,sklam/numba,jriehl/numba,stefanseefeld/numba,GaZ3ll3/numba,gdementen/numba,seibert/numba,gmarkall/numba,pitrou/numba,cpcloud/numba,stefanseefeld/numba,stuartarchibald/numba,IntelLabs/numba,sklam/numba,numba/numba,gmarkall/numba,ssarangi/numba,gdementen/numba,stuartarchibald/numba,gdementen/numba,ssarangi/numba,seibert/numba,ssarangi/numba,numba/numba,sklam/numba,stonebig/numba,stuartarchibald/numba,IntelLabs/numba,stonebig/numba,gmarkall/numba,gdementen/numba,stonebig/numba
|
Add tests for float->int symbols (esp for 32-bit windows and linux)
|
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
class TestFloatSymbols(unittest.TestCase):
"""
Test ftol symbols on windows
"""
def _test_template(self, realty, intty):
def cast(x):
y = x
return y
cres = compile_isolated(cast, args=[realty], return_type=intty)
self.assertAlmostEqual(cres.entry_point(1.), 1)
def test_float64_to_int64(self):
self._test_template(types.float64, types.int64)
def test_float64_to_uint64(self):
self._test_template(types.float64, types.uint64)
def test_float64_to_int32(self):
self._test_template(types.float64, types.int32)
def test_float64_to_uint32(self):
self._test_template(types.float64, types.uint32)
def test_float32_to_int64(self):
self._test_template(types.float32, types.int64)
def test_float32_to_uint64(self):
self._test_template(types.float32, types.uint64)
def test_float32_to_int32(self):
self._test_template(types.float32, types.int32)
def test_float32_to_uint32(self):
self._test_template(types.float32, types.uint32)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for float->int symbols (esp for 32-bit windows and linux)<commit_after>
|
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
class TestFloatSymbols(unittest.TestCase):
"""
Test ftol symbols on windows
"""
def _test_template(self, realty, intty):
def cast(x):
y = x
return y
cres = compile_isolated(cast, args=[realty], return_type=intty)
self.assertAlmostEqual(cres.entry_point(1.), 1)
def test_float64_to_int64(self):
self._test_template(types.float64, types.int64)
def test_float64_to_uint64(self):
self._test_template(types.float64, types.uint64)
def test_float64_to_int32(self):
self._test_template(types.float64, types.int32)
def test_float64_to_uint32(self):
self._test_template(types.float64, types.uint32)
def test_float32_to_int64(self):
self._test_template(types.float32, types.int64)
def test_float32_to_uint64(self):
self._test_template(types.float32, types.uint64)
def test_float32_to_int32(self):
self._test_template(types.float32, types.int32)
def test_float32_to_uint32(self):
self._test_template(types.float32, types.uint32)
if __name__ == '__main__':
unittest.main()
|
Add tests for float->int symbols (esp for 32-bit windows and linux)from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
class TestFloatSymbols(unittest.TestCase):
"""
Test ftol symbols on windows
"""
def _test_template(self, realty, intty):
def cast(x):
y = x
return y
cres = compile_isolated(cast, args=[realty], return_type=intty)
self.assertAlmostEqual(cres.entry_point(1.), 1)
def test_float64_to_int64(self):
self._test_template(types.float64, types.int64)
def test_float64_to_uint64(self):
self._test_template(types.float64, types.uint64)
def test_float64_to_int32(self):
self._test_template(types.float64, types.int32)
def test_float64_to_uint32(self):
self._test_template(types.float64, types.uint32)
def test_float32_to_int64(self):
self._test_template(types.float32, types.int64)
def test_float32_to_uint64(self):
self._test_template(types.float32, types.uint64)
def test_float32_to_int32(self):
self._test_template(types.float32, types.int32)
def test_float32_to_uint32(self):
self._test_template(types.float32, types.uint32)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for float->int symbols (esp for 32-bit windows and linux)<commit_after>from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
class TestFloatSymbols(unittest.TestCase):
"""
Test ftol symbols on windows
"""
def _test_template(self, realty, intty):
def cast(x):
y = x
return y
cres = compile_isolated(cast, args=[realty], return_type=intty)
self.assertAlmostEqual(cres.entry_point(1.), 1)
def test_float64_to_int64(self):
self._test_template(types.float64, types.int64)
def test_float64_to_uint64(self):
self._test_template(types.float64, types.uint64)
def test_float64_to_int32(self):
self._test_template(types.float64, types.int32)
def test_float64_to_uint32(self):
self._test_template(types.float64, types.uint32)
def test_float32_to_int64(self):
self._test_template(types.float32, types.int64)
def test_float32_to_uint64(self):
self._test_template(types.float32, types.uint64)
def test_float32_to_int32(self):
self._test_template(types.float32, types.int32)
def test_float32_to_uint32(self):
self._test_template(types.float32, types.uint32)
if __name__ == '__main__':
unittest.main()
|
|
4f3854eaf8d6e4b0ad9a77e871a946916ab3fec6
|
listings/syndication/migrations/0002_auto__del_unique_feedtype_content_type.py
|
listings/syndication/migrations/0002_auto__del_unique_feedtype_content_type.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'FeedType', fields ['content_type']
db.delete_unique('syndication_feedtype', ['content_type'])
def backwards(self, orm):
# Adding unique constraint on 'FeedType', fields ['content_type']
db.create_unique('syndication_feedtype', ['content_type'])
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'syndication.feed': {
'Meta': {'object_name': 'Feed'},
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['syndication.FeedType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'syndication.feedtype': {
'Meta': {'object_name': 'FeedType'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'application/xml'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['syndication']
|
Migrate listings.syndication, FeedType.content_type should not be unique.
|
Migrate listings.syndication, FeedType.content_type should not be unique.
|
Python
|
mit
|
wtrevino/django-listings,wtrevino/django-listings
|
Migrate listings.syndication, FeedType.content_type should not be unique.
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'FeedType', fields ['content_type']
db.delete_unique('syndication_feedtype', ['content_type'])
def backwards(self, orm):
# Adding unique constraint on 'FeedType', fields ['content_type']
db.create_unique('syndication_feedtype', ['content_type'])
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'syndication.feed': {
'Meta': {'object_name': 'Feed'},
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['syndication.FeedType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'syndication.feedtype': {
'Meta': {'object_name': 'FeedType'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'application/xml'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['syndication']
|
<commit_before><commit_msg>Migrate listings.syndication, FeedType.content_type should not be unique.<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'FeedType', fields ['content_type']
db.delete_unique('syndication_feedtype', ['content_type'])
def backwards(self, orm):
# Adding unique constraint on 'FeedType', fields ['content_type']
db.create_unique('syndication_feedtype', ['content_type'])
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'syndication.feed': {
'Meta': {'object_name': 'Feed'},
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['syndication.FeedType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'syndication.feedtype': {
'Meta': {'object_name': 'FeedType'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'application/xml'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['syndication']
|
Migrate listings.syndication, FeedType.content_type should not be unique.# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'FeedType', fields ['content_type']
db.delete_unique('syndication_feedtype', ['content_type'])
def backwards(self, orm):
# Adding unique constraint on 'FeedType', fields ['content_type']
db.create_unique('syndication_feedtype', ['content_type'])
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'syndication.feed': {
'Meta': {'object_name': 'Feed'},
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['syndication.FeedType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'syndication.feedtype': {
'Meta': {'object_name': 'FeedType'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'application/xml'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['syndication']
|
<commit_before><commit_msg>Migrate listings.syndication, FeedType.content_type should not be unique.<commit_after># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'FeedType', fields ['content_type']
db.delete_unique('syndication_feedtype', ['content_type'])
def backwards(self, orm):
# Adding unique constraint on 'FeedType', fields ['content_type']
db.create_unique('syndication_feedtype', ['content_type'])
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'syndication.feed': {
'Meta': {'object_name': 'Feed'},
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['syndication.FeedType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'syndication.feedtype': {
'Meta': {'object_name': 'FeedType'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'application/xml'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['syndication']
|
|
c5fba0cc8acb482a0bc1c49ae5187ebc1232dba3
|
tests/test_directions.py
|
tests/test_directions.py
|
import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
|
Add tests for the different input variations.
|
Add tests for the different input variations.
|
Python
|
bsd-3-clause
|
asfaltboy/directions.py,jwass/directions.py,samtux/directions.py
|
Add tests for the different input variations.
|
import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
|
<commit_before><commit_msg>Add tests for the different input variations.<commit_after>
|
import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
|
Add tests for the different input variations.
import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
|
<commit_before><commit_msg>Add tests for the different input variations.<commit_after>import unittest
from shapely.geometry import LineString, Point
from directions.base import _parse_points
class DirectionsTest(unittest.TestCase):
def setUp(self):
self.p = [(1,2), (3,4), (5,6), (7,8)]
self.line = LineString(self.p)
def test_origin_dest(self):
result = _parse_points(self.p[0], self.p[-1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_origin_dest_waypoints(self):
result = _parse_points(self.p[0], self.p[-1], self.p[1:-1])
self.assertEqual(self.p, result)
def test_line(self):
result = _parse_points(self.line)
self.assertEqual(self.p, result)
def test_points(self):
p0 = Point(self.line.coords[0])
p1 = Point(self.line.coords[-1])
result = _parse_points(p0, p1)
self.assertEqual([self.p[0], self.p[-1]], result)
def test_points_array(self):
p0 = Point(self.p[0])
p1 = Point(self.p[-1])
result = _parse_points([p0, p1])
self.assertEqual([self.p[0], self.p[-1]], result)
def test_mixed_types(self):
origin = 'blah'
destination = Point(self.p[-1])
points = self.p[1:-1]
expected = list(self.p) # Copy it
expected[0] = 'blah'
result = _parse_points(origin, destination, points)
self.assertEqual(expected, result)
def test_no_dest_waypoints(self):
# Can't specify waypoints without destination
with self.assertRaises(ValueError):
_parse_points('origin', waypoints=['p1'])
def test_bad_input(self):
# Test points not length 2
with self.assertRaises(ValueError):
_parse_points(self.p[0], (1.0, 2.0, 3.0))
|
|
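These tests pin down the expected behaviour of _parse_points without showing it; the following is a minimal reconstruction of that behaviour, written only to make the expectations concrete (the real implementation lives in directions.base and may differ in detail):
from shapely.geometry import LineString, Point
def _parse_points(origin, destination=None, waypoints=None):
    # Single-argument form: a LineString or a sequence of points.
    if destination is None:
        if waypoints is not None:
            raise ValueError("waypoints require a destination")
        points = list(origin.coords) if isinstance(origin, LineString) else list(origin)
    else:
        points = [origin] + list(waypoints or []) + [destination]
    result = []
    for p in points:
        if isinstance(p, Point):
            result.append((p.x, p.y))
        elif isinstance(p, str):
            result.append(p)  # address strings pass through untouched
        else:
            p = tuple(p)
            if len(p) != 2:
                raise ValueError("points must be (x, y) pairs")
            result.append(p)
    return result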
635682c9d206cd9ae6ea184f9361937b0a272b90
|
wqflask/utility/monads.py
|
wqflask/utility/monads.py
|
"""Monadic utilities
This module is a collection of monadic utilities for use in
GeneNetwork. It includes:
* MonadicDict - monadic version of the built-in dictionary
* MonadicDictCursor - monadic version of MySQLdb.cursors.DictCursor
that returns a MonadicDict instead of the built-in dictionary
"""
from collections import UserDict
from functools import partial
from MySQLdb.cursors import DictCursor
from pymonad.maybe import Just, Nothing
class MonadicDict(UserDict):
"""
Monadic version of the built-in dictionary.
Keys in this dictionary can be any python object, but values must
be monadic values.
"""
def __init__(self, d, convert=True):
"""
Initialize monadic dictionary.
If convert is False, values in dictionary d must be
monadic. If convert is True, values in dictionary d are
converted to monadic values.
"""
if convert:
super().__init__({key:(Nothing if value is None else Just(value))
for key, value in d.items()})
else:
super().__init__(d)
def __getitem__(self, key):
"""
Get key from dictionary.
If key exists in the dictionary, return a Just value. Else,
return Nothing.
"""
try:
return Just(self.data[key])
except KeyError:
return Nothing
def __setitem__(self, key, value):
"""
Set key in dictionary.
value must be a monadic value---either Nothing or a Just
value. If value is a Just value, set it in the dictionary. If
value is Nothing, do nothing.
"""
value.bind(partial(super().__setitem__, key))
def __delitem__(self, key):
"""
Delete key from dictionary.
If key exists in the dictionary, delete it. Else, do nothing.
"""
try:
super().__delitem__(key)
except KeyError:
pass
class MonadicDictCursor(DictCursor):
"""
Monadic version of MySQLdb.cursors.DictCursor.
Monadic version of MySQLdb.cursors.DictCursor that returns a
MonadicDict instead of the built-in dictionary.
"""
def fetchone(self):
return MonadicDict(super().fetchone())
def fetchmany(self, size=None):
return [MonadicDict(row) for row in super().fetchmany(size=size)]
def fetchall(self):
return [MonadicDict(row) for row in super().fetchall()]
|
Add monadic utilities MonadicDict and MonadicDictCursor.
|
Add monadic utilities MonadicDict and MonadicDictCursor.
* wqflask/utility/monads.py: New file.
|
Python
|
agpl-3.0
|
genenetwork/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2
|
Add monadic utilities MonadicDict and MonadicDictCursor.
* wqflask/utility/monads.py: New file.
|
"""Monadic utilities
This module is a collection of monadic utilities for use in
GeneNetwork. It includes:
* MonadicDict - monadic version of the built-in dictionary
* MonadicDictCursor - monadic version of MySQLdb.cursors.DictCursor
that returns a MonadicDict instead of the built-in dictionary
"""
from collections import UserDict
from functools import partial
from MySQLdb.cursors import DictCursor
from pymonad.maybe import Just, Nothing
class MonadicDict(UserDict):
"""
Monadic version of the built-in dictionary.
Keys in this dictionary can be any python object, but values must
be monadic values.
"""
def __init__(self, d, convert=True):
"""
Initialize monadic dictionary.
If convert is False, values in dictionary d must be
monadic. If convert is True, values in dictionary d are
converted to monadic values.
"""
if convert:
super().__init__({key:(Nothing if value is None else Just(value))
for key, value in d.items()})
else:
super().__init__(d)
def __getitem__(self, key):
"""
Get key from dictionary.
If key exists in the dictionary, return a Just value. Else,
return Nothing.
"""
try:
return Just(self.data[key])
except KeyError:
return Nothing
def __setitem__(self, key, value):
"""
Set key in dictionary.
value must be a monadic value---either Nothing or a Just
value. If value is a Just value, set it in the dictionary. If
value is Nothing, do nothing.
"""
value.bind(partial(super().__setitem__, key))
def __delitem__(self, key):
"""
Delete key from dictionary.
If key exists in the dictionary, delete it. Else, do nothing.
"""
try:
super().__delitem__(key)
except KeyError:
pass
class MonadicDictCursor(DictCursor):
"""
Monadic version of MySQLdb.cursors.DictCursor.
Monadic version of MySQLdb.cursors.DictCursor that returns a
MonadicDict instead of the built-in dictionary.
"""
def fetchone(self):
return MonadicDict(super().fetchone())
def fetchmany(self, size=None):
return [MonadicDict(row) for row in super().fetchmany(size=size)]
def fetchall(self):
return [MonadicDict(row) for row in super().fetchall()]
|
<commit_before><commit_msg>Add monadic utilities MonadicDict and MonadicDictCursor.
* wqflask/utility/monads.py: New file.<commit_after>
|
"""Monadic utilities
This module is a collection of monadic utilities for use in
GeneNetwork. It includes:
* MonadicDict - monadic version of the built-in dictionary
* MonadicDictCursor - monadic version of MySQLdb.cursors.DictCursor
that returns a MonadicDict instead of the built-in dictionary
"""
from collections import UserDict
from functools import partial
from MySQLdb.cursors import DictCursor
from pymonad.maybe import Just, Nothing
class MonadicDict(UserDict):
"""
Monadic version of the built-in dictionary.
Keys in this dictionary can be any python object, but values must
be monadic values.
"""
def __init__(self, d, convert=True):
"""
Initialize monadic dictionary.
If convert is False, values in dictionary d must be
monadic. If convert is True, values in dictionary d are
converted to monadic values.
"""
if convert:
super().__init__({key:(Nothing if value is None else Just(value))
for key, value in d.items()})
else:
super().__init__(d)
def __getitem__(self, key):
"""
Get key from dictionary.
If key exists in the dictionary, return a Just value. Else,
return Nothing.
"""
try:
return Just(self.data[key])
except KeyError:
return Nothing
def __setitem__(self, key, value):
"""
Set key in dictionary.
value must be a monadic value---either Nothing or a Just
value. If value is a Just value, set it in the dictionary. If
value is Nothing, do nothing.
"""
value.bind(partial(super().__setitem__, key))
def __delitem__(self, key):
"""
Delete key from dictionary.
If key exists in the dictionary, delete it. Else, do nothing.
"""
try:
super().__delitem__(key)
except KeyError:
pass
class MonadicDictCursor(DictCursor):
"""
Monadic version of MySQLdb.cursors.DictCursor.
Monadic version of MySQLdb.cursors.DictCursor that returns a
MonadicDict instead of the built-in dictionary.
"""
def fetchone(self):
return MonadicDict(super().fetchone())
def fetchmany(self, size=None):
return [MonadicDict(row) for row in super().fetchmany(size=size)]
def fetchall(self):
return [MonadicDict(row) for row in super().fetchall()]
|
Add monadic utilities MonadicDict and MonadicDictCursor.
* wqflask/utility/monads.py: New file.
"""Monadic utilities
This module is a collection of monadic utilities for use in
GeneNetwork. It includes:
* MonadicDict - monadic version of the built-in dictionary
* MonadicDictCursor - monadic version of MySQLdb.cursors.DictCursor
that returns a MonadicDict instead of the built-in dictionary
"""
from collections import UserDict
from functools import partial
from MySQLdb.cursors import DictCursor
from pymonad.maybe import Just, Nothing
class MonadicDict(UserDict):
"""
Monadic version of the built-in dictionary.
Keys in this dictionary can be any python object, but values must
be monadic values.
"""
def __init__(self, d, convert=True):
"""
Initialize monadic dictionary.
If convert is False, values in dictionary d must be
monadic. If convert is True, values in dictionary d are
converted to monadic values.
"""
if convert:
super().__init__({key:(Nothing if value is None else Just(value))
for key, value in d.items()})
else:
super().__init__(d)
def __getitem__(self, key):
"""
Get key from dictionary.
If key exists in the dictionary, return a Just value. Else,
return Nothing.
"""
try:
return Just(self.data[key])
except KeyError:
return Nothing
def __setitem__(self, key, value):
"""
Set key in dictionary.
value must be a monadic value---either Nothing or a Just
value. If value is a Just value, set it in the dictionary. If
value is Nothing, do nothing.
"""
value.bind(partial(super().__setitem__, key))
def __delitem__(self, key):
"""
Delete key from dictionary.
If key exists in the dictionary, delete it. Else, do nothing.
"""
try:
super().__delitem__(key)
except KeyError:
pass
class MonadicDictCursor(DictCursor):
"""
Monadic version of MySQLdb.cursors.DictCursor.
Monadic version of MySQLdb.cursors.DictCursor that returns a
MonadicDict instead of the built-in dictionary.
"""
def fetchone(self):
return MonadicDict(super().fetchone())
def fetchmany(self, size=None):
return [MonadicDict(row) for row in super().fetchmany(size=size)]
def fetchall(self):
return [MonadicDict(row) for row in super().fetchall()]
|
<commit_before><commit_msg>Add monadic utilities MonadicDict and MonadicDictCursor.
* wqflask/utility/monads.py: New file.<commit_after>"""Monadic utilities
This module is a collection of monadic utilities for use in
GeneNetwork. It includes:
* MonadicDict - monadic version of the built-in dictionary
* MonadicDictCursor - monadic version of MySQLdb.cursors.DictCursor
that returns a MonadicDict instead of the built-in dictionary
"""
from collections import UserDict
from functools import partial
from MySQLdb.cursors import DictCursor
from pymonad.maybe import Just, Nothing
class MonadicDict(UserDict):
"""
Monadic version of the built-in dictionary.
Keys in this dictionary can be any python object, but values must
be monadic values.
"""
def __init__(self, d, convert=True):
"""
Initialize monadic dictionary.
If convert is False, values in dictionary d must be
monadic. If convert is True, values in dictionary d are
converted to monadic values.
"""
if convert:
super().__init__({key:(Nothing if value is None else Just(value))
for key, value in d.items()})
else:
super().__init__(d)
def __getitem__(self, key):
"""
Get key from dictionary.
If key exists in the dictionary, return a Just value. Else,
return Nothing.
"""
try:
return Just(self.data[key])
except KeyError:
return Nothing
def __setitem__(self, key, value):
"""
Set key in dictionary.
value must be a monadic value---either Nothing or a Just
value. If value is a Just value, set it in the dictionary. If
value is Nothing, do nothing.
"""
value.bind(partial(super().__setitem__, key))
def __delitem__(self, key):
"""
Delete key from dictionary.
If key exists in the dictionary, delete it. Else, do nothing.
"""
try:
super().__delitem__(key)
except KeyError:
pass
class MonadicDictCursor(DictCursor):
"""
Monadic version of MySQLdb.cursors.DictCursor.
Monadic version of MySQLdb.cursors.DictCursor that returns a
MonadicDict instead of the built-in dictionary.
"""
def fetchone(self):
return MonadicDict(super().fetchone())
def fetchmany(self, size=None):
return [MonadicDict(row) for row in super().fetchmany(size=size)]
def fetchall(self):
return [MonadicDict(row) for row in super().fetchall()]
|
|
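A short usage sketch for MonadicDict, continuing from the class definitions above (the row contents here are invented for illustration; pymonad must be installed):
from pymonad.maybe import Just, Nothing
row = MonadicDict({"name": "BXD", "species": None})
row["name"]        # a Just value wrapping 'BXD'
row["species"]     # Nothing: the None was converted away at construction time
row["missing"]     # Nothing: absent keys never raise KeyError
row["group"] = Just("mouse")   # stored in the underlying dict
row["alias"] = Nothing         # setting Nothing is a no-op
del row["missing"]             # deleting an absent key is also a no-op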
40154d7de207df9689ac220cc8966735cb3ed5af
|
tests/test_asyncio.py
|
tests/test_asyncio.py
|
import asyncio
async def routine0(s,n):
print('CRT:',s,':',n)
async def routine(id, n):
print('TEST[%s] %d'%(id,n))
if not n:
return
n -= 1
await routine(id, n)
await routine0(id, n)
loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(routine('a',5)),
asyncio.ensure_future(routine('b',8))]
print('muf')
loop.run_until_complete(asyncio.wait(tasks))
print('puf')
loop.close()
|
Test asyncio in python 3.6
|
Test asyncio in python 3.6
|
Python
|
apache-2.0
|
theia-log/theia,theia-log/theia
|
Test asyncio in python 3.6
|
import asyncio
async def routine0(s,n):
print('CRT:',s,':',n)
async def routine(id, n):
print('TEST[%s] %d'%(id,n))
if not n:
return
n -= 1
await routine(id, n)
await routine0(id, n)
loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(routine('a',5)),
asyncio.ensure_future(routine('b',8))]
print('muf')
loop.run_until_complete(asyncio.wait(tasks))
print('puf')
loop.close()
|
<commit_before><commit_msg>Test asyncio in python 3.6<commit_after>
|
import asyncio
async def routine0(s,n):
print('CRT:',s,':',n)
async def routine(id, n):
print('TEST[%s] %d'%(id,n))
if not n:
return
n -= 1
await routine(id, n)
await routine0(id, n)
loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(routine('a',5)),
asyncio.ensure_future(routine('b',8))]
print('muf')
loop.run_until_complete(asyncio.wait(tasks))
print('puf')
loop.close()
|
Test asyncio in python 3.6
import asyncio
async def routine0(s,n):
print('CRT:',s,':',n)
async def routine(id, n):
print('TEST[%s] %d'%(id,n))
if not n:
return
n -= 1
await routine(id, n)
await routine0(id, n)
loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(routine('a',5)),
asyncio.ensure_future(routine('b',8))]
print('muf')
loop.run_until_complete(asyncio.wait(tasks))
print('puf')
loop.close()
|
<commit_before><commit_msg>Test asyncio in python 3.6<commit_after>import asyncio
async def routine0(s,n):
print('CRT:',s,':',n)
async def routine(id, n):
print('TEST[%s] %d'%(id,n))
if not n:
return
n -= 1
await routine(id, n)
await routine0(id, n)
loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(routine('a',5)),
asyncio.ensure_future(routine('b',8))]
print('muf')
loop.run_until_complete(asyncio.wait(tasks))
print('puf')
loop.close()
|
|
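On Python 3.7 and later the same experiment can be driven without touching the event loop directly; a self-contained sketch (the coroutine here only mirrors the shape of the one in the test, it is not the original):
import asyncio
async def routine(tag, n):
    # simplified stand-in for the recursive coroutine above
    print('TEST[%s] %d' % (tag, n))
async def main():
    await asyncio.gather(routine('a', 5), routine('b', 8))
asyncio.run(main())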
7963e426e2d1f58105d8712c0379114d93d32b07
|
examples/plot_feature_extraction_classification.py
|
examples/plot_feature_extraction_classification.py
|
"""
UMAP as a Feature Extraction Technique for Classification
---------------------------------------------------------
The following script shows how UMAP can be used as a feature extraction
technique to improve the accuracy on a classification task. It also shows
how UMAP can be integrated in standard scikit-learn pipelines.
The first step is to create a dataset for a classification task, which is
performed with the function ``sklearn.datasets.make_classification``. The
dataset is then split into a training set and a test set using the
``sklearn.model_selection.train_test_split`` function.
Second, a linear SVM is fitted on the training set. To choose the best
hyperparameters automatically, a gridsearch is performed on the training set.
The performance of the model is then evaluated on the test set with the
accuracy metric.
Third, the previous step is repeated with a slight modification: UMAP is
used as a feature extraction technique. This small change results in a
substantial improvement compared to the model where raw data is used.
"""
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
# Make a toy dataset
X, y = make_classification(n_samples=1000, n_features=300, n_informative=250,
n_redundant=0, n_repeated=0, n_classes=2,
random_state=1212)
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
# Classification with a linear SVM
svc = LinearSVC(dual=False, random_state=123)
params_grid = {"C": [10**k for k in range(-3, 4)]}
clf = GridSearchCV(svc, params_grid)
clf.fit(X_train, y_train)
print("Accuracy on the test set with raw data: {:.3f}".format(
clf.score(X_test, y_test)))
# Transformation with UMAP followed by classification with a linear SVM
umap = UMAP(random_state=456)
pipeline = Pipeline([("umap", umap),
("svc", svc)])
params_grid_pipeline = {"umap__n_neighbors": [5, 20],
"umap__n_components": [15, 25, 50],
"svc__C": [10**k for k in range(-3, 4)]}
clf_pipeline = GridSearchCV(pipeline, params_grid_pipeline)
clf_pipeline.fit(X_train, y_train)
print("Accuracy on the test set with UMAP transformation: {:.3f}".format(
clf_pipeline.score(X_test, y_test)))
|
Add example with sklearn pipeline
|
Add example with sklearn pipeline
|
Python
|
bsd-3-clause
|
lmcinnes/umap,lmcinnes/umap
|
Add example with sklearn pipeline
|
"""
UMAP as a Feature Extraction Technique for Classification
---------------------------------------------------------
The following script shows how UMAP can be used as a feature extraction
technique to improve the accuracy on a classification task. It also shows
how UMAP can be integrated in standard scikit-learn pipelines.
The first step is to create a dataset for a classification task, which is
performed with the function ``sklearn.datasets.make_classification``. The
dataset is then split into a training set and a test set using the
``sklearn.model_selection.train_test_split`` function.
Second, a linear SVM is fitted on the training set. To choose the best
hyperparameters automatically, a gridsearch is performed on the training set.
The performance of the model is then evaluated on the test set with the
accuracy metric.
Third, the previous step is repeated with a slight modification: UMAP is
used as a feature extraction technique. This small change results in a
substantial improvement compared to the model where raw data is used.
"""
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
# Make a toy dataset
X, y = make_classification(n_samples=1000, n_features=300, n_informative=250,
n_redundant=0, n_repeated=0, n_classes=2,
random_state=1212)
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
# Classification with a linear SVM
svc = LinearSVC(dual=False, random_state=123)
params_grid = {"C": [10**k for k in range(-3, 4)]}
clf = GridSearchCV(svc, params_grid)
clf.fit(X_train, y_train)
print("Accuracy on the test set with raw data: {:.3f}".format(
clf.score(X_test, y_test)))
# Transformation with UMAP followed by classification with a linear SVM
umap = UMAP(random_state=456)
pipeline = Pipeline([("umap", umap),
("svc", svc)])
params_grid_pipeline = {"umap__n_neighbors": [5, 20],
"umap__n_components": [15, 25, 50],
"svc__C": [10**k for k in range(-3, 4)]}
clf_pipeline = GridSearchCV(pipeline, params_grid_pipeline)
clf_pipeline.fit(X_train, y_train)
print("Accuracy on the test set with UMAP transformation: {:.3f}".format(
clf_pipeline.score(X_test, y_test)))
|
<commit_before><commit_msg>Add example with sklearn pipeline<commit_after>
|
"""
UMAP as a Feature Extraction Technique for Classification
---------------------------------------------------------
The following script shows how UMAP can be used as a feature extraction
technique to improve the accuracy on a classification task. It also shows
how UMAP can be integrated in standard scikit-learn pipelines.
The first step is to create a dataset for a classification task, which is
performed with the function ``sklearn.datasets.make_classification``. The
dataset is then split into a training set and a test set using the
``sklearn.model_selection.train_test_split`` function.
Second, a linear SVM is fitted on the training set. To choose the best
hyperparameters automatically, a gridsearch is performed on the training set.
The performance of the model is then evaluated on the test set with the
accuracy metric.
Third, the previous step is repeated with a slight modification: UMAP is
used as a feature extraction technique. This small change results in a
substantial improvement compared to the model where raw data is used.
"""
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
# Make a toy dataset
X, y = make_classification(n_samples=1000, n_features=300, n_informative=250,
n_redundant=0, n_repeated=0, n_classes=2,
random_state=1212)
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
# Classification with a linear SVM
svc = LinearSVC(dual=False, random_state=123)
params_grid = {"C": [10**k for k in range(-3, 4)]}
clf = GridSearchCV(svc, params_grid)
clf.fit(X_train, y_train)
print("Accuracy on the test set with raw data: {:.3f}".format(
clf.score(X_test, y_test)))
# Transformation with UMAP followed by classification with a linear SVM
umap = UMAP(random_state=456)
pipeline = Pipeline([("umap", umap),
("svc", svc)])
params_grid_pipeline = {"umap__n_neighbors": [5, 20],
"umap__n_components": [15, 25, 50],
"svc__C": [10**k for k in range(-3, 4)]}
clf_pipeline = GridSearchCV(pipeline, params_grid_pipeline)
clf_pipeline.fit(X_train, y_train)
print("Accuracy on the test set with UMAP transformation: {:.3f}".format(
clf_pipeline.score(X_test, y_test)))
|
Add example with sklearn pipeline
"""
UMAP as a Feature Extraction Technique for Classification
---------------------------------------------------------
The following script shows how UMAP can be used as a feature extraction
technique to improve the accuracy on a classification task. It also shows
how UMAP can be integrated in standard scikit-learn pipelines.
The first step is to create a dataset for a classification task, which is
performed with the function ``sklearn.datasets.make_classification``. The
dataset is then split into a training set and a test set using the
``sklearn.model_selection.train_test_split`` function.
Second, a linear SVM is fitted on the training set. To choose the best
hyperparameters automatically, a gridsearch is performed on the training set.
The performance of the model is then evaluated on the test set with the
accuracy metric.
Third, the previous step is repeated with a slight modification: UMAP is
used as a feature extraction technique. This small change results in a
substantial improvement compared to the model where raw data is used.
"""
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
# Make a toy dataset
X, y = make_classification(n_samples=1000, n_features=300, n_informative=250,
n_redundant=0, n_repeated=0, n_classes=2,
random_state=1212)
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
# Classification with a linear SVM
svc = LinearSVC(dual=False, random_state=123)
params_grid = {"C": [10**k for k in range(-3, 4)]}
clf = GridSearchCV(svc, params_grid)
clf.fit(X_train, y_train)
print("Accuracy on the test set with raw data: {:.3f}".format(
clf.score(X_test, y_test)))
# Transformation with UMAP followed by classification with a linear SVM
umap = UMAP(random_state=456)
pipeline = Pipeline([("umap", umap),
("svc", svc)])
params_grid_pipeline = {"umap__n_neighbors": [5, 20],
"umap__n_components": [15, 25, 50],
"svc__C": [10**k for k in range(-3, 4)]}
clf_pipeline = GridSearchCV(pipeline, params_grid_pipeline)
clf_pipeline.fit(X_train, y_train)
print("Accuracy on the test set with UMAP transformation: {:.3f}".format(
clf_pipeline.score(X_test, y_test)))
|
<commit_before><commit_msg>Add example with sklearn pipeline<commit_after>"""
UMAP as a Feature Extraction Technique for Classification
---------------------------------------------------------
The following script shows how UMAP can be used as a feature extraction
technique to improve the accuracy on a classification task. It also shows
how UMAP can be integrated in standard scikit-learn pipelines.
The first step is to create a dataset for a classification task, which is
performed with the function ``sklearn.datasets.make_classification``. The
dataset is then split into a training set and a test set using the
``sklearn.model_selection.train_test_split`` function.
Second, a linear SVM is fitted on the training set. To choose the best
hyperparameters automatically, a gridsearch is performed on the training set.
The performance of the model is then evaluated on the test set with the
accuracy metric.
Third, the previous step is repeated with a slight modification: UMAP is
used as a feature extraction technique. This small change results in a
substantial improvement compared to the model where raw data is used.
"""
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from umap import UMAP
# Make a toy dataset
X, y = make_classification(n_samples=1000, n_features=300, n_informative=250,
n_redundant=0, n_repeated=0, n_classes=2,
random_state=1212)
# Split the dataset into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
# Classification with a linear SVM
svc = LinearSVC(dual=False, random_state=123)
params_grid = {"C": [10**k for k in range(-3, 4)]}
clf = GridSearchCV(svc, params_grid)
clf.fit(X_train, y_train)
print("Accuracy on the test set with raw data: {:.3f}".format(
clf.score(X_test, y_test)))
# Transformation with UMAP followed by classification with a linear SVM
umap = UMAP(random_state=456)
pipeline = Pipeline([("umap", umap),
("svc", svc)])
params_grid_pipeline = {"umap__n_neighbors": [5, 20],
"umap__n_components": [15, 25, 50],
"svc__C": [10**k for k in range(-3, 4)]}
clf_pipeline = GridSearchCV(pipeline, params_grid_pipeline)
clf_pipeline.fit(X_train, y_train)
print("Accuracy on the test set with UMAP transformation: {:.3f}".format(
clf_pipeline.score(X_test, y_test)))
|
|
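After the two grid searches in the script above have been fitted, the winning hyperparameters can be inspected through the standard scikit-learn attributes (this snippet simply appends to the script and is not part of the original example):
print("Best SVC params:", clf.best_params_)
print("Best UMAP pipeline params:", clf_pipeline.best_params_)
print("Mean CV accuracy of the best pipeline: {:.3f}".format(clf_pipeline.best_score_))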
bcaa60ce73134e80e11e7df709e7ba7dbc07d349
|
tests/test_systemd.py
|
tests/test_systemd.py
|
#!/usr/bin/env python3
import unittest
from unittest import mock
from unittest.mock import MagicMock, patch
from portinus import systemd
class testSystemd(unittest.TestCase):
def setUp(self):
systemd.subprocess.check_output = MagicMock(return_value=True)
self.unit = systemd.Unit('foo')
def testBasicCalls(self):
self.unit.reload()
self.unit.restart()
self.unit.stop()
self.unit.enable()
self.unit.disable()
def testRemove(self):
with patch('os.path.exists', MagicMock(return_value=True)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_stop.called)
self.assertTrue(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
with patch('os.path.exists', MagicMock(return_value=False)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertFalse(fake_stop.called)
self.assertFalse(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
def testCreateServiceFile(self):
with patch('builtins.open', mock.mock_open(), create=True) as fake_open:
self.unit.create_service_file()
fake_open.assert_called_once_with(self.unit.service_file_path, 'w')
@patch.object(systemd.Unit, 'set_content')
@patch.object(systemd.Unit, 'create_service_file')
def testEnsure(self, fake_create_service_file, fake_set_content):
test_content = 'qweasdzxc'
self.unit.ensure()
self.assertFalse(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
self.unit.ensure(content='qwe')
self.assertTrue(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
|
Add tests for systemd module
|
Add tests for systemd module
|
Python
|
mit
|
justin8/portinus,justin8/portinus
|
Add tests for systemd module
|
#!/usr/bin/env python3
import unittest
from unittest import mock
from unittest.mock import MagicMock, patch
from portinus import systemd
class testSystemd(unittest.TestCase):
def setUp(self):
systemd.subprocess.check_output = MagicMock(return_value=True)
self.unit = systemd.Unit('foo')
def testBasicCalls(self):
self.unit.reload()
self.unit.restart()
self.unit.stop()
self.unit.enable()
self.unit.disable()
def testRemove(self):
with patch('os.path.exists', MagicMock(return_value=True)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_stop.called)
self.assertTrue(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
with patch('os.path.exists', MagicMock(return_value=False)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertFalse(fake_stop.called)
self.assertFalse(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
def testCreateServiceFile(self):
with patch('builtins.open', mock.mock_open(), create=True) as fake_open:
self.unit.create_service_file()
fake_open.assert_called_once_with(self.unit.service_file_path, 'w')
@patch.object(systemd.Unit, 'set_content')
@patch.object(systemd.Unit, 'create_service_file')
def testEnsure(self, fake_create_service_file, fake_set_content):
test_content = 'qweasdzxc'
self.unit.ensure()
self.assertFalse(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
self.unit.ensure(content='qwe')
self.assertTrue(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
|
<commit_before><commit_msg>Add tests for systemd module<commit_after>
|
#!/usr/bin/env python3
import unittest
from unittest import mock
from unittest.mock import MagicMock, patch
from portinus import systemd
class testSystemd(unittest.TestCase):
def setUp(self):
systemd.subprocess.check_output = MagicMock(return_value=True)
self.unit = systemd.Unit('foo')
def testBasicCalls(self):
self.unit.reload()
self.unit.restart()
self.unit.stop()
self.unit.enable()
self.unit.disable()
def testRemove(self):
with patch('os.path.exists', MagicMock(return_value=True)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_stop.called)
self.assertTrue(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
with patch('os.path.exists', MagicMock(return_value=False)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertFalse(fake_stop.called)
self.assertFalse(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
def testCreateServiceFile(self):
with patch('builtins.open', mock.mock_open(), create=True) as fake_open:
self.unit.create_service_file()
fake_open.assert_called_once_with(self.unit.service_file_path, 'w')
@patch.object(systemd.Unit, 'set_content')
@patch.object(systemd.Unit, 'create_service_file')
def testEnsure(self, fake_create_service_file, fake_set_content):
test_content = 'qweasdzxc'
self.unit.ensure()
self.assertFalse(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
self.unit.ensure(content='qwe')
self.assertTrue(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
|
Add tests for systemd module
#!/usr/bin/env python3
import unittest
from unittest import mock
from unittest.mock import MagicMock, patch
from portinus import systemd
class testSystemd(unittest.TestCase):
def setUp(self):
systemd.subprocess.check_output = MagicMock(return_value=True)
self.unit = systemd.Unit('foo')
def testBasicCalls(self):
self.unit.reload()
self.unit.restart()
self.unit.stop()
self.unit.enable()
self.unit.disable()
def testRemove(self):
with patch('os.path.exists', MagicMock(return_value=True)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_stop.called)
self.assertTrue(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
with patch('os.path.exists', MagicMock(return_value=False)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertFalse(fake_stop.called)
self.assertFalse(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
def testCreateServiceFile(self):
with patch('builtins.open', mock.mock_open(), create=True) as fake_open:
self.unit.create_service_file()
fake_open.assert_called_once_with(self.unit.service_file_path, 'w')
@patch.object(systemd.Unit, 'set_content')
@patch.object(systemd.Unit, 'create_service_file')
def testEnsure(self, fake_create_service_file, fake_set_content):
test_content = 'qweasdzxc'
self.unit.ensure()
self.assertFalse(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
self.unit.ensure(content='qwe')
self.assertTrue(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
|
<commit_before><commit_msg>Add tests for systemd module<commit_after>#!/usr/bin/env python3
import unittest
from unittest import mock
from unittest.mock import MagicMock, patch
from portinus import systemd
class testSystemd(unittest.TestCase):
def setUp(self):
systemd.subprocess.check_output = MagicMock(return_value=True)
self.unit = systemd.Unit('foo')
def testBasicCalls(self):
self.unit.reload()
self.unit.restart()
self.unit.stop()
self.unit.enable()
self.unit.disable()
def testRemove(self):
with patch('os.path.exists', MagicMock(return_value=True)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_stop.called)
self.assertTrue(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
with patch('os.path.exists', MagicMock(return_value=False)) as fake_path_exists, \
patch.object(systemd.Unit, 'stop') as fake_stop, \
patch.object(systemd.Unit, 'disable') as fake_disable, \
patch('os.remove') as fake_os_remove, \
patch.object(systemd.Unit, 'reload') as fake_reload:
self.unit.remove()
fake_path_exists.assert_called_with(self.unit.service_file_path)
self.assertFalse(fake_stop.called)
self.assertFalse(fake_disable.called)
fake_os_remove.assert_called_with(self.unit.service_file_path)
self.assertTrue(fake_reload.called)
def testCreateServiceFile(self):
with patch('builtins.open', mock.mock_open(), create=True) as fake_open:
self.unit.create_service_file()
fake_open.assert_called_once_with(self.unit.service_file_path, 'w')
@patch.object(systemd.Unit, 'set_content')
@patch.object(systemd.Unit, 'create_service_file')
def testEnsure(self, fake_create_service_file, fake_set_content):
test_content = 'qweasdzxc'
self.unit.ensure()
self.assertFalse(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
self.unit.ensure(content='qwe')
self.assertTrue(fake_set_content.called)
self.assertTrue(fake_create_service_file.called)
|
|
0c3107739671398de1a206cfbb7673c25c543e60
|
driver27/migrations/0009_populate_driver_in_seats.py
|
driver27/migrations/0009_populate_driver_in_seats.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_driver_in_seats(apps, schema_editor):
Seat = apps.get_model("driver27", "Seat")
for seat in Seat.objects.all():
driver = seat.contender.driver
seat.driver = driver
seat.save()
class Migration(migrations.Migration):
dependencies = [
('driver27', '0008_auto_20170529_2220'),
]
operations = [
migrations.RunPython(
populate_driver_in_seats,
),
]
|
Update driver value in Seat model.
|
Update driver value in Seat model.
|
Python
|
mit
|
SRJ9/django-driver27,SRJ9/django-driver27,SRJ9/django-driver27
|
Update driver value in Seat model.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_driver_in_seats(apps, schema_editor):
Seat = apps.get_model("driver27", "Seat")
for seat in Seat.objects.all():
driver = seat.contender.driver
seat.driver = driver
seat.save()
class Migration(migrations.Migration):
dependencies = [
('driver27', '0008_auto_20170529_2220'),
]
operations = [
migrations.RunPython(
populate_driver_in_seats,
),
]
|
<commit_before><commit_msg>Update driver value in Seat model.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_driver_in_seats(apps, schema_editor):
Seat = apps.get_model("driver27", "Seat")
for seat in Seat.objects.all():
driver = seat.contender.driver
seat.driver = driver
seat.save()
class Migration(migrations.Migration):
dependencies = [
('driver27', '0008_auto_20170529_2220'),
]
operations = [
migrations.RunPython(
populate_driver_in_seats,
),
]
|
Update driver value in Seat model.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_driver_in_seats(apps, schema_editor):
Seat = apps.get_model("driver27", "Seat")
for seat in Seat.objects.all():
driver = seat.contender.driver
seat.driver = driver
seat.save()
class Migration(migrations.Migration):
dependencies = [
('driver27', '0008_auto_20170529_2220'),
]
operations = [
migrations.RunPython(
populate_driver_in_seats,
),
]
|
<commit_before><commit_msg>Update driver value in Seat model.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def populate_driver_in_seats(apps, schema_editor):
Seat = apps.get_model("driver27", "Seat")
for seat in Seat.objects.all():
driver = seat.contender.driver
seat.driver = driver
seat.save()
class Migration(migrations.Migration):
dependencies = [
('driver27', '0008_auto_20170529_2220'),
]
operations = [
migrations.RunPython(
populate_driver_in_seats,
),
]
|
|
8fe57fbbc5764d3e13c3513afcdb2c49d04b117e
|
src/yunohost/data_migrations/0003_php5_to_php7_pools.py
|
src/yunohost/data_migrations/0003_php5_to_php7_pools.py
|
import os
import glob
from shutil import copy2
from moulinette.utils.log import getActionLogger
from yunohost.tools import Migration
from yunohost.service import _run_service_command
logger = getActionLogger('yunohost.migration')
PHP5_POOLS = "/etc/php5/fpm/pool.d"
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
class MyMigration(Migration):
"Migrate php5-fpm 'pool' conf files to php7 stuff"
def migrate(self):
# Get list of php5 pool files
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
# Keep only basenames
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
for f in php5_pool_files:
# Copy the files to the php7 pool
src = "{}/{}".format(PHP5_POOLS, f)
dest = "{}/{}".format(PHP7_POOLS, f)
copy2(src, dest)
# Replace the socket prefix if it's found
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
os.system(c)
# Also add a comment that it was automatically moved from php5
# (for human traceability and backward migration)
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
os.system(c)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
def backward(self):
# Get list of php7 pool files
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
# Keep only files which have the migration comment
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
# Delete those files
for f in php7_pool_files:
os.remove(f)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
|
Add a migration for php5-fpm pools to php7
|
Add a migration for php5-fpm pools to php7
|
Python
|
agpl-3.0
|
YunoHost/yunohost,YunoHost/yunohost,YunoHost/yunohost,YunoHost/moulinette-yunohost,YunoHost/moulinette-yunohost,YunoHost/moulinette-yunohost,YunoHost/yunohost,YunoHost/moulinette-yunohost,YunoHost/moulinette-yunohost
|
Add a migration for php5-fpm pools to php7
|
import os
import glob
from shutil import copy2
from moulinette.utils.log import getActionLogger
from yunohost.tools import Migration
from yunohost.service import _run_service_command
logger = getActionLogger('yunohost.migration')
PHP5_POOLS = "/etc/php5/fpm/pool.d"
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
class MyMigration(Migration):
"Migrate php5-fpm 'pool' conf files to php7 stuff"
def migrate(self):
# Get list of php5 pool files
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
# Keep only basenames
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
for f in php5_pool_files:
# Copy the files to the php7 pool
src = "{}/{}".format(PHP5_POOLS, f)
dest = "{}/{}".format(PHP7_POOLS, f)
copy2(src, dest)
# Replace the socket prefix if it's found
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
os.system(c)
# Also add a comment that it was automatically moved from php5
# (for human traceability and backward migration)
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
os.system(c)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
def backward(self):
# Get list of php7 pool files
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
# Keep only files which have the migration comment
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
# Delete those files
for f in php7_pool_files:
os.remove(f)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
|
<commit_before><commit_msg>Add a migration for php5-fpm pools to php7<commit_after>
|
import os
import glob
from shutil import copy2
from moulinette.utils.log import getActionLogger
from yunohost.tools import Migration
from yunohost.service import _run_service_command
logger = getActionLogger('yunohost.migration')
PHP5_POOLS = "/etc/php5/fpm/pool.d"
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
class MyMigration(Migration):
"Migrate php5-fpm 'pool' conf files to php7 stuff"
def migrate(self):
# Get list of php5 pool files
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
# Keep only basenames
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
for f in php5_pool_files:
# Copy the files to the php7 pool
src = "{}/{}".format(PHP5_POOLS, f)
dest = "{}/{}".format(PHP7_POOLS, f)
copy2(src, dest)
# Replace the socket prefix if it's found
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
os.system(c)
# Also add a comment that it was automatically moved from php5
# (for human traceability and backward migration)
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
os.system(c)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
def backward(self):
# Get list of php7 pool files
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
# Keep only files which have the migration comment
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
# Delete those files
for f in php7_pool_files:
os.remove(f)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
|
Add a migration for php5-fpm pools to php7
import os
import glob
from shutil import copy2
from moulinette.utils.log import getActionLogger
from yunohost.tools import Migration
from yunohost.service import _run_service_command
logger = getActionLogger('yunohost.migration')
PHP5_POOLS = "/etc/php5/fpm/pool.d"
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
class MyMigration(Migration):
"Migrate php5-fpm 'pool' conf files to php7 stuff"
def migrate(self):
# Get list of php5 pool files
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
# Keep only basenames
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
for f in php5_pool_files:
# Copy the files to the php7 pool
src = "{}/{}".format(PHP5_POOLS, f)
dest = "{}/{}".format(PHP7_POOLS, f)
copy2(src, dest)
# Replace the socket prefix if it's found
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
os.system(c)
# Also add a comment that it was automatically moved from php5
# (for human traceability and backward migration)
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
os.system(c)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
def backward(self):
# Get list of php7 pool files
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
# Keep only files which have the migration comment
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
# Delete those files
for f in php7_pool_files:
os.remove(f)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
|
<commit_before><commit_msg>Add a migration for php5-fpm pools to php7<commit_after>import os
import glob
from shutil import copy2
from moulinette.utils.log import getActionLogger
from yunohost.tools import Migration
from yunohost.service import _run_service_command
logger = getActionLogger('yunohost.migration')
PHP5_POOLS = "/etc/php5/fpm/pool.d"
PHP7_POOLS = "/etc/php/7.0/fpm/pool.d"
PHP5_SOCKETS_PREFIX = "/var/run/php5-fpm"
PHP7_SOCKETS_PREFIX = "/run/php/php7.0-fpm"
MIGRATION_COMMENT = "; YunoHost note : this file was automatically moved from {}".format(PHP5_POOLS)
class MyMigration(Migration):
"Migrate php5-fpm 'pool' conf files to php7 stuff"
def migrate(self):
# Get list of php5 pool files
php5_pool_files = glob.glob("{}/*.conf".format(PHP5_POOLS))
# Keep only basenames
php5_pool_files = [os.path.basename(f) for f in php5_pool_files]
# Ignore the "www.conf" (default stuff, probably don't want to touch it ?)
php5_pool_files = [f for f in php5_pool_files if f != "www.conf"]
for f in php5_pool_files:
# Copy the files to the php7 pool
src = "{}/{}".format(PHP5_POOLS, f)
dest = "{}/{}".format(PHP7_POOLS, f)
copy2(src, dest)
# Replace the socket prefix if it's found
c = "sed -i -e 's@{}@{}@g' {}".format(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX, dest)
os.system(c)
# Also add a comment that it was automatically moved from php5
# (for human traceability and backward migration)
c = "sed -i '1i {}' {}".format(MIGRATION_COMMENT, dest)
os.system(c)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
def backward(self):
# Get list of php7 pool files
php7_pool_files = glob.glob("{}/*.conf".format(PHP7_POOLS))
# Keep only files which have the migration comment
php7_pool_files = [f for f in php7_pool_files if open(f).readline().strip() == MIGRATION_COMMENT]
# Delete those files
for f in php7_pool_files:
os.remove(f)
# Reload/restart the php pools
_run_service_command("restart", "php-fpm")
|
|
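The socket-prefix rewrite above is done by shelling out to sed; a pure-Python equivalent, shown only for illustration and reusing the module-level constants defined in the migration (it is not what the migration actually ships), could look like:
def move_pool_file(src, dest):
    with open(src) as f:
        conf = f.read()
    conf = conf.replace(PHP5_SOCKETS_PREFIX, PHP7_SOCKETS_PREFIX)
    with open(dest, 'w') as f:
        f.write(MIGRATION_COMMENT + "\n" + conf)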
bf34c1dbb37865e62e97e3463645c7df16a4ca08
|
markov_chain.py
|
markov_chain.py
|
from random import choice
class MarkovChain(object):
""" An interface for signle-word states Markov Chains """
def __init__(self, text=None):
self._states_map = {}
if text is not None:
self.add_text(text)
def add_text(self, text, separator=" "):
""" Adds text to the markov chain """
word_list = text.split(separator)
for i in range(0, len(word_list)-1):
self._states_map.setdefault(word_list[i], []).append(word_list[i+1])
return self
def get_word(self, key):
""" Returns a word from Markov Chain associated with the key """
values = self._states_map.get(key)
return choice(values) if values is not None else None
|
Add an interface for Markov Chains
|
Add an interface for Markov Chains
|
Python
|
mit
|
iluxonchik/lyricist
|
Add an interface for Markov Chains
|
from random import choice
class MarkovChain(object):
""" An interface for signle-word states Markov Chains """
def __init__(self, text=None):
self._states_map = {}
if text is not None:
self.add_text(text)
def add_text(self, text, separator=" "):
""" Adds text to the markov chain """
word_list = text.split(separator)
for i in range(0, len(word_list)-1):
self._states_map.setdefault(word_list[i], []).append(word_list[i+1])
return self
def get_word(self, key):
""" Returns a word from Markov Chain associated with the key """
values = self._states_map.get(key)
return choice(values) if values is not None else None
|
<commit_before><commit_msg>Add an interface for Markov Chains<commit_after>
|
from random import choice
class MarkovChain(object):
""" An interface for signle-word states Markov Chains """
def __init__(self, text=None):
self._states_map = {}
if text is not None:
self.add_text(text)
def add_text(self, text, separator=" "):
""" Adds text to the markov chain """
word_list = text.split(separator)
for i in range(0, len(word_list)-1):
self._states_map.setdefault(word_list[i], []).append(word_list[i+1])
return self
def get_word(self, key):
""" Returns a word from Markov Chain associated with the key """
values = self._states_map.get(key)
return choice(values) if values is not None else None
|
Add an interface for Markov Chainsfrom random import choice
class MarkovChain(object):
""" An interface for signle-word states Markov Chains """
def __init__(self, text=None):
self._states_map = {}
if text is not None:
self.add_text(text)
def add_text(self, text, separator=" "):
""" Adds text to the markov chain """
word_list = text.split(separator)
for i in range(0, len(word_list)-1):
self._states_map.setdefault(word_list[i], []).append(word_list[i+1])
return self
def get_word(self, key):
""" Returns a word from Markov Chain associated with the key """
values = self._states_map.get(key)
return choice(values) if values is not None else None
|
<commit_before><commit_msg>Add an interface for Markov Chains<commit_after>from random import choice
class MarkovChain(object):
""" An interface for signle-word states Markov Chains """
def __init__(self, text=None):
self._states_map = {}
if text is not None:
self.add_text(text)
def add_text(self, text, separator=" "):
""" Adds text to the markov chain """
word_list = text.split(separator)
for i in range(0, len(word_list)-1):
self._states_map.setdefault(word_list[i], []).append(word_list[i+1])
return self
def get_word(self, key):
""" Returns a word from Markov Chain associated with the key """
values = self._states_map.get(key)
return choice(values) if values is not None else None
|
|
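A short usage sketch (not from the original commit); it assumes the MarkovChain class defined in the record above.

# Hypothetical example: build a chain from a tiny text and walk it.
chain = MarkovChain("the cat sat on the mat")
print(chain.get_word("the"))     # "cat" or "mat", chosen at random
print(chain.get_word("banana"))  # None: the key was never added

word, sentence = "the", ["the"]
while word is not None and len(sentence) < 6:
    word = chain.get_word(word)
    if word is not None:
        sentence.append(word)
print(" ".join(sentence))        # e.g. "the cat sat on the mat"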
e29de047d770de70f3745ae410b62d0ddad4b0b4
|
lib/oeqa/runtime/misc/appFW.py
|
lib/oeqa/runtime/misc/appFW.py
|
from oeqa.oetest import oeRuntimeTest
class AppFwTest(oeRuntimeTest):
""" App Framework testing """
def test_sqlite_integration(self):
""" test sqlite is integrated in image """
(status,output) = self.target.run("rpm -qa | grep sqlite")
self.assertEqual(status, 0, output)
|
Add one test case for IOTOS-358
|
Add one test case for IOTOS-358
To check SQLite is integrated in the image
Signed-off-by: Wu Dawei <b8fdb5b9f0b6b29b63287bce074c688ca08d8ead@intel.com>
|
Python
|
mit
|
ostroproject/meta-iotqa,ostroproject/meta-iotqa,wanghongjuan/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,ostroproject/meta-iotqa,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,daweiwu/meta-iotqa-1,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,daweiwu/meta-iotqa-1
|
Add one test case for IOTOS-358
To check SQLite is integrated in the image
Signed-off-by: Wu Dawei <b8fdb5b9f0b6b29b63287bce074c688ca08d8ead@intel.com>
|
from oeqa.oetest import oeRuntimeTest
class AppFwTest(oeRuntimeTest):
""" App Framework testing """
def test_sqlite_integration(self):
""" test sqlite is integrated in image """
(status,output) = self.target.run("rpm -qa | grep sqlite")
self.assertEqual(status, 0, output)
|
<commit_before><commit_msg>Add one test case for IOTOS-358
To check SQLite is integrated in the image
Signed-off-by: Wu Dawei <b8fdb5b9f0b6b29b63287bce074c688ca08d8ead@intel.com><commit_after>
|
from oeqa.oetest import oeRuntimeTest
class AppFwTest(oeRuntimeTest):
""" App Framework testing """
def test_sqlite_integration(self):
""" test sqlite is integrated in image """
(status,output) = self.target.run("rpm -qa | grep sqlite")
self.assertEqual(status, 0, output)
|
Add one test case for IOTOS-358
To check SQLite is integrated in the image
Signed-off-by: Wu Dawei <b8fdb5b9f0b6b29b63287bce074c688ca08d8ead@intel.com>from oeqa.oetest import oeRuntimeTest
class AppFwTest(oeRuntimeTest):
""" App Framework testing """
def test_sqlite_integration(self):
""" test sqlite is integrated in image """
(status,output) = self.target.run("rpm -qa | grep sqlite")
self.assertEqual(status, 0, output)
|
<commit_before><commit_msg>Add one test case for IOTOS-358
To check SQLite is integrated in the image
Signed-off-by: Wu Dawei <b8fdb5b9f0b6b29b63287bce074c688ca08d8ead@intel.com><commit_after>from oeqa.oetest import oeRuntimeTest
class AppFwTest(oeRuntimeTest):
""" App Framework testing """
def test_sqlite_integration(self):
""" test sqlite is integrated in image """
(status,output) = self.target.run("rpm -qa | grep sqlite")
self.assertEqual(status, 0, output)
|
|
0782e8786272fcd6e3e1a41d31bea253865c468b
|
pastas/timer.py
|
pastas/timer.py
|
try:
from tqdm.auto import tqdm
except ModuleNotFoundError:
raise ModuleNotFoundError("SolveTimer requires 'tqdm' to be installed.")
class SolveTimer(tqdm):
"""Progress indicator for model optimization.
Usage
-----
Print timer and number of iterations in console while running
`ml.solve()`::
>>> with SolveTimer() as t:
ml.solve(callback=t.update)
This prints the following to the console, for example::
Optimization progress: 73it [00:01, 67.68it/s]
Note
----
    If the logger is also printing messages to the console, the timer will not
be updated quite as nicely.
"""
def __init__(self, *args, **kwargs):
if "total" not in kwargs:
kwargs['total'] = None
if "desc" not in kwargs:
kwargs["desc"] = "Optimization progress"
super(SolveTimer, self).__init__(*args, **kwargs)
def update(self, _, n=1):
displayed = super(SolveTimer, self).update(n)
return displayed
|
Add SolveTimer - print number of iterations and elapsed time to console while running ml.solve() - see docstring for usage
|
Add SolveTimer
- print number of iterations and elapsed time to console while running ml.solve()
- see docstring for usage
|
Python
|
mit
|
pastas/pastas
|
Add SolveTimer
- print number of iterations and elapsed time to console while running ml.solve()
- see docstring for usage
|
try:
from tqdm.auto import tqdm
except ModuleNotFoundError:
raise ModuleNotFoundError("SolveTimer requires 'tqdm' to be installed.")
class SolveTimer(tqdm):
"""Progress indicator for model optimization.
Usage
-----
Print timer and number of iterations in console while running
`ml.solve()`::
>>> with SolveTimer() as t:
ml.solve(callback=t.update)
This prints the following to the console, for example::
Optimization progress: 73it [00:01, 67.68it/s]
Note
----
    If the logger is also printing messages to the console, the timer will not
be updated quite as nicely.
"""
def __init__(self, *args, **kwargs):
if "total" not in kwargs:
kwargs['total'] = None
if "desc" not in kwargs:
kwargs["desc"] = "Optimization progress"
super(SolveTimer, self).__init__(*args, **kwargs)
def update(self, _, n=1):
displayed = super(SolveTimer, self).update(n)
return displayed
|
<commit_before><commit_msg>Add SolveTimer
- print number of iterations and elapsed time to console while running ml.solve()
- see docstring for usage<commit_after>
|
try:
from tqdm.auto import tqdm
except ModuleNotFoundError:
raise ModuleNotFoundError("SolveTimer requires 'tqdm' to be installed.")
class SolveTimer(tqdm):
"""Progress indicator for model optimization.
Usage
-----
Print timer and number of iterations in console while running
`ml.solve()`::
>>> with SolveTimer() as t:
ml.solve(callback=t.update)
This prints the following to the console, for example::
Optimization progress: 73it [00:01, 67.68it/s]
Note
----
    If the logger is also printing messages to the console, the timer will not
be updated quite as nicely.
"""
def __init__(self, *args, **kwargs):
if "total" not in kwargs:
kwargs['total'] = None
if "desc" not in kwargs:
kwargs["desc"] = "Optimization progress"
super(SolveTimer, self).__init__(*args, **kwargs)
def update(self, _, n=1):
displayed = super(SolveTimer, self).update(n)
return displayed
|
Add SolveTimer
- print number of iterations and elapsed time to console while running ml.solve()
- see docstring for usagetry:
from tqdm.auto import tqdm
except ModuleNotFoundError:
raise ModuleNotFoundError("SolveTimer requires 'tqdm' to be installed.")
class SolveTimer(tqdm):
"""Progress indicator for model optimization.
Usage
-----
Print timer and number of iterations in console while running
`ml.solve()`::
>>> with SolveTimer() as t:
ml.solve(callback=t.update)
This prints the following to the console, for example::
Optimization progress: 73it [00:01, 67.68it/s]
Note
----
    If the logger is also printing messages to the console, the timer will not
be updated quite as nicely.
"""
def __init__(self, *args, **kwargs):
if "total" not in kwargs:
kwargs['total'] = None
if "desc" not in kwargs:
kwargs["desc"] = "Optimization progress"
super(SolveTimer, self).__init__(*args, **kwargs)
def update(self, _, n=1):
displayed = super(SolveTimer, self).update(n)
return displayed
|
<commit_before><commit_msg>Add SolveTimer
- print number of iterations and elapsed time to console while running ml.solve()
- see docstring for usage<commit_after>try:
from tqdm.auto import tqdm
except ModuleNotFoundError:
raise ModuleNotFoundError("SolveTimer requires 'tqdm' to be installed.")
class SolveTimer(tqdm):
"""Progress indicator for model optimization.
Usage
-----
Print timer and number of iterations in console while running
`ml.solve()`::
>>> with SolveTimer() as t:
ml.solve(callback=t.update)
This prints the following to the console, for example::
Optimization progress: 73it [00:01, 67.68it/s]
Note
----
    If the logger is also printing messages to the console, the timer will not
be updated quite as nicely.
"""
def __init__(self, *args, **kwargs):
if "total" not in kwargs:
kwargs['total'] = None
if "desc" not in kwargs:
kwargs["desc"] = "Optimization progress"
super(SolveTimer, self).__init__(*args, **kwargs)
def update(self, _, n=1):
displayed = super(SolveTimer, self).update(n)
return displayed
|
|
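A minimal sketch of how the callback gets driven (not from the commit); it assumes the SolveTimer class above with tqdm installed, and uses a plain loop as a stand-in for pastas' optimizer, which would call the callback once per iteration.

import time

with SolveTimer() as t:
    for _ in range(25):
        time.sleep(0.02)  # stand-in for one objective-function evaluation
        t.update(None)    # first argument (the current parameters) is ignored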
6ece957e5317a9f54499714f9a7cb9bca221d4e5
|
bin/debug/intake_single_user.py
|
bin/debug/intake_single_user.py
|
import json
import logging
import argparse
import numpy as np
import uuid
import emission.pipeline.intake_stage as epi
import emission.core.wrapper.user as ecwu
if __name__ == '__main__':
np.random.seed(61297777)
parser = argparse.ArgumentParser(prog="intake_single_user")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", "--user_email")
group.add_argument("-u", "--user_uuid")
args = parser.parse_args()
if args.user_uuid:
sel_uuid = uuid.UUID(args.user_uuid)
else:
sel_uuid = ecwu.User.fromEmail(args.user_email).uuid
epi.run_intake_pipeline("single", [sel_uuid])
|
Add a simple script that runs the pipeline for the single specified user
|
Add a simple script that runs the pipeline for the single specified user
User can be specified by either email or UUID.
Calls the pipeline functions.
|
Python
|
bsd-3-clause
|
sunil07t/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server
|
Add a simple script that runs the pipeline for the single specified user
User can be specified by either email or UUID.
Calls the pipeline functions.
|
import json
import logging
import argparse
import numpy as np
import uuid
import emission.pipeline.intake_stage as epi
import emission.core.wrapper.user as ecwu
if __name__ == '__main__':
np.random.seed(61297777)
parser = argparse.ArgumentParser(prog="intake_single_user")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", "--user_email")
group.add_argument("-u", "--user_uuid")
args = parser.parse_args()
if args.user_uuid:
sel_uuid = uuid.UUID(args.user_uuid)
else:
sel_uuid = ecwu.User.fromEmail(args.user_email).uuid
epi.run_intake_pipeline("single", [sel_uuid])
|
<commit_before><commit_msg>Add a simple script that runs the pipeline for the single specified user
User can be specified by either email or UUID.
Calls the pipeline functions.<commit_after>
|
import json
import logging
import argparse
import numpy as np
import uuid
import emission.pipeline.intake_stage as epi
import emission.core.wrapper.user as ecwu
if __name__ == '__main__':
np.random.seed(61297777)
parser = argparse.ArgumentParser(prog="intake_single_user")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", "--user_email")
group.add_argument("-u", "--user_uuid")
args = parser.parse_args()
if args.user_uuid:
sel_uuid = uuid.UUID(args.user_uuid)
else:
sel_uuid = ecwu.User.fromEmail(args.user_email).uuid
epi.run_intake_pipeline("single", [sel_uuid])
|
Add a simple script that runs the pipeline for the single specified user
User can be specified by either email or UUID.
Calls the pipeline functions.import json
import logging
import argparse
import numpy as np
import uuid
import emission.pipeline.intake_stage as epi
import emission.core.wrapper.user as ecwu
if __name__ == '__main__':
np.random.seed(61297777)
parser = argparse.ArgumentParser(prog="intake_single_user")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", "--user_email")
group.add_argument("-u", "--user_uuid")
args = parser.parse_args()
if args.user_uuid:
sel_uuid = uuid.UUID(args.user_uuid)
else:
sel_uuid = ecwu.User.fromEmail(args.user_email).uuid
epi.run_intake_pipeline("single", [sel_uuid])
|
<commit_before><commit_msg>Add a simple script that runs the pipeline for the single specified user
User can be specified by either email or UUID.
Calls the pipeline functions.<commit_after>import json
import logging
import argparse
import numpy as np
import uuid
import emission.pipeline.intake_stage as epi
import emission.core.wrapper.user as ecwu
if __name__ == '__main__':
np.random.seed(61297777)
parser = argparse.ArgumentParser(prog="intake_single_user")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", "--user_email")
group.add_argument("-u", "--user_uuid")
args = parser.parse_args()
if args.user_uuid:
sel_uuid = uuid.UUID(args.user_uuid)
else:
sel_uuid = ecwu.User.fromEmail(args.user_email).uuid
epi.run_intake_pipeline("single", [sel_uuid])
|
|
4c5f750801cef0424fd93432b688fb74b079f4c5
|
temba/msgs/migrations/0037_backfill_recipient_counts.py
|
temba/msgs/migrations/0037_backfill_recipient_counts.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msgs', '0036_auto_20151103_1014'),
]
def backfill_recipient_counts(apps, schema):
Broadcast = apps.get_model('msgs', 'Broadcast')
Msg = apps.get_model('msgs', 'Msg')
# get all broadcasts with 0 recipients
for broadcast in Broadcast.objects.filter(recipient_count=0):
# set to # of msgs
broadcast.recipient_count = Msg.objects.filter(broadcast=broadcast).count()
            if broadcast.recipient_count > 0:
broadcast.save()
print "Updated %d to %d recipients" % (broadcast.id, broadcast.recipient_count)
operations = [
migrations.RunPython(backfill_recipient_counts)
]
|
Add migration to backfill recipient counts
|
Add migration to backfill recipient counts
|
Python
|
agpl-3.0
|
tsotetsi/textily-web,reyrodrigues/EU-SMS,pulilab/rapidpro,reyrodrigues/EU-SMS,pulilab/rapidpro,pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web,reyrodrigues/EU-SMS,tsotetsi/textily-web,ewheeler/rapidpro,ewheeler/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web,ewheeler/rapidpro,pulilab/rapidpro,ewheeler/rapidpro
|
Add migration to backfill recipient counts
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msgs', '0036_auto_20151103_1014'),
]
def backfill_recipient_counts(apps, schema):
Broadcast = apps.get_model('msgs', 'Broadcast')
Msg = apps.get_model('msgs', 'Msg')
# get all broadcasts with 0 recipients
for broadcast in Broadcast.objects.filter(recipient_count=0):
# set to # of msgs
broadcast.recipient_count = Msg.objects.filter(broadcast=broadcast).count()
            if broadcast.recipient_count > 0:
broadcast.save()
print "Updated %d to %d recipients" % (broadcast.id, broadcast.recipient_count)
operations = [
migrations.RunPython(backfill_recipient_counts)
]
|
<commit_before><commit_msg>Add migration to backfill recipient counts<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msgs', '0036_auto_20151103_1014'),
]
def backfill_recipient_counts(apps, schema):
Broadcast = apps.get_model('msgs', 'Broadcast')
Msg = apps.get_model('msgs', 'Msg')
# get all broadcasts with 0 recipients
for broadcast in Broadcast.objects.filter(recipient_count=0):
# set to # of msgs
broadcast.recipient_count = Msg.objects.filter(broadcast=broadcast).count()
            if broadcast.recipient_count > 0:
broadcast.save()
print "Updated %d to %d recipients" % (broadcast.id, broadcast.recipient_count)
operations = [
migrations.RunPython(backfill_recipient_counts)
]
|
Add migration to backfill recipient counts# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msgs', '0036_auto_20151103_1014'),
]
def backfill_recipient_counts(apps, schema):
Broadcast = apps.get_model('msgs', 'Broadcast')
Msg = apps.get_model('msgs', 'Msg')
# get all broadcasts with 0 recipients
for broadcast in Broadcast.objects.filter(recipient_count=0):
# set to # of msgs
broadcast.recipient_count = Msg.objects.filter(broadcast=broadcast).count()
            if broadcast.recipient_count > 0:
broadcast.save()
print "Updated %d to %d recipients" % (broadcast.id, broadcast.recipient_count)
operations = [
migrations.RunPython(backfill_recipient_counts)
]
|
<commit_before><commit_msg>Add migration to backfill recipient counts<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msgs', '0036_auto_20151103_1014'),
]
def backfill_recipient_counts(apps, schema):
Broadcast = apps.get_model('msgs', 'Broadcast')
Msg = apps.get_model('msgs', 'Msg')
# get all broadcasts with 0 recipients
for broadcast in Broadcast.objects.filter(recipient_count=0):
# set to # of msgs
broadcast.recipient_count = Msg.objects.filter(broadcast=broadcast).count()
            if broadcast.recipient_count > 0:
broadcast.save()
print "Updated %d to %d recipients" % (broadcast.id, broadcast.recipient_count)
operations = [
migrations.RunPython(backfill_recipient_counts)
]
|
|
b44f13bfa1ac8b3c1bd24e528fc7874a06df0121
|
dev_tools/src/d1_dev/update-requirements-txt.py
|
dev_tools/src/d1_dev/update-requirements-txt.py
|
#!/usr/bin/env python
import shutil
import d1_dev.util
import os
import pip._internal.utils.misc
import re
REQUIREMENTS_FILENAME = 'requirements.txt'
# Modules in my dev environment that are not required by the stack
MODULE_FILTER_REGEX_LIST = {
'beautifulsoup',
'black',
'bs4',
'dataone.*',
'ete3',
'Flask',
'logging-tree',
'PyQt.*',
'pyqt5',
'python-magic',
'redbaron',
}
def main():
repo_dir = d1_dev.util.find_repo_root()
req_path = os.path.join(repo_dir, REQUIREMENTS_FILENAME)
req_backup_path = req_path + ".bak"
try:
os.remove(req_backup_path)
except FileNotFoundError:
pass
shutil.move(req_path, req_backup_path)
req_list = sorted(get_reqs())
write_reqs(req_path, req_list)
def get_reqs():
req_list = []
# noinspection PyProtectedMember
for package_dist in pip._internal.utils.misc.get_installed_distributions(local_only=True):
if not is_filtered_package(package_dist.project_name):
req_str = str(package_dist.as_requirement())
req_list.append(req_str)
return req_list
def is_filtered_package(project_name):
for filter_rx in MODULE_FILTER_REGEX_LIST:
if re.match(filter_rx, project_name, re.IGNORECASE):
            print('Filtered: {} (matched {})'.format(project_name, filter_rx))
return True
print('Included: {}'.format(project_name))
return False
def write_reqs(req_path, req_list):
"""
Args:
req_path:
req_list:
"""
with open(req_path, 'w') as f:
f.write('\n'.join(req_list) + "\n")
if __name__ == '__main__':
main()
|
Add script that creates a filtered list of required packages
|
Add script that creates a filtered list of required packages
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add script that creates a filtered list of required packages
|
#!/usr/bin/env python
import shutil
import d1_dev.util
import os
import pip._internal.utils.misc
import re
REQUIREMENTS_FILENAME = 'requirements.txt'
# Modules in my dev environment that are not required by the stack
MODULE_FILTER_REGEX_LIST = {
'beautifulsoup',
'black',
'bs4',
'dataone.*',
'ete3',
'Flask',
'logging-tree',
'PyQt.*',
'pyqt5',
'python-magic',
'redbaron',
}
def main():
repo_dir = d1_dev.util.find_repo_root()
req_path = os.path.join(repo_dir, REQUIREMENTS_FILENAME)
req_backup_path = req_path + ".bak"
try:
os.remove(req_backup_path)
except FileNotFoundError:
pass
shutil.move(req_path, req_backup_path)
req_list = sorted(get_reqs())
write_reqs(req_path, req_list)
def get_reqs():
req_list = []
# noinspection PyProtectedMember
for package_dist in pip._internal.utils.misc.get_installed_distributions(local_only=True):
if not is_filtered_package(package_dist.project_name):
req_str = str(package_dist.as_requirement())
req_list.append(req_str)
return req_list
def is_filtered_package(project_name):
for filter_rx in MODULE_FILTER_REGEX_LIST:
if re.match(filter_rx, project_name, re.IGNORECASE):
            print('Filtered: {} (matched {})'.format(project_name, filter_rx))
return True
print('Included: {}'.format(project_name))
return False
def write_reqs(req_path, req_list):
"""
Args:
req_path:
req_list:
"""
with open(req_path, 'w') as f:
f.write('\n'.join(req_list) + "\n")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that creates a filtered list of required packages<commit_after>
|
#!/usr/bin/env python
import shutil
import d1_dev.util
import os
import pip._internal.utils.misc
import re
REQUIREMENTS_FILENAME = 'requirements.txt'
# Modules in my dev environment that are not required by the stack
MODULE_FILTER_REGEX_LIST = {
'beautifulsoup',
'black',
'bs4',
'dataone.*',
'ete3',
'Flask',
'logging-tree',
'PyQt.*',
'pyqt5',
'python-magic',
'redbaron',
}
def main():
repo_dir = d1_dev.util.find_repo_root()
req_path = os.path.join(repo_dir, REQUIREMENTS_FILENAME)
req_backup_path = req_path + ".bak"
try:
os.remove(req_backup_path)
except FileNotFoundError:
pass
shutil.move(req_path, req_backup_path)
req_list = sorted(get_reqs())
write_reqs(req_path, req_list)
def get_reqs():
req_list = []
# noinspection PyProtectedMember
for package_dist in pip._internal.utils.misc.get_installed_distributions(local_only=True):
if not is_filtered_package(package_dist.project_name):
req_str = str(package_dist.as_requirement())
req_list.append(req_str)
return req_list
def is_filtered_package(project_name):
for filter_rx in MODULE_FILTER_REGEX_LIST:
if re.match(filter_rx, project_name, re.IGNORECASE):
            print('Filtered: {} (matched {})'.format(project_name, filter_rx))
return True
print('Included: {}'.format(project_name))
return False
def write_reqs(req_path, req_list):
"""
Args:
req_path:
req_list:
"""
with open(req_path, 'w') as f:
f.write('\n'.join(req_list) + "\n")
if __name__ == '__main__':
main()
|
Add script that creates a filtered list of required packages#!/usr/bin/env python
import shutil
import d1_dev.util
import os
import pip._internal.utils.misc
import re
REQUIREMENTS_FILENAME = 'requirements.txt'
# Modules in my dev environment that are not required by the stack
MODULE_FILTER_REGEX_LIST = {
'beautifulsoup',
'black',
'bs4',
'dataone.*',
'ete3',
'Flask',
'logging-tree',
'PyQt.*',
'pyqt5',
'python-magic',
'redbaron',
}
def main():
repo_dir = d1_dev.util.find_repo_root()
req_path = os.path.join(repo_dir, REQUIREMENTS_FILENAME)
req_backup_path = req_path + ".bak"
try:
os.remove(req_backup_path)
except FileNotFoundError:
pass
shutil.move(req_path, req_backup_path)
req_list = sorted(get_reqs())
write_reqs(req_path, req_list)
def get_reqs():
req_list = []
# noinspection PyProtectedMember
for package_dist in pip._internal.utils.misc.get_installed_distributions(local_only=True):
if not is_filtered_package(package_dist.project_name):
req_str = str(package_dist.as_requirement())
req_list.append(req_str)
return req_list
def is_filtered_package(project_name):
for filter_rx in MODULE_FILTER_REGEX_LIST:
if re.match(filter_rx, project_name, re.IGNORECASE):
            print('Filtered: {} (matched {})'.format(project_name, filter_rx))
return True
print('Included: {}'.format(project_name))
return False
def write_reqs(req_path, req_list):
"""
Args:
req_path:
req_list:
"""
with open(req_path, 'w') as f:
f.write('\n'.join(req_list) + "\n")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that creates a filtered list of required packages<commit_after>#!/usr/bin/env python
import shutil
import d1_dev.util
import os
import pip._internal.utils.misc
import re
REQUIREMENTS_FILENAME = 'requirements.txt'
# Modules in my dev environment that are not required by the stack
MODULE_FILTER_REGEX_LIST = {
'beautifulsoup',
'black',
'bs4',
'dataone.*',
'ete3',
'Flask',
'logging-tree',
'PyQt.*',
'pyqt5',
'python-magic',
'redbaron',
}
def main():
repo_dir = d1_dev.util.find_repo_root()
req_path = os.path.join(repo_dir, REQUIREMENTS_FILENAME)
req_backup_path = req_path + ".bak"
try:
os.remove(req_backup_path)
except FileNotFoundError:
pass
shutil.move(req_path, req_backup_path)
req_list = sorted(get_reqs())
write_reqs(req_path, req_list)
def get_reqs():
req_list = []
# noinspection PyProtectedMember
for package_dist in pip._internal.utils.misc.get_installed_distributions(local_only=True):
if not is_filtered_package(package_dist.project_name):
req_str = str(package_dist.as_requirement())
req_list.append(req_str)
return req_list
def is_filtered_package(project_name):
for filter_rx in MODULE_FILTER_REGEX_LIST:
if re.match(filter_rx, project_name, re.IGNORECASE):
            print('Filtered: {} (matched {})'.format(project_name, filter_rx))
return True
print('Included: {}'.format(project_name))
return False
def write_reqs(req_path, req_list):
"""
Args:
req_path:
req_list:
"""
with open(req_path, 'w') as f:
f.write('\n'.join(req_list) + "\n")
if __name__ == '__main__':
main()
|
|
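A quick, hypothetical check of the filter logic (not part of the commit), assuming MODULE_FILTER_REGEX_LIST and is_filtered_package as defined above.

# Each call prints either "Filtered: ..." or "Included: ..." to the console.
for name in ("dataone.libclient", "PyQt5", "Django", "requests"):
    is_filtered_package(name)
# dataone.libclient and PyQt5 match a regex (case-insensitively) and are skipped;
# Django and requests are included and would land in requirements.txt.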
6f2529d1891b5c256394b9c8aa991b25a029b5f1
|
migrations/004_load_seed_file.py
|
migrations/004_load_seed_file.py
|
"""
Load initial user and bucket data from seed files.
"""
import logging
import os
import subprocess
import sys
log = logging.getLogger(__name__)
def up(db):
names = db.collection_names()
if "users" in names:
log.info("users collection already created")
return
if "buckets" in names:
log.info("buckets collection already created")
return
invoke = os.path.join(os.path.dirname(sys.executable), "invoke")
subprocess.call([invoke, "load_seed"])
|
Add a migration to load users and buckets
|
Add a migration to load users and buckets
|
Python
|
mit
|
alphagov/backdrop,alphagov/backdrop,alphagov/backdrop
|
Add a migration to load users and buckets
|
"""
Load initial user and bucket data from seed files.
"""
import logging
import os
import subprocess
import sys
log = logging.getLogger(__name__)
def up(db):
names = db.collection_names()
if "users" in names:
log.info("users collection already created")
return
if "buckets" in names:
log.info("buckets collection already created")
return
invoke = os.path.join(os.path.dirname(sys.executable), "invoke")
subprocess.call([invoke, "load_seed"])
|
<commit_before><commit_msg>Add a migration to load users and buckets<commit_after>
|
"""
Load initial user and bucket data from seed files.
"""
import logging
import os
import subprocess
import sys
log = logging.getLogger(__name__)
def up(db):
names = db.collection_names()
if "users" in names:
log.info("users collection already created")
return
if "buckets" in names:
log.info("buckets collection already created")
return
invoke = os.path.join(os.path.dirname(sys.executable), "invoke")
subprocess.call([invoke, "load_seed"])
|
Add a migration to load users and buckets"""
Load initial user and bucket data from seed files.
"""
import logging
import os
import subprocess
import sys
log = logging.getLogger(__name__)
def up(db):
names = db.collection_names()
if "users" in names:
log.info("users collection already created")
return
if "buckets" in names:
log.info("buckets collection already created")
return
invoke = os.path.join(os.path.dirname(sys.executable), "invoke")
subprocess.call([invoke, "load_seed"])
|
<commit_before><commit_msg>Add a migration to load users and buckets<commit_after>"""
Load initial user and bucket data from seed files.
"""
import logging
import os
import subprocess
import sys
log = logging.getLogger(__name__)
def up(db):
names = db.collection_names()
if "users" in names:
log.info("users collection already created")
return
if "buckets" in names:
log.info("buckets collection already created")
return
invoke = os.path.join(os.path.dirname(sys.executable), "invoke")
subprocess.call([invoke, "load_seed"])
|
|
99282d42a3948b9ed45b02df657c344667ec0cf2
|
src/ggrc/migrations/versions/20150521125008_324d461206_migrate_directive_sections_to_.py
|
src/ggrc/migrations/versions/20150521125008_324d461206_migrate_directive_sections_to_.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate directive_sections to relationships
Revision ID: 324d461206
Revises: a2fc29a71f3
Create Date: 2015-05-21 12:50:08.987209
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '324d461206'
down_revision = 'a2fc29a71f3'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT dc.modified_by_id, dc.created_at, dc.updated_at,
dc.section_id as source_id, 'Clause' as source_type,
dc.directive_id as destination_id,
IFNULL(d.kind, "Policy") as destination_type,
dc.context_id
FROM directive_sections as dc JOIN directives as d ON dc.directive_id = d.id;
"""
op.execute(sql)
op.drop_constraint(
'directive_sections_ibfk_2',
'directive_sections',
type_='foreignkey')
op.drop_constraint(
'directive_sections_ibfk_3',
'directive_sections',
type_='foreignkey')
def downgrade():
op.create_foreign_key(
'directive_sections_ibfk_2',
'directive_sections',
'sections',
['section_id'],
['id'])
op.create_foreign_key(
'directive_sections_ibfk_3',
'directive_sections',
'directives',
['directive_id'],
['id'])
|
Add a migration for directive_sections -> relationships
|
Add a migration for directive_sections -> relationships
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,AleksNeStu/ggrc-core,hyperNURb/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,uskudnik/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,uskudnik/ggrc-core
|
Add a migration for directive_sections -> relationships
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate directive_sections to relationships
Revision ID: 324d461206
Revises: a2fc29a71f3
Create Date: 2015-05-21 12:50:08.987209
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '324d461206'
down_revision = 'a2fc29a71f3'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT dc.modified_by_id, dc.created_at, dc.updated_at,
dc.section_id as source_id, 'Clause' as source_type,
dc.directive_id as destination_id,
IFNULL(d.kind, "Policy") as destination_type,
dc.context_id
FROM directive_sections as dc JOIN directives as d ON dc.directive_id = d.id;
"""
op.execute(sql)
op.drop_constraint(
'directive_sections_ibfk_2',
'directive_sections',
type_='foreignkey')
op.drop_constraint(
'directive_sections_ibfk_3',
'directive_sections',
type_='foreignkey')
def downgrade():
op.create_foreign_key(
'directive_sections_ibfk_2',
'directive_sections',
'sections',
['section_id'],
['id'])
op.create_foreign_key(
'directive_sections_ibfk_3',
'directive_sections',
'directives',
['directive_id'],
['id'])
|
<commit_before><commit_msg>Add a migration for directive_sections -> relationships<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate directive_sections to relationships
Revision ID: 324d461206
Revises: a2fc29a71f3
Create Date: 2015-05-21 12:50:08.987209
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '324d461206'
down_revision = 'a2fc29a71f3'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT dc.modified_by_id, dc.created_at, dc.updated_at,
dc.section_id as source_id, 'Clause' as source_type,
dc.directive_id as destination_id,
IFNULL(d.kind, "Policy") as destination_type,
dc.context_id
FROM directive_sections as dc JOIN directives as d ON dc.directive_id = d.id;
"""
op.execute(sql)
op.drop_constraint(
'directive_sections_ibfk_2',
'directive_sections',
type_='foreignkey')
op.drop_constraint(
'directive_sections_ibfk_3',
'directive_sections',
type_='foreignkey')
def downgrade():
op.create_foreign_key(
'directive_sections_ibfk_2',
'directive_sections',
'sections',
['section_id'],
['id'])
op.create_foreign_key(
'directive_sections_ibfk_3',
'directive_sections',
'directives',
['directive_id'],
['id'])
|
Add a migration for directive_sections -> relationships# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate directive_sections to relationships
Revision ID: 324d461206
Revises: a2fc29a71f3
Create Date: 2015-05-21 12:50:08.987209
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '324d461206'
down_revision = 'a2fc29a71f3'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT dc.modified_by_id, dc.created_at, dc.updated_at,
dc.section_id as source_id, 'Clause' as source_type,
dc.directive_id as destination_id,
IFNULL(d.kind, "Policy") as destination_type,
dc.context_id
FROM directive_sections as dc JOIN directives as d ON dc.directive_id = d.id;
"""
op.execute(sql)
op.drop_constraint(
'directive_sections_ibfk_2',
'directive_sections',
type_='foreignkey')
op.drop_constraint(
'directive_sections_ibfk_3',
'directive_sections',
type_='foreignkey')
def downgrade():
op.create_foreign_key(
'directive_sections_ibfk_2',
'directive_sections',
'sections',
['section_id'],
['id'])
op.create_foreign_key(
'directive_sections_ibfk_3',
'directive_sections',
'directives',
['directive_id'],
['id'])
|
<commit_before><commit_msg>Add a migration for directive_sections -> relationships<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate directive_sections to relationships
Revision ID: 324d461206
Revises: a2fc29a71f3
Create Date: 2015-05-21 12:50:08.987209
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '324d461206'
down_revision = 'a2fc29a71f3'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT dc.modified_by_id, dc.created_at, dc.updated_at,
dc.section_id as source_id, 'Clause' as source_type,
dc.directive_id as destination_id,
IFNULL(d.kind, "Policy") as destination_type,
dc.context_id
FROM directive_sections as dc JOIN directives as d ON dc.directive_id = d.id;
"""
op.execute(sql)
op.drop_constraint(
'directive_sections_ibfk_2',
'directive_sections',
type_='foreignkey')
op.drop_constraint(
'directive_sections_ibfk_3',
'directive_sections',
type_='foreignkey')
def downgrade():
op.create_foreign_key(
'directive_sections_ibfk_2',
'directive_sections',
'sections',
['section_id'],
['id'])
op.create_foreign_key(
'directive_sections_ibfk_3',
'directive_sections',
'directives',
['directive_id'],
['id'])
|
|
5b9d9f531e3544f6d3dfe0a2e48dcaaebf132921
|
test/services/appmanager/test_http.py
|
test/services/appmanager/test_http.py
|
import time
import requests
from weavelib.messaging import Receiver
from weavelib.rpc import RPCServer, ServerAPI
from weavelib.services import BaseService
from weaveserver.core.services import ServiceManager
from weaveserver.services.appmanager import ApplicationService
AUTH = {
"auth1": {
"type": "SYSTEM",
"appid": "appmgr"
},
"auth2": {
"appid": "appid2",
"package": "p"
}
}
class DummyService(BaseService):
def __init__(self, token):
super(DummyService, self).__init__(token)
self.rpc_server = RPCServer("name", "desc", [
ServerAPI("api1", "desc2", [], self.api1),
], self)
def api1(self):
return "OK"
def on_service_start(self):
self.rpc_server.start()
def on_service_stop(self):
self.rpc_server.stop()
class TestApplicationService(object):
def setup_class(cls):
cls.service_manager = ServiceManager()
cls.service_manager.apps = AUTH
cls.service_manager.start_services(["messaging"])
cls.appmgr = ApplicationService("auth1", {"apps": AUTH})
cls.appmgr.exited.set()
cls.appmgr.on_service_start()
# Wait till it starts.
receiver = Receiver("/_system/root_rpc/request")
while True:
try:
receiver.start()
break
except:
time.sleep(1)
def teardown_class(cls):
cls.service_manager.stop()
cls.appmgr.on_service_stop()
def setup_method(self):
self.dummy_service = DummyService("auth2")
self.dummy_service.service_start()
def teardown_method(self):
self.dummy_service.service_stop()
def test_http_rpc(self):
obj = {
"package_name": "p",
"rpc_name": "name",
"api_name": "api1",
"args": [],
"kwargs": {}
}
url = "http://localhost:5000/api/rpc"
for _ in range(1):
res = requests.post(url, json=obj).json()
assert res == "OK"
|
Test case for RPC HTTP handler.
|
Test case for RPC HTTP handler.
|
Python
|
mit
|
supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer
|
Test case for RPC HTTP handler.
|
import time
import requests
from weavelib.messaging import Receiver
from weavelib.rpc import RPCServer, ServerAPI
from weavelib.services import BaseService
from weaveserver.core.services import ServiceManager
from weaveserver.services.appmanager import ApplicationService
AUTH = {
"auth1": {
"type": "SYSTEM",
"appid": "appmgr"
},
"auth2": {
"appid": "appid2",
"package": "p"
}
}
class DummyService(BaseService):
def __init__(self, token):
super(DummyService, self).__init__(token)
self.rpc_server = RPCServer("name", "desc", [
ServerAPI("api1", "desc2", [], self.api1),
], self)
def api1(self):
return "OK"
def on_service_start(self):
self.rpc_server.start()
def on_service_stop(self):
self.rpc_server.stop()
class TestApplicationService(object):
def setup_class(cls):
cls.service_manager = ServiceManager()
cls.service_manager.apps = AUTH
cls.service_manager.start_services(["messaging"])
cls.appmgr = ApplicationService("auth1", {"apps": AUTH})
cls.appmgr.exited.set()
cls.appmgr.on_service_start()
# Wait till it starts.
receiver = Receiver("/_system/root_rpc/request")
while True:
try:
receiver.start()
break
except:
time.sleep(1)
def teardown_class(cls):
cls.service_manager.stop()
cls.appmgr.on_service_stop()
def setup_method(self):
self.dummy_service = DummyService("auth2")
self.dummy_service.service_start()
def teardown_method(self):
self.dummy_service.service_stop()
def test_http_rpc(self):
obj = {
"package_name": "p",
"rpc_name": "name",
"api_name": "api1",
"args": [],
"kwargs": {}
}
url = "http://localhost:5000/api/rpc"
for _ in range(1):
res = requests.post(url, json=obj).json()
assert res == "OK"
|
<commit_before><commit_msg>Test case for RPC HTTP handler.<commit_after>
|
import time
import requests
from weavelib.messaging import Receiver
from weavelib.rpc import RPCServer, ServerAPI
from weavelib.services import BaseService
from weaveserver.core.services import ServiceManager
from weaveserver.services.appmanager import ApplicationService
AUTH = {
"auth1": {
"type": "SYSTEM",
"appid": "appmgr"
},
"auth2": {
"appid": "appid2",
"package": "p"
}
}
class DummyService(BaseService):
def __init__(self, token):
super(DummyService, self).__init__(token)
self.rpc_server = RPCServer("name", "desc", [
ServerAPI("api1", "desc2", [], self.api1),
], self)
def api1(self):
return "OK"
def on_service_start(self):
self.rpc_server.start()
def on_service_stop(self):
self.rpc_server.stop()
class TestApplicationService(object):
def setup_class(cls):
cls.service_manager = ServiceManager()
cls.service_manager.apps = AUTH
cls.service_manager.start_services(["messaging"])
cls.appmgr = ApplicationService("auth1", {"apps": AUTH})
cls.appmgr.exited.set()
cls.appmgr.on_service_start()
# Wait till it starts.
receiver = Receiver("/_system/root_rpc/request")
while True:
try:
receiver.start()
break
except:
time.sleep(1)
def teardown_class(cls):
cls.service_manager.stop()
cls.appmgr.on_service_stop()
def setup_method(self):
self.dummy_service = DummyService("auth2")
self.dummy_service.service_start()
def teardown_method(self):
self.dummy_service.service_stop()
def test_http_rpc(self):
obj = {
"package_name": "p",
"rpc_name": "name",
"api_name": "api1",
"args": [],
"kwargs": {}
}
url = "http://localhost:5000/api/rpc"
for _ in range(1):
res = requests.post(url, json=obj).json()
assert res == "OK"
|
Test case for RPC HTTP handler.import time
import requests
from weavelib.messaging import Receiver
from weavelib.rpc import RPCServer, ServerAPI
from weavelib.services import BaseService
from weaveserver.core.services import ServiceManager
from weaveserver.services.appmanager import ApplicationService
AUTH = {
"auth1": {
"type": "SYSTEM",
"appid": "appmgr"
},
"auth2": {
"appid": "appid2",
"package": "p"
}
}
class DummyService(BaseService):
def __init__(self, token):
super(DummyService, self).__init__(token)
self.rpc_server = RPCServer("name", "desc", [
ServerAPI("api1", "desc2", [], self.api1),
], self)
def api1(self):
return "OK"
def on_service_start(self):
self.rpc_server.start()
def on_service_stop(self):
self.rpc_server.stop()
class TestApplicationService(object):
def setup_class(cls):
cls.service_manager = ServiceManager()
cls.service_manager.apps = AUTH
cls.service_manager.start_services(["messaging"])
cls.appmgr = ApplicationService("auth1", {"apps": AUTH})
cls.appmgr.exited.set()
cls.appmgr.on_service_start()
# Wait till it starts.
receiver = Receiver("/_system/root_rpc/request")
while True:
try:
receiver.start()
break
except:
time.sleep(1)
def teardown_class(cls):
cls.service_manager.stop()
cls.appmgr.on_service_stop()
def setup_method(self):
self.dummy_service = DummyService("auth2")
self.dummy_service.service_start()
def teardown_method(self):
self.dummy_service.service_stop()
def test_http_rpc(self):
obj = {
"package_name": "p",
"rpc_name": "name",
"api_name": "api1",
"args": [],
"kwargs": {}
}
url = "http://localhost:5000/api/rpc"
for _ in range(1):
res = requests.post(url, json=obj).json()
assert res == "OK"
|
<commit_before><commit_msg>Test case for RPC HTTP handler.<commit_after>import time
import requests
from weavelib.messaging import Receiver
from weavelib.rpc import RPCServer, ServerAPI
from weavelib.services import BaseService
from weaveserver.core.services import ServiceManager
from weaveserver.services.appmanager import ApplicationService
AUTH = {
"auth1": {
"type": "SYSTEM",
"appid": "appmgr"
},
"auth2": {
"appid": "appid2",
"package": "p"
}
}
class DummyService(BaseService):
def __init__(self, token):
super(DummyService, self).__init__(token)
self.rpc_server = RPCServer("name", "desc", [
ServerAPI("api1", "desc2", [], self.api1),
], self)
def api1(self):
return "OK"
def on_service_start(self):
self.rpc_server.start()
def on_service_stop(self):
self.rpc_server.stop()
class TestApplicationService(object):
def setup_class(cls):
cls.service_manager = ServiceManager()
cls.service_manager.apps = AUTH
cls.service_manager.start_services(["messaging"])
cls.appmgr = ApplicationService("auth1", {"apps": AUTH})
cls.appmgr.exited.set()
cls.appmgr.on_service_start()
# Wait till it starts.
receiver = Receiver("/_system/root_rpc/request")
while True:
try:
receiver.start()
break
except:
time.sleep(1)
def teardown_class(cls):
cls.service_manager.stop()
cls.appmgr.on_service_stop()
def setup_method(self):
self.dummy_service = DummyService("auth2")
self.dummy_service.service_start()
def teardown_method(self):
self.dummy_service.service_stop()
def test_http_rpc(self):
obj = {
"package_name": "p",
"rpc_name": "name",
"api_name": "api1",
"args": [],
"kwargs": {}
}
url = "http://localhost:5000/api/rpc"
for _ in range(1):
res = requests.post(url, json=obj).json()
assert res == "OK"
|
|
8ba5b29200520d853791943341d41798ff80a248
|
src/repository/migrations/0003_auto_20170524_1503.py
|
src/repository/migrations/0003_auto_20170524_1503.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
Change meta option for Github
|
Change meta option for Github
|
Python
|
bsd-3-clause
|
lozadaOmr/ansible-admin,lozadaOmr/ansible-admin,lozadaOmr/ansible-admin
|
Change meta option for Github
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
<commit_before><commit_msg>Change meta option for Github<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
Change meta option for Github# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
<commit_before><commit_msg>Change meta option for Github<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-24 15:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0002_auto_20170522_2021'),
]
operations = [
migrations.AlterModelOptions(
name='github',
options={'verbose_name': 'github project', 'verbose_name_plural': 'github projects'},
),
]
|
|
21a0948eb1d25e9126e2940cbc7d0496181d6a93
|
setup.py
|
setup.py
|
import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
|
import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
|
Add Django version trove classifiers.
|
Add Django version trove classifiers.
|
Python
|
bsd-3-clause
|
grzes/djangae,asendecka/djangae,grzes/djangae,kirberich/djangae,asendecka/djangae,kirberich/djangae,armirusco/djangae,grzes/djangae,armirusco/djangae,potatolondon/djangae,chargrizzle/djangae,potatolondon/djangae,armirusco/djangae,kirberich/djangae,asendecka/djangae,chargrizzle/djangae,chargrizzle/djangae
|
import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
Add Django version trove classifiers.
|
import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
|
<commit_before>import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
<commit_msg>Add Django version trove classifiers.<commit_after>
|
import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
|
import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
Add Django version trove classifiers.import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
|
<commit_before>import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
<commit_msg>Add Django version trove classifiers.<commit_after>import os
from setuptools import setup, find_packages
NAME = 'djangae'
PACKAGES = find_packages()
DESCRIPTION = 'Django integration with Google App Engine'
URL = "https://github.com/potatolondon/djangae"
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
AUTHOR = 'Potato London Ltd.'
EXTRAS = {
"test": ["webtest"],
}
setup(
name=NAME,
version='0.9.1',
packages=PACKAGES,
# metadata for upload to PyPI
author=AUTHOR,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=["django", "Google App Engine", "GAE"],
url=URL,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
include_package_data=True,
# dependencies
extras_require=EXTRAS,
tests_require=EXTRAS['test'],
)
|
c45ccd0f258fcbb152ffa9597ceb1bacd472f73b
|
web/impact/impact/tests/test_impact_email_backend.py
|
web/impact/impact/tests/test_impact_email_backend.py
|
from mock import patch
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from impact.minimal_email_handler import MinimalEmailHandler
class TestEmailBackend(TestCase):
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_contains_header_if_ses_config_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="test",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertTrue(mock_add_logging_headers.called)
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_does_not_contain_header_if_ses_config_not_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertFalse(mock_add_logging_headers.called)
|
Add test for email backend coverage
|
[AC-7570] Add test for email backend coverage
|
Python
|
mit
|
masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api
|
[AC-7570] Add test for email backend coverage
|
from mock import patch
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from impact.minimal_email_handler import MinimalEmailHandler
class TestEmailBackend(TestCase):
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_contains_header_if_ses_config_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="test",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertTrue(mock_add_logging_headers.called)
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_does_not_contain_header_if_ses_config_not_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertFalse(mock_add_logging_headers.called)
|
<commit_before><commit_msg>[AC-7570] Add test for email backend coverage<commit_after>
|
from mock import patch
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from impact.minimal_email_handler import MinimalEmailHandler
class TestEmailBackend(TestCase):
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_contains_header_if_ses_config_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="test",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertTrue(mock_add_logging_headers.called)
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_does_not_contain_header_if_ses_config_not_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertFalse(mock_add_logging_headers.called)
|
[AC-7570] Add test for email backend coveragefrom mock import patch
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from impact.minimal_email_handler import MinimalEmailHandler
class TestEmailBackend(TestCase):
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_contains_header_if_ses_config_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="test",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertTrue(mock_add_logging_headers.called)
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_does_not_contain_header_if_ses_config_not_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertFalse(mock_add_logging_headers.called)
|
<commit_before><commit_msg>[AC-7570] Add test for email backend coverage<commit_after>from mock import patch
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from impact.minimal_email_handler import MinimalEmailHandler
class TestEmailBackend(TestCase):
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_contains_header_if_ses_config_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="test",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertTrue(mock_add_logging_headers.called)
@patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
@patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
def test_email_does_not_contain_header_if_ses_config_not_set(
self,
mocked_backend,
mock_add_logging_headers
):
with self.settings(
SES_CONFIGURATION_SET="",
EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
MinimalEmailHandler(["a@example.com"],
"subject",
"body").send()
self.assertFalse(mock_add_logging_headers.called)
|