hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67a2c1bfd6641e9143048cbf8bbb0916508a6035 | 318 | py | Python | IO2.py | vitorgt/SCC0210 | f7689be1b72cbf3066ca051faef995f22f446d90 | [
"MIT"
] | null | null | null | IO2.py | vitorgt/SCC0210 | f7689be1b72cbf3066ca051faef995f22f446d90 | [
"MIT"
] | null | null | null | IO2.py | vitorgt/SCC0210 | f7689be1b72cbf3066ca051faef995f22f446d90 | [
"MIT"
] | null | null | null | #reads each line and adds the entire document
s = 0
try:
while(True):
line = input()
if line != "":
line = (" ".join(line.split())).split(' ')
for i in range(0,len(line)):
s += int(line[i])
else:
break
except EOFError:
pass
print(s)
| 19.875 | 54 | 0.468553 | #reads each line and adds the entire document
s = 0
try:
while(True):
line = input()
if line != "":
line = (" ".join(line.split())).split(' ')
for i in range(0,len(line)):
s += int(line[i])
else:
break
except EOFError:
pass
print(s)
| 0 | 0 | 0 |
2205e24fdb38333d09c2048c1cb19b0a45a22485 | 2,501 | py | Python | doc/integrations/pytorch/parlai/scripts/vacuum.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2020-09-27T05:00:06.000Z | 2020-09-27T05:00:06.000Z | doc/integrations/pytorch/parlai/scripts/vacuum.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2021-08-04T11:17:39.000Z | 2021-08-04T11:17:39.000Z | doc/integrations/pytorch/parlai/scripts/vacuum.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2021-05-03T13:27:14.000Z | 2021-05-03T13:27:14.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Reduces the size of a model file by stripping the optimizer.
Assumes we are working with a TorchAgent
"""
import os
import torch
from parlai.core.params import ParlaiParser
from parlai.core.script import ParlaiScript, register_script
from parlai.utils.torch import atomic_save
from parlai.utils.io import PathManager
import parlai.utils.pickle
import parlai.utils.logging as logging
@register_script("vacuum", hidden=True)
| 33.797297 | 86 | 0.594162 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Reduces the size of a model file by stripping the optimizer.
Assumes we are working with a TorchAgent
"""
import os
import torch
from parlai.core.params import ParlaiParser
from parlai.core.script import ParlaiScript, register_script
from parlai.utils.torch import atomic_save
from parlai.utils.io import PathManager
import parlai.utils.pickle
import parlai.utils.logging as logging
@register_script("vacuum", hidden=True)
class Vacuum(ParlaiScript):
@classmethod
def setup_args(cls):
parser = ParlaiParser(
False, False, description='Shrink a model file for release.'
)
parser.add_argument(
# dest is intentionally not model_file so the parlai parser doesn't
# add extra opts to it
'-mf',
'--model-file',
dest='path',
help="Path to model file.",
)
parser.add_argument(
'--no-backup', action='store_true', help="Do not create a backup."
)
return parser
def run(self):
self.opt.log()
model_file = self.opt['path']
if not model_file:
raise RuntimeError('--model-file argument is required')
if not os.path.isfile(model_file):
raise RuntimeError(f"'{model_file}' does not exist")
logging.info(f"Loading {model_file}")
with PathManager.open(model_file, 'rb') as f:
states = torch.load(
f, map_location=lambda cpu, _: cpu, pickle_module=parlai.utils.pickle
)
if not self.opt['no_backup']:
logging.info(f"Backing up {model_file} to {model_file}.unvacuumed")
os.rename(model_file, model_file + ".unvacuumed")
for key in [
'optimizer',
'optimizer_type',
'lr_scheduler',
'lr_scheduler_type',
'warmup_scheduler',
'number_training_updates',
]:
if key in states:
logging.info(f"Deleting key {key}")
del states[key]
keys = ", ".join(states.keys())
logging.info(f"Remaining keys: {keys}")
logging.info(f"Saving to {model_file}")
atomic_save(states, model_file)
| 1,742 | 81 | 22 |
eb9e5d820783831711eed8cb2948ef4b0129eea3 | 349 | py | Python | scripts/management/commands/uncheck_infomail_for_all_guests.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 32 | 2017-02-22T13:38:38.000Z | 2022-03-31T23:29:54.000Z | scripts/management/commands/uncheck_infomail_for_all_guests.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 694 | 2017-02-15T23:09:52.000Z | 2022-03-31T23:16:07.000Z | scripts/management/commands/uncheck_infomail_for_all_guests.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 35 | 2017-09-02T21:13:09.000Z | 2022-02-21T11:30:30.000Z | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from apps.authentication.models import OnlineUser as User
| 24.928571 | 57 | 0.664756 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from apps.authentication.models import OnlineUser as User
class Command(BaseCommand):
def handle(self, *args, **kwargs):
guests = User.objects.filter(field_of_study=0)
for guest in guests:
guest.infomail = False
guest.save()
| 157 | 6 | 49 |
c98684ffeb7fb21a1987fe04102295cc3242f2c9 | 1,557 | py | Python | ExampleSingleInterferogram.py | rdrews-dev/GPRI_Processing | 3066204177fb5f8739815dc21d1b6be620e34798 | [
"MIT"
] | 1 | 2021-11-25T08:57:23.000Z | 2021-11-25T08:57:23.000Z | ExampleSingleInterferogram.py | rdrews-dev/GPRI_Processing | 3066204177fb5f8739815dc21d1b6be620e34798 | [
"MIT"
] | null | null | null | ExampleSingleInterferogram.py | rdrews-dev/GPRI_Processing | 3066204177fb5f8739815dc21d1b6be620e34798 | [
"MIT"
] | null | null | null | import os
import numpy as np
from datetime import date, datetime, timedelta
from GPRIBatchProcessFunctions import *
import pickle
import time
import pylab as plt
## In Python3 Shell: exec(open('Main.py').read())
main()
| 33.847826 | 134 | 0.712909 | import os
import numpy as np
from datetime import date, datetime, timedelta
from GPRIBatchProcessFunctions import *
import pickle
import time
import pylab as plt
## In Python3 Shell: exec(open('Main.py').read())
def main():
## All Folderst with "/" at the end
Root = '../proc2/'
#RootDirectoryWithSlcFolders = f'/esd/esd02/data/radar_data_vol1/Switzerland/Lauterbrunne/201207/20120730LB02_Final/slc/20120730/'
RootDirectoryWithSlcFolders = f'/esd/esd02/data/radar_data_vol1/Switzerland/Lauterbrunnen/201202_201207/'
IntProcFolder = f'{Root}/ints/'
GammaConfigFileFullPath = 'GAMMA_config4Python.py'
## Load Processing Paramters from GammaConfigFile
GetProcessingParametersFromFile(GammaConfigFileFullPath)
# Get SLC Structure with lower antenna removed
SlcStructure = GetSlcStructure(RootDirectoryWithSlcFolders)
SlcStructure = RemoveLowerAntenna(SlcStructure)
##Print contents of SLC structure to get a feel for it
for i, x in enumerate(SlcStructure["SlcDate"]):
print(f'This is the SLC date: {i} -- {x}')
#
#Get Master-rectified SLC for output tifs as background:
GetMasterRecMli(SlcStructure["SlcFullPath"][1])
#
# ## Get and Int Structure:
# ##-------------------------------------------------------
IntStructure = SetupIntStructure(IntProcFolder)
## We don't care about temporal Baselin here. Choose your Index1 and Index2 wisely
IntStructure = AddToIntStructure(IntStructure,SlcStructure,0,1)
GetInterferogramm(IntStructure,0,1,0,0)
main()
| 1,309 | 0 | 23 |
d827b2c87f325f7feeece5d7c6c8c54572b0024a | 13,500 | py | Python | tests/make_summary.py | Dexterp37/rappor | ed69c7020cf354d216d51a4bf4d211e1da2bb7bd | [
"Apache-2.0"
] | null | null | null | tests/make_summary.py | Dexterp37/rappor | ed69c7020cf354d216d51a4bf4d211e1da2bb7bd | [
"Apache-2.0"
] | null | null | null | tests/make_summary.py | Dexterp37/rappor | ed69c7020cf354d216d51a4bf4d211e1da2bb7bd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""Given a regtest result tree, prints an HTML summary to a file.
See HTML skeleton in tests/regtest.html.
"""
import os
import re
import sys
import numpy as np
import matplotlib.pyplot as plt
SUMMARY_ROW = """\
<tfoot style="font-weight: bold; text-align: right">
<tr>
<td>
%(name)s
</td>
<!-- input params -->
<td></td>
<td></td>
<td></td>
<td></td>
<!-- RAPPOR params -->
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<!-- MAP params -->
<td></td>
<td></td>
<!-- Result metrics -->
<td></td>
<td></td>
<td>%(mean_fpr)s</td>
<td>%(mean_fnr)s</td>
<td>%(mean_tv)s</td>
<td>%(mean_am)s</td>
<td>%(mean_time)s</td>
</tr>
</tfoot>
"""
# Navigation and links to plot.
DETAILS = """\
<p style="text-align: right">
<a href="#top">Up</a>
</p>
<a id="%(anchor)s"></a>
<p style="text-align: center">
<img src="%(instance_dir)s/dist.png"/>
</p>
<p>
<a href="%(instance_dir)s">%(name)s files</a>
</p>
"""
# Plots comparing simulations
PLOTS = """ \
<h2>Plots</h2>
<h3 style="text-align: center">Total variation distance</h3>
<p style="text-align: center">
<img src="plots/tv.png"/>
</p>
<h3 style="text-align: center">False negative rate</h3>
<p style="text-align: center">
<img src="plots/fnr.png"/>
</p>
<h3 style="text-align: center">False positive rate</h3>
<p style="text-align: center">
<img src="plots/fpr.png"/>
</p>
<h3 style="text-align: center">Allocated mass</h3>
<p style="text-align: center">
<img src="plots/am.png"/>
</p>
<h3 style="text-align: center">Time</h3>
<p style="text-align: center">
<img src="plots/time.png"/>
</p>
"""
def FormatFloat(x, percent):
"""Formats a floating-point number."""
if percent:
return '{:.1f}%'.format(x * 100.0)
else:
return '{:.3f}'.format(x)
def FormatMeanWithSem(m_std_error, percent=False):
"""Formats an estimate with standard error."""
if m_std_error is None:
return ''
m, std_error = m_std_error
if std_error is None:
return FormatFloat(m, percent)
else:
return '{}±{}'.format(
FormatFloat(m, percent),
FormatFloat(std_error, percent))
def Mean(l):
"""Computes the mean (average) for a list of numbers."""
if l:
return float(sum(l)) / len(l)
else:
return None
def SampleVar(l):
"""Computes the sample variance for a list of numbers."""
if len(l) > 1:
mean = Mean(l)
var = sum([(x - mean) ** 2 for x in l]) / (len(l) - 1)
return var
else:
return None
def StandardErrorEstimate(l):
"""Returns the standard error estimate for a list of numbers.
For a singleton the standard error is assumed to be 10% of its value.
"""
if len(l) > 1:
return (SampleVar(l) / len(l)) ** .5
elif l:
return l[0] / 10.0
else:
return None
def MeanOfMeans(dict_of_lists):
"""Returns the average of averages with the standard error of the estimate.
"""
means = [Mean(dict_of_lists[key]) for key in dict_of_lists
if dict_of_lists[key]]
if means:
# Compute variances of the estimate for each sublist.
se = [StandardErrorEstimate(dict_of_lists[key]) ** 2 for key
in dict_of_lists if dict_of_lists[key]]
return (Mean(means), # Mean over all sublists
sum(se) ** .5 / len(se)) # Standard deviation of the mean
else:
return None
def ParseSpecFile(spec_filename):
"""Parses the spec (parameters) file.
Returns:
An integer and a string. The integer is the number of bogus candidates
and the string is parameters in the HTML format.
"""
with open(spec_filename) as s:
spec_row = s.readline().split()
# Second to last column is 'num_additional' -- the number of bogus
# candidates added
num_additional = int(spec_row[-2])
spec_in_html = ' '.join('<td>%s</td>' % cell for cell in spec_row[1:])
return num_additional, spec_in_html
def ExtractTime(log_filename):
"""Extracts the elapsed time information from the log file.
Returns:
Elapsed time (in seconds) or None in case of failure.
"""
if os.path.isfile(log_filename):
with open(log_filename) as log:
log_str = log.read()
# Matching a line output by analyze.R.
match = re.search(r'Inference took ([0-9.]+) seconds', log_str)
if match:
return float(match.group(1))
return None
def ParseMetrics(metrics_file, log_file, num_additional):
"""Processes the metrics file.
Args:
metrics_file: name of the metrics file
log_file: name of the log.txt file
num_additional: A number of bogus candidates added to the candidate list.
Returns a pair:
- A dictionary of metrics (some can be []).
- An HTML-formatted portion of the report row.
"""
if not os.path.isfile(metrics_file):
metrics_row_str = ['', '', '', '', '', '']
metrics_row_dict = {}
else:
with open(metrics_file) as m:
m.readline()
metrics_row = m.readline().split(',')
(num_actual, num_rappor, num_false_pos, num_false_neg, total_variation,
allocated_mass) = metrics_row
num_actual = int(num_actual)
num_rappor = int(num_rappor)
num_false_pos = int(num_false_pos)
num_false_neg = int(num_false_neg)
total_variation = float(total_variation)
allocated_mass = float(allocated_mass)
# e.g. if there are 20 additional candidates added, and 1 false positive,
# the false positive rate is 5%.
fp_rate = float(num_false_pos) / num_additional if num_additional else 0
# e.g. if there are 100 strings in the true input, and 80 strings
# detected by RAPPOR, then we have 20 false negatives, and a false
# negative rate of 20%.
fn_rate = float(num_false_neg) / num_actual
metrics_row_str = [
str(num_actual),
str(num_rappor),
'%.1f%% (%d)' % (fp_rate * 100, num_false_pos) if num_additional
else '',
'%.1f%% (%d)' % (fn_rate * 100, num_false_neg),
'%.3f' % total_variation,
'%.3f' % allocated_mass,
]
metrics_row_dict = {
'tv': [total_variation],
'fpr': [fp_rate] if num_additional else [],
'fnr': [fn_rate],
'am': [allocated_mass],
}
elapsed_time = ExtractTime(log_file)
if elapsed_time is not None:
metrics_row_str = metrics_row_str + ['%.2f' % elapsed_time]
metrics_row_dict['time'] = [elapsed_time]
# return metrics formatted as HTML table entries
return (metrics_row_dict,
' '.join('<td>%s</td>' % cell for cell in metrics_row_str))
def FormatCell1(test_case, test_instance, metrics_file, log_file, plot_file,
link_to_plots):
"""Outputs an HTML table entry for the first cell of the row.
The row is filled if the metrics file exist. The first cell contains a link
that for short tables points to a plot file inline, for large tables to an
external file.
If the metrics file is missing, the link points to the log file (if one
exists)
"""
relpath_report = '{}/{}_report'.format(test_case, test_instance)
if os.path.isfile(metrics_file):
external_file = plot_file
if link_to_plots:
link = '#{}_{}'.format(test_case, test_instance) # anchor
else:
link = os.path.join(relpath_report, 'dist.png')
else: # no results likely due to an error, puts a link to the log file
external_file = log_file
link = os.path.join(relpath_report, 'log.txt')
if os.path.isfile(external_file):
return '<td><a href="{}">{}</a></td>'.format(link, test_case)
else: # if no file to link to
return '<td>{}</td>'.format(test_case)
def FormatSummaryRow(metrics_lists):
"""Outputs an HTML-formatted summary row."""
means_with_sem = {} # SEM - standard error of the mean
for key in metrics_lists:
means_with_sem[key] = MeanOfMeans(metrics_lists[key])
# If none of the lists is longer than one element, drop the SEM component.
if means_with_sem[key] and max([len(l) for l in metrics_lists[key]]) < 2:
means_with_sem[key] = [means_with_sem[key][0], None]
summary = {
'name': 'Means',
'mean_fpr': FormatMeanWithSem(means_with_sem['fpr'], percent=True),
'mean_fnr': FormatMeanWithSem(means_with_sem['fnr'], percent=True),
'mean_tv': FormatMeanWithSem(means_with_sem['tv'], percent=True),
'mean_am': FormatMeanWithSem(means_with_sem['am'], percent=True),
'mean_time': FormatMeanWithSem(means_with_sem['time']),
}
return SUMMARY_ROW % summary
def FormatPlots(base_dir, test_instances):
"""Outputs HTML-formatted plots."""
result = ''
for instance in test_instances:
# A test instance is identified by the test name and the test run.
test_case, test_instance, _ = instance.split(' ')
instance_dir = test_case + '/' + test_instance + '_report'
if os.path.isfile(os.path.join(base_dir, instance_dir, 'dist.png')):
result += DETAILS % {'anchor': test_case + '_' + test_instance,
'name': '{} (instance {})'.format(test_case,
test_instance),
'instance_dir': instance_dir}
return result
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError, e:
print >>sys.stderr, 'FATAL: %s' % e
sys.exit(1)
| 28.723404 | 78 | 0.644 | #!/usr/bin/python
"""Given a regtest result tree, prints an HTML summary to a file.
See HTML skeleton in tests/regtest.html.
"""
import os
import re
import sys
import numpy as np
import matplotlib.pyplot as plt
SUMMARY_ROW = """\
<tfoot style="font-weight: bold; text-align: right">
<tr>
<td>
%(name)s
</td>
<!-- input params -->
<td></td>
<td></td>
<td></td>
<td></td>
<!-- RAPPOR params -->
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<!-- MAP params -->
<td></td>
<td></td>
<!-- Result metrics -->
<td></td>
<td></td>
<td>%(mean_fpr)s</td>
<td>%(mean_fnr)s</td>
<td>%(mean_tv)s</td>
<td>%(mean_am)s</td>
<td>%(mean_time)s</td>
</tr>
</tfoot>
"""
# Navigation and links to plot.
DETAILS = """\
<p style="text-align: right">
<a href="#top">Up</a>
</p>
<a id="%(anchor)s"></a>
<p style="text-align: center">
<img src="%(instance_dir)s/dist.png"/>
</p>
<p>
<a href="%(instance_dir)s">%(name)s files</a>
</p>
"""
# Plots comparing simulations
PLOTS = """ \
<h2>Plots</h2>
<h3 style="text-align: center">Total variation distance</h3>
<p style="text-align: center">
<img src="plots/tv.png"/>
</p>
<h3 style="text-align: center">False negative rate</h3>
<p style="text-align: center">
<img src="plots/fnr.png"/>
</p>
<h3 style="text-align: center">False positive rate</h3>
<p style="text-align: center">
<img src="plots/fpr.png"/>
</p>
<h3 style="text-align: center">Allocated mass</h3>
<p style="text-align: center">
<img src="plots/am.png"/>
</p>
<h3 style="text-align: center">Time</h3>
<p style="text-align: center">
<img src="plots/time.png"/>
</p>
"""
def FormatFloat(x, percent):
"""Formats a floating-point number."""
if percent:
return '{:.1f}%'.format(x * 100.0)
else:
return '{:.3f}'.format(x)
def FormatMeanWithSem(m_std_error, percent=False):
"""Formats an estimate with standard error."""
if m_std_error is None:
return ''
m, std_error = m_std_error
if std_error is None:
return FormatFloat(m, percent)
else:
return '{}±{}'.format(
FormatFloat(m, percent),
FormatFloat(std_error, percent))
def Mean(l):
"""Computes the mean (average) for a list of numbers."""
if l:
return float(sum(l)) / len(l)
else:
return None
def SampleVar(l):
"""Computes the sample variance for a list of numbers."""
if len(l) > 1:
mean = Mean(l)
var = sum([(x - mean) ** 2 for x in l]) / (len(l) - 1)
return var
else:
return None
def StandardErrorEstimate(l):
"""Returns the standard error estimate for a list of numbers.
For a singleton the standard error is assumed to be 10% of its value.
"""
if len(l) > 1:
return (SampleVar(l) / len(l)) ** .5
elif l:
return l[0] / 10.0
else:
return None
def MeanOfMeans(dict_of_lists):
"""Returns the average of averages with the standard error of the estimate.
"""
means = [Mean(dict_of_lists[key]) for key in dict_of_lists
if dict_of_lists[key]]
if means:
# Compute variances of the estimate for each sublist.
se = [StandardErrorEstimate(dict_of_lists[key]) ** 2 for key
in dict_of_lists if dict_of_lists[key]]
return (Mean(means), # Mean over all sublists
sum(se) ** .5 / len(se)) # Standard deviation of the mean
else:
return None
def ParseSpecFile(spec_filename):
"""Parses the spec (parameters) file.
Returns:
An integer and a string. The integer is the number of bogus candidates
and the string is parameters in the HTML format.
"""
with open(spec_filename) as s:
spec_row = s.readline().split()
# Second to last column is 'num_additional' -- the number of bogus
# candidates added
num_additional = int(spec_row[-2])
spec_in_html = ' '.join('<td>%s</td>' % cell for cell in spec_row[1:])
return num_additional, spec_in_html
def ExtractTime(log_filename):
"""Extracts the elapsed time information from the log file.
Returns:
Elapsed time (in seconds) or None in case of failure.
"""
if os.path.isfile(log_filename):
with open(log_filename) as log:
log_str = log.read()
# Matching a line output by analyze.R.
match = re.search(r'Inference took ([0-9.]+) seconds', log_str)
if match:
return float(match.group(1))
return None
def ParseMetrics(metrics_file, log_file, num_additional):
"""Processes the metrics file.
Args:
metrics_file: name of the metrics file
log_file: name of the log.txt file
num_additional: A number of bogus candidates added to the candidate list.
Returns a pair:
- A dictionary of metrics (some can be []).
- An HTML-formatted portion of the report row.
"""
if not os.path.isfile(metrics_file):
metrics_row_str = ['', '', '', '', '', '']
metrics_row_dict = {}
else:
with open(metrics_file) as m:
m.readline()
metrics_row = m.readline().split(',')
(num_actual, num_rappor, num_false_pos, num_false_neg, total_variation,
allocated_mass) = metrics_row
num_actual = int(num_actual)
num_rappor = int(num_rappor)
num_false_pos = int(num_false_pos)
num_false_neg = int(num_false_neg)
total_variation = float(total_variation)
allocated_mass = float(allocated_mass)
# e.g. if there are 20 additional candidates added, and 1 false positive,
# the false positive rate is 5%.
fp_rate = float(num_false_pos) / num_additional if num_additional else 0
# e.g. if there are 100 strings in the true input, and 80 strings
# detected by RAPPOR, then we have 20 false negatives, and a false
# negative rate of 20%.
fn_rate = float(num_false_neg) / num_actual
metrics_row_str = [
str(num_actual),
str(num_rappor),
'%.1f%% (%d)' % (fp_rate * 100, num_false_pos) if num_additional
else '',
'%.1f%% (%d)' % (fn_rate * 100, num_false_neg),
'%.3f' % total_variation,
'%.3f' % allocated_mass,
]
metrics_row_dict = {
'tv': [total_variation],
'fpr': [fp_rate] if num_additional else [],
'fnr': [fn_rate],
'am': [allocated_mass],
}
elapsed_time = ExtractTime(log_file)
if elapsed_time is not None:
metrics_row_str = metrics_row_str + ['%.2f' % elapsed_time]
metrics_row_dict['time'] = [elapsed_time]
# return metrics formatted as HTML table entries
return (metrics_row_dict,
' '.join('<td>%s</td>' % cell for cell in metrics_row_str))
def FormatCell1(test_case, test_instance, metrics_file, log_file, plot_file,
link_to_plots):
"""Outputs an HTML table entry for the first cell of the row.
The row is filled if the metrics file exist. The first cell contains a link
that for short tables points to a plot file inline, for large tables to an
external file.
If the metrics file is missing, the link points to the log file (if one
exists)
"""
relpath_report = '{}/{}_report'.format(test_case, test_instance)
if os.path.isfile(metrics_file):
external_file = plot_file
if link_to_plots:
link = '#{}_{}'.format(test_case, test_instance) # anchor
else:
link = os.path.join(relpath_report, 'dist.png')
else: # no results likely due to an error, puts a link to the log file
external_file = log_file
link = os.path.join(relpath_report, 'log.txt')
if os.path.isfile(external_file):
return '<td><a href="{}">{}</a></td>'.format(link, test_case)
else: # if no file to link to
return '<td>{}</td>'.format(test_case)
def plots(metrics_lists, base_dir):
for k, v in metrics_lists.iteritems():
_makeplot(k, v, base_dir)
def _makeplot(title, values, base_dir):
plt.figure()
plt.title(title)
vals = []
x_legend = values.keys()
x_legend.sort()
for k in x_legend:
vals.append(np.mean(values[k]))
x = range(len(vals))
fig, ax = plt.subplots(1)
fig.autofmt_xdate()
plt.xticks(x, x_legend)
plt.xlabel('Simulation')
plt.ylabel('Value')
plt.grid(True)
plt.tight_layout()
plt.plot(x, vals)
if not os.path.exists('{}/plots'.format(base_dir)):
os.makedirs('{}/plots'.format(base_dir))
plt.savefig('{}/plots/{}.png'.format(base_dir, title))
def FormatSummaryRow(metrics_lists):
"""Outputs an HTML-formatted summary row."""
means_with_sem = {} # SEM - standard error of the mean
for key in metrics_lists:
means_with_sem[key] = MeanOfMeans(metrics_lists[key])
# If none of the lists is longer than one element, drop the SEM component.
if means_with_sem[key] and max([len(l) for l in metrics_lists[key]]) < 2:
means_with_sem[key] = [means_with_sem[key][0], None]
summary = {
'name': 'Means',
'mean_fpr': FormatMeanWithSem(means_with_sem['fpr'], percent=True),
'mean_fnr': FormatMeanWithSem(means_with_sem['fnr'], percent=True),
'mean_tv': FormatMeanWithSem(means_with_sem['tv'], percent=True),
'mean_am': FormatMeanWithSem(means_with_sem['am'], percent=True),
'mean_time': FormatMeanWithSem(means_with_sem['time']),
}
return SUMMARY_ROW % summary
def FormatPlots(base_dir, test_instances):
"""Outputs HTML-formatted plots."""
result = ''
for instance in test_instances:
# A test instance is identified by the test name and the test run.
test_case, test_instance, _ = instance.split(' ')
instance_dir = test_case + '/' + test_instance + '_report'
if os.path.isfile(os.path.join(base_dir, instance_dir, 'dist.png')):
result += DETAILS % {'anchor': test_case + '_' + test_instance,
'name': '{} (instance {})'.format(test_case,
test_instance),
'instance_dir': instance_dir}
return result
def main(argv):
base_dir = argv[1]
output_file = open(argv[2], 'w')
# This file has the test case names, in the order that they should be
# displayed.
instances_file = os.path.join(base_dir, 'test-instances.txt')
if not os.path.isfile(instances_file):
raise RuntimeError('{} is missing'.format(instances_file))
with open(instances_file) as f:
test_instances = [line.strip() for line in f]
# Metrics are assembled into a dictionary of dictionaries. The top-level
# key is the metric name ('tv', 'fpr', etc.), the second level key is
# the test case. These keys reference a list of floats, which can be empty.
metrics = {
'tv': {}, # total_variation for all test cases
'fpr': {}, # dictionary of false positive rates
'fnr': {}, # dictionary of false negative rates
'am': {}, # dictionary of total allocated masses
'time': {}, # dictionary of total elapsed time measurements
}
# If there are too many tests, the plots are not included in the results
# file. Instead, rows' names are links to the corresponding .png files.
include_plots = len(test_instances) < 20
instances_succeeded = 0
instances_failed = 0
instances_running = 0
for instance in test_instances:
# A test instance is idenfied by the test name and the test run.
test_case, test_instance, _ = instance.split(' ')
spec_file = os.path.join(base_dir, test_case, 'spec.txt')
if not os.path.isfile(spec_file):
raise RuntimeError('{} is missing'.format(spec_file))
num_additional, spec_html = ParseSpecFile(spec_file)
metrics_html = '' # will be filled in later on, if metrics exist
report_dir = os.path.join(base_dir, test_case, test_instance + '_report')
metrics_file = os.path.join(report_dir, 'metrics.csv')
log_file = os.path.join(report_dir, 'log.txt')
plot_file = os.path.join(report_dir, 'dist.png')
cell1_html = FormatCell1(test_case, test_instance, metrics_file, log_file,
plot_file, include_plots)
# ParseMetrics outputs an HTML table row and also updates lists
metrics_dict, metrics_html = ParseMetrics(metrics_file, log_file,
num_additional)
# Update the metrics structure. Initialize dictionaries if necessary.
for m in metrics:
if m in metrics_dict:
if not test_case in metrics[m]:
metrics[m][test_case] = metrics_dict[m]
else:
metrics[m][test_case] += metrics_dict[m]
print >>output_file, '<tr>{}{}{}</tr>'.format(cell1_html,
spec_html, metrics_html)
# Update counters
if 'tv' in metrics_dict:
instances_succeeded += 1
else:
if 'time' in metrics_dict:
instances_failed += 1
else:
if os.path.isfile(log_file):
instances_running += 1
plots(metrics, base_dir)
print >>output_file, FormatSummaryRow(metrics)
print >>output_file, '</tbody>'
print >>output_file, '</table>'
print >>output_file, '<p style="padding-bottom: 3em"></p>' # vertical space
# Plot links.
if include_plots:
print >>output_file, FormatPlots(base_dir, test_instances)
else:
print >>output_file, ('<p>Too many tests to include plots. '
'Click links within rows for details.</p>')
print >>output_file, PLOTS
print ('Instances'
' succeeded: {} failed: {} running: {} total: {}'.
format(instances_succeeded, instances_failed, instances_running,
len(test_instances)))
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError, e:
print >>sys.stderr, 'FATAL: %s' % e
sys.exit(1)
| 4,185 | 0 | 69 |
32b7c5e71fa96a35bb3b6423674b5759c4640848 | 85 | py | Python | tccli/services/cii/__init__.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | [
"Apache-2.0"
] | 47 | 2018-05-31T11:26:25.000Z | 2022-03-08T02:12:45.000Z | tccli/services/cii/__init__.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | [
"Apache-2.0"
] | 23 | 2018-06-14T10:46:30.000Z | 2022-02-28T02:53:09.000Z | tccli/services/cii/__init__.py | zqfan/tencentcloud-cli | b6ad9fced2a2b340087e4e5522121d405f68b615 | [
"Apache-2.0"
] | 22 | 2018-10-22T09:49:45.000Z | 2022-03-30T08:06:04.000Z | # -*- coding: utf-8 -*-
from tccli.services.cii.cii_client import action_caller
| 21.25 | 55 | 0.694118 | # -*- coding: utf-8 -*-
from tccli.services.cii.cii_client import action_caller
| 0 | 0 | 0 |
7ae46e436c9e6cbe209120cb3546acda13d2d78f | 1,956 | py | Python | tests/Grpc_test/Fields_test/FixedField_test.py | shuttl-io/simple-grpc | 03649b25742591d17cdf6982045fc81c61f8adfb | [
"MIT"
] | null | null | null | tests/Grpc_test/Fields_test/FixedField_test.py | shuttl-io/simple-grpc | 03649b25742591d17cdf6982045fc81c61f8adfb | [
"MIT"
] | null | null | null | tests/Grpc_test/Fields_test/FixedField_test.py | shuttl-io/simple-grpc | 03649b25742591d17cdf6982045fc81c61f8adfb | [
"MIT"
] | null | null | null | from simple.gRPC.Fields import Fixed
import unittest | 29.19403 | 88 | 0.53681 | from simple.gRPC.Fields import Fixed
import unittest
class TestFixedField(unittest.TestCase):
def test_unmarshalls(self):
##Parses 32 bit correctly
b = b'\x02\x00\x00\x00'
f = Fixed(2)
f.unmarshall(b)
self.assertEquals(1, f.data)
b = b'\x01\x00\x00\x00'
f.unmarshall(b)
self.assertEquals(-1, f.data)
b = bytes([0x46, 0xaa, 0x25, 0x00])
f.unmarshall(b)
self.assertEquals(1234211, f.data)
##Parses 64 bit correctly
b = b'\x02\x00\x00\x00\x00\x00\x00\x00'
f = Fixed(2, size=64)
f.unmarshall(b)
self.assertEquals(1, f.data)
b = b'\x01\x00\x00\x00\x00\x00\x00\x00'
f.unmarshall(b)
self.assertEquals(-1, f.data)
b = bytes([0x46, 0xaa, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00])
f.unmarshall(b)
self.assertEquals(1234211, f.data)
def test_marshal(self):
f = Fixed(2)
f.data = 1
p = f.serialize()
self.assertEqual(4, len(p[1:]))
self.assertEqual(b'\x02\x00\x00\x00', p[1:])
f.data = -1
p = f.serialize()
self.assertEqual(4, len(p[1:]))
self.assertEqual(b'\x01\x00\x00\x00', p[1:])
f.data = 1234211
p = f.serialize()
self.assertEqual(4, len(p[1:]))
self.assertEqual(bytes([0x46, 0xaa, 0x25, 0x00]), p[1:])
## Serializes 64 bits
f = Fixed(2, size=64)
f.data = 1
p = f.serialize()
self.assertEqual(8, len(p[1:]))
self.assertEqual(b'\x02\x00\x00\x00\x00\x00\x00\x00', p[1:])
f.data = -1
p = f.serialize()
self.assertEqual(8, len(p[1:]))
self.assertEqual(b'\x01\x00\x00\x00\x00\x00\x00\x00', p[1:])
f.data = 1234211
p = f.serialize()
self.assertEqual(8, len(p[1:]))
self.assertEqual(bytes([0x46, 0xaa, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00]), p[1:]) | 1,809 | 19 | 76 |
c67646639ad59e2560a1f3107c3b7d0f9073beb3 | 1,795 | py | Python | deepscratch/models/layers/activations/relu.py | mari-linhares/deep-python-scratch | b447ed20c981db5ffef810b6f80d1638cf7d2ccd | [
"MIT"
] | 9 | 2018-09-18T00:29:10.000Z | 2021-02-20T17:58:30.000Z | deepscratch/models/layers/activations/relu.py | Jagannathrk2020/deeplearning-from-scratch | b447ed20c981db5ffef810b6f80d1638cf7d2ccd | [
"MIT"
] | null | null | null | deepscratch/models/layers/activations/relu.py | Jagannathrk2020/deeplearning-from-scratch | b447ed20c981db5ffef810b6f80d1638cf7d2ccd | [
"MIT"
] | 3 | 2018-09-18T15:47:59.000Z | 2020-08-09T03:10:34.000Z | import numpy as np
from deepscratch.models.layers.activations.activation import Activation
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://arxiv.org/abs/1706.02515,
# https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb
| 35.196078 | 120 | 0.68468 | import numpy as np
from deepscratch.models.layers.activations.activation import Activation
class Relu(Activation):
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
def __call__(self, data):
return np.where(data >= 0, data, 0)
def grads(self, data):
return np.where(data >= 0, 1, 0)
class LeakyRelu():
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
def __init__(self, alpha=0.2):
self.alpha = alpha
def __call__(self, data):
return np.where(data >= 0, data, self.alpha * data)
def grads(self, data):
return np.where(data >= 0, 1, self.alpha)
class Elu():
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
def __init__(self, alpha=0.2):
self.alpha = alpha
def __call__(self, data):
return np.where(data >= 0.0, data, self.alpha * (np.exp(data) - 1))
def grads(self, data):
return np.where(data >= 0.0, 1, self.__call__(data) + self.alpha)
class Selu():
# https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
# https://arxiv.org/abs/1706.02515,
# https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb
def __init__(self):
self.alpha = 1.6732632423543772848170429916717
self.scale = 1.0507009873554804934193349852946
def __call__(self, data):
return self.scale * np.where(data >= 0.0, data, self.alpha*(np.exp(data)-1))
def grads(self, data):
return self.scale * np.where(data >= 0.0, 1, self.alpha * np.exp(data))
| 710 | -18 | 391 |
99539ada5453c5e92cb00460c8d56d4b6550efe1 | 4,552 | py | Python | course_list.py | IIIT-Delhi/HCD-IIITD | eb940560736f5c55bac2dea0cc00b0227e7cf2c4 | [
"MIT"
] | null | null | null | course_list.py | IIIT-Delhi/HCD-IIITD | eb940560736f5c55bac2dea0cc00b0227e7cf2c4 | [
"MIT"
] | null | null | null | course_list.py | IIIT-Delhi/HCD-IIITD | eb940560736f5c55bac2dea0cc00b0227e7cf2c4 | [
"MIT"
] | null | null | null | courseJson=[
{
'Course_Name': 'Introduction to Engineering Design',
'Code': 'DES130',
'Course_Text': 'Introduction to Engineering Design is a core, multidisciplinary course offered with an aim to ignite the young minds with concepts in design and innovation. Using the tools and skills learnt in the lab, the students participate in a project challenge to build functional prototypes in the field of intelligent buildings, automotive, and robotics which will provide solutions to real life problems.'
},
{
'Course_Name': 'Design Drawing and Visualization',
'Code': 'DES101',
'Course_Text': 'This course fosters understanding of drawing and sketching as a means to develop observational skills through the study of the environment and as a tool for visual representation, ideation/conceptualization, visualization and communication or presentation of design ideas through sketching and drawing from both observation and memory.'
},
{
'Course_Name': 'Visual Design & Communication',
'Code': 'DES202',
'Course_Text': 'For a designer to communicate more concisely and in a visually appropriate manner, it is necessary to use commonly understood principles, perspective and design layout standards. Together, these conventions constitute a visual language, and help to ensure that the drawing is clear and relatively easy to understand.'
},
{
'Course_Name': 'Design Processes and Perspectives',
'Code': 'DES201',
'Course_Text': 'Broadly, the course gives students the opportunity to develop essential design thinking skills such as exploring the designed space to identify problem, applying the design thinking process to problems, visualizing design solutions, refining final designs and communicating ideas in visually appropriate form through assignments and projects.'
},
{
'Course_Name': 'Animation & Graphics',
'Code': 'DES302',
'Course_Text': 'This course will take you right through the fundamentals of Graphic Design from photorealism up to the point where fantasy and imagination begins. You will understand usage of the colour wheel and its role in creating Digital Art.'
},
{
'Course_Name': 'Film Making and Radio Podcasting',
'Code': 'DES303',
'Course_Text': 'This course will not only give you the basic technical skills but will also hand hold you into making a aesthetically correct decisions in assembling a film.'
},
{
'Course_Name': 'Wearable Applications, Research, Devices, Interactions (WARDI)',
'Code': 'DES513',
'Course_Text': 'This is a course about the current paradigm of Wearable Computing. In this course, we will cover the origins, pioneering contributions, and principles of Wearable Computing. With this foundation, we will initiate our exploration into the space by learning how to design physical (device form factor), digital (applications) as well as human (interaction techniques) aspects of Wearables.'
},
{
'Course_Name': 'Digital Audio - (Procedural Game Audio, Algorithmic Composition & Sound Synthesis)',
'Code': 'DES514',
'Course_Text': 'This hands-on project-based course will introduce students to the world of digital audio. Topics include real-time sound synthesis, machine listening, procedural game audio, algorithmic composition, digital instrument design and sound design. '
},
{
'Course_Name': 'Information systems in Public Health',
'Code': 'DES5XX',
'Course_Text': 'This course will give an understanding of public health information systems. It will include key concepts of public health, sources of public health information, ethics in public health practice and research, and an understanding of various public health information systems in use.'
},
{
'Course_Name': 'Game Development & Design',
'Code': 'DES512',
'Course_Text': 'This hands-on project-based course will introduce students to the fundamentals of game development & design using the Unreal 4 game engine. Topics include level design, lighting, materials, particle effects, game AI, game logic, user input mappings, audio, physics and motion.'
},
{
'Course_Name': ' Introduction to 3D Animation',
'Code': 'DES5XX',
'Course_Text': 'This course introduces students to: (i) Basics and fundamental principles of animation (ii) Workflow of animation (iii) Introduction to 3D Animation'
}
] | 79.859649 | 422 | 0.728032 | courseJson=[
{
'Course_Name': 'Introduction to Engineering Design',
'Code': 'DES130',
'Course_Text': 'Introduction to Engineering Design is a core, multidisciplinary course offered with an aim to ignite the young minds with concepts in design and innovation. Using the tools and skills learnt in the lab, the students participate in a project challenge to build functional prototypes in the field of intelligent buildings, automotive, and robotics which will provide solutions to real life problems.'
},
{
'Course_Name': 'Design Drawing and Visualization',
'Code': 'DES101',
'Course_Text': 'This course fosters understanding of drawing and sketching as a means to develop observational skills through the study of the environment and as a tool for visual representation, ideation/conceptualization, visualization and communication or presentation of design ideas through sketching and drawing from both observation and memory.'
},
{
'Course_Name': 'Visual Design & Communication',
'Code': 'DES202',
'Course_Text': 'For a designer to communicate more concisely and in a visually appropriate manner, it is necessary to use commonly understood principles, perspective and design layout standards. Together, these conventions constitute a visual language, and help to ensure that the drawing is clear and relatively easy to understand.'
},
{
'Course_Name': 'Design Processes and Perspectives',
'Code': 'DES201',
'Course_Text': 'Broadly, the course gives students the opportunity to develop essential design thinking skills such as exploring the designed space to identify problem, applying the design thinking process to problems, visualizing design solutions, refining final designs and communicating ideas in visually appropriate form through assignments and projects.'
},
{
'Course_Name': 'Animation & Graphics',
'Code': 'DES302',
'Course_Text': 'This course will take you right through the fundamentals of Graphic Design from photorealism up to the point where fantasy and imagination begins. You will understand usage of the colour wheel and its role in creating Digital Art.'
},
{
'Course_Name': 'Film Making and Radio Podcasting',
'Code': 'DES303',
'Course_Text': 'This course will not only give you the basic technical skills but will also hand hold you into making a aesthetically correct decisions in assembling a film.'
},
{
'Course_Name': 'Wearable Applications, Research, Devices, Interactions (WARDI)',
'Code': 'DES513',
'Course_Text': 'This is a course about the current paradigm of Wearable Computing. In this course, we will cover the origins, pioneering contributions, and principles of Wearable Computing. With this foundation, we will initiate our exploration into the space by learning how to design physical (device form factor), digital (applications) as well as human (interaction techniques) aspects of Wearables.'
},
{
'Course_Name': 'Digital Audio - (Procedural Game Audio, Algorithmic Composition & Sound Synthesis)',
'Code': 'DES514',
'Course_Text': 'This hands-on project-based course will introduce students to the world of digital audio. Topics include real-time sound synthesis, machine listening, procedural game audio, algorithmic composition, digital instrument design and sound design. '
},
{
'Course_Name': 'Information systems in Public Health',
'Code': 'DES5XX',
'Course_Text': 'This course will give an understanding of public health information systems. It will include key concepts of public health, sources of public health information, ethics in public health practice and research, and an understanding of various public health information systems in use.'
},
{
'Course_Name': 'Game Development & Design',
'Code': 'DES512',
'Course_Text': 'This hands-on project-based course will introduce students to the fundamentals of game development & design using the Unreal 4 game engine. Topics include level design, lighting, materials, particle effects, game AI, game logic, user input mappings, audio, physics and motion.'
},
{
'Course_Name': ' Introduction to 3D Animation',
'Code': 'DES5XX',
'Course_Text': 'This course introduces students to: (i) Basics and fundamental principles of animation (ii) Workflow of animation (iii) Introduction to 3D Animation'
}
] | 0 | 0 | 0 |
e8172569ea6db90a64e2532de8ddbf54122415c8 | 495 | py | Python | 影像處理/透明背景轉白.py | jell0213/MUNIT_DataHiding | 75cb80a7ee5175c0a2235336e230ce3759f5b296 | [
"Unlicense"
] | null | null | null | 影像處理/透明背景轉白.py | jell0213/MUNIT_DataHiding | 75cb80a7ee5175c0a2235336e230ce3759f5b296 | [
"Unlicense"
] | null | null | null | 影像處理/透明背景轉白.py | jell0213/MUNIT_DataHiding | 75cb80a7ee5175c0a2235336e230ce3759f5b296 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
for i in range(10):
image = Image.open('output{:08d}.png'.format(i))
newImage = []
for item in image.getdata():
if item[:4] == (0, 0, 0 , 0): #將透明區(0,0,0,0)轉成(255,255,255)
newImage.append((255, 255, 255))
else:
newImage.append(item)
image.putdata(newImage)
image = image.convert('RGB')#RGBA轉RGB
image.save('output{:08d}_removebg.png'.format(i))
| 33 | 85 | 0.561616 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
for i in range(10):
image = Image.open('output{:08d}.png'.format(i))
newImage = []
for item in image.getdata():
if item[:4] == (0, 0, 0 , 0): #將透明區(0,0,0,0)轉成(255,255,255)
newImage.append((255, 255, 255))
else:
newImage.append(item)
image.putdata(newImage)
image = image.convert('RGB')#RGBA轉RGB
image.save('output{:08d}_removebg.png'.format(i))
| 0 | 0 | 0 |
f1f558d8438912cf0ce7a1910e128cfbfe0504c9 | 2,111 | py | Python | scripts/check_ugc_backups.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 61 | 2015-11-10T17:13:46.000Z | 2021-08-06T17:58:30.000Z | scripts/check_ugc_backups.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 13 | 2015-11-11T07:49:41.000Z | 2021-06-09T03:45:31.000Z | scripts/check_ugc_backups.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | [
"BSD-3-Clause"
] | 18 | 2015-11-11T04:50:04.000Z | 2021-08-20T00:57:11.000Z | from datetime import datetime, date, timedelta
import os
import sys; sys.path += ['/var/canvas/common', '../../common']
import yaml
from boto.s3.connection import S3Connection
from configuration import aws
# results format
# {
# 'start_time': start_time,
# 'end_time': end_time,
# 'time': (end_time - start_time),
# 'stored': stored,
# 'skipped': skipped,
# 'failed': failed,
# 'size': backup_size_str,
# }
results_bucket_name = 'canvas-ugc-backup-logging'
key_format_str = 'ugc-backup-results-{0}'
if __name__ == '__main__':
check_backups()
| 27.415584 | 93 | 0.641402 | from datetime import datetime, date, timedelta
import os
import sys; sys.path += ['/var/canvas/common', '../../common']
import yaml
from boto.s3.connection import S3Connection
from configuration import aws
# results format
# {
# 'start_time': start_time,
# 'end_time': end_time,
# 'time': (end_time - start_time),
# 'stored': stored,
# 'skipped': skipped,
# 'failed': failed,
# 'size': backup_size_str,
# }
results_bucket_name = 'canvas-ugc-backup-logging'
key_format_str = 'ugc-backup-results-{0}'
def datestr(_date=date.today()):
return "{0}{1}{2}".format(_date.year, str(_date.month).zfill(2), str(_date.day).zfill(2))
def get_last_key():
today = date.today()
yesterday = today - timedelta(days=1)
return key_format_str.format(datestr(yesterday))
def max_key(bucket):
max_key = ''
for key in bucket:
if key.name > max_key:
max_key = key.name
return max_key
def are_results_recent(results):
today = datetime.utcnow()
start = results['start_time']
yesterday = today - timedelta(days=2)
if start < yesterday:
return False
return True
def print_results(results):
start = results['start_time']
end = results['end_time']
print "Start: {0}".format(start)
print "Finished: {0}".format(end)
print "Time: {0}".format(str(end - start))
print "Stored: {0}".format(results['stored'])
print "Skipped: {0}".format(results['skipped'])
print "Failed: {0}".format(results['failed'])
print "Size: {0}".format(results['size'])
try:
print "Failed list:\n{0}".format(results['failed_list'])
except KeyError:
pass
def check_backups():
conn = S3Connection(*aws)
bucket = conn.get_bucket(results_bucket_name)
key = max_key(bucket)
results_str = bucket.get_key(key).get_contents_as_string()
results = yaml.load(results_str)
print_results(results)
if are_results_recent(results) and results['failed'] == 0:
sys.exit(0)
else:
sys.exit(1)
if __name__ == '__main__':
check_backups()
| 1,366 | 0 | 138 |
ed16cd47815e11fe805c30346bd8dead959b9fdc | 1,467 | py | Python | banana.py | pickfire/banana | b9f307bf8bcadc41f3f7212a25c87a6c274fc7ab | [
"0BSD"
] | null | null | null | banana.py | pickfire/banana | b9f307bf8bcadc41f3f7212a25c87a6c274fc7ab | [
"0BSD"
] | null | null | null | banana.py | pickfire/banana | b9f307bf8bcadc41f3f7212a25c87a6c274fc7ab | [
"0BSD"
] | null | null | null | #!/usr/bin/env python3
import sys
from threading import Thread, Event
print("INIT 1 0")
run = 1
pool = []
while run or pool:
command, id, *name = sys.stdin.readline().split()
if command == "RESOLVE":
Resolve(id, *name)
#print("print", pool)
#pool[-1].start()
elif command == "CANCEL":
pass
| 27.679245 | 76 | 0.546694 | #!/usr/bin/env python3
import sys
from threading import Thread, Event
class TimeoutError(Exception):
pass
class Resolve(Thread):
def __init__(self, id, name):
self.id = id
self.finished = Event()
self.t = Thread.__init__(self, None, self.resolve, name, (id, name))
self.start()
Thread(None, self.timeout).start()
def resolve(self, id, name):
f = open("hosts")
while not self.finished.is_set():
for l in f:
if l == '\n' or l[0] == '#':
continue
if name in l.split()[1:]:
self._stop(f'RESOLVED {self.id} 0 {l.split()[0]}')
self._stop(f'RESOLVED {self.id} 3 "{self.name} not available"')
f.close()
#except Exception:
# self._stop(f'RESOLVED {self.id} 1 "fail to resolve"')
def timeout(self, interval=1):
self.finished.wait(interval)
self._stop(f'RESOLVED {self.id} 4 "timeout exceeded"')
def cancel(self):
self._stop(f'CANCELED {self.id}')
def _stop(self, message):
if not self.finished.is_set():
self.finished.set()
return print(message)
print("INIT 1 0")
run = 1
pool = []
while run or pool:
command, id, *name = sys.stdin.readline().split()
if command == "RESOLVE":
Resolve(id, *name)
#print("print", pool)
#pool[-1].start()
elif command == "CANCEL":
pass
| 932 | 19 | 180 |
be74d10a74b162ac915da849a5178a723256e461 | 3,837 | py | Python | tests/brownie_utils.py | BoostryJP/ibet-SmartContract | dc3f73a708ef145e7200ce58fce4e8171e21d3c2 | [
"Apache-2.0"
] | 10 | 2021-06-12T08:43:50.000Z | 2022-02-17T14:24:48.000Z | tests/brownie_utils.py | BoostryJP/ibet-SmartContract | dc3f73a708ef145e7200ce58fce4e8171e21d3c2 | [
"Apache-2.0"
] | 44 | 2021-04-11T06:43:10.000Z | 2022-03-30T12:42:32.000Z | tests/brownie_utils.py | BoostryJP/ibet-SmartContract | dc3f73a708ef145e7200ce58fce4e8171e21d3c2 | [
"Apache-2.0"
] | 1 | 2022-03-09T07:27:57.000Z | 2022-03-09T07:27:57.000Z | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import brownie
from brownie import web3
from brownie.network.contract import Contract, ContractTx, ContractCall
_BROWNIE_RESERVED_NAMES = [
'abi', 'at', 'bytecode', 'deploy', 'get_method', 'info', 'remove', 'selectors', 'signatures', 'topics', 'tx'
]
"""
Brownieコントラクトに定義済みのプロパティ名。
同名の公開関数がスマートコントラクトに存在するとBrownieでのデプロイ時にエラーになる。
"""
def force_deploy(deployer, contract, *deploy_args):
"""
Brownieだとエラーが発生するコントラクトを強制的にデプロイする。
Brownieでは brownie.network.contract.Contract に定義済みプロパティと
同名の公開関数を持つコントラクトはエラーとなりデプロイできない。
この関数はBrownieを利用せずweb3で直接デプロイすることでエラーを回避する。
なお、この関数により生成したContractオブジェクトではBrownieが提供する一部のDebug機能は使用できない。
使用例
>>> returned_contract = force_deploy(deployer, contract, *deploy_args)
>>> # 普通の関数はそのまま使用できる。
>>> returned_contract.nameOfFunction.transact({'from': deployer})
>>> # エラーの原因となる関数は `.functions` 経由でアクセスする。
>>> returned_contract.functions.signatures()
>>> returned_contract.functions.remove.transact({'from': deployer})
:param deployer: コントラクトをデプロイするアカウント
:param contract: Brownieのコントラクトオブジェクト
:param deploy_args: コントラクトのコンストラクタ引数
:return: Brownieのコントラクトインスタンス
"""
# 引数の型変換 (Note: web3.pyとBrownieでは型変換規則が異なる)
constructor_abi = list(filter(lambda entry: entry['type'] == 'constructor', contract.abi))
if len(constructor_abi) == 1:
deploy_args = brownie.convert.normalize.format_input(constructor_abi[0], deploy_args)
# web3を用いてデプロイする
web3_contract = web3.eth.contract(abi=contract.abi, bytecode=contract.bytecode)
txn_hash = web3_contract.constructor(*deploy_args).transact({'from': deployer.address})
receipt = web3.eth.waitForTransactionReceipt(txn_hash)
contract_address = receipt['contractAddress']
# Brownieでエラーを発生させるメソッドを取り除いたABIを作成する
# このABIを用いることでBrownieのContractオブジェクトが作成できるようになる
brownie_safe_abi = []
excluded_function_abi = []
for abi_entry in contract.abi:
if abi_entry['type'] == 'function' and abi_entry['name'] in _BROWNIE_RESERVED_NAMES:
excluded_function_abi.append(abi_entry)
else:
brownie_safe_abi.append(abi_entry)
contract_name = _resolve_contract_name(contract) + '__brownie_utils'
brownie_contract = Contract.from_abi(contract_name, contract_address, brownie_safe_abi)
# ABIから削除したメソッドを復元する
# (オーバロードには未対応)
brownie_contract.functions = _BrownieUnsafeFunctionContainer()
for abi_entry in excluded_function_abi:
name = abi_entry['name']
if _is_constant(abi_entry):
recovered_function = ContractCall(contract_address, abi_entry, name, None)
else:
recovered_function = ContractTx(contract_address, abi_entry, name, None)
setattr(brownie_contract.functions, name, recovered_function)
return brownie_contract
class _BrownieUnsafeFunctionContainer:
"""Brownieでエラーとなるスマートコントラクトの関数を保持するクラス"""
pass
| 34.258929 | 112 | 0.738598 | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import brownie
from brownie import web3
from brownie.network.contract import Contract, ContractTx, ContractCall
_BROWNIE_RESERVED_NAMES = [
'abi', 'at', 'bytecode', 'deploy', 'get_method', 'info', 'remove', 'selectors', 'signatures', 'topics', 'tx'
]
"""
Brownieコントラクトに定義済みのプロパティ名。
同名の公開関数がスマートコントラクトに存在するとBrownieでのデプロイ時にエラーになる。
"""
def force_deploy(deployer, contract, *deploy_args):
"""
Brownieだとエラーが発生するコントラクトを強制的にデプロイする。
Brownieでは brownie.network.contract.Contract に定義済みプロパティと
同名の公開関数を持つコントラクトはエラーとなりデプロイできない。
この関数はBrownieを利用せずweb3で直接デプロイすることでエラーを回避する。
なお、この関数により生成したContractオブジェクトではBrownieが提供する一部のDebug機能は使用できない。
使用例
>>> returned_contract = force_deploy(deployer, contract, *deploy_args)
>>> # 普通の関数はそのまま使用できる。
>>> returned_contract.nameOfFunction.transact({'from': deployer})
>>> # エラーの原因となる関数は `.functions` 経由でアクセスする。
>>> returned_contract.functions.signatures()
>>> returned_contract.functions.remove.transact({'from': deployer})
:param deployer: コントラクトをデプロイするアカウント
:param contract: Brownieのコントラクトオブジェクト
:param deploy_args: コントラクトのコンストラクタ引数
:return: Brownieのコントラクトインスタンス
"""
# 引数の型変換 (Note: web3.pyとBrownieでは型変換規則が異なる)
constructor_abi = list(filter(lambda entry: entry['type'] == 'constructor', contract.abi))
if len(constructor_abi) == 1:
deploy_args = brownie.convert.normalize.format_input(constructor_abi[0], deploy_args)
# web3を用いてデプロイする
web3_contract = web3.eth.contract(abi=contract.abi, bytecode=contract.bytecode)
txn_hash = web3_contract.constructor(*deploy_args).transact({'from': deployer.address})
receipt = web3.eth.waitForTransactionReceipt(txn_hash)
contract_address = receipt['contractAddress']
# Brownieでエラーを発生させるメソッドを取り除いたABIを作成する
# このABIを用いることでBrownieのContractオブジェクトが作成できるようになる
brownie_safe_abi = []
excluded_function_abi = []
for abi_entry in contract.abi:
if abi_entry['type'] == 'function' and abi_entry['name'] in _BROWNIE_RESERVED_NAMES:
excluded_function_abi.append(abi_entry)
else:
brownie_safe_abi.append(abi_entry)
contract_name = _resolve_contract_name(contract) + '__brownie_utils'
brownie_contract = Contract.from_abi(contract_name, contract_address, brownie_safe_abi)
# ABIから削除したメソッドを復元する
# (オーバロードには未対応)
brownie_contract.functions = _BrownieUnsafeFunctionContainer()
for abi_entry in excluded_function_abi:
name = abi_entry['name']
if _is_constant(abi_entry):
recovered_function = ContractCall(contract_address, abi_entry, name, None)
else:
recovered_function = ContractTx(contract_address, abi_entry, name, None)
setattr(brownie_contract.functions, name, recovered_function)
return brownie_contract
class _BrownieUnsafeFunctionContainer:
"""Brownieでエラーとなるスマートコントラクトの関数を保持するクラス"""
pass
def _resolve_contract_name(contract):
# コントラクト名は非公開となっているため、存在確認してから取得する
if hasattr(contract, '_name'):
return str(contract._name)
else:
return 'None'
def _is_constant(abi):
if "constant" in abi:
return abi["constant"]
else:
return abi["stateMutability"] in ("view", "pure")
| 347 | 0 | 46 |
b8be197bccecab3b3368a9cbcf43431337f4796d | 1,984 | py | Python | Maths/perfects.py | Axelancerr/CollegeWork | c0ee6e5a2bcb50a57b38621cbd3602e88d34f570 | [
"MIT"
] | 2 | 2021-11-04T22:21:05.000Z | 2022-03-28T11:28:56.000Z | Maths/perfects.py | Axelancerr/CollegeWork | c0ee6e5a2bcb50a57b38621cbd3602e88d34f570 | [
"MIT"
] | null | null | null | Maths/perfects.py | Axelancerr/CollegeWork | c0ee6e5a2bcb50a57b38621cbd3602e88d34f570 | [
"MIT"
] | null | null | null | # Define a function that will check if the given
# number is perfect and return a boolean value.
# Ask they user how many perfect numbers they want.
count = input("How many perfect numbers do you want? (Recommend less than 4, as any higher can take a loooong time): ")
try:
# Try to convert the user input to an integer (from string).
count = int(count)
except ValueError:
# If the user did not input a valid integer value, tell them.
print("That was not a valid number.")
else:
# Declare a list to store the perfect numbers in.
perfects = []
# Define a stating number.
current_number = 1
# Start a loop that will run as long as the number of found
# perfect numbers is less than the amount the user asked for.
while len(perfects) < count:
# If the current number is perfect, append it
# to the list of perfect numbers.
if is_perfect(current_number):
perfects.append(current_number)
# Increment the current number by one.
current_number += 1
# Print the final list of perfect numbers.
print(perfects)
| 34.206897 | 120 | 0.674899 | # Define a function that will check if the given
# number is perfect and return a boolean value.
def is_perfect(number: int) -> bool:
# Define a list to store the divisors of the given number in.
divisors = []
# Loop over all the numbers (potential divisors) leading up
# to the given number.
for potential_divisor in range(1, number - 1):
# If the given number divided by the current potential_divisor
# does not produce a remainder, it is a divisor and can be added
# to the list of divisors.
if number % potential_divisor == 0:
divisors.append(potential_divisor)
# By now, we should have all the divisors/factors of the given number, so
# we can check if the sum of all of them equals the given number. If so the
# number is considered perfect and we can return True, else return False as
# it is not perfect.
if sum(divisors) == number:
return True
return False
# Ask they user how many perfect numbers they want.
count = input("How many perfect numbers do you want? (Recommend less than 4, as any higher can take a loooong time): ")
try:
# Try to convert the user input to an integer (from string).
count = int(count)
except ValueError:
# If the user did not input a valid integer value, tell them.
print("That was not a valid number.")
else:
# Declare a list to store the perfect numbers in.
perfects = []
# Define a stating number.
current_number = 1
# Start a loop that will run as long as the number of found
# perfect numbers is less than the amount the user asked for.
while len(perfects) < count:
# If the current number is perfect, append it
# to the list of perfect numbers.
if is_perfect(current_number):
perfects.append(current_number)
# Increment the current number by one.
current_number += 1
# Print the final list of perfect numbers.
print(perfects)
| 848 | 0 | 22 |
77381d9afb40065890dbb1483c4ee0ec2804adba | 2,033 | py | Python | raster_py/raster.py | gregorulm/s-raster-proof-of-concept | a3af41d3d1d6af2445849950a7c89515f75a10d8 | [
"MIT"
] | 1 | 2021-01-11T11:17:10.000Z | 2021-01-11T11:17:10.000Z | raster_py/raster.py | gregorulm/s-raster-proof-of-concept | a3af41d3d1d6af2445849950a7c89515f75a10d8 | [
"MIT"
] | null | null | null | raster_py/raster.py | gregorulm/s-raster-proof-of-concept | a3af41d3d1d6af2445849950a7c89515f75a10d8 | [
"MIT"
] | 1 | 2020-07-05T21:09:12.000Z | 2020-07-05T21:09:12.000Z | """
Contraction Clustering (RASTER):
Reference Implementation in Python with an Example
(c) 2016 - 2020 Fraunhofer-Chalmers Centre for Industrial Mathematics
Algorithm development and implementation:
Gregor Ulm (gregor.ulm@fcc.chalmers.se)
Requirements:
. Python 3
For a description of the algorithm including relevant theory, please
consult our paper on Contraction Clustering (RASTER).
To run this script, type
> python3 raster.py
"""
import os
import clustering as c
if __name__ == "__main__":
# load input data
with open("input/sample.csv", "r") as f:
content = f.readlines()
all_points = []
for line in content:
line = line.strip()
(x, y) = line.split(",")
x = float(x)
y = float(y)
all_points.append((x, y))
"""
RASTER clusters:
RASTER projects points to tiles and disregards the former after the
projection has been performed. Thus, it requires merely constant
space, assuming bounded integers or a bounded coordinate system like
the GPS coordinate system for our planet.
Input is projected to points that represent tiles.
"""
precision = 1
tau = 5 # threshold
min_size = 5
clusters, scalar = raster(all_points, precision, tau, min_size)
print("Number of clusters: ", len(clusters))
output = []
count = 1
for cluster in clusters:
for (x, y) in cluster:
x = x / scalar
y = y / scalar
output.append((count, x, y))
count += 1
f = open("output/clustered.csv", "w")
f.write("Cluster Number, X-Position, Y-Position\n")
for (num, x, y) in output:
f.write(str(num) + ", " + str(x) + ", " + str(y) + "\n")
f.close()
| 24.202381 | 72 | 0.636498 | """
Contraction Clustering (RASTER):
Reference Implementation in Python with an Example
(c) 2016 - 2020 Fraunhofer-Chalmers Centre for Industrial Mathematics
Algorithm development and implementation:
Gregor Ulm (gregor.ulm@fcc.chalmers.se)
Requirements:
. Python 3
For a description of the algorithm including relevant theory, please
consult our paper on Contraction Clustering (RASTER).
To run this script, type
> python3 raster.py
"""
import os
import clustering as c
def raster(all_points, precision, threshold, min_size):
## Step 1: Projection
(tiles, scalar) = c.map_to_tiles(all_points, precision, threshold)
## Step 2: Agglomeration
clusters = c.raster_clustering_tiles(tiles, min_size)
return (clusters, scalar)
if __name__ == "__main__":
# load input data
with open("input/sample.csv", "r") as f:
content = f.readlines()
all_points = []
for line in content:
line = line.strip()
(x, y) = line.split(",")
x = float(x)
y = float(y)
all_points.append((x, y))
"""
RASTER clusters:
RASTER projects points to tiles and disregards the former after the
projection has been performed. Thus, it requires merely constant
space, assuming bounded integers or a bounded coordinate system like
the GPS coordinate system for our planet.
Input is projected to points that represent tiles.
"""
precision = 1
tau = 5 # threshold
min_size = 5
clusters, scalar = raster(all_points, precision, tau, min_size)
print("Number of clusters: ", len(clusters))
output = []
count = 1
for cluster in clusters:
for (x, y) in cluster:
x = x / scalar
y = y / scalar
output.append((count, x, y))
count += 1
f = open("output/clustered.csv", "w")
f.write("Cluster Number, X-Position, Y-Position\n")
for (num, x, y) in output:
f.write(str(num) + ", " + str(x) + ", " + str(y) + "\n")
f.close()
| 251 | 0 | 23 |
0c5c22317589007c9b164054cbc49c53b81744af | 497 | py | Python | src/covid19model/visualization/__init__.py | jalarcon-ale/COVID19-Model | 3efaecf5ffc5d7fb6c47321a82aab0d8f3633765 | [
"MIT"
] | 22 | 2020-04-22T16:42:53.000Z | 2021-05-06T08:44:02.000Z | src/covid19model/visualization/__init__.py | jalarcon-ale/COVID19-Model | 3efaecf5ffc5d7fb6c47321a82aab0d8f3633765 | [
"MIT"
] | 90 | 2020-04-17T19:25:52.000Z | 2022-03-25T12:34:39.000Z | src/covid19model/visualization/__init__.py | jalarcon-ale/COVID19-Model | 3efaecf5ffc5d7fb6c47321a82aab0d8f3633765 | [
"MIT"
] | 26 | 2020-04-06T06:09:04.000Z | 2020-11-21T22:40:40.000Z | from .output import population_status,infected
from .utils import colorscale_okabe_ito
import matplotlib.pyplot as plt
__all__ = ["population_status", "infected"]
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
"font.size": 15,
"lines.linewidth" : 3,
"axes.spines.top": False,
"axes.spines.right": False,
"ytick.major.left": True,
"axes.grid": True
}) | 29.235294 | 71 | 0.65996 | from .output import population_status,infected
from .utils import colorscale_okabe_ito
import matplotlib.pyplot as plt
__all__ = ["population_status", "infected"]
# covid 19 specific parameters
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
"font.size": 15,
"lines.linewidth" : 3,
"axes.spines.top": False,
"axes.spines.right": False,
"ytick.major.left": True,
"axes.grid": True
}) | 0 | 0 | 0 |
073a9b8c171f15923673d3d9e06618b19fa5100c | 35 | py | Python | fortiosapi/__init__.py | barbosm/fortiosapi | 08695e49f7bccf46c990262b283c1b6e2dc489cc | [
"Apache-2.0"
] | null | null | null | fortiosapi/__init__.py | barbosm/fortiosapi | 08695e49f7bccf46c990262b283c1b6e2dc489cc | [
"Apache-2.0"
] | null | null | null | fortiosapi/__init__.py | barbosm/fortiosapi | 08695e49f7bccf46c990262b283c1b6e2dc489cc | [
"Apache-2.0"
] | null | null | null | from .fortiosapi import FortiOSAPI
| 17.5 | 34 | 0.857143 | from .fortiosapi import FortiOSAPI
| 0 | 0 | 0 |
e1b525e4677849c18229ec8a1935f283adc734a2 | 36,489 | py | Python | PyZ3950/oids.py | entu/entu-ester | 9489bab537c9f2186155b7a02ed2b08b68d01c11 | [
"MIT"
] | 21 | 2015-07-08T16:54:06.000Z | 2021-11-30T11:31:03.000Z | PyZ3950/oids.py | entu/entu-ester | 9489bab537c9f2186155b7a02ed2b08b68d01c11 | [
"MIT"
] | 16 | 2015-11-07T12:17:11.000Z | 2022-03-30T03:26:04.000Z | PyZ3950/oids.py | entu/entu-ester | 9489bab537c9f2186155b7a02ed2b08b68d01c11 | [
"MIT"
] | 19 | 2015-03-11T17:17:02.000Z | 2021-11-30T13:20:08.000Z | from PyZ3950 import asn1
oids = {}
oids['Z3950'] = {'oid': asn1.OidVal([1, 2, 840, 10003]), 'val': [1, 2, 840, 10003]}
oids['Z3950']['ATTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3]), 'val': [1, 2, 840, 10003, 3]}
oids['Z3950']['DIAG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4]), 'val': [1, 2, 840, 10003, 4]}
oids['Z3950']['RECSYN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5]), 'val': [1, 2, 840, 10003, 5]}
oids['Z3950']['TRANSFER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 6]), 'val': [1, 2, 840, 10003, 6]}
oids['Z3950']['RRF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7]), 'val': [1, 2, 840, 10003, 7]}
oids['Z3950']['ACCESS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8]), 'val': [1, 2, 840, 10003, 8]}
oids['Z3950']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9]), 'val': [1, 2, 840, 10003, 9]}
oids['Z3950']['USR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10]), 'val': [1, 2, 840, 10003, 10]}
oids['Z3950']['SPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11]), 'val': [1, 2, 840, 10003, 11]}
oids['Z3950']['VAR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12]), 'val': [1, 2, 840, 10003, 12]}
oids['Z3950']['SCHEMA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13]), 'val': [1, 2, 840, 10003, 13]}
oids['Z3950']['TAGSET'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14]), 'val': [1, 2, 840, 10003, 14]}
oids['Z3950']['NEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15]), 'val': [1, 2, 840, 10003, 15]}
oids['Z3950']['QUERY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16]), 'val': [1, 2, 840, 10003, 16]}
oids['Z3950']['ATTRS']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 1]), 'val': [1, 2, 840, 10003, 3, 1]}
oids['Z3950']['ATTRS']['EXP1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 2]), 'val': [1, 2, 840, 10003, 3, 2]}
oids['Z3950']['ATTRS']['EXT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 3]), 'val': [1, 2, 840, 10003, 3, 3]}
oids['Z3950']['ATTRS']['CCL1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 4]), 'val': [1, 2, 840, 10003, 3, 4]}
oids['Z3950']['ATTRS']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 5]), 'val': [1, 2, 840, 10003, 3, 5]}
oids['Z3950']['ATTRS']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 6]), 'val': [1, 2, 840, 10003, 3, 6]}
oids['Z3950']['ATTRS']['COLLECTIONS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 7]), 'val': [1, 2, 840, 10003, 3, 7]}
oids['Z3950']['ATTRS']['CIMI1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 8]), 'val': [1, 2, 840, 10003, 3, 8]}
oids['Z3950']['ATTRS']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 9]), 'val': [1, 2, 840, 10003, 3, 9]}
oids['Z3950']['ATTRS']['ZBIG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 10]), 'val': [1, 2, 840, 10003, 3, 10]}
oids['Z3950']['ATTRS']['UTIL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 11]), 'val': [1, 2, 840, 10003, 3, 11]}
oids['Z3950']['ATTRS']['XD1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 12]), 'val': [1, 2, 840, 10003, 3, 12]}
oids['Z3950']['ATTRS']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 13]), 'val': [1, 2, 840, 10003, 3, 13]}
oids['Z3950']['ATTRS']['FIN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 14]), 'val': [1, 2, 840, 10003, 3, 14]}
oids['Z3950']['ATTRS']['DAN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 15]), 'val': [1, 2, 840, 10003, 3, 15]}
oids['Z3950']['ATTRS']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 16]), 'val': [1, 2, 840, 10003, 3, 16]}
oids['Z3950']['ATTRS']['MARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 17]), 'val': [1, 2, 840, 10003, 3, 17]}
oids['Z3950']['ATTRS']['BIB2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 18]), 'val': [1, 2, 840, 10003, 3, 18]}
oids['Z3950']['ATTRS']['ZEEREX'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 19]), 'val': [1, 2, 840, 10003, 3, 19]}
oids['Z3950']['DIAG']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 1]), 'val': [1, 2, 840, 10003, 4, 1]}
oids['Z3950']['DIAG']['DIAG1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 2]), 'val': [1, 2, 840, 10003, 4, 2]}
oids['Z3950']['DIAG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 3]), 'val': [1, 2, 840, 10003, 4, 3]}
oids['Z3950']['DIAG']['GENERAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 4]), 'val': [1, 2, 840, 10003, 4, 4]}
oids['Z3950']['RECSYN']['UNIMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 1]), 'val': [1, 2, 840, 10003, 5, 1]}
oids['Z3950']['RECSYN']['INTERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 2]), 'val': [1, 2, 840, 10003, 5, 2]}
oids['Z3950']['RECSYN']['CCF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 3]), 'val': [1, 2, 840, 10003, 5, 3]}
oids['Z3950']['RECSYN']['USMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10]), 'val': [1, 2, 840, 10003, 5, 10]}
oids['Z3950']['RECSYN']['USMARC']['BIBLIO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 1]), 'val': [1, 2, 840, 10003, 5, 10, 1]}
oids['Z3950']['RECSYN']['USMARC']['AUTH'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 2]), 'val': [1, 2, 840, 10003, 5, 10, 2]}
oids['Z3950']['RECSYN']['USMARC']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 3]), 'val': [1, 2, 840, 10003, 5, 10, 3]}
oids['Z3950']['RECSYN']['USMARC']['COMMUNITY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 4]), 'val': [1, 2, 840, 10003, 5, 10, 4]}
oids['Z3950']['RECSYN']['USMARC']['CLASS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 5]), 'val': [1, 2, 840, 10003, 5, 10, 5]}
oids['Z3950']['RECSYN']['UKMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 11]), 'val': [1, 2, 840, 10003, 5, 11]}
oids['Z3950']['RECSYN']['NORMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 12]), 'val': [1, 2, 840, 10003, 5, 12]}
oids['Z3950']['RECSYN']['LIBRISMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 13]), 'val': [1, 2, 840, 10003, 5, 13]}
oids['Z3950']['RECSYN']['DANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 14]), 'val': [1, 2, 840, 10003, 5, 14]}
oids['Z3950']['RECSYN']['FINMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 15]), 'val': [1, 2, 840, 10003, 5, 15]}
oids['Z3950']['RECSYN']['MAB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 16]), 'val': [1, 2, 840, 10003, 5, 16]}
oids['Z3950']['RECSYN']['CANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 17]), 'val': [1, 2, 840, 10003, 5, 17]}
oids['Z3950']['RECSYN']['SBNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 18]), 'val': [1, 2, 840, 10003, 5, 18]}
oids['Z3950']['RECSYN']['PICAMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 19]), 'val': [1, 2, 840, 10003, 5, 19]}
oids['Z3950']['RECSYN']['AUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 20]), 'val': [1, 2, 840, 10003, 5, 20]}
oids['Z3950']['RECSYN']['IBERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 21]), 'val': [1, 2, 840, 10003, 5, 21]}
oids['Z3950']['RECSYN']['CATMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 22]), 'val': [1, 2, 840, 10003, 5, 22]}
oids['Z3950']['RECSYN']['MALMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 23]), 'val': [1, 2, 840, 10003, 5, 23]}
oids['Z3950']['RECSYN']['JPMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 24]), 'val': [1, 2, 840, 10003, 5, 24]}
oids['Z3950']['RECSYN']['SWEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 25]), 'val': [1, 2, 840, 10003, 5, 25]}
oids['Z3950']['RECSYN']['SIGLEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 26]), 'val': [1, 2, 840, 10003, 5, 26]}
oids['Z3950']['RECSYN']['ISDSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 27]), 'val': [1, 2, 840, 10003, 5, 27]}
oids['Z3950']['RECSYN']['RUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 28]), 'val': [1, 2, 840, 10003, 5, 28]}
oids['Z3950']['RECSYN']['HUNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 29]), 'val': [1, 2, 840, 10003, 5, 29]}
oids['Z3950']['RECSYN']['NACSISCATP'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 30]), 'val': [1, 2, 840, 10003, 5, 30]}
oids['Z3950']['RECSYN']['FINMARC2000'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 31]), 'val': [1, 2, 840, 10003, 5, 31]}
oids['Z3950']['RECSYN']['MARC21FIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 32]), 'val': [1, 2, 840, 10003, 5, 32]}
oids['Z3950']['RECSYN']['COMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 33]), 'val': [1, 2, 840, 10003, 5, 33]}
oids['Z3950']['RECSYN']['EXPLAIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 100]), 'val': [1, 2, 840, 10003, 5, 100]}
oids['Z3950']['RECSYN']['SUTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 101]), 'val': [1, 2, 840, 10003, 5, 101]}
oids['Z3950']['RECSYN']['OPAC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 102]), 'val': [1, 2, 840, 10003, 5, 102]}
oids['Z3950']['RECSYN']['SUMMARY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 103]), 'val': [1, 2, 840, 10003, 5, 103]}
oids['Z3950']['RECSYN']['GRS0'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 104]), 'val': [1, 2, 840, 10003, 5, 104]}
oids['Z3950']['RECSYN']['GRS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 105]), 'val': [1, 2, 840, 10003, 5, 105]}
oids['Z3950']['RECSYN']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 106]), 'val': [1, 2, 840, 10003, 5, 106]}
oids['Z3950']['RECSYN']['FRAGMENT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 107]), 'val': [1, 2, 840, 10003, 5, 107]}
oids['Z3950']['RECSYN']['MIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109]), 'val': [1, 2, 840, 10003, 5, 109]}
oids['Z3950']['RECSYN']['MIME']['PDF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 1]), 'val': [1, 2, 840, 10003, 5, 109, 1]}
oids['Z3950']['RECSYN']['MIME']['POSTSCRIPT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 2]), 'val': [1, 2, 840, 10003, 5, 109, 2]}
oids['Z3950']['RECSYN']['MIME']['HTML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 3]), 'val': [1, 2, 840, 10003, 5, 109, 3]}
oids['Z3950']['RECSYN']['MIME']['TIFF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 4]), 'val': [1, 2, 840, 10003, 5, 109, 4]}
oids['Z3950']['RECSYN']['MIME']['GIF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 5]), 'val': [1, 2, 840, 10003, 5, 109, 5]}
oids['Z3950']['RECSYN']['MIME']['JPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 6]), 'val': [1, 2, 840, 10003, 5, 109, 6]}
oids['Z3950']['RECSYN']['MIME']['PNG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 7]), 'val': [1, 2, 840, 10003, 5, 109, 7]}
oids['Z3950']['RECSYN']['MIME']['MPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 8]), 'val': [1, 2, 840, 10003, 5, 109, 8]}
oids['Z3950']['RECSYN']['MIME']['SGML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 9]), 'val': [1, 2, 840, 10003, 5, 109, 9]}
oids['Z3950']['RECSYN']['MIME']['XML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 10]), 'val': [1, 2, 840, 10003, 5, 109, 10]}
oids['Z3950']['RECSYN']['ZMIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110]), 'val': [1, 2, 840, 10003, 5, 110]}
oids['Z3950']['RECSYN']['ZMIME']['TIFFB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 1]), 'val': [1, 2, 840, 10003, 5, 110, 1]}
oids['Z3950']['RECSYN']['ZMIME']['WAV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 2]), 'val': [1, 2, 840, 10003, 5, 110, 2]}
oids['Z3950']['RECSYN']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 111]), 'val': [1, 2, 840, 10003, 5, 111]}
oids['Z3950']['RRF']['RESOURCE1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 1]), 'val': [1, 2, 840, 10003, 7, 1]}
oids['Z3950']['RRF']['RESOURCE2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 2]), 'val': [1, 2, 840, 10003, 7, 2]}
oids['Z3950']['ACCESS']['PROMPT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 1]), 'val': [1, 2, 840, 10003, 8, 1]}
oids['Z3950']['ACCESS']['DES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 2]), 'val': [1, 2, 840, 10003, 8, 2]}
oids['Z3950']['ACCESS']['KRB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 3]), 'val': [1, 2, 840, 10003, 8, 3]}
oids['Z3950']['ES']['PERSISTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 1]), 'val': [1, 2, 840, 10003, 9, 1]}
oids['Z3950']['ES']['PERSISTQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 2]), 'val': [1, 2, 840, 10003, 9, 2]}
oids['Z3950']['ES']['PERIODQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 3]), 'val': [1, 2, 840, 10003, 9, 3]}
oids['Z3950']['ES']['ITEMORDER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 4]), 'val': [1, 2, 840, 10003, 9, 4]}
oids['Z3950']['ES']['DBUPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5]), 'val': [1, 2, 840, 10003, 9, 5]}
oids['Z3950']['ES']['DBUPDATE']['REV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1]}
oids['Z3950']['ES']['DBUPDATE']['REV']['1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1, 1]}
oids['Z3950']['ES']['EXPORTSPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 6]), 'val': [1, 2, 840, 10003, 9, 6]}
oids['Z3950']['ES']['EXPORTINV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 7]), 'val': [1, 2, 840, 10003, 9, 7]}
oids['Z3950']['USR']['SEARCHRES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1]), 'val': [1, 2, 840, 10003, 10, 1]}
oids['Z3950']['USR']['CHARSETNEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 2]), 'val': [1, 2, 840, 10003, 10, 2]}
oids['Z3950']['USR']['INFO1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 3]), 'val': [1, 2, 840, 10003, 10, 3]}
oids['Z3950']['USR']['SEARCHTERMS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 4]), 'val': [1, 2, 840, 10003, 10, 4]}
oids['Z3950']['USR']['SEARCHTERMS2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 5]), 'val': [1, 2, 840, 10003, 10, 5]}
oids['Z3950']['USR']['DATETIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 6]), 'val': [1, 2, 840, 10003, 10, 6]}
oids['Z3950']['USR']['INSERTACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 9]), 'val': [1, 2, 840, 10003, 10, 9]}
oids['Z3950']['USR']['EDITACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 10]), 'val': [1, 2, 840, 10003, 10, 10]}
oids['Z3950']['USR']['AUTHFILE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 11]), 'val': [1, 2, 840, 10003, 10, 11]}
oids['Z3950']['USR']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000]), 'val': [1, 2, 840, 10003, 10, 1000]}
oids['Z3950']['USR']['PRIVATE']['OCLC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17]), 'val': [1, 2, 840, 10003, 10, 1000, 17]}
oids['Z3950']['USR']['PRIVATE']['OCLC']['INFO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1]), 'val': [1, 2, 840, 10003, 10, 1000, 17, 1]}
oids['Z3950']['SPEC']['ESPEC1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 1]), 'val': [1, 2, 840, 10003, 11, 1]}
oids['Z3950']['SPEC']['ESPEC2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 2]), 'val': [1, 2, 840, 10003, 11, 2]}
oids['Z3950']['SPEC']['ESPECQ'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 3]), 'val': [1, 2, 840, 10003, 11, 3]}
oids['Z3950']['VAR']['VARIANT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12, 1]), 'val': [1, 2, 840, 10003, 12, 1]}
oids['Z3950']['SCHEMA']['WAIS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 2]), 'val': [1, 2, 840, 10003, 13, 2]}
oids['Z3950']['SCHEMA']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 3]), 'val': [1, 2, 840, 10003, 13, 3]}
oids['Z3950']['SCHEMA']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 4]), 'val': [1, 2, 840, 10003, 13, 4]}
oids['Z3950']['SCHEMA']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 5]), 'val': [1, 2, 840, 10003, 13, 5]}
oids['Z3950']['SCHEMA']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 6]), 'val': [1, 2, 840, 10003, 13, 6]}
oids['Z3950']['SCHEMA']['HOLDINGS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7]), 'val': [1, 2, 840, 10003, 13, 7]}
oids['Z3950']['SCHEMA']['HOLDINGS']['11'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 1]), 'val': [1, 2, 840, 10003, 13, 7, 1]}
oids['Z3950']['SCHEMA']['HOLDINGS']['12'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 2]), 'val': [1, 2, 840, 10003, 13, 7, 2]}
oids['Z3950']['SCHEMA']['HOLDINGS']['14'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 4]), 'val': [1, 2, 840, 10003, 13, 7, 4]}
oids['Z3950']['SCHEMA']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['INSERT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['EDIT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['TAGSET']['M'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 1]), 'val': [1, 2, 840, 10003, 14, 1]}
oids['Z3950']['TAGSET']['G'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 2]), 'val': [1, 2, 840, 10003, 14, 2]}
oids['Z3950']['TAGSET']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 3]), 'val': [1, 2, 840, 10003, 14, 3]}
oids['Z3950']['TAGSET']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 4]), 'val': [1, 2, 840, 10003, 14, 4]}
oids['Z3950']['TAGSET']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 5]), 'val': [1, 2, 840, 10003, 14, 5]}
oids['Z3950']['TAGSET']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 6]), 'val': [1, 2, 840, 10003, 14, 6]}
oids['Z3950']['TAGSET']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 7]), 'val': [1, 2, 840, 10003, 14, 7]}
oids['Z3950']['TAGSET']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 8]), 'val': [1, 2, 840, 10003, 14, 8]}
oids['Z3950']['NEG']['CHARSET2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1]), 'val': [1, 2, 840, 10003, 15, 1]}
oids['Z3950']['NEG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 2]), 'val': [1, 2, 840, 10003, 15, 2]}
oids['Z3950']['NEG']['CHARSET3'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 3]), 'val': [1, 2, 840, 10003, 15, 3]}
oids['Z3950']['NEG']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000]), 'val': [1, 2, 840, 10003, 15, 1000]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81]), 'val': [1, 2, 840, 10003, 15, 1000, 81]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA']['CHARSETNAME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1]), 'val': [1, 2, 840, 10003, 15, 1000, 81, 1]}
oids['Z3950']['QUERY']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 1]), 'val': [1, 2, 840, 10003, 16, 1]}
oids['Z3950']['QUERY']['CQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 2]), 'val': [1, 2, 840, 10003, 16, 2]}
oids['UNICODE'] = {'oid': asn1.OidVal([1, 0, 10646]), 'val': [1, 0, 10646]}
oids['UNICODE']['PART1'] = {'oid': asn1.OidVal([1, 0, 10646, 1]), 'val': [1, 0, 10646, 1]}
oids['UNICODE']['PART1']['XFERSYN'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0]), 'val': [1, 0, 10646, 1, 0]}
oids['UNICODE']['PART1']['XFERSYN']['UCS2'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 2]), 'val': [1, 0, 10646, 1, 0, 2]}
oids['UNICODE']['PART1']['XFERSYN']['UCS4'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 4]), 'val': [1, 0, 10646, 1, 0, 4]}
oids['UNICODE']['PART1']['XFERSYN']['UTF16'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 5]), 'val': [1, 0, 10646, 1, 0, 5]}
oids['UNICODE']['PART1']['XFERSYN']['UTF8'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 8]), 'val': [1, 0, 10646, 1, 0, 8]}
UNICODE = [1, 0, 10646]
UNICODE_ov = asn1.OidVal([1, 0, 10646])
UNICODE_PART1 = [1, 0, 10646, 1]
UNICODE_PART1_ov = asn1.OidVal([1, 0, 10646, 1])
UNICODE_PART1_XFERSYN = [1, 0, 10646, 1, 0]
UNICODE_PART1_XFERSYN_ov = asn1.OidVal([1, 0, 10646, 1, 0])
UNICODE_PART1_XFERSYN_UCS2 = [1, 0, 10646, 1, 0, 2]
UNICODE_PART1_XFERSYN_UCS2_ov = asn1.OidVal([1, 0, 10646, 1, 0, 2])
UNICODE_PART1_XFERSYN_UCS4 = [1, 0, 10646, 1, 0, 4]
UNICODE_PART1_XFERSYN_UCS4_ov = asn1.OidVal([1, 0, 10646, 1, 0, 4])
UNICODE_PART1_XFERSYN_UTF16 = [1, 0, 10646, 1, 0, 5]
UNICODE_PART1_XFERSYN_UTF16_ov = asn1.OidVal([1, 0, 10646, 1, 0, 5])
UNICODE_PART1_XFERSYN_UTF8 = [1, 0, 10646, 1, 0, 8]
UNICODE_PART1_XFERSYN_UTF8_ov = asn1.OidVal([1, 0, 10646, 1, 0, 8])
Z3950 = [1, 2, 840, 10003]
Z3950_ov = asn1.OidVal([1, 2, 840, 10003])
Z3950_ACCESS = [1, 2, 840, 10003, 8]
Z3950_ACCESS_ov = asn1.OidVal([1, 2, 840, 10003, 8])
Z3950_ACCESS_DES1 = [1, 2, 840, 10003, 8, 2]
Z3950_ACCESS_DES1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 2])
Z3950_ACCESS_KRB1 = [1, 2, 840, 10003, 8, 3]
Z3950_ACCESS_KRB1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 3])
Z3950_ACCESS_PROMPT1 = [1, 2, 840, 10003, 8, 1]
Z3950_ACCESS_PROMPT1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 1])
Z3950_ATTRS = [1, 2, 840, 10003, 3]
Z3950_ATTRS_ov = asn1.OidVal([1, 2, 840, 10003, 3])
Z3950_ATTRS_BIB1 = [1, 2, 840, 10003, 3, 1]
Z3950_ATTRS_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 1])
Z3950_ATTRS_BIB2 = [1, 2, 840, 10003, 3, 18]
Z3950_ATTRS_BIB2_ov = asn1.OidVal([1, 2, 840, 10003, 3, 18])
Z3950_ATTRS_CCL1 = [1, 2, 840, 10003, 3, 4]
Z3950_ATTRS_CCL1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 4])
Z3950_ATTRS_CIMI1 = [1, 2, 840, 10003, 3, 8]
Z3950_ATTRS_CIMI1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 8])
Z3950_ATTRS_COLLECTIONS1 = [1, 2, 840, 10003, 3, 7]
Z3950_ATTRS_COLLECTIONS1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 7])
Z3950_ATTRS_DAN1 = [1, 2, 840, 10003, 3, 15]
Z3950_ATTRS_DAN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 15])
Z3950_ATTRS_EXP1 = [1, 2, 840, 10003, 3, 2]
Z3950_ATTRS_EXP1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 2])
Z3950_ATTRS_EXT1 = [1, 2, 840, 10003, 3, 3]
Z3950_ATTRS_EXT1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 3])
Z3950_ATTRS_FIN1 = [1, 2, 840, 10003, 3, 14]
Z3950_ATTRS_FIN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 14])
Z3950_ATTRS_GEO = [1, 2, 840, 10003, 3, 9]
Z3950_ATTRS_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 3, 9])
Z3950_ATTRS_GILS = [1, 2, 840, 10003, 3, 5]
Z3950_ATTRS_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 5])
Z3950_ATTRS_HOLD = [1, 2, 840, 10003, 3, 16]
Z3950_ATTRS_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 3, 16])
Z3950_ATTRS_MARC = [1, 2, 840, 10003, 3, 17]
Z3950_ATTRS_MARC_ov = asn1.OidVal([1, 2, 840, 10003, 3, 17])
Z3950_ATTRS_STAS = [1, 2, 840, 10003, 3, 6]
Z3950_ATTRS_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 6])
Z3950_ATTRS_UTIL = [1, 2, 840, 10003, 3, 11]
Z3950_ATTRS_UTIL_ov = asn1.OidVal([1, 2, 840, 10003, 3, 11])
Z3950_ATTRS_XD1 = [1, 2, 840, 10003, 3, 12]
Z3950_ATTRS_XD1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 12])
Z3950_ATTRS_ZBIG = [1, 2, 840, 10003, 3, 10]
Z3950_ATTRS_ZBIG_ov = asn1.OidVal([1, 2, 840, 10003, 3, 10])
Z3950_ATTRS_ZEEREX = [1, 2, 840, 10003, 3, 19]
Z3950_ATTRS_ZEEREX_ov = asn1.OidVal([1, 2, 840, 10003, 3, 19])
Z3950_ATTRS_ZTHES = [1, 2, 840, 10003, 3, 13]
Z3950_ATTRS_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 3, 13])
Z3950_DIAG = [1, 2, 840, 10003, 4]
Z3950_DIAG_ov = asn1.OidVal([1, 2, 840, 10003, 4])
Z3950_DIAG_BIB1 = [1, 2, 840, 10003, 4, 1]
Z3950_DIAG_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 1])
Z3950_DIAG_DIAG1 = [1, 2, 840, 10003, 4, 2]
Z3950_DIAG_DIAG1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 2])
Z3950_DIAG_ES = [1, 2, 840, 10003, 4, 3]
Z3950_DIAG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 4, 3])
Z3950_DIAG_GENERAL = [1, 2, 840, 10003, 4, 4]
Z3950_DIAG_GENERAL_ov = asn1.OidVal([1, 2, 840, 10003, 4, 4])
Z3950_ES = [1, 2, 840, 10003, 9]
Z3950_ES_ov = asn1.OidVal([1, 2, 840, 10003, 9])
Z3950_ES_DBUPDATE = [1, 2, 840, 10003, 9, 5]
Z3950_ES_DBUPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5])
Z3950_ES_DBUPDATE_REV = [1, 2, 840, 10003, 9, 5, 1]
Z3950_ES_DBUPDATE_REV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1])
Z3950_ES_DBUPDATE_REV_1 = [1, 2, 840, 10003, 9, 5, 1, 1]
Z3950_ES_DBUPDATE_REV_1_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1])
Z3950_ES_EXPORTINV = [1, 2, 840, 10003, 9, 7]
Z3950_ES_EXPORTINV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 7])
Z3950_ES_EXPORTSPEC = [1, 2, 840, 10003, 9, 6]
Z3950_ES_EXPORTSPEC_ov = asn1.OidVal([1, 2, 840, 10003, 9, 6])
Z3950_ES_ITEMORDER = [1, 2, 840, 10003, 9, 4]
Z3950_ES_ITEMORDER_ov = asn1.OidVal([1, 2, 840, 10003, 9, 4])
Z3950_ES_PERIODQRY = [1, 2, 840, 10003, 9, 3]
Z3950_ES_PERIODQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 3])
Z3950_ES_PERSISTQRY = [1, 2, 840, 10003, 9, 2]
Z3950_ES_PERSISTQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 2])
Z3950_ES_PERSISTRS = [1, 2, 840, 10003, 9, 1]
Z3950_ES_PERSISTRS_ov = asn1.OidVal([1, 2, 840, 10003, 9, 1])
Z3950_NEG = [1, 2, 840, 10003, 15]
Z3950_NEG_ov = asn1.OidVal([1, 2, 840, 10003, 15])
Z3950_NEG_CHARSET2 = [1, 2, 840, 10003, 15, 1]
Z3950_NEG_CHARSET2_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1])
Z3950_NEG_CHARSET3 = [1, 2, 840, 10003, 15, 3]
Z3950_NEG_CHARSET3_ov = asn1.OidVal([1, 2, 840, 10003, 15, 3])
Z3950_NEG_ES = [1, 2, 840, 10003, 15, 2]
Z3950_NEG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 15, 2])
Z3950_NEG_PRIVATE = [1, 2, 840, 10003, 15, 1000]
Z3950_NEG_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000])
Z3950_NEG_PRIVATE_INDEXDATA = [1, 2, 840, 10003, 15, 1000, 81]
Z3950_NEG_PRIVATE_INDEXDATA_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81])
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME = [1, 2, 840, 10003, 15, 1000, 81, 1]
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1])
Z3950_QUERY = [1, 2, 840, 10003, 16]
Z3950_QUERY_ov = asn1.OidVal([1, 2, 840, 10003, 16])
Z3950_QUERY_CQL = [1, 2, 840, 10003, 16, 2]
Z3950_QUERY_CQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 2])
Z3950_QUERY_SQL = [1, 2, 840, 10003, 16, 1]
Z3950_QUERY_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 1])
Z3950_RECSYN = [1, 2, 840, 10003, 5]
Z3950_RECSYN_ov = asn1.OidVal([1, 2, 840, 10003, 5])
Z3950_RECSYN_AUSMARC = [1, 2, 840, 10003, 5, 20]
Z3950_RECSYN_AUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 20])
Z3950_RECSYN_CANMARC = [1, 2, 840, 10003, 5, 17]
Z3950_RECSYN_CANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 17])
Z3950_RECSYN_CATMARC = [1, 2, 840, 10003, 5, 22]
Z3950_RECSYN_CATMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 22])
Z3950_RECSYN_CCF = [1, 2, 840, 10003, 5, 3]
Z3950_RECSYN_CCF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 3])
Z3950_RECSYN_COMARC = [1, 2, 840, 10003, 5, 33]
Z3950_RECSYN_COMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 33])
Z3950_RECSYN_DANMARC = [1, 2, 840, 10003, 5, 14]
Z3950_RECSYN_DANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 14])
Z3950_RECSYN_ES = [1, 2, 840, 10003, 5, 106]
Z3950_RECSYN_ES_ov = asn1.OidVal([1, 2, 840, 10003, 5, 106])
Z3950_RECSYN_EXPLAIN = [1, 2, 840, 10003, 5, 100]
Z3950_RECSYN_EXPLAIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 100])
Z3950_RECSYN_FINMARC = [1, 2, 840, 10003, 5, 15]
Z3950_RECSYN_FINMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 15])
Z3950_RECSYN_FINMARC2000 = [1, 2, 840, 10003, 5, 31]
Z3950_RECSYN_FINMARC2000_ov = asn1.OidVal([1, 2, 840, 10003, 5, 31])
# --- Z39.50 OID constant table (generated; flat NAME / NAME_ov pairs) --------
# Each OID appears twice: NAME holds the OID as a plain list of integer arcs,
# and NAME_ov holds the same arcs wrapped in asn1.OidVal.

# Z3950_RECSYN_*: record-syntax OIDs under arc 1.2.840.10003.5
Z3950_RECSYN_FRAGMENT = [1, 2, 840, 10003, 5, 107]
Z3950_RECSYN_FRAGMENT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 107])
Z3950_RECSYN_GRS0 = [1, 2, 840, 10003, 5, 104]
Z3950_RECSYN_GRS0_ov = asn1.OidVal([1, 2, 840, 10003, 5, 104])
Z3950_RECSYN_GRS1 = [1, 2, 840, 10003, 5, 105]
Z3950_RECSYN_GRS1_ov = asn1.OidVal([1, 2, 840, 10003, 5, 105])
Z3950_RECSYN_HUNMARC = [1, 2, 840, 10003, 5, 29]
Z3950_RECSYN_HUNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 29])
Z3950_RECSYN_IBERMARC = [1, 2, 840, 10003, 5, 21]
Z3950_RECSYN_IBERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 21])
Z3950_RECSYN_INTERMARC = [1, 2, 840, 10003, 5, 2]
Z3950_RECSYN_INTERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 2])
Z3950_RECSYN_ISDSMARC = [1, 2, 840, 10003, 5, 27]
Z3950_RECSYN_ISDSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 27])
Z3950_RECSYN_JPMARC = [1, 2, 840, 10003, 5, 24]
Z3950_RECSYN_JPMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 24])
Z3950_RECSYN_LIBRISMARC = [1, 2, 840, 10003, 5, 13]
Z3950_RECSYN_LIBRISMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 13])
Z3950_RECSYN_MAB = [1, 2, 840, 10003, 5, 16]
Z3950_RECSYN_MAB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 16])
Z3950_RECSYN_MALMARC = [1, 2, 840, 10003, 5, 23]
Z3950_RECSYN_MALMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 23])
Z3950_RECSYN_MARC21FIN = [1, 2, 840, 10003, 5, 32]
Z3950_RECSYN_MARC21FIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 32])
# MIME record syntaxes live one arc deeper, under ...5.109.*
Z3950_RECSYN_MIME = [1, 2, 840, 10003, 5, 109]
Z3950_RECSYN_MIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109])
Z3950_RECSYN_MIME_GIF = [1, 2, 840, 10003, 5, 109, 5]
Z3950_RECSYN_MIME_GIF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 5])
Z3950_RECSYN_MIME_HTML = [1, 2, 840, 10003, 5, 109, 3]
Z3950_RECSYN_MIME_HTML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 3])
Z3950_RECSYN_MIME_JPEG = [1, 2, 840, 10003, 5, 109, 6]
Z3950_RECSYN_MIME_JPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 6])
Z3950_RECSYN_MIME_MPEG = [1, 2, 840, 10003, 5, 109, 8]
Z3950_RECSYN_MIME_MPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 8])
Z3950_RECSYN_MIME_PDF = [1, 2, 840, 10003, 5, 109, 1]
Z3950_RECSYN_MIME_PDF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 1])
Z3950_RECSYN_MIME_PNG = [1, 2, 840, 10003, 5, 109, 7]
Z3950_RECSYN_MIME_PNG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 7])
Z3950_RECSYN_MIME_POSTSCRIPT = [1, 2, 840, 10003, 5, 109, 2]
Z3950_RECSYN_MIME_POSTSCRIPT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 2])
Z3950_RECSYN_MIME_SGML = [1, 2, 840, 10003, 5, 109, 9]
Z3950_RECSYN_MIME_SGML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 9])
Z3950_RECSYN_MIME_TIFF = [1, 2, 840, 10003, 5, 109, 4]
Z3950_RECSYN_MIME_TIFF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 4])
Z3950_RECSYN_MIME_XML = [1, 2, 840, 10003, 5, 109, 10]
Z3950_RECSYN_MIME_XML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 10])
Z3950_RECSYN_NACSISCATP = [1, 2, 840, 10003, 5, 30]
Z3950_RECSYN_NACSISCATP_ov = asn1.OidVal([1, 2, 840, 10003, 5, 30])
Z3950_RECSYN_NORMARC = [1, 2, 840, 10003, 5, 12]
Z3950_RECSYN_NORMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 12])
Z3950_RECSYN_OPAC = [1, 2, 840, 10003, 5, 102]
Z3950_RECSYN_OPAC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 102])
Z3950_RECSYN_PICAMARC = [1, 2, 840, 10003, 5, 19]
Z3950_RECSYN_PICAMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 19])
Z3950_RECSYN_RUSMARC = [1, 2, 840, 10003, 5, 28]
Z3950_RECSYN_RUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 28])
Z3950_RECSYN_SBNMARC = [1, 2, 840, 10003, 5, 18]
Z3950_RECSYN_SBNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 18])
Z3950_RECSYN_SIGLEMARC = [1, 2, 840, 10003, 5, 26]
Z3950_RECSYN_SIGLEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 26])
Z3950_RECSYN_SQL = [1, 2, 840, 10003, 5, 111]
Z3950_RECSYN_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 5, 111])
Z3950_RECSYN_SUMMARY = [1, 2, 840, 10003, 5, 103]
Z3950_RECSYN_SUMMARY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 103])
Z3950_RECSYN_SUTRS = [1, 2, 840, 10003, 5, 101]
Z3950_RECSYN_SUTRS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 101])
Z3950_RECSYN_SWEMARC = [1, 2, 840, 10003, 5, 25]
Z3950_RECSYN_SWEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 25])
Z3950_RECSYN_UKMARC = [1, 2, 840, 10003, 5, 11]
Z3950_RECSYN_UKMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 11])
Z3950_RECSYN_UNIMARC = [1, 2, 840, 10003, 5, 1]
Z3950_RECSYN_UNIMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 1])
# USMARC sub-arcs (...5.10.*) distinguish the individual USMARC record types.
Z3950_RECSYN_USMARC = [1, 2, 840, 10003, 5, 10]
Z3950_RECSYN_USMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10])
Z3950_RECSYN_USMARC_AUTH = [1, 2, 840, 10003, 5, 10, 2]
Z3950_RECSYN_USMARC_AUTH_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 2])
Z3950_RECSYN_USMARC_BIBLIO = [1, 2, 840, 10003, 5, 10, 1]
Z3950_RECSYN_USMARC_BIBLIO_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 1])
Z3950_RECSYN_USMARC_CLASS = [1, 2, 840, 10003, 5, 10, 5]
Z3950_RECSYN_USMARC_CLASS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 5])
Z3950_RECSYN_USMARC_COMMUNITY = [1, 2, 840, 10003, 5, 10, 4]
Z3950_RECSYN_USMARC_COMMUNITY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 4])
Z3950_RECSYN_USMARC_HOLD = [1, 2, 840, 10003, 5, 10, 3]
Z3950_RECSYN_USMARC_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 3])
Z3950_RECSYN_ZMIME = [1, 2, 840, 10003, 5, 110]
Z3950_RECSYN_ZMIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110])
Z3950_RECSYN_ZMIME_TIFFB = [1, 2, 840, 10003, 5, 110, 1]
Z3950_RECSYN_ZMIME_TIFFB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 1])
Z3950_RECSYN_ZMIME_WAV = [1, 2, 840, 10003, 5, 110, 2]
Z3950_RECSYN_ZMIME_WAV_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 2])
# Z3950_RRF_*: resource-report formats under arc 1.2.840.10003.7
Z3950_RRF = [1, 2, 840, 10003, 7]
Z3950_RRF_ov = asn1.OidVal([1, 2, 840, 10003, 7])
Z3950_RRF_RESOURCE1 = [1, 2, 840, 10003, 7, 1]
Z3950_RRF_RESOURCE1_ov = asn1.OidVal([1, 2, 840, 10003, 7, 1])
Z3950_RRF_RESOURCE2 = [1, 2, 840, 10003, 7, 2]
Z3950_RRF_RESOURCE2_ov = asn1.OidVal([1, 2, 840, 10003, 7, 2])
# Z3950_SCHEMA_*: database schemas under arc 1.2.840.10003.13
Z3950_SCHEMA = [1, 2, 840, 10003, 13]
Z3950_SCHEMA_ov = asn1.OidVal([1, 2, 840, 10003, 13])
Z3950_SCHEMA_CIMI = [1, 2, 840, 10003, 13, 5]
Z3950_SCHEMA_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 13, 5])
Z3950_SCHEMA_COLLECTIONS = [1, 2, 840, 10003, 13, 3]
Z3950_SCHEMA_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 3])
# NOTE(review): SCHEMA_EDIT, SCHEMA_INSERT, SCHEMA_WAIS and SCHEMA_ZTHES below
# all share the same arc [..., 13, 1] — likely a generator artifact; confirm
# the intended arcs against the Z39.50 OID register before relying on them.
Z3950_SCHEMA_EDIT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_EDIT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_GEO = [1, 2, 840, 10003, 13, 4]
Z3950_SCHEMA_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 13, 4])
Z3950_SCHEMA_GILS = [1, 2, 840, 10003, 13, 2]
Z3950_SCHEMA_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 2])
Z3950_SCHEMA_HOLDINGS = [1, 2, 840, 10003, 13, 7]
Z3950_SCHEMA_HOLDINGS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7])
Z3950_SCHEMA_HOLDINGS_11 = [1, 2, 840, 10003, 13, 7, 1]
Z3950_SCHEMA_HOLDINGS_11_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 1])
Z3950_SCHEMA_HOLDINGS_12 = [1, 2, 840, 10003, 13, 7, 2]
Z3950_SCHEMA_HOLDINGS_12_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 2])
Z3950_SCHEMA_HOLDINGS_14 = [1, 2, 840, 10003, 13, 7, 4]
Z3950_SCHEMA_HOLDINGS_14_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 4])
Z3950_SCHEMA_INSERT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_INSERT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_UPDATE = [1, 2, 840, 10003, 13, 6]
Z3950_SCHEMA_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 13, 6])
Z3950_SCHEMA_WAIS = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_WAIS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_ZTHES = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
# Z3950_SPEC_*: element specifications under arc 1.2.840.10003.11
Z3950_SPEC = [1, 2, 840, 10003, 11]
Z3950_SPEC_ov = asn1.OidVal([1, 2, 840, 10003, 11])
Z3950_SPEC_ESPEC1 = [1, 2, 840, 10003, 11, 1]
Z3950_SPEC_ESPEC1_ov = asn1.OidVal([1, 2, 840, 10003, 11, 1])
Z3950_SPEC_ESPEC2 = [1, 2, 840, 10003, 11, 2]
Z3950_SPEC_ESPEC2_ov = asn1.OidVal([1, 2, 840, 10003, 11, 2])
Z3950_SPEC_ESPECQ = [1, 2, 840, 10003, 11, 3]
Z3950_SPEC_ESPECQ_ov = asn1.OidVal([1, 2, 840, 10003, 11, 3])
# Z3950_TAGSET_*: tag sets under arc 1.2.840.10003.14
Z3950_TAGSET = [1, 2, 840, 10003, 14]
Z3950_TAGSET_ov = asn1.OidVal([1, 2, 840, 10003, 14])
Z3950_TAGSET_CIMI = [1, 2, 840, 10003, 14, 6]
Z3950_TAGSET_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 14, 6])
Z3950_TAGSET_COLLECTIONS = [1, 2, 840, 10003, 14, 5]
Z3950_TAGSET_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 5])
Z3950_TAGSET_G = [1, 2, 840, 10003, 14, 2]
Z3950_TAGSET_G_ov = asn1.OidVal([1, 2, 840, 10003, 14, 2])
Z3950_TAGSET_GILS = [1, 2, 840, 10003, 14, 4]
Z3950_TAGSET_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 4])
Z3950_TAGSET_M = [1, 2, 840, 10003, 14, 1]
Z3950_TAGSET_M_ov = asn1.OidVal([1, 2, 840, 10003, 14, 1])
Z3950_TAGSET_STAS = [1, 2, 840, 10003, 14, 3]
Z3950_TAGSET_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 3])
Z3950_TAGSET_UPDATE = [1, 2, 840, 10003, 14, 7]
Z3950_TAGSET_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 14, 7])
Z3950_TAGSET_ZTHES = [1, 2, 840, 10003, 14, 8]
Z3950_TAGSET_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 14, 8])
# Arc 1.2.840.10003.6 (TRANSFER) — base arc only, no children defined here.
Z3950_TRANSFER = [1, 2, 840, 10003, 6]
Z3950_TRANSFER_ov = asn1.OidVal([1, 2, 840, 10003, 6])
# Z3950_USR_*: user-information formats under arc 1.2.840.10003.10
Z3950_USR = [1, 2, 840, 10003, 10]
Z3950_USR_ov = asn1.OidVal([1, 2, 840, 10003, 10])
Z3950_USR_AUTHFILE = [1, 2, 840, 10003, 10, 11]
Z3950_USR_AUTHFILE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 11])
Z3950_USR_CHARSETNEG = [1, 2, 840, 10003, 10, 2]
Z3950_USR_CHARSETNEG_ov = asn1.OidVal([1, 2, 840, 10003, 10, 2])
Z3950_USR_DATETIME = [1, 2, 840, 10003, 10, 6]
Z3950_USR_DATETIME_ov = asn1.OidVal([1, 2, 840, 10003, 10, 6])
Z3950_USR_EDITACTIONQUAL = [1, 2, 840, 10003, 10, 10]
Z3950_USR_EDITACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 10])
Z3950_USR_INFO1 = [1, 2, 840, 10003, 10, 3]
Z3950_USR_INFO1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 3])
Z3950_USR_INSERTACTIONQUAL = [1, 2, 840, 10003, 10, 9]
Z3950_USR_INSERTACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 9])
# Private (vendor) arcs branch at 1000, e.g. ...10.1000.17 for OCLC.
Z3950_USR_PRIVATE = [1, 2, 840, 10003, 10, 1000]
Z3950_USR_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000])
Z3950_USR_PRIVATE_OCLC = [1, 2, 840, 10003, 10, 1000, 17]
Z3950_USR_PRIVATE_OCLC_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17])
Z3950_USR_PRIVATE_OCLC_INFO = [1, 2, 840, 10003, 10, 1000, 17, 1]
Z3950_USR_PRIVATE_OCLC_INFO_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1])
Z3950_USR_SEARCHRES1 = [1, 2, 840, 10003, 10, 1]
Z3950_USR_SEARCHRES1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1])
Z3950_USR_SEARCHTERMS1 = [1, 2, 840, 10003, 10, 4]
Z3950_USR_SEARCHTERMS1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 4])
Z3950_USR_SEARCHTERMS2 = [1, 2, 840, 10003, 10, 5]
Z3950_USR_SEARCHTERMS2_ov = asn1.OidVal([1, 2, 840, 10003, 10, 5])
# Z3950_VAR_*: variant sets under arc 1.2.840.10003.12
Z3950_VAR = [1, 2, 840, 10003, 12]
Z3950_VAR_ov = asn1.OidVal([1, 2, 840, 10003, 12])
Z3950_VAR_VARIANT1 = [1, 2, 840, 10003, 12, 1]
Z3950_VAR_VARIANT1_ov = asn1.OidVal([1, 2, 840, 10003, 12, 1])
from PyZ3950 import asn1
# --- Nested OID lookup table -------------------------------------------------
# Mirrors the flat NAME/NAME_ov constants as a tree of dicts.  Every node is a
# dict holding its own 'oid' (asn1.OidVal) and 'val' (plain list of arcs);
# child arcs are then added as extra string keys on that same dict, so e.g.
# oids['Z3950']['RECSYN']['USMARC'] carries both its own 'oid'/'val' and child
# entries such as 'BIBLIO' and 'AUTH'.  Assignment order matters: a parent
# node must exist before its children are inserted into it.
oids = {}
oids['Z3950'] = {'oid': asn1.OidVal([1, 2, 840, 10003]), 'val': [1, 2, 840, 10003]}
# Top-level Z39.50 arcs (1.2.840.10003.*)
oids['Z3950']['ATTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3]), 'val': [1, 2, 840, 10003, 3]}
oids['Z3950']['DIAG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4]), 'val': [1, 2, 840, 10003, 4]}
oids['Z3950']['RECSYN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5]), 'val': [1, 2, 840, 10003, 5]}
oids['Z3950']['TRANSFER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 6]), 'val': [1, 2, 840, 10003, 6]}
oids['Z3950']['RRF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7]), 'val': [1, 2, 840, 10003, 7]}
oids['Z3950']['ACCESS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8]), 'val': [1, 2, 840, 10003, 8]}
oids['Z3950']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9]), 'val': [1, 2, 840, 10003, 9]}
oids['Z3950']['USR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10]), 'val': [1, 2, 840, 10003, 10]}
oids['Z3950']['SPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11]), 'val': [1, 2, 840, 10003, 11]}
oids['Z3950']['VAR'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12]), 'val': [1, 2, 840, 10003, 12]}
oids['Z3950']['SCHEMA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13]), 'val': [1, 2, 840, 10003, 13]}
oids['Z3950']['TAGSET'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14]), 'val': [1, 2, 840, 10003, 14]}
oids['Z3950']['NEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15]), 'val': [1, 2, 840, 10003, 15]}
oids['Z3950']['QUERY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16]), 'val': [1, 2, 840, 10003, 16]}
# Attribute sets (arc 3)
oids['Z3950']['ATTRS']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 1]), 'val': [1, 2, 840, 10003, 3, 1]}
oids['Z3950']['ATTRS']['EXP1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 2]), 'val': [1, 2, 840, 10003, 3, 2]}
oids['Z3950']['ATTRS']['EXT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 3]), 'val': [1, 2, 840, 10003, 3, 3]}
oids['Z3950']['ATTRS']['CCL1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 4]), 'val': [1, 2, 840, 10003, 3, 4]}
oids['Z3950']['ATTRS']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 5]), 'val': [1, 2, 840, 10003, 3, 5]}
oids['Z3950']['ATTRS']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 6]), 'val': [1, 2, 840, 10003, 3, 6]}
oids['Z3950']['ATTRS']['COLLECTIONS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 7]), 'val': [1, 2, 840, 10003, 3, 7]}
oids['Z3950']['ATTRS']['CIMI1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 8]), 'val': [1, 2, 840, 10003, 3, 8]}
oids['Z3950']['ATTRS']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 9]), 'val': [1, 2, 840, 10003, 3, 9]}
oids['Z3950']['ATTRS']['ZBIG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 10]), 'val': [1, 2, 840, 10003, 3, 10]}
oids['Z3950']['ATTRS']['UTIL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 11]), 'val': [1, 2, 840, 10003, 3, 11]}
oids['Z3950']['ATTRS']['XD1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 12]), 'val': [1, 2, 840, 10003, 3, 12]}
oids['Z3950']['ATTRS']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 13]), 'val': [1, 2, 840, 10003, 3, 13]}
oids['Z3950']['ATTRS']['FIN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 14]), 'val': [1, 2, 840, 10003, 3, 14]}
oids['Z3950']['ATTRS']['DAN1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 15]), 'val': [1, 2, 840, 10003, 3, 15]}
oids['Z3950']['ATTRS']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 16]), 'val': [1, 2, 840, 10003, 3, 16]}
oids['Z3950']['ATTRS']['MARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 17]), 'val': [1, 2, 840, 10003, 3, 17]}
oids['Z3950']['ATTRS']['BIB2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 18]), 'val': [1, 2, 840, 10003, 3, 18]}
oids['Z3950']['ATTRS']['ZEEREX'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 3, 19]), 'val': [1, 2, 840, 10003, 3, 19]}
# Diagnostic sets (arc 4)
oids['Z3950']['DIAG']['BIB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 1]), 'val': [1, 2, 840, 10003, 4, 1]}
oids['Z3950']['DIAG']['DIAG1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 2]), 'val': [1, 2, 840, 10003, 4, 2]}
oids['Z3950']['DIAG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 3]), 'val': [1, 2, 840, 10003, 4, 3]}
oids['Z3950']['DIAG']['GENERAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 4, 4]), 'val': [1, 2, 840, 10003, 4, 4]}
# Record syntaxes (arc 5)
oids['Z3950']['RECSYN']['UNIMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 1]), 'val': [1, 2, 840, 10003, 5, 1]}
oids['Z3950']['RECSYN']['INTERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 2]), 'val': [1, 2, 840, 10003, 5, 2]}
oids['Z3950']['RECSYN']['CCF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 3]), 'val': [1, 2, 840, 10003, 5, 3]}
oids['Z3950']['RECSYN']['USMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10]), 'val': [1, 2, 840, 10003, 5, 10]}
oids['Z3950']['RECSYN']['USMARC']['BIBLIO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 1]), 'val': [1, 2, 840, 10003, 5, 10, 1]}
oids['Z3950']['RECSYN']['USMARC']['AUTH'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 2]), 'val': [1, 2, 840, 10003, 5, 10, 2]}
oids['Z3950']['RECSYN']['USMARC']['HOLD'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 3]), 'val': [1, 2, 840, 10003, 5, 10, 3]}
oids['Z3950']['RECSYN']['USMARC']['COMMUNITY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 4]), 'val': [1, 2, 840, 10003, 5, 10, 4]}
oids['Z3950']['RECSYN']['USMARC']['CLASS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 10, 5]), 'val': [1, 2, 840, 10003, 5, 10, 5]}
oids['Z3950']['RECSYN']['UKMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 11]), 'val': [1, 2, 840, 10003, 5, 11]}
oids['Z3950']['RECSYN']['NORMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 12]), 'val': [1, 2, 840, 10003, 5, 12]}
oids['Z3950']['RECSYN']['LIBRISMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 13]), 'val': [1, 2, 840, 10003, 5, 13]}
oids['Z3950']['RECSYN']['DANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 14]), 'val': [1, 2, 840, 10003, 5, 14]}
oids['Z3950']['RECSYN']['FINMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 15]), 'val': [1, 2, 840, 10003, 5, 15]}
oids['Z3950']['RECSYN']['MAB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 16]), 'val': [1, 2, 840, 10003, 5, 16]}
oids['Z3950']['RECSYN']['CANMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 17]), 'val': [1, 2, 840, 10003, 5, 17]}
oids['Z3950']['RECSYN']['SBNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 18]), 'val': [1, 2, 840, 10003, 5, 18]}
oids['Z3950']['RECSYN']['PICAMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 19]), 'val': [1, 2, 840, 10003, 5, 19]}
oids['Z3950']['RECSYN']['AUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 20]), 'val': [1, 2, 840, 10003, 5, 20]}
oids['Z3950']['RECSYN']['IBERMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 21]), 'val': [1, 2, 840, 10003, 5, 21]}
oids['Z3950']['RECSYN']['CATMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 22]), 'val': [1, 2, 840, 10003, 5, 22]}
oids['Z3950']['RECSYN']['MALMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 23]), 'val': [1, 2, 840, 10003, 5, 23]}
oids['Z3950']['RECSYN']['JPMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 24]), 'val': [1, 2, 840, 10003, 5, 24]}
oids['Z3950']['RECSYN']['SWEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 25]), 'val': [1, 2, 840, 10003, 5, 25]}
oids['Z3950']['RECSYN']['SIGLEMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 26]), 'val': [1, 2, 840, 10003, 5, 26]}
oids['Z3950']['RECSYN']['ISDSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 27]), 'val': [1, 2, 840, 10003, 5, 27]}
oids['Z3950']['RECSYN']['RUSMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 28]), 'val': [1, 2, 840, 10003, 5, 28]}
oids['Z3950']['RECSYN']['HUNMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 29]), 'val': [1, 2, 840, 10003, 5, 29]}
oids['Z3950']['RECSYN']['NACSISCATP'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 30]), 'val': [1, 2, 840, 10003, 5, 30]}
oids['Z3950']['RECSYN']['FINMARC2000'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 31]), 'val': [1, 2, 840, 10003, 5, 31]}
oids['Z3950']['RECSYN']['MARC21FIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 32]), 'val': [1, 2, 840, 10003, 5, 32]}
oids['Z3950']['RECSYN']['COMARC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 33]), 'val': [1, 2, 840, 10003, 5, 33]}
oids['Z3950']['RECSYN']['EXPLAIN'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 100]), 'val': [1, 2, 840, 10003, 5, 100]}
oids['Z3950']['RECSYN']['SUTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 101]), 'val': [1, 2, 840, 10003, 5, 101]}
oids['Z3950']['RECSYN']['OPAC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 102]), 'val': [1, 2, 840, 10003, 5, 102]}
oids['Z3950']['RECSYN']['SUMMARY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 103]), 'val': [1, 2, 840, 10003, 5, 103]}
oids['Z3950']['RECSYN']['GRS0'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 104]), 'val': [1, 2, 840, 10003, 5, 104]}
oids['Z3950']['RECSYN']['GRS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 105]), 'val': [1, 2, 840, 10003, 5, 105]}
oids['Z3950']['RECSYN']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 106]), 'val': [1, 2, 840, 10003, 5, 106]}
oids['Z3950']['RECSYN']['FRAGMENT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 107]), 'val': [1, 2, 840, 10003, 5, 107]}
oids['Z3950']['RECSYN']['MIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109]), 'val': [1, 2, 840, 10003, 5, 109]}
oids['Z3950']['RECSYN']['MIME']['PDF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 1]), 'val': [1, 2, 840, 10003, 5, 109, 1]}
oids['Z3950']['RECSYN']['MIME']['POSTSCRIPT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 2]), 'val': [1, 2, 840, 10003, 5, 109, 2]}
oids['Z3950']['RECSYN']['MIME']['HTML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 3]), 'val': [1, 2, 840, 10003, 5, 109, 3]}
oids['Z3950']['RECSYN']['MIME']['TIFF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 4]), 'val': [1, 2, 840, 10003, 5, 109, 4]}
oids['Z3950']['RECSYN']['MIME']['GIF'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 5]), 'val': [1, 2, 840, 10003, 5, 109, 5]}
oids['Z3950']['RECSYN']['MIME']['JPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 6]), 'val': [1, 2, 840, 10003, 5, 109, 6]}
oids['Z3950']['RECSYN']['MIME']['PNG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 7]), 'val': [1, 2, 840, 10003, 5, 109, 7]}
oids['Z3950']['RECSYN']['MIME']['MPEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 8]), 'val': [1, 2, 840, 10003, 5, 109, 8]}
oids['Z3950']['RECSYN']['MIME']['SGML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 9]), 'val': [1, 2, 840, 10003, 5, 109, 9]}
oids['Z3950']['RECSYN']['MIME']['XML'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 109, 10]), 'val': [1, 2, 840, 10003, 5, 109, 10]}
oids['Z3950']['RECSYN']['ZMIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110]), 'val': [1, 2, 840, 10003, 5, 110]}
oids['Z3950']['RECSYN']['ZMIME']['TIFFB'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 1]), 'val': [1, 2, 840, 10003, 5, 110, 1]}
oids['Z3950']['RECSYN']['ZMIME']['WAV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 110, 2]), 'val': [1, 2, 840, 10003, 5, 110, 2]}
oids['Z3950']['RECSYN']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 5, 111]), 'val': [1, 2, 840, 10003, 5, 111]}
# Resource-report formats (arc 7)
oids['Z3950']['RRF']['RESOURCE1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 1]), 'val': [1, 2, 840, 10003, 7, 1]}
oids['Z3950']['RRF']['RESOURCE2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 7, 2]), 'val': [1, 2, 840, 10003, 7, 2]}
# Access-control formats (arc 8)
oids['Z3950']['ACCESS']['PROMPT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 1]), 'val': [1, 2, 840, 10003, 8, 1]}
oids['Z3950']['ACCESS']['DES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 2]), 'val': [1, 2, 840, 10003, 8, 2]}
oids['Z3950']['ACCESS']['KRB1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 8, 3]), 'val': [1, 2, 840, 10003, 8, 3]}
# Extended services (arc 9)
oids['Z3950']['ES']['PERSISTRS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 1]), 'val': [1, 2, 840, 10003, 9, 1]}
oids['Z3950']['ES']['PERSISTQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 2]), 'val': [1, 2, 840, 10003, 9, 2]}
oids['Z3950']['ES']['PERIODQRY'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 3]), 'val': [1, 2, 840, 10003, 9, 3]}
oids['Z3950']['ES']['ITEMORDER'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 4]), 'val': [1, 2, 840, 10003, 9, 4]}
oids['Z3950']['ES']['DBUPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5]), 'val': [1, 2, 840, 10003, 9, 5]}
oids['Z3950']['ES']['DBUPDATE']['REV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1]}
oids['Z3950']['ES']['DBUPDATE']['REV']['1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1]), 'val': [1, 2, 840, 10003, 9, 5, 1, 1]}
oids['Z3950']['ES']['EXPORTSPEC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 6]), 'val': [1, 2, 840, 10003, 9, 6]}
oids['Z3950']['ES']['EXPORTINV'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 9, 7]), 'val': [1, 2, 840, 10003, 9, 7]}
# User-information formats (arc 10); private/vendor arcs branch at 1000
oids['Z3950']['USR']['SEARCHRES1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1]), 'val': [1, 2, 840, 10003, 10, 1]}
oids['Z3950']['USR']['CHARSETNEG'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 2]), 'val': [1, 2, 840, 10003, 10, 2]}
oids['Z3950']['USR']['INFO1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 3]), 'val': [1, 2, 840, 10003, 10, 3]}
oids['Z3950']['USR']['SEARCHTERMS1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 4]), 'val': [1, 2, 840, 10003, 10, 4]}
oids['Z3950']['USR']['SEARCHTERMS2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 5]), 'val': [1, 2, 840, 10003, 10, 5]}
oids['Z3950']['USR']['DATETIME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 6]), 'val': [1, 2, 840, 10003, 10, 6]}
oids['Z3950']['USR']['INSERTACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 9]), 'val': [1, 2, 840, 10003, 10, 9]}
oids['Z3950']['USR']['EDITACTIONQUAL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 10]), 'val': [1, 2, 840, 10003, 10, 10]}
oids['Z3950']['USR']['AUTHFILE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 11]), 'val': [1, 2, 840, 10003, 10, 11]}
oids['Z3950']['USR']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000]), 'val': [1, 2, 840, 10003, 10, 1000]}
oids['Z3950']['USR']['PRIVATE']['OCLC'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17]), 'val': [1, 2, 840, 10003, 10, 1000, 17]}
oids['Z3950']['USR']['PRIVATE']['OCLC']['INFO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1]), 'val': [1, 2, 840, 10003, 10, 1000, 17, 1]}
# Element specifications (arc 11)
oids['Z3950']['SPEC']['ESPEC1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 1]), 'val': [1, 2, 840, 10003, 11, 1]}
oids['Z3950']['SPEC']['ESPEC2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 2]), 'val': [1, 2, 840, 10003, 11, 2]}
oids['Z3950']['SPEC']['ESPECQ'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 11, 3]), 'val': [1, 2, 840, 10003, 11, 3]}
# Variant sets (arc 12)
oids['Z3950']['VAR']['VARIANT1'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 12, 1]), 'val': [1, 2, 840, 10003, 12, 1]}
# Database schemas (arc 13)
oids['Z3950']['SCHEMA']['WAIS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 2]), 'val': [1, 2, 840, 10003, 13, 2]}
oids['Z3950']['SCHEMA']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 3]), 'val': [1, 2, 840, 10003, 13, 3]}
oids['Z3950']['SCHEMA']['GEO'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 4]), 'val': [1, 2, 840, 10003, 13, 4]}
oids['Z3950']['SCHEMA']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 5]), 'val': [1, 2, 840, 10003, 13, 5]}
oids['Z3950']['SCHEMA']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 6]), 'val': [1, 2, 840, 10003, 13, 6]}
oids['Z3950']['SCHEMA']['HOLDINGS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7]), 'val': [1, 2, 840, 10003, 13, 7]}
oids['Z3950']['SCHEMA']['HOLDINGS']['11'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 1]), 'val': [1, 2, 840, 10003, 13, 7, 1]}
oids['Z3950']['SCHEMA']['HOLDINGS']['12'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 2]), 'val': [1, 2, 840, 10003, 13, 7, 2]}
oids['Z3950']['SCHEMA']['HOLDINGS']['14'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 7, 4]), 'val': [1, 2, 840, 10003, 13, 7, 4]}
# NOTE(review): ZTHES, INSERT and EDIT below all repeat arc [..., 13, 1]
# (same as WAIS above) — likely a generator artifact; confirm the intended
# arcs against the Z39.50 OID register before relying on them.
oids['Z3950']['SCHEMA']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['INSERT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
oids['Z3950']['SCHEMA']['EDIT'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 13, 1]), 'val': [1, 2, 840, 10003, 13, 1]}
# Tag sets (arc 14)
oids['Z3950']['TAGSET']['M'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 1]), 'val': [1, 2, 840, 10003, 14, 1]}
oids['Z3950']['TAGSET']['G'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 2]), 'val': [1, 2, 840, 10003, 14, 2]}
oids['Z3950']['TAGSET']['STAS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 3]), 'val': [1, 2, 840, 10003, 14, 3]}
oids['Z3950']['TAGSET']['GILS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 4]), 'val': [1, 2, 840, 10003, 14, 4]}
oids['Z3950']['TAGSET']['COLLECTIONS'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 5]), 'val': [1, 2, 840, 10003, 14, 5]}
oids['Z3950']['TAGSET']['CIMI'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 6]), 'val': [1, 2, 840, 10003, 14, 6]}
oids['Z3950']['TAGSET']['UPDATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 7]), 'val': [1, 2, 840, 10003, 14, 7]}
oids['Z3950']['TAGSET']['ZTHES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 14, 8]), 'val': [1, 2, 840, 10003, 14, 8]}
# Negotiation records (arc 15)
oids['Z3950']['NEG']['CHARSET2'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1]), 'val': [1, 2, 840, 10003, 15, 1]}
oids['Z3950']['NEG']['ES'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 2]), 'val': [1, 2, 840, 10003, 15, 2]}
oids['Z3950']['NEG']['CHARSET3'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 3]), 'val': [1, 2, 840, 10003, 15, 3]}
oids['Z3950']['NEG']['PRIVATE'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000]), 'val': [1, 2, 840, 10003, 15, 1000]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81]), 'val': [1, 2, 840, 10003, 15, 1000, 81]}
oids['Z3950']['NEG']['PRIVATE']['INDEXDATA']['CHARSETNAME'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1]), 'val': [1, 2, 840, 10003, 15, 1000, 81, 1]}
# Query types (arc 16)
oids['Z3950']['QUERY']['SQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 1]), 'val': [1, 2, 840, 10003, 16, 1]}
oids['Z3950']['QUERY']['CQL'] = {'oid': asn1.OidVal([1, 2, 840, 10003, 16, 2]), 'val': [1, 2, 840, 10003, 16, 2]}
# UNICODE (ISO/IEC 10646) transfer syntaxes under arc 1.0.10646
oids['UNICODE'] = {'oid': asn1.OidVal([1, 0, 10646]), 'val': [1, 0, 10646]}
oids['UNICODE']['PART1'] = {'oid': asn1.OidVal([1, 0, 10646, 1]), 'val': [1, 0, 10646, 1]}
oids['UNICODE']['PART1']['XFERSYN'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0]), 'val': [1, 0, 10646, 1, 0]}
oids['UNICODE']['PART1']['XFERSYN']['UCS2'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 2]), 'val': [1, 0, 10646, 1, 0, 2]}
oids['UNICODE']['PART1']['XFERSYN']['UCS4'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 4]), 'val': [1, 0, 10646, 1, 0, 4]}
oids['UNICODE']['PART1']['XFERSYN']['UTF16'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 5]), 'val': [1, 0, 10646, 1, 0, 5]}
oids['UNICODE']['PART1']['XFERSYN']['UTF8'] = {'oid': asn1.OidVal([1, 0, 10646, 1, 0, 8]), 'val': [1, 0, 10646, 1, 0, 8]}
# --- Flat NAME / NAME_ov constants (alphabetical; mirrors the oids tree) -----
# UNICODE_*: ISO/IEC 10646 transfer-syntax OIDs under arc 1.0.10646
UNICODE = [1, 0, 10646]
UNICODE_ov = asn1.OidVal([1, 0, 10646])
UNICODE_PART1 = [1, 0, 10646, 1]
UNICODE_PART1_ov = asn1.OidVal([1, 0, 10646, 1])
UNICODE_PART1_XFERSYN = [1, 0, 10646, 1, 0]
UNICODE_PART1_XFERSYN_ov = asn1.OidVal([1, 0, 10646, 1, 0])
UNICODE_PART1_XFERSYN_UCS2 = [1, 0, 10646, 1, 0, 2]
UNICODE_PART1_XFERSYN_UCS2_ov = asn1.OidVal([1, 0, 10646, 1, 0, 2])
UNICODE_PART1_XFERSYN_UCS4 = [1, 0, 10646, 1, 0, 4]
UNICODE_PART1_XFERSYN_UCS4_ov = asn1.OidVal([1, 0, 10646, 1, 0, 4])
UNICODE_PART1_XFERSYN_UTF16 = [1, 0, 10646, 1, 0, 5]
UNICODE_PART1_XFERSYN_UTF16_ov = asn1.OidVal([1, 0, 10646, 1, 0, 5])
UNICODE_PART1_XFERSYN_UTF8 = [1, 0, 10646, 1, 0, 8]
UNICODE_PART1_XFERSYN_UTF8_ov = asn1.OidVal([1, 0, 10646, 1, 0, 8])
# Z39.50 root arc 1.2.840.10003
Z3950 = [1, 2, 840, 10003]
Z3950_ov = asn1.OidVal([1, 2, 840, 10003])
# Z3950_ACCESS_*: access-control formats under arc ...8
Z3950_ACCESS = [1, 2, 840, 10003, 8]
Z3950_ACCESS_ov = asn1.OidVal([1, 2, 840, 10003, 8])
Z3950_ACCESS_DES1 = [1, 2, 840, 10003, 8, 2]
Z3950_ACCESS_DES1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 2])
Z3950_ACCESS_KRB1 = [1, 2, 840, 10003, 8, 3]
Z3950_ACCESS_KRB1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 3])
Z3950_ACCESS_PROMPT1 = [1, 2, 840, 10003, 8, 1]
Z3950_ACCESS_PROMPT1_ov = asn1.OidVal([1, 2, 840, 10003, 8, 1])
# Z3950_ATTRS_*: attribute sets under arc ...3
Z3950_ATTRS = [1, 2, 840, 10003, 3]
Z3950_ATTRS_ov = asn1.OidVal([1, 2, 840, 10003, 3])
Z3950_ATTRS_BIB1 = [1, 2, 840, 10003, 3, 1]
Z3950_ATTRS_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 1])
Z3950_ATTRS_BIB2 = [1, 2, 840, 10003, 3, 18]
Z3950_ATTRS_BIB2_ov = asn1.OidVal([1, 2, 840, 10003, 3, 18])
Z3950_ATTRS_CCL1 = [1, 2, 840, 10003, 3, 4]
Z3950_ATTRS_CCL1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 4])
Z3950_ATTRS_CIMI1 = [1, 2, 840, 10003, 3, 8]
Z3950_ATTRS_CIMI1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 8])
Z3950_ATTRS_COLLECTIONS1 = [1, 2, 840, 10003, 3, 7]
Z3950_ATTRS_COLLECTIONS1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 7])
Z3950_ATTRS_DAN1 = [1, 2, 840, 10003, 3, 15]
Z3950_ATTRS_DAN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 15])
Z3950_ATTRS_EXP1 = [1, 2, 840, 10003, 3, 2]
Z3950_ATTRS_EXP1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 2])
Z3950_ATTRS_EXT1 = [1, 2, 840, 10003, 3, 3]
Z3950_ATTRS_EXT1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 3])
Z3950_ATTRS_FIN1 = [1, 2, 840, 10003, 3, 14]
Z3950_ATTRS_FIN1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 14])
Z3950_ATTRS_GEO = [1, 2, 840, 10003, 3, 9]
Z3950_ATTRS_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 3, 9])
Z3950_ATTRS_GILS = [1, 2, 840, 10003, 3, 5]
Z3950_ATTRS_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 5])
Z3950_ATTRS_HOLD = [1, 2, 840, 10003, 3, 16]
Z3950_ATTRS_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 3, 16])
Z3950_ATTRS_MARC = [1, 2, 840, 10003, 3, 17]
Z3950_ATTRS_MARC_ov = asn1.OidVal([1, 2, 840, 10003, 3, 17])
Z3950_ATTRS_STAS = [1, 2, 840, 10003, 3, 6]
Z3950_ATTRS_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 3, 6])
Z3950_ATTRS_UTIL = [1, 2, 840, 10003, 3, 11]
Z3950_ATTRS_UTIL_ov = asn1.OidVal([1, 2, 840, 10003, 3, 11])
Z3950_ATTRS_XD1 = [1, 2, 840, 10003, 3, 12]
Z3950_ATTRS_XD1_ov = asn1.OidVal([1, 2, 840, 10003, 3, 12])
Z3950_ATTRS_ZBIG = [1, 2, 840, 10003, 3, 10]
Z3950_ATTRS_ZBIG_ov = asn1.OidVal([1, 2, 840, 10003, 3, 10])
Z3950_ATTRS_ZEEREX = [1, 2, 840, 10003, 3, 19]
Z3950_ATTRS_ZEEREX_ov = asn1.OidVal([1, 2, 840, 10003, 3, 19])
Z3950_ATTRS_ZTHES = [1, 2, 840, 10003, 3, 13]
Z3950_ATTRS_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 3, 13])
# Z3950_DIAG_*: diagnostic sets under arc ...4
Z3950_DIAG = [1, 2, 840, 10003, 4]
Z3950_DIAG_ov = asn1.OidVal([1, 2, 840, 10003, 4])
Z3950_DIAG_BIB1 = [1, 2, 840, 10003, 4, 1]
Z3950_DIAG_BIB1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 1])
Z3950_DIAG_DIAG1 = [1, 2, 840, 10003, 4, 2]
Z3950_DIAG_DIAG1_ov = asn1.OidVal([1, 2, 840, 10003, 4, 2])
Z3950_DIAG_ES = [1, 2, 840, 10003, 4, 3]
Z3950_DIAG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 4, 3])
Z3950_DIAG_GENERAL = [1, 2, 840, 10003, 4, 4]
Z3950_DIAG_GENERAL_ov = asn1.OidVal([1, 2, 840, 10003, 4, 4])
# Z3950_ES_*: extended services under arc ...9
Z3950_ES = [1, 2, 840, 10003, 9]
Z3950_ES_ov = asn1.OidVal([1, 2, 840, 10003, 9])
Z3950_ES_DBUPDATE = [1, 2, 840, 10003, 9, 5]
Z3950_ES_DBUPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5])
Z3950_ES_DBUPDATE_REV = [1, 2, 840, 10003, 9, 5, 1]
Z3950_ES_DBUPDATE_REV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1])
Z3950_ES_DBUPDATE_REV_1 = [1, 2, 840, 10003, 9, 5, 1, 1]
Z3950_ES_DBUPDATE_REV_1_ov = asn1.OidVal([1, 2, 840, 10003, 9, 5, 1, 1])
Z3950_ES_EXPORTINV = [1, 2, 840, 10003, 9, 7]
Z3950_ES_EXPORTINV_ov = asn1.OidVal([1, 2, 840, 10003, 9, 7])
Z3950_ES_EXPORTSPEC = [1, 2, 840, 10003, 9, 6]
Z3950_ES_EXPORTSPEC_ov = asn1.OidVal([1, 2, 840, 10003, 9, 6])
Z3950_ES_ITEMORDER = [1, 2, 840, 10003, 9, 4]
Z3950_ES_ITEMORDER_ov = asn1.OidVal([1, 2, 840, 10003, 9, 4])
Z3950_ES_PERIODQRY = [1, 2, 840, 10003, 9, 3]
Z3950_ES_PERIODQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 3])
Z3950_ES_PERSISTQRY = [1, 2, 840, 10003, 9, 2]
Z3950_ES_PERSISTQRY_ov = asn1.OidVal([1, 2, 840, 10003, 9, 2])
Z3950_ES_PERSISTRS = [1, 2, 840, 10003, 9, 1]
Z3950_ES_PERSISTRS_ov = asn1.OidVal([1, 2, 840, 10003, 9, 1])
# Z3950_NEG_*: negotiation records under arc ...15; private arcs branch at 1000
Z3950_NEG = [1, 2, 840, 10003, 15]
Z3950_NEG_ov = asn1.OidVal([1, 2, 840, 10003, 15])
Z3950_NEG_CHARSET2 = [1, 2, 840, 10003, 15, 1]
Z3950_NEG_CHARSET2_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1])
Z3950_NEG_CHARSET3 = [1, 2, 840, 10003, 15, 3]
Z3950_NEG_CHARSET3_ov = asn1.OidVal([1, 2, 840, 10003, 15, 3])
Z3950_NEG_ES = [1, 2, 840, 10003, 15, 2]
Z3950_NEG_ES_ov = asn1.OidVal([1, 2, 840, 10003, 15, 2])
Z3950_NEG_PRIVATE = [1, 2, 840, 10003, 15, 1000]
Z3950_NEG_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000])
Z3950_NEG_PRIVATE_INDEXDATA = [1, 2, 840, 10003, 15, 1000, 81]
Z3950_NEG_PRIVATE_INDEXDATA_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81])
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME = [1, 2, 840, 10003, 15, 1000, 81, 1]
Z3950_NEG_PRIVATE_INDEXDATA_CHARSETNAME_ov = asn1.OidVal([1, 2, 840, 10003, 15, 1000, 81, 1])
# Z3950_QUERY_*: query types under arc ...16
Z3950_QUERY = [1, 2, 840, 10003, 16]
Z3950_QUERY_ov = asn1.OidVal([1, 2, 840, 10003, 16])
Z3950_QUERY_CQL = [1, 2, 840, 10003, 16, 2]
Z3950_QUERY_CQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 2])
Z3950_QUERY_SQL = [1, 2, 840, 10003, 16, 1]
Z3950_QUERY_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 16, 1])
# Z3950_RECSYN_*: record syntaxes under arc ...5 (list continues past this chunk)
Z3950_RECSYN = [1, 2, 840, 10003, 5]
Z3950_RECSYN_ov = asn1.OidVal([1, 2, 840, 10003, 5])
Z3950_RECSYN_AUSMARC = [1, 2, 840, 10003, 5, 20]
Z3950_RECSYN_AUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 20])
Z3950_RECSYN_CANMARC = [1, 2, 840, 10003, 5, 17]
Z3950_RECSYN_CANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 17])
Z3950_RECSYN_CATMARC = [1, 2, 840, 10003, 5, 22]
Z3950_RECSYN_CATMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 22])
Z3950_RECSYN_CCF = [1, 2, 840, 10003, 5, 3]
Z3950_RECSYN_CCF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 3])
Z3950_RECSYN_COMARC = [1, 2, 840, 10003, 5, 33]
Z3950_RECSYN_COMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 33])
Z3950_RECSYN_DANMARC = [1, 2, 840, 10003, 5, 14]
Z3950_RECSYN_DANMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 14])
Z3950_RECSYN_ES = [1, 2, 840, 10003, 5, 106]
Z3950_RECSYN_ES_ov = asn1.OidVal([1, 2, 840, 10003, 5, 106])
Z3950_RECSYN_EXPLAIN = [1, 2, 840, 10003, 5, 100]
Z3950_RECSYN_EXPLAIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 100])
Z3950_RECSYN_FINMARC = [1, 2, 840, 10003, 5, 15]
Z3950_RECSYN_FINMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 15])
Z3950_RECSYN_FINMARC2000 = [1, 2, 840, 10003, 5, 31]
Z3950_RECSYN_FINMARC2000_ov = asn1.OidVal([1, 2, 840, 10003, 5, 31])
Z3950_RECSYN_FRAGMENT = [1, 2, 840, 10003, 5, 107]
Z3950_RECSYN_FRAGMENT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 107])
Z3950_RECSYN_GRS0 = [1, 2, 840, 10003, 5, 104]
Z3950_RECSYN_GRS0_ov = asn1.OidVal([1, 2, 840, 10003, 5, 104])
Z3950_RECSYN_GRS1 = [1, 2, 840, 10003, 5, 105]
Z3950_RECSYN_GRS1_ov = asn1.OidVal([1, 2, 840, 10003, 5, 105])
Z3950_RECSYN_HUNMARC = [1, 2, 840, 10003, 5, 29]
Z3950_RECSYN_HUNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 29])
Z3950_RECSYN_IBERMARC = [1, 2, 840, 10003, 5, 21]
Z3950_RECSYN_IBERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 21])
Z3950_RECSYN_INTERMARC = [1, 2, 840, 10003, 5, 2]
Z3950_RECSYN_INTERMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 2])
Z3950_RECSYN_ISDSMARC = [1, 2, 840, 10003, 5, 27]
Z3950_RECSYN_ISDSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 27])
Z3950_RECSYN_JPMARC = [1, 2, 840, 10003, 5, 24]
Z3950_RECSYN_JPMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 24])
Z3950_RECSYN_LIBRISMARC = [1, 2, 840, 10003, 5, 13]
Z3950_RECSYN_LIBRISMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 13])
Z3950_RECSYN_MAB = [1, 2, 840, 10003, 5, 16]
Z3950_RECSYN_MAB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 16])
Z3950_RECSYN_MALMARC = [1, 2, 840, 10003, 5, 23]
Z3950_RECSYN_MALMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 23])
Z3950_RECSYN_MARC21FIN = [1, 2, 840, 10003, 5, 32]
Z3950_RECSYN_MARC21FIN_ov = asn1.OidVal([1, 2, 840, 10003, 5, 32])
Z3950_RECSYN_MIME = [1, 2, 840, 10003, 5, 109]
Z3950_RECSYN_MIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109])
Z3950_RECSYN_MIME_GIF = [1, 2, 840, 10003, 5, 109, 5]
Z3950_RECSYN_MIME_GIF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 5])
Z3950_RECSYN_MIME_HTML = [1, 2, 840, 10003, 5, 109, 3]
Z3950_RECSYN_MIME_HTML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 3])
Z3950_RECSYN_MIME_JPEG = [1, 2, 840, 10003, 5, 109, 6]
Z3950_RECSYN_MIME_JPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 6])
Z3950_RECSYN_MIME_MPEG = [1, 2, 840, 10003, 5, 109, 8]
Z3950_RECSYN_MIME_MPEG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 8])
Z3950_RECSYN_MIME_PDF = [1, 2, 840, 10003, 5, 109, 1]
Z3950_RECSYN_MIME_PDF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 1])
Z3950_RECSYN_MIME_PNG = [1, 2, 840, 10003, 5, 109, 7]
Z3950_RECSYN_MIME_PNG_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 7])
Z3950_RECSYN_MIME_POSTSCRIPT = [1, 2, 840, 10003, 5, 109, 2]
Z3950_RECSYN_MIME_POSTSCRIPT_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 2])
Z3950_RECSYN_MIME_SGML = [1, 2, 840, 10003, 5, 109, 9]
Z3950_RECSYN_MIME_SGML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 9])
Z3950_RECSYN_MIME_TIFF = [1, 2, 840, 10003, 5, 109, 4]
Z3950_RECSYN_MIME_TIFF_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 4])
Z3950_RECSYN_MIME_XML = [1, 2, 840, 10003, 5, 109, 10]
Z3950_RECSYN_MIME_XML_ov = asn1.OidVal([1, 2, 840, 10003, 5, 109, 10])
Z3950_RECSYN_NACSISCATP = [1, 2, 840, 10003, 5, 30]
Z3950_RECSYN_NACSISCATP_ov = asn1.OidVal([1, 2, 840, 10003, 5, 30])
Z3950_RECSYN_NORMARC = [1, 2, 840, 10003, 5, 12]
Z3950_RECSYN_NORMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 12])
Z3950_RECSYN_OPAC = [1, 2, 840, 10003, 5, 102]
Z3950_RECSYN_OPAC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 102])
Z3950_RECSYN_PICAMARC = [1, 2, 840, 10003, 5, 19]
Z3950_RECSYN_PICAMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 19])
Z3950_RECSYN_RUSMARC = [1, 2, 840, 10003, 5, 28]
Z3950_RECSYN_RUSMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 28])
Z3950_RECSYN_SBNMARC = [1, 2, 840, 10003, 5, 18]
Z3950_RECSYN_SBNMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 18])
Z3950_RECSYN_SIGLEMARC = [1, 2, 840, 10003, 5, 26]
Z3950_RECSYN_SIGLEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 26])
Z3950_RECSYN_SQL = [1, 2, 840, 10003, 5, 111]
Z3950_RECSYN_SQL_ov = asn1.OidVal([1, 2, 840, 10003, 5, 111])
Z3950_RECSYN_SUMMARY = [1, 2, 840, 10003, 5, 103]
Z3950_RECSYN_SUMMARY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 103])
Z3950_RECSYN_SUTRS = [1, 2, 840, 10003, 5, 101]
Z3950_RECSYN_SUTRS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 101])
Z3950_RECSYN_SWEMARC = [1, 2, 840, 10003, 5, 25]
Z3950_RECSYN_SWEMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 25])
Z3950_RECSYN_UKMARC = [1, 2, 840, 10003, 5, 11]
Z3950_RECSYN_UKMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 11])
Z3950_RECSYN_UNIMARC = [1, 2, 840, 10003, 5, 1]
Z3950_RECSYN_UNIMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 1])
Z3950_RECSYN_USMARC = [1, 2, 840, 10003, 5, 10]
Z3950_RECSYN_USMARC_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10])
Z3950_RECSYN_USMARC_AUTH = [1, 2, 840, 10003, 5, 10, 2]
Z3950_RECSYN_USMARC_AUTH_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 2])
Z3950_RECSYN_USMARC_BIBLIO = [1, 2, 840, 10003, 5, 10, 1]
Z3950_RECSYN_USMARC_BIBLIO_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 1])
Z3950_RECSYN_USMARC_CLASS = [1, 2, 840, 10003, 5, 10, 5]
Z3950_RECSYN_USMARC_CLASS_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 5])
Z3950_RECSYN_USMARC_COMMUNITY = [1, 2, 840, 10003, 5, 10, 4]
Z3950_RECSYN_USMARC_COMMUNITY_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 4])
Z3950_RECSYN_USMARC_HOLD = [1, 2, 840, 10003, 5, 10, 3]
Z3950_RECSYN_USMARC_HOLD_ov = asn1.OidVal([1, 2, 840, 10003, 5, 10, 3])
Z3950_RECSYN_ZMIME = [1, 2, 840, 10003, 5, 110]
Z3950_RECSYN_ZMIME_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110])
Z3950_RECSYN_ZMIME_TIFFB = [1, 2, 840, 10003, 5, 110, 1]
Z3950_RECSYN_ZMIME_TIFFB_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 1])
Z3950_RECSYN_ZMIME_WAV = [1, 2, 840, 10003, 5, 110, 2]
Z3950_RECSYN_ZMIME_WAV_ov = asn1.OidVal([1, 2, 840, 10003, 5, 110, 2])
Z3950_RRF = [1, 2, 840, 10003, 7]
Z3950_RRF_ov = asn1.OidVal([1, 2, 840, 10003, 7])
Z3950_RRF_RESOURCE1 = [1, 2, 840, 10003, 7, 1]
Z3950_RRF_RESOURCE1_ov = asn1.OidVal([1, 2, 840, 10003, 7, 1])
Z3950_RRF_RESOURCE2 = [1, 2, 840, 10003, 7, 2]
Z3950_RRF_RESOURCE2_ov = asn1.OidVal([1, 2, 840, 10003, 7, 2])
Z3950_SCHEMA = [1, 2, 840, 10003, 13]
Z3950_SCHEMA_ov = asn1.OidVal([1, 2, 840, 10003, 13])
Z3950_SCHEMA_CIMI = [1, 2, 840, 10003, 13, 5]
Z3950_SCHEMA_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 13, 5])
Z3950_SCHEMA_COLLECTIONS = [1, 2, 840, 10003, 13, 3]
Z3950_SCHEMA_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 3])
Z3950_SCHEMA_EDIT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_EDIT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_GEO = [1, 2, 840, 10003, 13, 4]
Z3950_SCHEMA_GEO_ov = asn1.OidVal([1, 2, 840, 10003, 13, 4])
Z3950_SCHEMA_GILS = [1, 2, 840, 10003, 13, 2]
Z3950_SCHEMA_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 2])
Z3950_SCHEMA_HOLDINGS = [1, 2, 840, 10003, 13, 7]
Z3950_SCHEMA_HOLDINGS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7])
Z3950_SCHEMA_HOLDINGS_11 = [1, 2, 840, 10003, 13, 7, 1]
Z3950_SCHEMA_HOLDINGS_11_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 1])
Z3950_SCHEMA_HOLDINGS_12 = [1, 2, 840, 10003, 13, 7, 2]
Z3950_SCHEMA_HOLDINGS_12_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 2])
Z3950_SCHEMA_HOLDINGS_14 = [1, 2, 840, 10003, 13, 7, 4]
Z3950_SCHEMA_HOLDINGS_14_ov = asn1.OidVal([1, 2, 840, 10003, 13, 7, 4])
Z3950_SCHEMA_INSERT = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_INSERT_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_UPDATE = [1, 2, 840, 10003, 13, 6]
Z3950_SCHEMA_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 13, 6])
Z3950_SCHEMA_WAIS = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_WAIS_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SCHEMA_ZTHES = [1, 2, 840, 10003, 13, 1]
Z3950_SCHEMA_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 13, 1])
Z3950_SPEC = [1, 2, 840, 10003, 11]
Z3950_SPEC_ov = asn1.OidVal([1, 2, 840, 10003, 11])
Z3950_SPEC_ESPEC1 = [1, 2, 840, 10003, 11, 1]
Z3950_SPEC_ESPEC1_ov = asn1.OidVal([1, 2, 840, 10003, 11, 1])
Z3950_SPEC_ESPEC2 = [1, 2, 840, 10003, 11, 2]
Z3950_SPEC_ESPEC2_ov = asn1.OidVal([1, 2, 840, 10003, 11, 2])
Z3950_SPEC_ESPECQ = [1, 2, 840, 10003, 11, 3]
Z3950_SPEC_ESPECQ_ov = asn1.OidVal([1, 2, 840, 10003, 11, 3])
Z3950_TAGSET = [1, 2, 840, 10003, 14]
Z3950_TAGSET_ov = asn1.OidVal([1, 2, 840, 10003, 14])
Z3950_TAGSET_CIMI = [1, 2, 840, 10003, 14, 6]
Z3950_TAGSET_CIMI_ov = asn1.OidVal([1, 2, 840, 10003, 14, 6])
Z3950_TAGSET_COLLECTIONS = [1, 2, 840, 10003, 14, 5]
Z3950_TAGSET_COLLECTIONS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 5])
Z3950_TAGSET_G = [1, 2, 840, 10003, 14, 2]
Z3950_TAGSET_G_ov = asn1.OidVal([1, 2, 840, 10003, 14, 2])
Z3950_TAGSET_GILS = [1, 2, 840, 10003, 14, 4]
Z3950_TAGSET_GILS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 4])
Z3950_TAGSET_M = [1, 2, 840, 10003, 14, 1]
Z3950_TAGSET_M_ov = asn1.OidVal([1, 2, 840, 10003, 14, 1])
Z3950_TAGSET_STAS = [1, 2, 840, 10003, 14, 3]
Z3950_TAGSET_STAS_ov = asn1.OidVal([1, 2, 840, 10003, 14, 3])
Z3950_TAGSET_UPDATE = [1, 2, 840, 10003, 14, 7]
Z3950_TAGSET_UPDATE_ov = asn1.OidVal([1, 2, 840, 10003, 14, 7])
Z3950_TAGSET_ZTHES = [1, 2, 840, 10003, 14, 8]
Z3950_TAGSET_ZTHES_ov = asn1.OidVal([1, 2, 840, 10003, 14, 8])
Z3950_TRANSFER = [1, 2, 840, 10003, 6]
Z3950_TRANSFER_ov = asn1.OidVal([1, 2, 840, 10003, 6])
Z3950_USR = [1, 2, 840, 10003, 10]
Z3950_USR_ov = asn1.OidVal([1, 2, 840, 10003, 10])
Z3950_USR_AUTHFILE = [1, 2, 840, 10003, 10, 11]
Z3950_USR_AUTHFILE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 11])
Z3950_USR_CHARSETNEG = [1, 2, 840, 10003, 10, 2]
Z3950_USR_CHARSETNEG_ov = asn1.OidVal([1, 2, 840, 10003, 10, 2])
Z3950_USR_DATETIME = [1, 2, 840, 10003, 10, 6]
Z3950_USR_DATETIME_ov = asn1.OidVal([1, 2, 840, 10003, 10, 6])
Z3950_USR_EDITACTIONQUAL = [1, 2, 840, 10003, 10, 10]
Z3950_USR_EDITACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 10])
Z3950_USR_INFO1 = [1, 2, 840, 10003, 10, 3]
Z3950_USR_INFO1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 3])
Z3950_USR_INSERTACTIONQUAL = [1, 2, 840, 10003, 10, 9]
Z3950_USR_INSERTACTIONQUAL_ov = asn1.OidVal([1, 2, 840, 10003, 10, 9])
Z3950_USR_PRIVATE = [1, 2, 840, 10003, 10, 1000]
Z3950_USR_PRIVATE_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000])
Z3950_USR_PRIVATE_OCLC = [1, 2, 840, 10003, 10, 1000, 17]
Z3950_USR_PRIVATE_OCLC_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17])
Z3950_USR_PRIVATE_OCLC_INFO = [1, 2, 840, 10003, 10, 1000, 17, 1]
Z3950_USR_PRIVATE_OCLC_INFO_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1000, 17, 1])
Z3950_USR_SEARCHRES1 = [1, 2, 840, 10003, 10, 1]
Z3950_USR_SEARCHRES1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 1])
Z3950_USR_SEARCHTERMS1 = [1, 2, 840, 10003, 10, 4]
Z3950_USR_SEARCHTERMS1_ov = asn1.OidVal([1, 2, 840, 10003, 10, 4])
Z3950_USR_SEARCHTERMS2 = [1, 2, 840, 10003, 10, 5]
Z3950_USR_SEARCHTERMS2_ov = asn1.OidVal([1, 2, 840, 10003, 10, 5])
Z3950_VAR = [1, 2, 840, 10003, 12]
Z3950_VAR_ov = asn1.OidVal([1, 2, 840, 10003, 12])
Z3950_VAR_VARIANT1 = [1, 2, 840, 10003, 12, 1]
Z3950_VAR_VARIANT1_ov = asn1.OidVal([1, 2, 840, 10003, 12, 1])
| 0 | 0 | 0 |
a251261e5ebd948408f074b6e9deb101c19884c9 | 6,878 | py | Python | Example_V-REP-YouBot-Demo-master_chauby/7_Demo_youBotPickAndPlace/code/vrep/VREP_RemoteAPIs/sendSimultan2MovementSequences-mov.py | dongilc/symbolic_modern_robotics | 2439f26cdc29e73f12a82c7c1e0ac8b4ddc5a353 | [
"BSD-2-Clause"
] | 2 | 2022-03-14T16:01:53.000Z | 2022-03-25T09:14:21.000Z | Example_V-REP-YouBot-Demo-master_chauby/7_Demo_youBotPickAndPlace/code/vrep/VREP_RemoteAPIs/sendSimultan2MovementSequences-mov.py | dongilc/symbolic_modern_robotics | 2439f26cdc29e73f12a82c7c1e0ac8b4ddc5a353 | [
"BSD-2-Clause"
] | null | null | null | Example_V-REP-YouBot-Demo-master_chauby/7_Demo_youBotPickAndPlace/code/vrep/VREP_RemoteAPIs/sendSimultan2MovementSequences-mov.py | dongilc/symbolic_modern_robotics | 2439f26cdc29e73f12a82c7c1e0ac8b4ddc5a353 | [
"BSD-2-Clause"
] | 1 | 2022-03-25T07:16:57.000Z | 2022-03-25T07:16:57.000Z | # Make sure to have CoppeliaSim running, with followig scene loaded:
#
# scenes/movementViaRemoteApi.ttt
#
# Do not launch simulation, then run this script
#
# The client side (i.e. this script) depends on:
#
# sim.py, simConst.py, and the remote API library available
# in programming/remoteApiBindings/lib/lib
# Additionally you will need the python math and msgpack modules
try:
import sim
except:
print ('--------------------------------------------------------------')
print ('"sim.py" could not be imported. This means very probably that')
print ('either "sim.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "sim.py"')
print ('--------------------------------------------------------------')
print ('')
import math
import msgpack
with Client() as client:
print("running")
if client.id!=-1:
print ('Connected to remote API server')
targetArm1='threadedBlueArm'
targetArm2='nonThreadedRedArm'
client.stringSignalName1=targetArm1+'_executedMovId'
client.stringSignalName2=targetArm2+'_executedMovId'
# Start streaming client.stringSignalName1 and client.stringSignalName2 string signals:
sim.simxGetStringSignal(client.id,client.stringSignalName1,sim.simx_opmode_streaming)
sim.simxGetStringSignal(client.id,client.stringSignalName2,sim.simx_opmode_streaming)
# Set-up some movement variables:
mVel=100*math.pi/180
mAccel=150*math.pi/180
maxVel=[mVel,mVel,mVel,mVel,mVel,mVel]
maxAccel=[mAccel,mAccel,mAccel,mAccel,mAccel,mAccel]
targetVel=[0,0,0,0,0,0]
# Start simulation:
sim.simxStartSimulation(client.id,sim.simx_opmode_blocking)
# Wait until ready:
waitForMovementExecuted1('ready')
waitForMovementExecuted1('ready')
# Send first movement sequence:
targetConfig=[90*math.pi/180,90*math.pi/180,-90*math.pi/180,90*math.pi/180,90*math.pi/180,90*math.pi/180]
movementData={"id":"movSeq1","type":"mov","targetConfig":targetConfig,"targetVel":targetVel,"maxVel":maxVel,"maxAccel":maxAccel}
packedMovementData=msgpack.packb(movementData)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
# Execute first movement sequence:
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq1',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq1',sim.simx_opmode_oneshot)
# Wait until above movement sequence finished executing:
waitForMovementExecuted1('movSeq1')
waitForMovementExecuted1('movSeq1')
# Send second and third movement sequence, where third one should execute immediately after the second one:
targetConfig=[-90*math.pi/180,45*math.pi/180,90*math.pi/180,135*math.pi/180,90*math.pi/180,90*math.pi/180]
targetVel=[-60*math.pi/180,-20*math.pi/180,0,0,0,0]
movementData={"id":"movSeq2","type":"mov","targetConfig":targetConfig,"targetVel":targetVel,"maxVel":maxVel,"maxAccel":maxAccel}
packedMovementData=msgpack.packb(movementData)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
targetConfig=[0,0,0,0,0,0]
targetVel=[0,0,0,0,0,0]
movementData={"id":"movSeq3","type":"mov","targetConfig":targetConfig,"targetVel":targetVel,"maxVel":maxVel,"maxAccel":maxAccel}
packedMovementData=msgpack.packb(movementData)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
# Execute second and third movement sequence:
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq2',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq2',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq3',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq3',sim.simx_opmode_oneshot)
# Wait until above 2 movement sequences finished executing:
waitForMovementExecuted1('movSeq3')
waitForMovementExecuted1('movSeq3')
sim.simxStopSimulation(client.id,sim.simx_opmode_blocking)
sim.simxGetStringSignal(client.id,client.stringSignalName1,sim.simx_opmode_discontinue)
sim.simxGetStringSignal(client.id,client.stringSignalName2,sim.simx_opmode_discontinue)
sim.simxGetPingTime(client.id)
# Now close the connection to CoppeliaSim:
sim.simxFinish(client.id)
else:
print ('Failed connecting to remote API server')
| 52.907692 | 172 | 0.708782 | # Make sure to have CoppeliaSim running, with followig scene loaded:
#
# scenes/movementViaRemoteApi.ttt
#
# Do not launch simulation, then run this script
#
# The client side (i.e. this script) depends on:
#
# sim.py, simConst.py, and the remote API library available
# in programming/remoteApiBindings/lib/lib
# Additionally you will need the python math and msgpack modules
try:
import sim
except:
print ('--------------------------------------------------------------')
print ('"sim.py" could not be imported. This means very probably that')
print ('either "sim.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "sim.py"')
print ('--------------------------------------------------------------')
print ('')
import math
import msgpack
class Client:
def __enter__(self):
self.executedMovId1='notReady'
self.executedMovId2='notReady'
sim.simxFinish(-1) # just in case, close all opened connections
self.id=sim.simxStart('127.0.0.1',19997,True,True,5000,5) # Connect to CoppeliaSim
return self
def __exit__(self,*err):
sim.simxFinish(-1)
print ('Program ended')
with Client() as client:
print("running")
if client.id!=-1:
print ('Connected to remote API server')
targetArm1='threadedBlueArm'
targetArm2='nonThreadedRedArm'
client.stringSignalName1=targetArm1+'_executedMovId'
client.stringSignalName2=targetArm2+'_executedMovId'
def waitForMovementExecuted1(id):
while client.executedMovId1!=id:
retCode,s=sim.simxGetStringSignal(client.id,client.stringSignalName1,sim.simx_opmode_buffer)
if retCode==sim.simx_return_ok:
client.executedMovId1=s
def waitForMovementExecuted2(id):
while client.executedMovId2!=id:
retCode,s=sim.simxGetStringSignal(client.id,client.stringSignalName2,sim.simx_opmode_buffer)
if retCode==sim.simx_return_ok:
client.executedMovId2=s
# Start streaming client.stringSignalName1 and client.stringSignalName2 string signals:
sim.simxGetStringSignal(client.id,client.stringSignalName1,sim.simx_opmode_streaming)
sim.simxGetStringSignal(client.id,client.stringSignalName2,sim.simx_opmode_streaming)
# Set-up some movement variables:
mVel=100*math.pi/180
mAccel=150*math.pi/180
maxVel=[mVel,mVel,mVel,mVel,mVel,mVel]
maxAccel=[mAccel,mAccel,mAccel,mAccel,mAccel,mAccel]
targetVel=[0,0,0,0,0,0]
# Start simulation:
sim.simxStartSimulation(client.id,sim.simx_opmode_blocking)
# Wait until ready:
waitForMovementExecuted1('ready')
waitForMovementExecuted1('ready')
# Send first movement sequence:
targetConfig=[90*math.pi/180,90*math.pi/180,-90*math.pi/180,90*math.pi/180,90*math.pi/180,90*math.pi/180]
movementData={"id":"movSeq1","type":"mov","targetConfig":targetConfig,"targetVel":targetVel,"maxVel":maxVel,"maxAccel":maxAccel}
packedMovementData=msgpack.packb(movementData)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
# Execute first movement sequence:
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq1',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq1',sim.simx_opmode_oneshot)
# Wait until above movement sequence finished executing:
waitForMovementExecuted1('movSeq1')
waitForMovementExecuted1('movSeq1')
# Send second and third movement sequence, where third one should execute immediately after the second one:
targetConfig=[-90*math.pi/180,45*math.pi/180,90*math.pi/180,135*math.pi/180,90*math.pi/180,90*math.pi/180]
targetVel=[-60*math.pi/180,-20*math.pi/180,0,0,0,0]
movementData={"id":"movSeq2","type":"mov","targetConfig":targetConfig,"targetVel":targetVel,"maxVel":maxVel,"maxAccel":maxAccel}
packedMovementData=msgpack.packb(movementData)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
targetConfig=[0,0,0,0,0,0]
targetVel=[0,0,0,0,0,0]
movementData={"id":"movSeq3","type":"mov","targetConfig":targetConfig,"targetVel":targetVel,"maxVel":maxVel,"maxAccel":maxAccel}
packedMovementData=msgpack.packb(movementData)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiMovementDataFunction',[],[],[],packedMovementData,sim.simx_opmode_oneshot)
# Execute second and third movement sequence:
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq2',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq2',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm1,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq3',sim.simx_opmode_oneshot)
sim.simxCallScriptFunction(client.id,targetArm2,sim.sim_scripttype_childscript,'legacyRapiExecuteMovement',[],[],[],'movSeq3',sim.simx_opmode_oneshot)
# Wait until above 2 movement sequences finished executing:
waitForMovementExecuted1('movSeq3')
waitForMovementExecuted1('movSeq3')
sim.simxStopSimulation(client.id,sim.simx_opmode_blocking)
sim.simxGetStringSignal(client.id,client.stringSignalName1,sim.simx_opmode_discontinue)
sim.simxGetStringSignal(client.id,client.stringSignalName2,sim.simx_opmode_discontinue)
sim.simxGetPingTime(client.id)
# Now close the connection to CoppeliaSim:
sim.simxFinish(client.id)
else:
print ('Failed connecting to remote API server')
| 838 | -8 | 142 |
5365b1a75e6e4065d922ee76ed2d551b3103a993 | 1,187 | py | Python | MulticoreTSNE/test/test_installed.py | vu-minh/Multicore-TSNE | 5a8c51d3357c2df2b0f414bf49cb3e7c57a2bfb6 | [
"BSD-3-Clause"
] | null | null | null | MulticoreTSNE/test/test_installed.py | vu-minh/Multicore-TSNE | 5a8c51d3357c2df2b0f414bf49cb3e7c57a2bfb6 | [
"BSD-3-Clause"
] | null | null | null | MulticoreTSNE/test/test_installed.py | vu-minh/Multicore-TSNE | 5a8c51d3357c2df2b0f414bf49cb3e7c57a2bfb6 | [
"BSD-3-Clause"
] | null | null | null | # test modified version of MulticoreTSNE
from MulticoreTSNE import MulticoreTSNE as TSNE
from sklearn.datasets import load_digits
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
from matplotlib import pyplot as plt
import multiprocessing
print(TSNE.__version__)
ncpu = multiprocessing.cpu_count()
ncpu_used = int(ncpu * 0.75)
X, y = load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
tsne = TSNE(
n_jobs=ncpu_used,
n_iter_without_progress=100,
min_grad_norm=1e-04,
perplexity=65,
verbose=1,
)
Z = tsne.fit_transform(X)
print("KL loss", tsne.kl_divergence_)
progress_errors = tsne.progress_errors_
progress_errors = progress_errors[np.where(progress_errors > 0)]
print("Loss by iter", progress_errors)
plt.figure(figsize=(5, 2))
plt.plot(progress_errors)
plt.savefig("temp_test_installed_loss.png")
error_per_point = tsne.error_per_point_
sizes = (
MinMaxScaler(feature_range=(32, 160))
.fit_transform(error_per_point.reshape(-1, 1))
.reshape(1, -1)
)
plt.figure(figsize=(6, 6))
plt.scatter(Z[:, 0], Z[:, 1], c=y, s=sizes, alpha=0.4, cmap="jet")
plt.savefig("temp_test_installed_scatter.png")
| 23.74 | 66 | 0.755687 | # test modified version of MulticoreTSNE
from MulticoreTSNE import MulticoreTSNE as TSNE
from sklearn.datasets import load_digits
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import numpy as np
from matplotlib import pyplot as plt
import multiprocessing
print(TSNE.__version__)
ncpu = multiprocessing.cpu_count()
ncpu_used = int(ncpu * 0.75)
X, y = load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
tsne = TSNE(
n_jobs=ncpu_used,
n_iter_without_progress=100,
min_grad_norm=1e-04,
perplexity=65,
verbose=1,
)
Z = tsne.fit_transform(X)
print("KL loss", tsne.kl_divergence_)
progress_errors = tsne.progress_errors_
progress_errors = progress_errors[np.where(progress_errors > 0)]
print("Loss by iter", progress_errors)
plt.figure(figsize=(5, 2))
plt.plot(progress_errors)
plt.savefig("temp_test_installed_loss.png")
error_per_point = tsne.error_per_point_
sizes = (
MinMaxScaler(feature_range=(32, 160))
.fit_transform(error_per_point.reshape(-1, 1))
.reshape(1, -1)
)
plt.figure(figsize=(6, 6))
plt.scatter(Z[:, 0], Z[:, 1], c=y, s=sizes, alpha=0.4, cmap="jet")
plt.savefig("temp_test_installed_scatter.png")
| 0 | 0 | 0 |
9f7dcf344055bbecc074837cdfebbbfe22fb18b8 | 2,364 | py | Python | pythonlearn/Database/TestDBTool.py | yc19890920/Learn | 3990e75b469225ba7b430539ef9a16abe89eb863 | [
"Apache-2.0"
] | 1 | 2021-01-11T06:30:44.000Z | 2021-01-11T06:30:44.000Z | pythonlearn/Database/TestDBTool.py | yc19890920/Learn | 3990e75b469225ba7b430539ef9a16abe89eb863 | [
"Apache-2.0"
] | 23 | 2020-02-12T02:35:49.000Z | 2022-02-11T03:45:40.000Z | pythonlearn/Database/TestDBTool.py | yc19890920/Learn | 3990e75b469225ba7b430539ef9a16abe89eb863 | [
"Apache-2.0"
] | 2 | 2020-04-08T15:39:46.000Z | 2020-10-10T10:13:09.000Z | #-*- coding: utf8 -*-
import sys
from lib import SimpleDBTool, DBTool, DBFactory
if __name__ == "__main__":
sql = 'SELECT username FROM core_customer Limit 1;'
res = SimpleDBTool.query('edm_web', sql)
print res
# (('test',),)
res = SimpleDBTool.redis.incr('test:123', 2)
print res
# 2
###########################################
dbkit = DBTool.DBToolKit()
res = dbkit.init_pool('edm_web')
if not res: sys.exit(1)
res = do_query('edm_web', sql)
print res
# (('test',),)
redis = dbkit.get_redis_connection()
res = redis.incr('test:123', 2)
print res
# 4
res = dbkit.init_mongo()
res = mongo_find_one({"addr": '1@qq.com'})
print res
# None
#############################################
mysql_edm_web_obj = DBFactory.getDBObject('mysql', 'edm_web')
sql = 'SELECT username FROM core_customer Limit 1;'
res = mysql_edm_web_obj.query(sql)
print res
pgsql_edm_web_obj = DBFactory.getDBObject('postgresql', 'mail_relay')
sql = 'SELECT username FROM auth_user Limit 1;'
res = pgsql_edm_web_obj.query(sql)
print res
# [('test22',)]
mongo_obj = DBFactory.getDBObject('mongo', 'mongo')
mongo = mongo_obj.get_mongo_collection('mail')
res = mongo.find_one({"addr": '1@qq.com'})
print res
# None
#############################################
pgsql_edm_web_obj2 = DBFactory.getDBObject('postgresql', 'mail_relay')
sql = 'SELECT username FROM auth_user Limit 1;'
res = pgsql_edm_web_obj2.query(sql)
print res
# [('test22',)]
print id(pgsql_edm_web_obj), id(pgsql_edm_web_obj2)
# 140476245351888 140476245351888
| 27.172414 | 75 | 0.576565 | #-*- coding: utf8 -*-
import sys
from lib import SimpleDBTool, DBTool, DBFactory
def do_query(dbtype, sql):
try:
res = dbkit.query(dbtype, sql)
except BaseException, e:
res=dbkit.process_exception(dbtype, e)
if not res: return None
res=dbkit.query(dbtype, sql)
return res
def mongo_find_one(vals=None):
mongo = dbkit.get_mongo_collection('mail')
try :
res = mongo.find_one(vals)
except BaseException as e:
res=dbkit.process_exception('mongo', e)
if not res: return None
mongo = dbkit.get_mongo_collection('mail')
res = mongo.find_one({"addr": '1@qq.com'})
return res
if __name__ == "__main__":
sql = 'SELECT username FROM core_customer Limit 1;'
res = SimpleDBTool.query('edm_web', sql)
print res
# (('test',),)
res = SimpleDBTool.redis.incr('test:123', 2)
print res
# 2
###########################################
dbkit = DBTool.DBToolKit()
res = dbkit.init_pool('edm_web')
if not res: sys.exit(1)
res = do_query('edm_web', sql)
print res
# (('test',),)
redis = dbkit.get_redis_connection()
res = redis.incr('test:123', 2)
print res
# 4
res = dbkit.init_mongo()
res = mongo_find_one({"addr": '1@qq.com'})
print res
# None
#############################################
mysql_edm_web_obj = DBFactory.getDBObject('mysql', 'edm_web')
sql = 'SELECT username FROM core_customer Limit 1;'
res = mysql_edm_web_obj.query(sql)
print res
pgsql_edm_web_obj = DBFactory.getDBObject('postgresql', 'mail_relay')
sql = 'SELECT username FROM auth_user Limit 1;'
res = pgsql_edm_web_obj.query(sql)
print res
# [('test22',)]
mongo_obj = DBFactory.getDBObject('mongo', 'mongo')
mongo = mongo_obj.get_mongo_collection('mail')
res = mongo.find_one({"addr": '1@qq.com'})
print res
# None
#############################################
pgsql_edm_web_obj2 = DBFactory.getDBObject('postgresql', 'mail_relay')
sql = 'SELECT username FROM auth_user Limit 1;'
res = pgsql_edm_web_obj2.query(sql)
print res
# [('test22',)]
print id(pgsql_edm_web_obj), id(pgsql_edm_web_obj2)
# 140476245351888 140476245351888
| 558 | 0 | 50 |
10471533365991c020b0b3acfb10c0b0dd460be2 | 367 | py | Python | tests/scraper/urls.py | c3pko/GovScrape | ac58e167539802f7f3b45cdfd4e7a2d99f66bc45 | [
"BSD-3-Clause"
] | null | null | null | tests/scraper/urls.py | c3pko/GovScrape | ac58e167539802f7f3b45cdfd4e7a2d99f66bc45 | [
"BSD-3-Clause"
] | null | null | null | tests/scraper/urls.py | c3pko/GovScrape | ac58e167539802f7f3b45cdfd4e7a2d99f66bc45 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^request_type_method/(?P<test_case>\w+).html$', views.request_type_method),
url(r'^header_body_data/(?P<test_case>\w+).html$', views.header_body_data),
url(r'^form_data/(?P<test_case>\w+).html$', views.form_data),
url(r'^cookies/(?P<test_case>\w+).html$', views.cookies),
] | 33.363636 | 85 | 0.683924 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^request_type_method/(?P<test_case>\w+).html$', views.request_type_method),
url(r'^header_body_data/(?P<test_case>\w+).html$', views.header_body_data),
url(r'^form_data/(?P<test_case>\w+).html$', views.form_data),
url(r'^cookies/(?P<test_case>\w+).html$', views.cookies),
] | 0 | 0 | 0 |
d9b31f6f9fcb19fdd11385ad9feb1454cbacfa59 | 1,217 | py | Python | src/speed.py | joseph-mccarthy/internet-monitor | ad5fefd355588777609c350f4dd1876ffc076d71 | [
"MIT"
] | null | null | null | src/speed.py | joseph-mccarthy/internet-monitor | ad5fefd355588777609c350f4dd1876ffc076d71 | [
"MIT"
] | 3 | 2022-02-25T12:32:00.000Z | 2022-02-25T12:32:43.000Z | src/speed.py | joseph-mccarthy/internet-monitor | ad5fefd355588777609c350f4dd1876ffc076d71 | [
"MIT"
] | null | null | null | from datetime import datetime
import subprocess
from time import sleep
from models.result import Result
import json
from database import Base, db_session, engine
from schedule import every, repeat, run_pending
@repeat(every(30).minutes)
if __name__ == '__main__':
run_speed_test()
while True:
run_pending()
sleep(1)
| 23.862745 | 113 | 0.695974 | from datetime import datetime
import subprocess
from time import sleep
from models.result import Result
import json
from database import Base, db_session, engine
from schedule import every, repeat, run_pending
def init_db():
Base.metadata.create_all(bind=engine)
@repeat(every(30).minutes)
def run_speed_test():
init_db()
json_data = speed_test_cli()
result = populate_model(json_data)
db_session.add(result)
db_session.commit()
def populate_model(json_data):
result: Result = Result()
result.download = json_data['download']['bandwidth']
result.upload = json_data['upload']['bandwidth']
result.ping = json_data['ping']['latency']
result.timestamp = datetime.now()
return result
def speed_test_cli():
print_message("Speed Test Started")
response = subprocess.Popen(
'speedtest --f=json --accept-license --accept-gdpr -a', shell=True, stdout=subprocess.PIPE).stdout.read()
json_data = json.loads(response)
return json_data
def print_message(message: str):
print(datetime.now().strftime("%d/%m/%Y-%H:%M:%S") + "\t" + message)
if __name__ == '__main__':
run_speed_test()
while True:
run_pending()
sleep(1)
| 755 | 0 | 114 |
a8cc2e0926234d102d417243df4da55e0905feb0 | 341 | py | Python | commands/deviot_change_build_folder.py | tablatronix/Deviot | ac77241301d607304b2b4d07adad3a707cfadca2 | [
"Apache-2.0"
] | null | null | null | commands/deviot_change_build_folder.py | tablatronix/Deviot | ac77241301d607304b2b4d07adad3a707cfadca2 | [
"Apache-2.0"
] | null | null | null | commands/deviot_change_build_folder.py | tablatronix/Deviot | ac77241301d607304b2b4d07adad3a707cfadca2 | [
"Apache-2.0"
] | null | null | null | from sublime_plugin import WindowCommand
from ..libraries.tools import save_setting
from ..libraries.paths import folder_explorer
class DeviotChangeBuildFolderCommand(WindowCommand):
"""
Adds extra libraries folder path from the settings
""" | 31 | 66 | 0.771261 | from sublime_plugin import WindowCommand
from ..libraries.tools import save_setting
from ..libraries.paths import folder_explorer
class DeviotChangeBuildFolderCommand(WindowCommand):
"""
Adds extra libraries folder path from the settings
"""
def run(self):
folder_explorer(key='build_folder', callback=save_setting) | 60 | 0 | 27 |
138b3eb40e45f653eae794f17af495268e2e55c0 | 7,344 | py | Python | ss_baselines/savi/models/visual_cnn.py | tynguyen/sound-spaces | b196f3a36b4076752400cbf186e9cf2e160cc3c2 | [
"CC-BY-4.0"
] | 171 | 2020-08-21T06:45:19.000Z | 2022-03-30T03:52:15.000Z | ss_baselines/savi/models/visual_cnn.py | tynguyen/sound-spaces | b196f3a36b4076752400cbf186e9cf2e160cc3c2 | [
"CC-BY-4.0"
] | 69 | 2020-09-19T02:44:10.000Z | 2022-03-17T03:57:55.000Z | ss_baselines/savi/models/visual_cnn.py | tynguyen/sound-spaces | b196f3a36b4076752400cbf186e9cf2e160cc3c2 | [
"CC-BY-4.0"
] | 41 | 2020-08-25T06:58:24.000Z | 2022-03-23T05:04:47.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
from ss_baselines.common.utils import Flatten
from habitat_sim.utils.common import d3_40_colors_rgb
class VisualCNN(nn.Module):
r"""A Simple 3-Conv CNN followed by a fully connected layer
Takes in observations and produces an embedding of the rgb and/or depth components
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
"""
def _conv_output_dim(
self, dimension, padding, dilation, kernel_size, stride
):
r"""Calculates the output height and width based on the input
height and width to the convolution layer.
ref: https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
"""
assert len(dimension) == 2
out_dimension = []
for i in range(len(dimension)):
out_dimension.append(
int(
np.floor(
(
(
dimension[i]
+ 2 * padding[i]
- dilation[i] * (kernel_size[i] - 1)
- 1
)
/ stride[i]
)
+ 1
)
)
)
return tuple(out_dimension)
@property
@property
@property
@property
def convert_semantics_to_rgb(semantics):
r"""Converts semantic IDs to RGB images.
"""
semantics = semantics.long() % 40
mapping_rgb = torch.from_numpy(d3_40_colors_rgb).to(semantics.device)
semantics_r = torch.take(mapping_rgb[:, 0], semantics)
semantics_g = torch.take(mapping_rgb[:, 1], semantics)
semantics_b = torch.take(mapping_rgb[:, 2], semantics)
semantics_rgb = torch.stack([semantics_r, semantics_g, semantics_b], -1)
return semantics_rgb | 36.537313 | 108 | 0.564815 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
from ss_baselines.common.utils import Flatten
from habitat_sim.utils.common import d3_40_colors_rgb
class VisualCNN(nn.Module):
r"""A Simple 3-Conv CNN followed by a fully connected layer
Takes in observations and produces an embedding of the rgb and/or depth components
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
"""
def __init__(self, observation_space, output_size, extra_rgb=False):
super().__init__()
self._output_size = output_size
if "rgb" in observation_space.spaces and not extra_rgb:
self._n_input_rgb = observation_space.spaces["rgb"].shape[2]
else:
self._n_input_rgb = 0
if "depth" in observation_space.spaces:
self._n_input_depth = observation_space.spaces["depth"].shape[2]
else:
self._n_input_depth = 0
if "semantic" in observation_space.spaces:
self._n_input_semantic = 6
else:
self._n_input_semantic = 0
# kernel size for different CNN layers
self._cnn_layers_kernel_size = [(8, 8), (4, 4), (3, 3)]
# strides for different CNN layers
self._cnn_layers_stride = [(4, 4), (2, 2), (2, 2)]
if self._n_input_rgb > 0:
cnn_dims = np.array(
observation_space.spaces["rgb"].shape[:2], dtype=np.float32
)
elif self._n_input_depth > 0:
cnn_dims = np.array(
observation_space.spaces["depth"].shape[:2], dtype=np.float32
)
elif self._n_input_semantic > 0:
cnn_dims = np.array(
observation_space.spaces["semantic"].shape[:2], dtype=np.float32
)
if self.is_blind:
self.cnn = nn.Sequential()
else:
self._input_shape = (self._n_input_rgb + self._n_input_depth + self._n_input_semantic,
int(cnn_dims[0]), int(cnn_dims[1]))
for kernel_size, stride in zip(
self._cnn_layers_kernel_size, self._cnn_layers_stride
):
cnn_dims = self._conv_output_dim(
dimension=cnn_dims,
padding=np.array([0, 0], dtype=np.float32),
dilation=np.array([1, 1], dtype=np.float32),
kernel_size=np.array(kernel_size, dtype=np.float32),
stride=np.array(stride, dtype=np.float32),
)
self.cnn = nn.Sequential(
nn.Conv2d(
in_channels=self._n_input_rgb + self._n_input_depth + self._n_input_semantic,
out_channels=32,
kernel_size=self._cnn_layers_kernel_size[0],
stride=self._cnn_layers_stride[0],
),
nn.ReLU(True),
nn.Conv2d(
in_channels=32,
out_channels=64,
kernel_size=self._cnn_layers_kernel_size[1],
stride=self._cnn_layers_stride[1],
),
nn.ReLU(True),
nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=self._cnn_layers_kernel_size[2],
stride=self._cnn_layers_stride[2],
),
# nn.ReLU(True),
Flatten(),
nn.Linear(64 * cnn_dims[0] * cnn_dims[1], output_size),
nn.ReLU(True),
)
self.layer_init()
def _conv_output_dim(
self, dimension, padding, dilation, kernel_size, stride
):
r"""Calculates the output height and width based on the input
height and width to the convolution layer.
ref: https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
"""
assert len(dimension) == 2
out_dimension = []
for i in range(len(dimension)):
out_dimension.append(
int(
np.floor(
(
(
dimension[i]
+ 2 * padding[i]
- dilation[i] * (kernel_size[i] - 1)
- 1
)
/ stride[i]
)
+ 1
)
)
)
return tuple(out_dimension)
def layer_init(self):
for layer in self.cnn:
if isinstance(layer, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(
layer.weight, nn.init.calculate_gain("relu")
)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
@property
def is_blind(self):
return self._n_input_rgb + self._n_input_depth + self._n_input_semantic == 0
@property
def input_shape(self):
return self._input_shape
@property
def output_shape(self):
return 1, self._output_size
@property
def feature_dims(self):
return self._output_size
def forward(self, observations):
cnn_input = []
if self._n_input_rgb > 0:
rgb_observations = observations["rgb"]
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
rgb_observations = rgb_observations.permute(0, 3, 1, 2)
rgb_observations = rgb_observations / 255.0 # normalize RGB
cnn_input.append(rgb_observations)
if self._n_input_depth > 0:
depth_observations = observations["depth"]
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
depth_observations = depth_observations.permute(0, 3, 1, 2)
cnn_input.append(depth_observations)
if self._n_input_semantic > 0:
semantic_observations = convert_semantics_to_rgb(observations["semantic"]).float()
semantic_object_observations = observations["semantic_object"].float()
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
semantic_observations = torch.cat([semantic_observations, semantic_object_observations], dim=-1)
semantic_observations = semantic_observations.permute(0, 3, 1, 2) / 255.0
cnn_input.append(semantic_observations)
cnn_input = torch.cat(cnn_input, dim=1)
return self.cnn(cnn_input)
def convert_semantics_to_rgb(semantics):
r"""Converts semantic IDs to RGB images.
"""
semantics = semantics.long() % 40
mapping_rgb = torch.from_numpy(d3_40_colors_rgb).to(semantics.device)
semantics_r = torch.take(mapping_rgb[:, 0], semantics)
semantics_g = torch.take(mapping_rgb[:, 1], semantics)
semantics_b = torch.take(mapping_rgb[:, 2], semantics)
semantics_rgb = torch.stack([semantics_r, semantics_g, semantics_b], -1)
return semantics_rgb | 4,963 | 0 | 185 |
30c03032c16fd11c0e154a4288113ee4121aa696 | 11,928 | py | Python | workflows/utils.py | blueicepl/django-workflows | a8d0c2d3c56644c45ede714391d51c78d0bb1660 | [
"BSD-3-Clause"
] | null | null | null | workflows/utils.py | blueicepl/django-workflows | a8d0c2d3c56644c45ede714391d51c78d0bb1660 | [
"BSD-3-Clause"
] | null | null | null | workflows/utils.py | blueicepl/django-workflows | a8d0c2d3c56644c45ede714391d51c78d0bb1660 | [
"BSD-3-Clause"
] | null | null | null | # django imports
from django.contrib.contenttypes.models import ContentType
# permissions imports
import permissions.utils
from permissions.models import Role
# workflows imports
from workflows.conf import settings
from workflows.models import (StateInheritanceBlock, State, StateObjectHistory, StateObjectRelation,
StatePermissionRelation, Transition, Workflow, WorkflowModelRelation,
WorkflowObjectRelation, WorkflowPermissionRelation)
from workflows.signals import before_transition, after_transition, before_state_change, after_state_change
def get_objects_for_workflow(workflow):
"""Returns all objects which have passed workflow.
**Parameters:**
workflow
The workflow for which the objects are returned. Can be a Workflow
instance or a string with the workflow name.
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return []
return workflow.get_objects()
def remove_workflow(ctype_or_obj):
"""Removes the workflow from the passed content type or object. After this
function has been called the content type or object has no workflow
anymore.
If ctype_or_obj is an object the workflow is removed from the object not
from the belonging content type.
If ctype_or_obj is an content type the workflow is removed from the
content type not from instances of the content type (if they have an own
workflow)
ctype_or_obj
The content type or the object to which the passed workflow should be
set. Can be either a ContentType instance or any LFC Django model
instance.
"""
if isinstance(ctype_or_obj, ContentType):
remove_workflow_from_model(ctype_or_obj)
else:
remove_workflow_from_object(ctype_or_obj)
def remove_workflow_from_model(ctype):
"""Removes the workflow from passed content type. After this function has
been called the content type has no workflow anymore (the instances might
have own ones).
ctype
The content type from which the passed workflow should be removed.
Must be a ContentType instance.
"""
# First delete all states, inheritance blocks and permissions from ctype's
# instances which have passed workflow.
workflow = get_workflow_for_model(ctype)
for obj in get_objects_for_workflow(workflow):
# Only take care of the given ctype.
obj_ctype = ContentType.objects.get_for_model(obj)
if ctype != obj_ctype:
continue
try:
ctype = ContentType.objects.get_for_model(obj)
sor = StateObjectRelation.objects.get(content_id=obj.id, content_type=ctype)
except StateObjectRelation.DoesNotExist:
pass
else:
sor.delete()
# Reset all permissions
permissions.utils.reset(obj)
try:
wmr = WorkflowModelRelation.objects.get(content_type=ctype)
except WorkflowModelRelation.DoesNotExist:
pass
else:
wmr.delete()
def remove_workflow_from_object(obj):
"""Removes the workflow from the passed object. After this function has
been called the object has no *own* workflow anymore (it might have one
via its content type).
obj
The object from which the passed workflow should be set. Must be a
Django Model instance.
"""
try:
wor = WorkflowObjectRelation.objects.get(content_type=obj)
except WorkflowObjectRelation.DoesNotExist:
pass
else:
wor.delete()
# Reset all permissions
permissions.utils.reset(obj)
# Set initial of object's content types workflow (if there is one)
set_initial_state(obj)
def set_workflow(ctype_or_obj, workflow):
"""Sets the workflow for passed content type or object. See the specific
methods for more information.
**Parameters:**
workflow
The workflow which should be set to the object or model.
ctype_or_obj
The content type or the object to which the passed workflow should be
set. Can be either a ContentType instance or any Django model
instance.
"""
return workflow.set_to(ctype_or_obj)
def set_workflow_for_object(obj, workflow):
"""Sets the passed workflow to the passed object.
If the object has already the given workflow nothing happens. Otherwise
the object gets the passed workflow and the state is set to the workflow's
initial state.
**Parameters:**
workflow
The workflow which should be set to the object. Can be a Workflow
instance or a string with the workflow name.
obj
The object which gets the passed workflow.
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return False
workflow.set_to_object(obj)
def set_workflow_for_model(ctype, workflow):
"""Sets the passed workflow to the passed content type. If the content
type has already an assigned workflow the workflow is overwritten.
The objects which had the old workflow must updated explicitely.
**Parameters:**
workflow
The workflow which should be set to passend content type. Must be a
Workflow instance.
ctype
The content type to which the passed workflow should be assigned. Can
be any Django model instance
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return False
workflow.set_to_model(ctype)
def get_workflow(obj):
"""Returns the workflow for the passed object. It takes it either from
the passed object or - if the object doesn't have a workflow - from the
passed object's ContentType.
**Parameters:**
object
The object for which the workflow should be returend. Can be any
Django model instance.
"""
workflow = get_workflow_for_object(obj)
if workflow is not None:
return workflow
ctype = ContentType.objects.get_for_model(obj)
return get_workflow_for_model(ctype)
def get_workflow_for_object(obj):
"""Returns the workflow for the passed object.
**Parameters:**
obj
The object for which the workflow should be returned. Can be any
Django model instance.
"""
try:
ctype = ContentType.objects.get_for_model(obj)
wor = WorkflowObjectRelation.objects.get(content_id=obj.id, content_type=ctype)
except WorkflowObjectRelation.DoesNotExist:
return None
else:
return wor.workflow
def get_workflow_for_model(ctype):
"""Returns the workflow for the passed model.
**Parameters:**
ctype
The content type for which the workflow should be returned. Must be
a Django ContentType instance.
"""
if not isinstance(ctype, ContentType):
ctype = ContentType.objects.get_for_model(ctype)
try:
wor = WorkflowModelRelation.objects.get(content_type=ctype)
except WorkflowModelRelation.DoesNotExist:
return None
else:
return wor.workflow
def get_state(obj):
"""Returns the current workflow state for the passed object.
**Parameters:**
obj
The object for which the workflow state should be returned. Can be any
Django model instance.
"""
ctype = ContentType.objects.get_for_model(obj)
try:
sor = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
except StateObjectRelation.DoesNotExist:
return None
else:
return sor.state
def set_state(obj, state):
"""
Sets the state for ``obj`` to ``state`` and updates
the permissions for the object.
**Parameters:**
obj
The object for which the workflow state should be set. Can be any
Django model instance.
state
The state which should be set to the passed object.
"""
ctype = ContentType.objects.get_for_model(obj)
if isinstance(state, basestring):
state = State.objects.get(codename=state)
try:
sor = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
initial_state = sor.state
before_state_change.send(sender=obj, from_state=initial_state, to_state=state)
sor.state = state
sor.save()
if settings.WORKFLOWS_ENABLE_STATE_HISTORY:
StateObjectHistory.objects.create(content_type=ctype, content_id=obj.id, state=state)
after_state_change.send(sender=obj, from_state=initial_state, to_state=state)
except StateObjectRelation.DoesNotExist:
before_state_change.send(sender=obj, from_state=None, to_state=state)
StateObjectRelation.objects.create(content=obj, state=state)
if settings.WORKFLOWS_ENABLE_STATE_HISTORY:
StateObjectHistory.objects.create(content_type=ctype, content_id=obj.id, state=state)
after_state_change.send(sender=obj, from_state=None, to_state=state)
update_permissions(obj)
def set_initial_state(obj):
"""
Sets the workflow initial state to ``obj``.
"""
wf = get_workflow(obj)
if wf is not None:
set_state(obj, wf.get_initial_state())
def get_allowed_transitions(obj, user):
"""Returns all allowed transitions for passed object and user. Takes the
current state of the object into account.
**Parameters:**
obj
The object for which the transitions should be returned.
user
The user for which the transitions are allowed.
"""
state = get_state(obj)
if state is None:
return []
return state.get_allowed_transitions(obj, user)
def do_transition(obj, transition, user):
"""Processes the passed transition to the passed object (if allowed).
"""
if not isinstance(transition, Transition):
try:
transition = Transition.objects.get(codename=transition)
except Transition.DoesNotExist:
return False
transitions = get_allowed_transitions(obj, user)
if transition in transitions:
initial_state = get_state(obj)
before_transition.send(sender=obj, from_state=initial_state, transition=transition, user=user)
if transition.destination is not None and transition.destination != initial_state:
set_state(obj, transition.destination)
after_transition.send(sender=obj, from_state=initial_state, transition=transition, user=user)
return True
else:
return False
def update_permissions(obj):
"""Updates the permissions of the passed object according to the object's
current workflow state.
"""
workflow = get_workflow(obj)
state = get_state(obj)
# Remove all permissions for the workflow
# for role in Role.objects.all():
# for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow):
# permissions.utils.remove_permission(obj, role, wpr.permission)
perms = [wpr.permission for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow)]
permissions.utils.remove_permission(obj, Role.objects.all(), perms)
# Grant permission for the state
for spr in StatePermissionRelation.objects.filter(state=state):
permissions.utils.grant_permission(obj, spr.role, spr.permission)
# Remove all inheritance blocks from the object
# for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow):
# permissions.utils.remove_inheritance_block(obj, wpr.permission)
# Add inheritance blocks of this state to the object
# for sib in StateInheritanceBlock.objects.filter(state=state):
# permissions.utils.add_inheritance_block(obj, sib.permission)
| 32.325203 | 106 | 0.697015 | # django imports
from django.contrib.contenttypes.models import ContentType
# permissions imports
import permissions.utils
from permissions.models import Role
# workflows imports
from workflows.conf import settings
from workflows.models import (StateInheritanceBlock, State, StateObjectHistory, StateObjectRelation,
StatePermissionRelation, Transition, Workflow, WorkflowModelRelation,
WorkflowObjectRelation, WorkflowPermissionRelation)
from workflows.signals import before_transition, after_transition, before_state_change, after_state_change
def get_objects_for_workflow(workflow):
"""Returns all objects which have passed workflow.
**Parameters:**
workflow
The workflow for which the objects are returned. Can be a Workflow
instance or a string with the workflow name.
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return []
return workflow.get_objects()
def remove_workflow(ctype_or_obj):
"""Removes the workflow from the passed content type or object. After this
function has been called the content type or object has no workflow
anymore.
If ctype_or_obj is an object the workflow is removed from the object not
from the belonging content type.
If ctype_or_obj is an content type the workflow is removed from the
content type not from instances of the content type (if they have an own
workflow)
ctype_or_obj
The content type or the object to which the passed workflow should be
set. Can be either a ContentType instance or any LFC Django model
instance.
"""
if isinstance(ctype_or_obj, ContentType):
remove_workflow_from_model(ctype_or_obj)
else:
remove_workflow_from_object(ctype_or_obj)
def remove_workflow_from_model(ctype):
"""Removes the workflow from passed content type. After this function has
been called the content type has no workflow anymore (the instances might
have own ones).
ctype
The content type from which the passed workflow should be removed.
Must be a ContentType instance.
"""
# First delete all states, inheritance blocks and permissions from ctype's
# instances which have passed workflow.
workflow = get_workflow_for_model(ctype)
for obj in get_objects_for_workflow(workflow):
# Only take care of the given ctype.
obj_ctype = ContentType.objects.get_for_model(obj)
if ctype != obj_ctype:
continue
try:
ctype = ContentType.objects.get_for_model(obj)
sor = StateObjectRelation.objects.get(content_id=obj.id, content_type=ctype)
except StateObjectRelation.DoesNotExist:
pass
else:
sor.delete()
# Reset all permissions
permissions.utils.reset(obj)
try:
wmr = WorkflowModelRelation.objects.get(content_type=ctype)
except WorkflowModelRelation.DoesNotExist:
pass
else:
wmr.delete()
def remove_workflow_from_object(obj):
"""Removes the workflow from the passed object. After this function has
been called the object has no *own* workflow anymore (it might have one
via its content type).
obj
The object from which the passed workflow should be set. Must be a
Django Model instance.
"""
try:
wor = WorkflowObjectRelation.objects.get(content_type=obj)
except WorkflowObjectRelation.DoesNotExist:
pass
else:
wor.delete()
# Reset all permissions
permissions.utils.reset(obj)
# Set initial of object's content types workflow (if there is one)
set_initial_state(obj)
def set_workflow(ctype_or_obj, workflow):
"""Sets the workflow for passed content type or object. See the specific
methods for more information.
**Parameters:**
workflow
The workflow which should be set to the object or model.
ctype_or_obj
The content type or the object to which the passed workflow should be
set. Can be either a ContentType instance or any Django model
instance.
"""
return workflow.set_to(ctype_or_obj)
def set_workflow_for_object(obj, workflow):
"""Sets the passed workflow to the passed object.
If the object has already the given workflow nothing happens. Otherwise
the object gets the passed workflow and the state is set to the workflow's
initial state.
**Parameters:**
workflow
The workflow which should be set to the object. Can be a Workflow
instance or a string with the workflow name.
obj
The object which gets the passed workflow.
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return False
workflow.set_to_object(obj)
def set_workflow_for_model(ctype, workflow):
"""Sets the passed workflow to the passed content type. If the content
type has already an assigned workflow the workflow is overwritten.
The objects which had the old workflow must updated explicitely.
**Parameters:**
workflow
The workflow which should be set to passend content type. Must be a
Workflow instance.
ctype
The content type to which the passed workflow should be assigned. Can
be any Django model instance
"""
if not isinstance(workflow, Workflow):
try:
workflow = Workflow.objects.get(name=workflow)
except Workflow.DoesNotExist:
return False
workflow.set_to_model(ctype)
def get_workflow(obj):
"""Returns the workflow for the passed object. It takes it either from
the passed object or - if the object doesn't have a workflow - from the
passed object's ContentType.
**Parameters:**
object
The object for which the workflow should be returend. Can be any
Django model instance.
"""
workflow = get_workflow_for_object(obj)
if workflow is not None:
return workflow
ctype = ContentType.objects.get_for_model(obj)
return get_workflow_for_model(ctype)
def get_workflow_for_object(obj):
"""Returns the workflow for the passed object.
**Parameters:**
obj
The object for which the workflow should be returned. Can be any
Django model instance.
"""
try:
ctype = ContentType.objects.get_for_model(obj)
wor = WorkflowObjectRelation.objects.get(content_id=obj.id, content_type=ctype)
except WorkflowObjectRelation.DoesNotExist:
return None
else:
return wor.workflow
def get_workflow_for_model(ctype):
"""Returns the workflow for the passed model.
**Parameters:**
ctype
The content type for which the workflow should be returned. Must be
a Django ContentType instance.
"""
if not isinstance(ctype, ContentType):
ctype = ContentType.objects.get_for_model(ctype)
try:
wor = WorkflowModelRelation.objects.get(content_type=ctype)
except WorkflowModelRelation.DoesNotExist:
return None
else:
return wor.workflow
def get_state(obj):
"""Returns the current workflow state for the passed object.
**Parameters:**
obj
The object for which the workflow state should be returned. Can be any
Django model instance.
"""
ctype = ContentType.objects.get_for_model(obj)
try:
sor = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
except StateObjectRelation.DoesNotExist:
return None
else:
return sor.state
def set_state(obj, state):
"""
Sets the state for ``obj`` to ``state`` and updates
the permissions for the object.
**Parameters:**
obj
The object for which the workflow state should be set. Can be any
Django model instance.
state
The state which should be set to the passed object.
"""
ctype = ContentType.objects.get_for_model(obj)
if isinstance(state, basestring):
state = State.objects.get(codename=state)
try:
sor = StateObjectRelation.objects.get(content_type=ctype, content_id=obj.id)
initial_state = sor.state
before_state_change.send(sender=obj, from_state=initial_state, to_state=state)
sor.state = state
sor.save()
if settings.WORKFLOWS_ENABLE_STATE_HISTORY:
StateObjectHistory.objects.create(content_type=ctype, content_id=obj.id, state=state)
after_state_change.send(sender=obj, from_state=initial_state, to_state=state)
except StateObjectRelation.DoesNotExist:
before_state_change.send(sender=obj, from_state=None, to_state=state)
StateObjectRelation.objects.create(content=obj, state=state)
if settings.WORKFLOWS_ENABLE_STATE_HISTORY:
StateObjectHistory.objects.create(content_type=ctype, content_id=obj.id, state=state)
after_state_change.send(sender=obj, from_state=None, to_state=state)
update_permissions(obj)
def set_initial_state(obj):
"""
Sets the workflow initial state to ``obj``.
"""
wf = get_workflow(obj)
if wf is not None:
set_state(obj, wf.get_initial_state())
def get_allowed_transitions(obj, user):
"""Returns all allowed transitions for passed object and user. Takes the
current state of the object into account.
**Parameters:**
obj
The object for which the transitions should be returned.
user
The user for which the transitions are allowed.
"""
state = get_state(obj)
if state is None:
return []
return state.get_allowed_transitions(obj, user)
def do_transition(obj, transition, user):
"""Processes the passed transition to the passed object (if allowed).
"""
if not isinstance(transition, Transition):
try:
transition = Transition.objects.get(codename=transition)
except Transition.DoesNotExist:
return False
transitions = get_allowed_transitions(obj, user)
if transition in transitions:
initial_state = get_state(obj)
before_transition.send(sender=obj, from_state=initial_state, transition=transition, user=user)
if transition.destination is not None and transition.destination != initial_state:
set_state(obj, transition.destination)
after_transition.send(sender=obj, from_state=initial_state, transition=transition, user=user)
return True
else:
return False
def update_permissions(obj):
    """Updates the permissions of the passed object according to the object's
    current workflow state.
    """
    workflow = get_workflow(obj)
    state = get_state(obj)
    # Remove all permissions for the workflow
    # (done as one bulk call over all roles/permissions; the original
    # per-role loop is kept below for reference)
    # for role in Role.objects.all():
    #     for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow):
    #         permissions.utils.remove_permission(obj, role, wpr.permission)
    perms = [wpr.permission for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow)]
    permissions.utils.remove_permission(obj, Role.objects.all(), perms)
    # Grant permission for the state
    for spr in StatePermissionRelation.objects.filter(state=state):
        permissions.utils.grant_permission(obj, spr.role, spr.permission)
    # NOTE(review): inheritance-block handling is currently disabled — the code
    # below is intentionally commented out; confirm whether it should be re-enabled.
    # Remove all inheritance blocks from the object
    # for wpr in WorkflowPermissionRelation.objects.filter(workflow=workflow):
    #     permissions.utils.remove_inheritance_block(obj, wpr.permission)
    # Add inheritance blocks of this state to the object
    # for sib in StateInheritanceBlock.objects.filter(state=state):
    #     permissions.utils.add_inheritance_block(obj, sib.permission)
| 0 | 0 | 0 |
33905cbd909f88fcf7117f15ddb32118a33659e0 | 22,354 | py | Python | tests/test_compare_dictconfig_vs_dict.py | sugatoray/omegaconf | edf9e86493a14b0e909e956d9bae59b9861ef9c5 | [
"BSD-3-Clause"
] | 1,091 | 2018-09-06T17:27:12.000Z | 2022-03-31T13:47:45.000Z | tests/test_compare_dictconfig_vs_dict.py | sugatoray/omegaconf | edf9e86493a14b0e909e956d9bae59b9861ef9c5 | [
"BSD-3-Clause"
] | 624 | 2019-06-11T20:53:19.000Z | 2022-03-30T20:44:25.000Z | tests/test_compare_dictconfig_vs_dict.py | sugatoray/omegaconf | edf9e86493a14b0e909e956d9bae59b9861ef9c5 | [
"BSD-3-Clause"
] | 71 | 2019-06-14T05:32:45.000Z | 2022-03-27T19:52:35.000Z | """
This file compares DictConfig methods with the corresponding
methods of standard python's dict.
The following methods are compared:
__contains__
__delitem__
__eq__
__getitem__
__setitem__
get
pop
keys
values
items
We have separate test classes for the following cases:
TestUntypedDictConfig: for DictConfig without a set key_type
TestPrimitiveTypeDunderMethods: for DictConfig where key_type is primitive
TestEnumTypeDunderMethods: for DictConfig where key_type is Enum
"""
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, Optional
from pytest import fixture, mark, param, raises
from omegaconf import DictConfig, OmegaConf
from omegaconf.errors import ConfigKeyError, ConfigTypeError, KeyValidationError
from tests import Enum1
@fixture(params=["str", 1, 3.1415, True, Enum1.FOO])
def key(request: Any) -> Any:
    """A key to test indexing into DictConfig."""
    requested_key: Any = request.param
    return requested_key
@fixture
def python_dict(data: Dict[Any, Any]) -> Dict[Any, Any]:
    """Just a standard python dictionary, to be used in comparison with DictConfig."""
    copied_data: Dict[Any, Any] = deepcopy(data)
    return copied_data
@fixture(params=[None, False, True])
def struct_mode(request: Any) -> Optional[bool]:
    """Struct-mode flag passed to OmegaConf.set_struct: None, False, or True.

    The fixture body was missing, leaving a dangling decorator; restored so the
    parametrized ``struct_mode`` fixture used throughout the test classes exists.
    """
    struct_mode: Optional[bool] = request.param
    return struct_mode
@mark.parametrize(
    "data",
    [
        param({"a": 10}, id="str"),
        param({1: "a"}, id="int"),
        param({123.45: "a"}, id="float"),
        param({True: "a"}, id="bool"),
        param({Enum1.FOO: "foo"}, id="Enum1"),
    ],
)
class TestUntypedDictConfig:
    """Compare DictConfig with python dict in the case where key_type is not set."""

    @fixture
    def cfg(self, python_dict: Any, struct_mode: Optional[bool]) -> DictConfig:
        """Create a DictConfig instance from the given data"""
        cfg: DictConfig = DictConfig(content=python_dict)
        OmegaConf.set_struct(cfg, struct_mode)
        return cfg

    def test__setitem__(
        self, python_dict: Any, cfg: DictConfig, key: Any, struct_mode: Optional[bool]
    ) -> None:
        """Ensure that __setitem__ has same effect on python dict and on DictConfig."""
        if struct_mode and key not in cfg:
            with raises(ConfigKeyError):
                cfg[key] = "sentinel"
        else:
            python_dict[key] = "sentinel"
            cfg[key] = "sentinel"
            assert python_dict == cfg

    def test__getitem__(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
        """Ensure that __getitem__ has same result with python dict as with DictConfig."""
        try:
            result = python_dict[key]
        except KeyError:
            with raises(ConfigKeyError):
                cfg[key]
        else:
            assert result == cfg[key]

    @mark.parametrize("struct_mode", [False, None])
    def test__delitem__(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
        """Ensure that __delitem__ has same result with python dict as with DictConfig."""
        try:
            del python_dict[key]
            assert key not in python_dict
        except KeyError:
            with raises(ConfigKeyError):
                del cfg[key]
        else:
            del cfg[key]
            assert key not in cfg

    @mark.parametrize("struct_mode", [True])
    def test__delitem__struct_mode(
        self, python_dict: Any, cfg: DictConfig, key: Any
    ) -> None:
        """Ensure that __delitem__ fails in struct_mode"""
        with raises(ConfigTypeError):
            del cfg[key]

    def test__contains__(self, python_dict: Any, cfg: Any, key: Any) -> None:
        """Ensure that __contains__ has same result with python dict as with DictConfig."""
        assert (key in python_dict) == (key in cfg)

    def test__eq__(self, python_dict: Any, cfg: Any, key: Any) -> None:
        """Ensure that __eq__ holds between a python dict and the DictConfig built from it.

        Restored: the module docstring promises an __eq__ comparison, but the
        method had been dropped from this copy of the class.
        """
        assert python_dict == cfg

    def test_get(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
        """Ensure that get has same result with python dict as with DictConfig."""
        assert python_dict.get(key) == cfg.get(key)

    def test_get_with_default(
        self, python_dict: Any, cfg: DictConfig, key: Any
    ) -> None:
        """Ensure that get(..., DEFAULT) has same result with python dict as with DictConfig."""
        assert python_dict.get(key, "DEFAULT") == cfg.get(key, "DEFAULT")

    @mark.parametrize("struct_mode", [False, None])
    def test_pop(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop has same result with python dict as with DictConfig."""
        try:
            result = python_dict.pop(key)
        except KeyError:
            with raises(ConfigKeyError):
                cfg.pop(key)
        else:
            assert result == cfg.pop(key)
        assert python_dict.keys() == cfg.keys()

    @mark.parametrize("struct_mode", [True])
    def test_pop_struct_mode(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop fails in struct mode."""
        with raises(ConfigTypeError):
            cfg.pop(key)

    @mark.parametrize("struct_mode", [False, None])
    def test_pop_with_default(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
        assert python_dict.pop(key, "DEFAULT") == cfg.pop(key, "DEFAULT")
        assert python_dict.keys() == cfg.keys()

    @mark.parametrize("struct_mode", [True])
    def test_pop_with_default_struct_mode(
        self,
        python_dict: Any,
        cfg: DictConfig,
        key: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) fails in struct mode."""
        with raises(ConfigTypeError):
            cfg.pop(key, "DEFAULT")

    def test_keys(self, python_dict: Any, cfg: Any) -> None:
        """Ensure that keys() has same result with python dict as with DictConfig."""
        assert python_dict.keys() == cfg.keys()

    def test_values(self, python_dict: Any, cfg: Any) -> None:
        """Ensure that values() has same result with python dict as with DictConfig."""
        assert list(python_dict.values()) == list(cfg.values())

    def test_items(self, python_dict: Any, cfg: Any) -> None:
        """Ensure that items() has same result with python dict as with DictConfig."""
        assert list(python_dict.items()) == list(cfg.items())
@fixture
def cfg_typed(
    python_dict: Any, cfg_key_type: Any, struct_mode: Optional[bool]
) -> DictConfig:
    """Create a DictConfig instance that has strongly-typed keys."""
    typed_config = DictConfig(content=python_dict, key_type=cfg_key_type)
    OmegaConf.set_struct(typed_config, struct_mode)
    return typed_config
@mark.parametrize(
    "cfg_key_type,data",
    [(str, {"a": 10}), (int, {1: "a"}), (float, {123.45: "a"}), (bool, {True: "a"})],
)
class TestPrimitiveTypeDunderMethods:
    """Compare DictConfig with python dict in the case where key_type is a primitive type."""

    def test__setitem__primitive_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
        struct_mode: Optional[bool],
    ) -> None:
        """When DictConfig keys are strongly typed,
        ensure that __setitem__ has same effect on python dict and on DictConfig."""
        if struct_mode and key not in cfg_typed:
            if isinstance(key, cfg_key_type) or (
                cfg_key_type == bool and key in (0, 1)
            ):
                with raises(ConfigKeyError):
                    cfg_typed[key] = "sentinel"
            else:
                with raises(KeyValidationError):
                    cfg_typed[key] = "sentinel"
        else:
            python_dict[key] = "sentinel"
            if isinstance(key, cfg_key_type) or (
                cfg_key_type == bool and key in (0, 1)
            ):
                cfg_typed[key] = "sentinel"
                assert python_dict == cfg_typed
            else:
                with raises(KeyValidationError):
                    cfg_typed[key] = "sentinel"

    def test__getitem__primitive_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """When Dictconfig keys are strongly typed,
        ensure that __getitem__ has same result with python dict as with DictConfig."""
        try:
            result = python_dict[key]
        except KeyError:
            if isinstance(key, cfg_key_type) or (
                cfg_key_type == bool and key in (0, 1)
            ):
                with raises(ConfigKeyError):
                    cfg_typed[key]
            else:
                with raises(KeyValidationError):
                    cfg_typed[key]
        else:
            assert result == cfg_typed[key]

    @mark.parametrize("struct_mode", [False, None])
    def test__delitem__primitive_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """When Dictconfig keys are strongly typed,
        ensure that __delitem__ has same result with python dict as with DictConfig."""
        try:
            del python_dict[key]
            assert key not in python_dict
        except KeyError:
            if isinstance(key, cfg_key_type) or (
                cfg_key_type == bool and key in (0, 1)
            ):
                with raises(ConfigKeyError):
                    del cfg_typed[key]
            else:
                with raises(KeyValidationError):
                    del cfg_typed[key]
        else:
            del cfg_typed[key]
            assert key not in cfg_typed

    @mark.parametrize("struct_mode", [True])
    def test__delitem__primitive_typed_struct_mode(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure ensure that struct-mode __delitem__ raises ConfigTypeError or KeyValidationError"""
        if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
            with raises(ConfigTypeError):
                del cfg_typed[key]
        else:
            with raises(KeyValidationError):
                del cfg_typed[key]

    def test__contains__primitive_typed(
        self, python_dict: Any, cfg_typed: Any, key: Any
    ) -> None:
        """Ensure that __contains__ has same result with python dict as with DictConfig."""
        assert (key in python_dict) == (key in cfg_typed)

    def test__eq__primitive_typed(
        self, python_dict: Any, cfg_typed: Any, key: Any
    ) -> None:
        """Ensure that __eq__ holds between python dict and typed DictConfig.

        Restored: promised by the module docstring but dropped from this copy.
        """
        assert python_dict == cfg_typed

    def test_get_primitive_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that get has same result with python dict as with DictConfig."""
        if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
            assert python_dict.get(key) == cfg_typed.get(key)
        else:
            with raises(KeyValidationError):
                cfg_typed.get(key)

    def test_get_with_default_primitive_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that get(..., DEFAULT) has same result with python dict as with DictConfig."""
        if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
            assert python_dict.get(key, "DEFAULT") == cfg_typed.get(key, "DEFAULT")
        else:
            with raises(KeyValidationError):
                cfg_typed.get(key, "DEFAULT")

    @mark.parametrize("struct_mode", [False, None])
    def test_pop_primitive_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop has same result with python dict as with DictConfig."""
        if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
            try:
                result = python_dict.pop(key)
            except KeyError:
                with raises(ConfigKeyError):
                    cfg_typed.pop(key)
            else:
                assert result == cfg_typed.pop(key)
            assert python_dict.keys() == cfg_typed.keys()
        else:
            with raises(KeyValidationError):
                cfg_typed.pop(key)

    @mark.parametrize("struct_mode", [True])
    def test_pop_primitive_typed_struct_mode(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop fails in struct mode."""
        with raises(ConfigTypeError):
            cfg_typed.pop(key)

    @mark.parametrize("struct_mode", [False, None])
    def test_pop_with_default_primitive_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
        if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
            assert python_dict.pop(key, "DEFAULT") == cfg_typed.pop(key, "DEFAULT")
            assert python_dict.keys() == cfg_typed.keys()
        else:
            with raises(KeyValidationError):
                cfg_typed.pop(key, "DEFAULT")

    @mark.parametrize("struct_mode", [True])
    def test_pop_with_default_primitive_typed_struct_mode(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) fails in struct mode"""
        # Fixed: previously called cfg_typed.pop(key), which does not exercise
        # the documented pop-with-default path (cf. the untyped counterpart).
        with raises(ConfigTypeError):
            cfg_typed.pop(key, "DEFAULT")

    def test_keys_primitive_typed(self, python_dict: Any, cfg_typed: Any) -> None:
        """Ensure that keys() has same result with python dict as with DictConfig."""
        assert python_dict.keys() == cfg_typed.keys()

    def test_values_primitive_typed(self, python_dict: Any, cfg_typed: Any) -> None:
        """Ensure that values() has same result with python dict as with DictConfig."""
        assert list(python_dict.values()) == list(cfg_typed.values())

    def test_items_primitive_typed(self, python_dict: Any, cfg_typed: Any) -> None:
        """Ensure that items() has same result with python dict as with DictConfig."""
        assert list(python_dict.items()) == list(cfg_typed.items())
@mark.parametrize("cfg_key_type,data", [(Enum1, {Enum1.FOO: "foo"})])
class TestEnumTypeDunderMethods:
    """Compare DictConfig with python dict in the case where key_type is an Enum type."""

    @fixture
    def key_coerced(self, key: Any, cfg_key_type: Any) -> Any:
        """
        This handles key coersion in the special case where DictConfig key_type
        is a subclass of Enum: keys of type `str` or `int` are coerced to `key_type`.
        See https://github.com/omry/omegaconf/pull/484#issuecomment-765772019
        """
        assert issubclass(cfg_key_type, Enum)
        if type(key) == str and key in [e.name for e in cfg_key_type]:
            return cfg_key_type[key]
        elif type(key) == int and key in [e.value for e in cfg_key_type]:
            return cfg_key_type(key)
        else:
            return key

    def test__setitem__enum_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
        struct_mode: Optional[bool],
    ) -> None:
        """When DictConfig keys are strongly typed,
        ensure that __setitem__ has same effect on python dict and on DictConfig."""
        if struct_mode and key_coerced not in cfg_typed:
            if isinstance(key_coerced, cfg_key_type):
                with raises(ConfigKeyError):
                    cfg_typed[key] = "sentinel"
            else:
                with raises(KeyValidationError):
                    cfg_typed[key] = "sentinel"
        else:
            python_dict[key_coerced] = "sentinel"
            if isinstance(key_coerced, cfg_key_type):
                cfg_typed[key] = "sentinel"
                assert python_dict == cfg_typed
            else:
                with raises(KeyValidationError):
                    cfg_typed[key] = "sentinel"

    def test__getitem__enum_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """When Dictconfig keys are strongly typed,
        ensure that __getitem__ has same result with python dict as with DictConfig."""
        try:
            result = python_dict[key_coerced]
        except KeyError:
            if isinstance(key_coerced, cfg_key_type):
                with raises(ConfigKeyError):
                    cfg_typed[key]
            else:
                with raises(KeyValidationError):
                    cfg_typed[key]
        else:
            assert result == cfg_typed[key]

    @mark.parametrize("struct_mode", [False, None])
    def test__delitem__enum_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """When Dictconfig keys are strongly typed,
        ensure that __delitem__ has same result with python dict as with DictConfig."""
        try:
            del python_dict[key_coerced]
            assert key_coerced not in python_dict
        except KeyError:
            if isinstance(key_coerced, cfg_key_type):
                with raises(ConfigKeyError):
                    del cfg_typed[key]
            else:
                with raises(KeyValidationError):
                    del cfg_typed[key]
        else:
            del cfg_typed[key]
            assert key not in cfg_typed

    @mark.parametrize("struct_mode", [True])
    def test__delitem__enum_typed_struct_mode(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that __delitem__ errors in struct mode"""
        if isinstance(key_coerced, cfg_key_type):
            with raises(ConfigTypeError):
                del cfg_typed[key]
        else:
            with raises(KeyValidationError):
                del cfg_typed[key]

    def test__contains__enum_typed(
        self, python_dict: Any, cfg_typed: Any, key: Any, key_coerced: Any
    ) -> None:
        """Ensure that __contains__ has same result with python dict as with DictConfig."""
        assert (key_coerced in python_dict) == (key in cfg_typed)

    def test__eq__enum_typed(self, python_dict: Any, cfg_typed: Any, key: Any) -> None:
        """Ensure that __eq__ holds between python dict and enum-keyed DictConfig.

        Restored: promised by the module docstring but dropped from this copy.
        """
        assert python_dict == cfg_typed

    def test_get_enum_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that get has same result with python dict as with DictConfig."""
        if isinstance(key_coerced, cfg_key_type):
            assert python_dict.get(key_coerced) == cfg_typed.get(key)
        else:
            with raises(KeyValidationError):
                cfg_typed.get(key)

    def test_get_with_default_enum_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that get(..., DEFAULT) has same result with python dict as with DictConfig."""
        if isinstance(key_coerced, cfg_key_type):
            assert python_dict.get(key_coerced, "DEFAULT") == cfg_typed.get(
                key, "DEFAULT"
            )
        else:
            with raises(KeyValidationError):
                cfg_typed.get(key, "DEFAULT")

    @mark.parametrize("struct_mode", [False, None])
    def test_pop_enum_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop has same result with python dict as with DictConfig."""
        if isinstance(key_coerced, cfg_key_type):
            try:
                result = python_dict.pop(key_coerced)
            except KeyError:
                with raises(ConfigKeyError):
                    cfg_typed.pop(key)
            else:
                assert result == cfg_typed.pop(key)
            assert python_dict.keys() == cfg_typed.keys()
        else:
            with raises(KeyValidationError):
                cfg_typed.pop(key)

    @mark.parametrize("struct_mode", [True])
    def test_pop_enum_typed_struct_mode(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop fails in struct mode"""
        with raises(ConfigTypeError):
            cfg_typed.pop(key)

    @mark.parametrize("struct_mode", [False, None])
    def test_pop_with_default_enum_typed(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
        if isinstance(key_coerced, cfg_key_type):
            assert python_dict.pop(key_coerced, "DEFAULT") == cfg_typed.pop(
                key, "DEFAULT"
            )
            assert python_dict.keys() == cfg_typed.keys()
        else:
            with raises(KeyValidationError):
                cfg_typed.pop(key, "DEFAULT")

    @mark.parametrize("struct_mode", [True])
    def test_pop_with_default_enum_typed_struct_mode(
        self,
        python_dict: Any,
        cfg_typed: DictConfig,
        key: Any,
        key_coerced: Any,
        cfg_key_type: Any,
    ) -> None:
        """Ensure that pop(..., DEFAULT) errors in struct mode"""
        # Fixed: previously called cfg_typed.pop(key), which does not exercise
        # the documented pop-with-default path (cf. the untyped counterpart).
        with raises(ConfigTypeError):
            cfg_typed.pop(key, "DEFAULT")
| 34.496914 | 101 | 0.594659 | """
This file compares DictConfig methods with the corresponding
methods of standard python's dict.
The following methods are compared:
__contains__
__delitem__
__eq__
__getitem__
__setitem__
get
pop
keys
values
items
We have separate test classes for the following cases:
TestUntypedDictConfig: for DictConfig without a set key_type
TestPrimitiveTypeDunderMethods: for DictConfig where key_type is primitive
TestEnumTypeDunderMethods: for DictConfig where key_type is Enum
"""
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, Optional
from pytest import fixture, mark, param, raises
from omegaconf import DictConfig, OmegaConf
from omegaconf.errors import ConfigKeyError, ConfigTypeError, KeyValidationError
from tests import Enum1
@fixture(params=["str", 1, 3.1415, True, Enum1.FOO])
def key(request: Any) -> Any:
    """A key to test indexing into DictConfig."""
    param_value: Any = request.param
    return param_value
@fixture
def python_dict(data: Dict[Any, Any]) -> Dict[Any, Any]:
    """Just a standard python dictionary, to be used in comparison with DictConfig."""
    independent_copy: Dict[Any, Any] = deepcopy(data)
    return independent_copy
@fixture(params=[None, False, True])
def struct_mode(request: Any) -> Optional[bool]:
    """Parametrized struct-mode flag (None, False, or True) for OmegaConf.set_struct."""
    return request.param
@mark.parametrize(
"data",
[
param({"a": 10}, id="str"),
param({1: "a"}, id="int"),
param({123.45: "a"}, id="float"),
param({True: "a"}, id="bool"),
param({Enum1.FOO: "foo"}, id="Enum1"),
],
)
class TestUntypedDictConfig:
"""Compare DictConfig with python dict in the case where key_type is not set."""
@fixture
def cfg(self, python_dict: Any, struct_mode: Optional[bool]) -> DictConfig:
"""Create a DictConfig instance from the given data"""
cfg: DictConfig = DictConfig(content=python_dict)
OmegaConf.set_struct(cfg, struct_mode)
return cfg
def test__setitem__(
self, python_dict: Any, cfg: DictConfig, key: Any, struct_mode: Optional[bool]
) -> None:
"""Ensure that __setitem__ has same effect on python dict and on DictConfig."""
if struct_mode and key not in cfg:
with raises(ConfigKeyError):
cfg[key] = "sentinel"
else:
python_dict[key] = "sentinel"
cfg[key] = "sentinel"
assert python_dict == cfg
def test__getitem__(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
try:
result = python_dict[key]
except KeyError:
with raises(ConfigKeyError):
cfg[key]
else:
assert result == cfg[key]
@mark.parametrize("struct_mode", [False, None])
def test__delitem__(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
"""Ensure that __delitem__ has same result with python dict as with DictConfig."""
try:
del python_dict[key]
assert key not in python_dict
except KeyError:
with raises(ConfigKeyError):
del cfg[key]
else:
del cfg[key]
assert key not in cfg
@mark.parametrize("struct_mode", [True])
def test__delitem__struct_mode(
self, python_dict: Any, cfg: DictConfig, key: Any
) -> None:
"""Ensure that __delitem__ fails in struct_mode"""
with raises(ConfigTypeError):
del cfg[key]
def test__contains__(self, python_dict: Any, cfg: Any, key: Any) -> None:
"""Ensure that __contains__ has same result with python dict as with DictConfig."""
assert (key in python_dict) == (key in cfg)
    def test__eq__(self, python_dict: Any, cfg: Any, key: Any) -> None:
        """Ensure that a DictConfig compares equal to the python dict it was built from."""
        assert python_dict == cfg
def test_get(self, python_dict: Any, cfg: DictConfig, key: Any) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
assert python_dict.get(key) == cfg.get(key)
def test_get_with_default(
self, python_dict: Any, cfg: DictConfig, key: Any
) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
assert python_dict.get(key, "DEFAULT") == cfg.get(key, "DEFAULT")
@mark.parametrize("struct_mode", [False, None])
def test_pop(
self,
python_dict: Any,
cfg: DictConfig,
key: Any,
) -> None:
"""Ensure that pop has same result with python dict as with DictConfig."""
try:
result = python_dict.pop(key)
except KeyError:
with raises(ConfigKeyError):
cfg.pop(key)
else:
assert result == cfg.pop(key)
assert python_dict.keys() == cfg.keys()
@mark.parametrize("struct_mode", [True])
def test_pop_struct_mode(
self,
python_dict: Any,
cfg: DictConfig,
key: Any,
) -> None:
"""Ensure that pop fails in struct mode."""
with raises(ConfigTypeError):
cfg.pop(key)
@mark.parametrize("struct_mode", [False, None])
def test_pop_with_default(
self,
python_dict: Any,
cfg: DictConfig,
key: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
assert python_dict.pop(key, "DEFAULT") == cfg.pop(key, "DEFAULT")
assert python_dict.keys() == cfg.keys()
@mark.parametrize("struct_mode", [True])
def test_pop_with_default_struct_mode(
self,
python_dict: Any,
cfg: DictConfig,
key: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) fails in struct mode."""
with raises(ConfigTypeError):
cfg.pop(key, "DEFAULT")
    def test_keys(self, python_dict: Any, cfg: Any) -> None:
        """Ensure that keys() has same result with python dict as with DictConfig."""
        assert python_dict.keys() == cfg.keys()
    def test_values(self, python_dict: Any, cfg: Any) -> None:
        """Ensure that values() has same result with python dict as with DictConfig."""
        assert list(python_dict.values()) == list(cfg.values())
    def test_items(self, python_dict: Any, cfg: Any) -> None:
        """Ensure that items() has same result with python dict as with DictConfig."""
        assert list(python_dict.items()) == list(cfg.items())
@fixture
def cfg_typed(
    python_dict: Any, cfg_key_type: Any, struct_mode: Optional[bool]
) -> DictConfig:
    """Create a DictConfig instance that has strongly-typed keys."""
    strongly_typed = DictConfig(content=python_dict, key_type=cfg_key_type)
    OmegaConf.set_struct(strongly_typed, struct_mode)
    return strongly_typed
@mark.parametrize(
"cfg_key_type,data",
[(str, {"a": 10}), (int, {1: "a"}), (float, {123.45: "a"}), (bool, {True: "a"})],
)
class TestPrimitiveTypeDunderMethods:
"""Compare DictConfig with python dict in the case where key_type is a primitive type."""
def test__setitem__primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
struct_mode: Optional[bool],
) -> None:
"""When DictConfig keys are strongly typed,
ensure that __setitem__ has same effect on python dict and on DictConfig."""
if struct_mode and key not in cfg_typed:
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
with raises(ConfigKeyError):
cfg_typed[key] = "sentinel"
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
else:
python_dict[key] = "sentinel"
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
cfg_typed[key] = "sentinel"
assert python_dict == cfg_typed
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
def test__getitem__primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __getitem__ has same result with python dict as with DictConfig."""
try:
result = python_dict[key]
except KeyError:
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
with raises(ConfigKeyError):
cfg_typed[key]
else:
with raises(KeyValidationError):
cfg_typed[key]
else:
assert result == cfg_typed[key]
@mark.parametrize("struct_mode", [False, None])
def test__delitem__primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __delitem__ has same result with python dict as with DictConfig."""
try:
del python_dict[key]
assert key not in python_dict
except KeyError:
if isinstance(key, cfg_key_type) or (
cfg_key_type == bool and key in (0, 1)
):
with raises(ConfigKeyError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
else:
del cfg_typed[key]
assert key not in cfg_typed
@mark.parametrize("struct_mode", [True])
def test__delitem__primitive_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure ensure that struct-mode __delitem__ raises ConfigTypeError or KeyValidationError"""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
with raises(ConfigTypeError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
def test__contains__primitive_typed(
self, python_dict: Any, cfg_typed: Any, key: Any
) -> None:
"""Ensure that __contains__ has same result with python dict as with DictConfig."""
assert (key in python_dict) == (key in cfg_typed)
    def test__eq__primitive_typed(
        self, python_dict: Any, cfg_typed: Any, key: Any
    ) -> None:
        """Ensure that a typed DictConfig compares equal to the python dict it was built from."""
        assert python_dict == cfg_typed
def test_get_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
assert python_dict.get(key) == cfg_typed.get(key)
else:
with raises(KeyValidationError):
cfg_typed.get(key)
def test_get_with_default_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __getitem__ has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
assert python_dict.get(key, "DEFAULT") == cfg_typed.get(key, "DEFAULT")
else:
with raises(KeyValidationError):
cfg_typed.get(key, "DEFAULT")
@mark.parametrize("struct_mode", [False, None])
def test_pop_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
try:
result = python_dict.pop(key)
except KeyError:
with raises(ConfigKeyError):
cfg_typed.pop(key)
else:
assert result == cfg_typed.pop(key)
assert python_dict.keys() == cfg_typed.keys()
else:
with raises(KeyValidationError):
cfg_typed.pop(key)
@mark.parametrize("struct_mode", [True])
def test_pop_primitive_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop fails in struct mode."""
with raises(ConfigTypeError):
cfg_typed.pop(key)
@mark.parametrize("struct_mode", [False, None])
def test_pop_with_default_primitive_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) has same result with python dict as with DictConfig."""
if isinstance(key, cfg_key_type) or (cfg_key_type == bool and key in (0, 1)):
assert python_dict.pop(key, "DEFAULT") == cfg_typed.pop(key, "DEFAULT")
assert python_dict.keys() == cfg_typed.keys()
else:
with raises(KeyValidationError):
cfg_typed.pop(key, "DEFAULT")
@mark.parametrize("struct_mode", [True])
def test_pop_with_default_primitive_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that pop(..., DEFAULT) fails in struct mode"""
with raises(ConfigTypeError):
cfg_typed.pop(key)
    def test_keys_primitive_typed(self, python_dict: Any, cfg_typed: Any) -> None:
        """Ensure that keys() has same result with python dict as with typed DictConfig."""
        assert python_dict.keys() == cfg_typed.keys()
    def test_values_primitive_typed(self, python_dict: Any, cfg_typed: Any) -> None:
        """Ensure that values() has same result with python dict as with typed DictConfig."""
        assert list(python_dict.values()) == list(cfg_typed.values())
    def test_items_primitive_typed(self, python_dict: Any, cfg_typed: Any) -> None:
        """Ensure that items() has same result with python dict as with typed DictConfig."""
        assert list(python_dict.items()) == list(cfg_typed.items())
@mark.parametrize("cfg_key_type,data", [(Enum1, {Enum1.FOO: "foo"})])
class TestEnumTypeDunderMethods:
"""Compare DictConfig with python dict in the case where key_type is an Enum type."""
@fixture
def key_coerced(self, key: Any, cfg_key_type: Any) -> Any:
"""
This handles key coersion in the special case where DictConfig key_type
is a subclass of Enum: keys of type `str` or `int` are coerced to `key_type`.
See https://github.com/omry/omegaconf/pull/484#issuecomment-765772019
"""
assert issubclass(cfg_key_type, Enum)
if type(key) == str and key in [e.name for e in cfg_key_type]:
return cfg_key_type[key]
elif type(key) == int and key in [e.value for e in cfg_key_type]:
return cfg_key_type(key)
else:
return key
def test__setitem__enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
struct_mode: Optional[bool],
) -> None:
"""When DictConfig keys are strongly typed,
ensure that __setitem__ has same effect on python dict and on DictConfig."""
if struct_mode and key_coerced not in cfg_typed:
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigKeyError):
cfg_typed[key] = "sentinel"
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
else:
python_dict[key_coerced] = "sentinel"
if isinstance(key_coerced, cfg_key_type):
cfg_typed[key] = "sentinel"
assert python_dict == cfg_typed
else:
with raises(KeyValidationError):
cfg_typed[key] = "sentinel"
def test__getitem__enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __getitem__ has same result with python dict as with DictConfig."""
try:
result = python_dict[key_coerced]
except KeyError:
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigKeyError):
cfg_typed[key]
else:
with raises(KeyValidationError):
cfg_typed[key]
else:
assert result == cfg_typed[key]
@mark.parametrize("struct_mode", [False, None])
def test__delitem__enum_typed(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""When Dictconfig keys are strongly typed,
ensure that __delitem__ has same result with python dict as with DictConfig."""
try:
del python_dict[key_coerced]
assert key_coerced not in python_dict
except KeyError:
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigKeyError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
else:
del cfg_typed[key]
assert key not in cfg_typed
@mark.parametrize("struct_mode", [True])
def test__delitem__enum_typed_struct_mode(
self,
python_dict: Any,
cfg_typed: DictConfig,
key: Any,
key_coerced: Any,
cfg_key_type: Any,
) -> None:
"""Ensure that __delitem__ errors in struct mode"""
if isinstance(key_coerced, cfg_key_type):
with raises(ConfigTypeError):
del cfg_typed[key]
else:
with raises(KeyValidationError):
del cfg_typed[key]
def test__contains__enum_typed(
self, python_dict: Any, cfg_typed: Any, key: Any, key_coerced: Any
) -> None:
"""Ensure that __contains__ has same result with python dict as with DictConfig."""
assert (key_coerced in python_dict) == (key in cfg_typed)
def test__eq__enum_typed(self, python_dict: Any, cfg_typed: Any, key: Any) -> None:
assert python_dict == cfg_typed
def test_get_enum_typed(
    self,
    python_dict: Any,
    cfg_typed: DictConfig,
    key: Any,
    key_coerced: Any,
    cfg_key_type: Any,
) -> None:
    """DictConfig.get must match dict.get for a valid key type and raise
    KeyValidationError for an invalid one."""
    if not isinstance(key_coerced, cfg_key_type):
        with raises(KeyValidationError):
            cfg_typed.get(key)
    else:
        assert python_dict.get(key_coerced) == cfg_typed.get(key)
def test_get_with_default_enum_typed(
    self,
    python_dict: Any,
    cfg_typed: DictConfig,
    key: Any,
    key_coerced: Any,
    cfg_key_type: Any,
) -> None:
    """DictConfig.get with a default must match dict.get with the same
    default for a valid key type, and raise KeyValidationError otherwise."""
    if not isinstance(key_coerced, cfg_key_type):
        with raises(KeyValidationError):
            cfg_typed.get(key, "DEFAULT")
    else:
        expected = python_dict.get(key_coerced, "DEFAULT")
        assert expected == cfg_typed.get(key, "DEFAULT")
@mark.parametrize("struct_mode", [False, None])
def test_pop_enum_typed(
    self,
    python_dict: Any,
    cfg_typed: DictConfig,
    key: Any,
    key_coerced: Any,
    cfg_key_type: Any,
) -> None:
    """DictConfig.pop must mirror dict.pop: same popped value and identical
    remaining keys on success, the matching OmegaConf error on failure."""
    if not isinstance(key_coerced, cfg_key_type):
        with raises(KeyValidationError):
            cfg_typed.pop(key)
        return
    try:
        expected = python_dict.pop(key_coerced)
    except KeyError:
        with raises(ConfigKeyError):
            cfg_typed.pop(key)
    else:
        assert expected == cfg_typed.pop(key)
        assert python_dict.keys() == cfg_typed.keys()
@mark.parametrize("struct_mode", [True])
def test_pop_enum_typed_struct_mode(
    self,
    python_dict: Any,
    cfg_typed: DictConfig,
    key: Any,
    key_coerced: Any,
    cfg_key_type: Any,
) -> None:
    """In struct mode, pop without a default must raise ConfigTypeError."""
    with raises(ConfigTypeError):
        cfg_typed.pop(key)
@mark.parametrize("struct_mode", [False, None])
def test_pop_with_default_enum_typed(
    self,
    python_dict: Any,
    cfg_typed: DictConfig,
    key: Any,
    key_coerced: Any,
    cfg_key_type: Any,
) -> None:
    """pop(..., DEFAULT) must mirror dict.pop with a default for a valid key
    type and raise KeyValidationError for an invalid one."""
    if not isinstance(key_coerced, cfg_key_type):
        with raises(KeyValidationError):
            cfg_typed.pop(key, "DEFAULT")
        return
    expected = python_dict.pop(key_coerced, "DEFAULT")
    assert expected == cfg_typed.pop(key, "DEFAULT")
    assert python_dict.keys() == cfg_typed.keys()
@mark.parametrize("struct_mode", [True])
def test_pop_with_default_enum_typed_struct_mode(
    self,
    python_dict: Any,
    cfg_typed: DictConfig,
    key: Any,
    key_coerced: Any,
    cfg_key_type: Any,
) -> None:
    """Ensure that pop(..., DEFAULT) errors in struct mode.

    A default cannot override struct-mode protection, so ConfigTypeError is
    expected even with a default supplied.
    """
    with raises(ConfigTypeError):
        # Bug fix: this previously called pop(key) without a default, which
        # duplicated test_pop_enum_typed_struct_mode and never exercised the
        # defaulted form promised by this test's name and docstring.
        cfg_typed.pop(key, "DEFAULT")
def test_keys_enum_typed(self, python_dict: Any, cfg_typed: Any) -> None:
assert python_dict.keys() == cfg_typed.keys()
def test_values_enum_typed(self, python_dict: Any, cfg_typed: Any) -> None:
assert list(python_dict.values()) == list(cfg_typed.values())
def test_items_enum_typed(self, python_dict: Any, cfg_typed: Any) -> None:
assert list(python_dict.items()) == list(cfg_typed.items())
| 1,400 | 0 | 346 |
1145b7a8266e2c22d186806589c80fdba3f337f5 | 1,366 | py | Python | GetGradientIndices/GetGradientIndices.py | s2atoru/RayStationScripts | ffdc9217b20b13cc99d3dd52f06b2cc6c5d3fe52 | [
"MIT"
] | 3 | 2020-04-11T14:08:04.000Z | 2021-11-18T05:47:52.000Z | GetGradientIndices/GetGradientIndices.py | s2atoru/RayStationScripts | ffdc9217b20b13cc99d3dd52f06b2cc6c5d3fe52 | [
"MIT"
] | null | null | null | GetGradientIndices/GetGradientIndices.py | s2atoru/RayStationScripts | ffdc9217b20b13cc99d3dd52f06b2cc6c5d3fe52 | [
"MIT"
] | 3 | 2019-12-20T19:23:17.000Z | 2021-01-11T15:20:33.000Z | from connect import *
# RayStation scripting entry point (IronPython / python 2, per the `clr`
# imports and `print` statements): shows the BrainDoseIndices WPF dialog
# listing every contoured ROI of the currently selected plan.
import clr
import wpf
clr.AddReference("PresentationFramework")
clr.AddReference("PresentationCore")
from System.Collections.Generic import List, Dictionary
from System.Windows import MessageBox
import sys, os
import json

# Locate the RayStationScripts folder on the user's desktop and make its
# compiled Dlls and python Scripts importable.
RayStationScriptsPath = os.environ["USERPROFILE"] + r"\DeskTop\RayStationScripts" + "\\"
dllsPath = RayStationScriptsPath + "Dlls"
print "Dlls path: " + dllsPath
sys.path.append(dllsPath)
scriptsPath = RayStationScriptsPath + "Scripts"
print "Scripts path: " + scriptsPath
sys.path.append(scriptsPath)

# WPF view and model classes from the BrainDoseIndices assembly, plus
# ROI helper functions from the Scripts folder.
clr.AddReference("BrainDoseIndices")
from BrainDoseIndices.Views import MainWindow
from BrainDoseIndices.Models import StructureDetail
from Helpers import GetStructureSet, GetRoiDetails
from Helpers import MakeMarginAddedRoi, MakeRingRoi, MakeRoiSubtractedRoi

# Abort with a message box if no plan is currently selected in RayStation.
try:
    plan = get_current("Plan")
except:
    MessageBox.Show("Plan is not selected. Select Plan")
    sys.exit()

# Collect every ROI that actually has contours, with its name and volume,
# into a .NET list consumable by the dialog.
structureSet = plan.GetStructureSet()
roiDetails = GetRoiDetails(structureSet)
structureDetails = List[StructureDetail]()
for key, value in roiDetails.items():
    if value["HasContours"]:
        structureDetail = StructureDetail();
        structureDetail.Name = key
        structureDetail.Volume = value["Volume"]
        structureDetails.Add(structureDetail)

# Show the dialog modally.
mainWindow = MainWindow(structureDetails)
mainWindow.ShowDialog();
| 26.784314 | 88 | 0.777452 | from connect import *
import clr
import wpf
clr.AddReference("PresentationFramework")
clr.AddReference("PresentationCore")
from System.Collections.Generic import List, Dictionary
from System.Windows import MessageBox
import sys, os
import json
RayStationScriptsPath = os.environ["USERPROFILE"] + r"\DeskTop\RayStationScripts" + "\\"
dllsPath = RayStationScriptsPath + "Dlls"
print "Dlls path: " + dllsPath
sys.path.append(dllsPath)
scriptsPath = RayStationScriptsPath + "Scripts"
print "Scripts path: " + scriptsPath
sys.path.append(scriptsPath)
clr.AddReference("BrainDoseIndices")
from BrainDoseIndices.Views import MainWindow
from BrainDoseIndices.Models import StructureDetail
from Helpers import GetStructureSet, GetRoiDetails
from Helpers import MakeMarginAddedRoi, MakeRingRoi, MakeRoiSubtractedRoi
try:
plan = get_current("Plan")
except:
MessageBox.Show("Plan is not selected. Select Plan")
sys.exit()
structureSet = plan.GetStructureSet()
roiDetails = GetRoiDetails(structureSet)
structureDetails = List[StructureDetail]()
for key, value in roiDetails.items():
if value["HasContours"]:
structureDetail = StructureDetail();
structureDetail.Name = key
structureDetail.Volume = value["Volume"]
structureDetails.Add(structureDetail)
mainWindow = MainWindow(structureDetails)
mainWindow.ShowDialog();
| 0 | 0 | 0 |
46b8bdc8d60743f38624eefb4f370d8626ed3106 | 6,977 | py | Python | pywiface/interface.py | k3an3/pywiface | d8caaa6f974df0e340309d49e3c77ba1a9dd96e8 | [
"MIT"
] | null | null | null | pywiface/interface.py | k3an3/pywiface | d8caaa6f974df0e340309d49e3c77ba1a9dd96e8 | [
"MIT"
] | null | null | null | pywiface/interface.py | k3an3/pywiface | d8caaa6f974df0e340309d49e3c77ba1a9dd96e8 | [
"MIT"
] | null | null | null | import struct
import subprocess
import threading
from time import sleep
from scapy.layers.dot11 import Dot11, Dot11Elt, sendp, Dot11Deauth, RadioTap
from termcolor import cprint
from pywiface.models import Station, AP
from pywiface.threads import ScannerThread
| 35.060302 | 111 | 0.53447 | import struct
import subprocess
import threading
from time import sleep
from scapy.layers.dot11 import Dot11, Dot11Elt, sendp, Dot11Deauth, RadioTap
from termcolor import cprint
from pywiface.models import Station, AP
from pywiface.threads import ScannerThread
class WirelessInterface:
    """Wrapper around a Linux wireless NIC, driven via the external
    `ip`, `iw` and `macchanger` command-line tools."""

    def __init__(self, name: str, essid: str = None, monitor_mode: bool = False, channel=None):
        # name is the OS interface name (e.g. "wlan0"); essid identifies the
        # target network, bssid is filled in later by users of this class.
        self.name = name
        self.monitor_mode = monitor_mode
        if monitor_mode:
            self.set_monitor_mode()
        # Discovered stations and APs, keyed by MAC address.
        self.stations = {}
        self.aps = {}
        # lock guards stations/aps; channel_lock serializes channel changes;
        # the semaphores count discoveries not yet consumed by get_new_*.
        self.lock = threading.Lock()
        self.channel_lock = threading.Lock()
        self.ap_sema = threading.Semaphore(0)
        self.sta_sema = threading.Semaphore(0)
        self.essid = essid
        self.bssid = None
        self.scan_thread = None
        self._channel = channel
        # hop == True means channel hopping is allowed; pinning a channel at
        # construction time disables hopping.
        self.hop = True
        if channel:
            self.set_channel(channel)
            self.hop = False

    def set_up(self):
        """Bring the interface administratively up."""
        subprocess.run(['/bin/ip', 'link', 'set', self.name, 'up'])

    def set_down(self):
        """Bring the interface administratively down."""
        subprocess.run(['/bin/ip', 'link', 'set', self.name, 'down'])

    def set_monitor_mode(self):
        """Switch the interface into 802.11 monitor mode (downs it first)."""
        self.set_down()
        subprocess.run(['/sbin/iw', 'dev', self.name, 'set', 'monitor', 'none'])
        self.set_up()
        self.monitor_mode = True

    def set_managed_mode(self):
        """Switch the interface back into normal managed (client) mode."""
        self.set_down()
        subprocess.run(['/sbin/iw', 'dev', self.name, 'set', 'type', 'managed'])
        self.set_up()
        self.monitor_mode = False

    def get_frequency(self):
        """Return the current channel's 2.4 GHz center frequency in MHz,
        packed as a little-endian 16-bit value."""
        return struct.pack("<h", 2407 + (self._channel * 5))

    @property
    def channel(self):
        # Current channel number (None if never set).
        return self._channel

    @channel.setter
    def channel(self, value):
        self.set_channel(value)

    def set_channel(self, channel):
        """Tune to the given 2.4 GHz channel; channels outside 1-11 (USA
        regulatory domain) are silently ignored."""
        # in USA reg domain
        if 1 <= channel <= 11:
            subprocess.run(['/sbin/iw', 'dev', self.name, 'set', 'channel', str(channel)])
            self._channel = channel

    def set_mac(self, mac):
        """Assign a specific MAC address to the interface."""
        self.set_down()
        subprocess.run(['/bin/ip', 'link', 'set', 'dev', self.name, 'address', mac], stdout=subprocess.DEVNULL)
        self.set_up()

    def reset_mac(self):
        """Restore the permanent (burned-in) MAC address."""
        self.set_down()
        subprocess.run(['/usr/bin/macchanger', '-p', self.name], stdout=subprocess.DEVNULL)
        self.set_up()

    def spoof_mac(self):
        """Set a fully random MAC address."""
        self.set_down()
        subprocess.run(['/usr/bin/macchanger', '-A', self.name], stdout=subprocess.DEVNULL)
        self.set_up()
class MonitorInterface(WirelessInterface):
    """A WirelessInterface forced into monitor mode, adding scapy-based
    frame injection, deauthentication and passive AP/station scanning."""

    def __init__(self, name: str, channel=None):
        super().__init__(name, monitor_mode=True, channel=channel)

    def deauth(self, target_mac: str, source_mac: str, count=1, burst_count=200, channel=None, reason=7):
        """Inject 802.11 deauthentication frames at target_mac, spoofed from
        source_mac, as `count` bursts of `burst_count` frames (holding the
        channel lock so a concurrent scanner cannot retune mid-burst)."""
        self.channel_lock.acquire()
        if channel:
            self.set_channel(channel)
        # type=0/subtype=12 is a management/deauthentication frame.
        pkt = RadioTap() / Dot11(type=0, subtype=12, addr1=target_mac, addr2=source_mac,
                                 addr3=self.bssid) / Dot11Deauth(reason=reason)
        for i in range(count):
            cprint("DEAUTH!!!", 'red')
            for j in range(burst_count):
                self.inject(pkt)
            if count > 1:
                sleep(.1)
        self.channel_lock.release()

    def get_new_station(self) -> Station:
        """Block until the scanner has found a station not yet handed out,
        then return it (marking it consumed)."""
        self.sta_sema.acquire()
        target = next((self.stations[client] for client in self.stations if self.stations[client].new), None)
        target.new = False
        return target

    def get_new_ap(self) -> AP:
        """Block until the scanner has found an AP not yet handed out,
        then return it (marking it consumed)."""
        self.ap_sema.acquire()
        target = next((self.aps[ap] for ap in self.aps if self.aps[ap].new), None)
        target.new = False
        return target

    def inject(self, pkt, inter=0):
        """Send a raw frame out this interface (inter = inter-packet gap)."""
        sendp(pkt, iface=self.name, inter=inter, verbose=False)

    def scan(self):
        """Start the background scanner thread that feeds scan_callback."""
        self.scan_thread = ScannerThread(self)
        self.scan_thread.start()

    def stop_scan(self):
        """Stop the background scanner thread and wait for it to exit."""
        self.scan_thread.stop()
        self.scan_thread.join()

    def ap_passes_test(self, pkt):
        """True for a beacon frame (mgmt type 0, subtype 8) from an AP that
        has not been recorded yet."""
        return (pkt.haslayer(Dot11) and pkt.type == 0 and pkt.subtype == 8
                and not self.aps.get(pkt.addr3))

    def sta_passes_test(self, pkt):
        """True for a management (assoc/probe) or data frame from a station
        not yet recorded; falls through to None (falsy) otherwise. Frames are
        filtered to self.bssid when one is set."""
        # Just for readability/sanity
        client_mgmt_subtypes = (0, 2, 4)
        check = False
        if pkt.haslayer(Dot11):
            if (pkt.type == 0 and (not self.bssid or pkt.addr3 == self.bssid)
                    and pkt.subtype in client_mgmt_subtypes):
                check = True
            elif pkt.type == 2 and (pkt.addr1 == self.bssid or not self.bssid):
                check = True
        if check:
            # New only if neither addressed MAC is already known.
            return not (self.stations.get(pkt.addr2) or self.stations.get(pkt.addr1))

    @staticmethod
    def sta_mac(pkt):
        """Best-effort extraction of the station MAC from a frame: prefer
        addr1 unless it equals the BSSID (addr3)."""
        return pkt.addr1 if not pkt.addr1 == pkt.addr3 else pkt.addr2

    def scan_callback(self, pkt):
        """Scapy sniff callback: record newly seen APs (ESSID, channel and
        crypto parsed from the tagged parameters) and stations."""
        try:
            # Management/Beacon
            if self.ap_passes_test(pkt):
                self.lock.acquire()
                self.ap_sema.release()
                # http://stackoverflow.com/a/21664038
                essid, channel, w = None, None, None
                bssid = pkt.addr3
                crypto = ""
                cap = pkt.sprintf("{Dot11Beacon:%Dot11Beacon.cap%}"
                                  "{Dot11ProbeResp:%Dot11ProbeResp.cap%}").split('+')
                # Walk the chain of information elements.
                p = pkt[Dot11Elt]
                while isinstance(p, Dot11Elt):
                    if p.ID == 0:
                        # IE 0: SSID (may not be valid UTF-8).
                        try:
                            essid = p.info.decode()
                        except UnicodeDecodeError as e:
                            print(p.info)
                            essid = p.info
                    elif p.ID == 3:
                        # IE 3: DS parameter set (channel number).
                        try:
                            channel = ord(p.info)
                        except TypeError as e:
                            print(p.info)
                            channel = p.info
                    elif p.ID == 48:
                        # IE 48: RSN -> WPA2.
                        crypto = "WPA2"
                        w = p.info[18:19]
                    elif p.ID == 221 and p.info.startswith(b'\x00P\xf2\x01\x01\x00'):
                        # Vendor-specific Microsoft WPA IE.
                        crypto = "WPA"
                    p = p.payload
                if not crypto:
                    # No WPA/WPA2 IE: privacy bit alone means WEP, else open.
                    if 'privacy' in cap:
                        crypto = "WEP"
                    else:
                        crypto = "OPN"
                self.aps[bssid] = AP(bssid, essid, crypto, channel, w)
                self.lock.release()
            elif self.sta_passes_test(pkt):
                self.lock.acquire()
                try:
                    channel = pkt[Dot11Elt:3].info
                except IndexError:
                    channel = self.channel
                mac = self.sta_mac(pkt)
                self.stations[mac] = Station(mac, channel, pkt.addr3)
                self.sta_sema.release()
                self.lock.release()
        except Exception as e:
            print(pkt)
            raise e
| 5,971 | 695 | 46 |
c51ca5a63460e6b71b17600a9fc18cf4f02eecdc | 1,016 | py | Python | src/python/pants/backend/jvm/jvm_debug_config.py | arloherrine/pants | 5f98f7734590eb21a2992a4c28415f838a2e6927 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/jvm_debug_config.py | arloherrine/pants | 5f98f7734590eb21a2992a4c28415f838a2e6927 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/jvm_debug_config.py | arloherrine/pants | 5f98f7734590eb21a2992a4c28415f838a2e6927 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
# TOOD(Eric Ayers): There is no task or goal named 'jvm' as used in the config section where these parameters are located.
# We might need to rename these whem merging together the config and the new options system.
class JvmDebugConfig(object):
"""A utility class to consolodate fetching JVM flags needed for debugging from the configuration."""
@staticmethod
@staticmethod
| 40.64 | 122 | 0.73622 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
# TOOD(Eric Ayers): There is no task or goal named 'jvm' as used in the config section where these parameters are located.
# We might need to rename these whem merging together the config and the new options system.
class JvmDebugConfig(object):
  """Utility helpers for reading JVM debugging flags from the configuration."""

  @staticmethod
  def debug_args(config):
    """Return the JVM command-line arguments that enable remote debugging.

    Reads [jvm] debug_args from config, defaulting to a JDWP socket listener
    on the configured debug port, suspended until a debugger attaches.
    """
    port = JvmDebugConfig.debug_port(config)
    jdwp_flag = ('-Xrunjdwp:transport=dt_socket,server=y,suspend=y,'
                 'address={debug_port}'.format(debug_port=port))
    default_args = ['-Xdebug', jdwp_flag]
    return config.getlist('jvm', 'debug_args', default=default_args)

  @staticmethod
  def debug_port(config):
    """Return the TCP port the debugger should attach to (default 5005)."""
    return config.getint('jvm', 'debug_port', default=5005)
| 284 | 0 | 48 |
93804d0b3ddc0d7aa008ce064cfd1d0214729038 | 2,593 | py | Python | mpylib/mpylib/qstrs.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 126 | 2019-07-19T14:42:41.000Z | 2022-03-21T22:22:19.000Z | mpylib/mpylib/qstrs.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 38 | 2019-08-28T01:46:31.000Z | 2022-03-17T05:46:51.000Z | mpylib/mpylib/qstrs.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 55 | 2019-08-02T09:32:33.000Z | 2021-12-22T11:25:51.000Z | # static qstrs, should be sorted
# extracted from micropython/py/makeqstrdata.py
static_qstr_list = [
"",
"__dir__", # Put __dir__ after empty qstr for builtin dir() to work
"\n",
" ",
"*",
"/",
"<module>",
"_",
"__call__",
"__class__",
"__delitem__",
"__enter__",
"__exit__",
"__getattr__",
"__getitem__",
"__hash__",
"__init__",
"__int__",
"__iter__",
"__len__",
"__main__",
"__module__",
"__name__",
"__new__",
"__next__",
"__qualname__",
"__repr__",
"__setitem__",
"__str__",
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"EOFError",
"Ellipsis",
"Exception",
"GeneratorExit",
"ImportError",
"IndentationError",
"IndexError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"MemoryError",
"NameError",
"NoneType",
"NotImplementedError",
"OSError",
"OverflowError",
"RuntimeError",
"StopIteration",
"SyntaxError",
"SystemExit",
"TypeError",
"ValueError",
"ZeroDivisionError",
"abs",
"all",
"any",
"append",
"args",
"bool",
"builtins",
"bytearray",
"bytecode",
"bytes",
"callable",
"chr",
"classmethod",
"clear",
"close",
"const",
"copy",
"count",
"dict",
"dir",
"divmod",
"end",
"endswith",
"eval",
"exec",
"extend",
"find",
"format",
"from_bytes",
"get",
"getattr",
"globals",
"hasattr",
"hash",
"id",
"index",
"insert",
"int",
"isalpha",
"isdigit",
"isinstance",
"islower",
"isspace",
"issubclass",
"isupper",
"items",
"iter",
"join",
"key",
"keys",
"len",
"list",
"little",
"locals",
"lower",
"lstrip",
"main",
"map",
"micropython",
"next",
"object",
"open",
"ord",
"pop",
"popitem",
"pow",
"print",
"range",
"read",
"readinto",
"readline",
"remove",
"replace",
"repr",
"reverse",
"rfind",
"rindex",
"round",
"rsplit",
"rstrip",
"self",
"send",
"sep",
"set",
"setattr",
"setdefault",
"sort",
"sorted",
"split",
"start",
"startswith",
"staticmethod",
"step",
"stop",
"str",
"strip",
"sum",
"super",
"throw",
"to_bytes",
"tuple",
"type",
"update",
"upper",
"utf-8",
"value",
"values",
"write",
"zip",
]
| 15.163743 | 71 | 0.473583 | # static qstrs, should be sorted
# extracted from micropython/py/makeqstrdata.py
static_qstr_list = [
"",
"__dir__", # Put __dir__ after empty qstr for builtin dir() to work
"\n",
" ",
"*",
"/",
"<module>",
"_",
"__call__",
"__class__",
"__delitem__",
"__enter__",
"__exit__",
"__getattr__",
"__getitem__",
"__hash__",
"__init__",
"__int__",
"__iter__",
"__len__",
"__main__",
"__module__",
"__name__",
"__new__",
"__next__",
"__qualname__",
"__repr__",
"__setitem__",
"__str__",
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"EOFError",
"Ellipsis",
"Exception",
"GeneratorExit",
"ImportError",
"IndentationError",
"IndexError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"MemoryError",
"NameError",
"NoneType",
"NotImplementedError",
"OSError",
"OverflowError",
"RuntimeError",
"StopIteration",
"SyntaxError",
"SystemExit",
"TypeError",
"ValueError",
"ZeroDivisionError",
"abs",
"all",
"any",
"append",
"args",
"bool",
"builtins",
"bytearray",
"bytecode",
"bytes",
"callable",
"chr",
"classmethod",
"clear",
"close",
"const",
"copy",
"count",
"dict",
"dir",
"divmod",
"end",
"endswith",
"eval",
"exec",
"extend",
"find",
"format",
"from_bytes",
"get",
"getattr",
"globals",
"hasattr",
"hash",
"id",
"index",
"insert",
"int",
"isalpha",
"isdigit",
"isinstance",
"islower",
"isspace",
"issubclass",
"isupper",
"items",
"iter",
"join",
"key",
"keys",
"len",
"list",
"little",
"locals",
"lower",
"lstrip",
"main",
"map",
"micropython",
"next",
"object",
"open",
"ord",
"pop",
"popitem",
"pow",
"print",
"range",
"read",
"readinto",
"readline",
"remove",
"replace",
"repr",
"reverse",
"rfind",
"rindex",
"round",
"rsplit",
"rstrip",
"self",
"send",
"sep",
"set",
"setattr",
"setdefault",
"sort",
"sorted",
"split",
"start",
"startswith",
"staticmethod",
"step",
"stop",
"str",
"strip",
"sum",
"super",
"throw",
"to_bytes",
"tuple",
"type",
"update",
"upper",
"utf-8",
"value",
"values",
"write",
"zip",
]
| 0 | 0 | 0 |
6ab48e69b127581975afe0c0eae51c555a418412 | 8,658 | py | Python | ryu/tests/integrated/vrrp_common.py | zeinsteinz/ryu | c7614805bb8fff4f2093cd01ece6c6e518ec5f3a | [
"Apache-2.0"
] | 11 | 2015-06-19T03:46:20.000Z | 2020-08-21T02:22:30.000Z | ryu/tests/integrated/vrrp_common.py | zeinsteinz/ryu | c7614805bb8fff4f2093cd01ece6c6e518ec5f3a | [
"Apache-2.0"
] | null | null | null | ryu/tests/integrated/vrrp_common.py | zeinsteinz/ryu | c7614805bb8fff4f2093cd01ece6c6e518ec5f3a | [
"Apache-2.0"
] | 9 | 2015-05-22T09:00:08.000Z | 2021-01-24T02:46:36.000Z | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import random
from ryu.base import app_manager
from ryu.lib import hub
from ryu.lib import mac as lib_mac
from ryu.lib.packet import vrrp
from ryu.services.protocols.vrrp import api as vrrp_api
from ryu.services.protocols.vrrp import event as vrrp_event
_VRID = 7
_PRIMARY_IP_ADDRESS0 = '10.0.0.2'
_PRIMARY_IP_ADDRESS1 = '10.0.0.3'
| 39.898618 | 78 | 0.544468 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import random
from ryu.base import app_manager
from ryu.lib import hub
from ryu.lib import mac as lib_mac
from ryu.lib.packet import vrrp
from ryu.services.protocols.vrrp import api as vrrp_api
from ryu.services.protocols.vrrp import event as vrrp_event
_VRID = 7
_PRIMARY_IP_ADDRESS0 = '10.0.0.2'
_PRIMARY_IP_ADDRESS1 = '10.0.0.3'
class VRRPCommon(app_manager.RyuApp):
    """Base Ryu app for the VRRP integration test (python 2): configures many
    VRRP router pairs across two interfaces and checks master/backup election
    for every VRRP version / priority combination."""

    # Interface names are supplied by the concrete subclass.
    _IFNAME0 = None
    _IFNAME1 = None

    def __init__(self, *args, **kwargs):
        super(VRRPCommon, self).__init__(*args, **kwargs)

    def _main(self):
        """Run the full test matrix: VRRP v3 first, then v2."""
        self._main_version(vrrp.VRRP_VERSION_V3)
        self._main_version(vrrp.VRRP_VERSION_V2)
        print "done!"

    def _main_version(self, vrrp_version):
        """Exercise one VRRP version at owner/max/default/min priorities."""
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_ADDRESS_OWNER)
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_BACKUP_MAX)
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_BACKUP_DEFAULT)
        self._main_version_priority(vrrp_version,
                                    vrrp.VRRP_PRIORITY_BACKUP_MIN)

    def _main_version_priority(self, vrrp_version, priority):
        """Run one version/priority combination, without and with the
        election-verifying sleeps."""
        self._main_version_priority_sleep(vrrp_version, priority, False)
        self._main_version_priority_sleep(vrrp_version, priority, True)

    def _check(self, vrrp_api, instances):
        """Poll the VRRP manager until every vrid pair has exactly one MASTER
        and the master is the higher-priority router of the pair.

        instances maps vrid -> {0: reply0, 1: reply1} for the two routers of
        that vrid. Loops with 1s sleeps while the state looks transient.
        """
        while True:
            # Inner loop: wait until all instances are listed and none is
            # still in the INITIALIZE state.
            while True:
                rep = vrrp_api.vrrp_list(self)
                if len(rep.instance_list) >= len(instances) * 2:
                    if any(i.state == vrrp_event.VRRP_STATE_INITIALIZE
                           for i in rep.instance_list):
                        continue
                    break
                print len(rep.instance_list), '/', len(instances) * 2
                time.sleep(1)
            # for i in rep.instance_list:
            #     print i.instance_name, i.monitor_name, i.config, \
            #         i.interface, i.state
            assert len(rep.instance_list) == len(instances) * 2
            num_of_master = 0
            d = dict(((i.instance_name, i) for i in rep.instance_list))
            bad = 0
            for i in rep.instance_list:
                assert i.state in (vrrp_event.VRRP_STATE_MASTER,
                                   vrrp_event.VRRP_STATE_BACKUP)
                if i.state == vrrp_event.VRRP_STATE_MASTER:
                    num_of_master += 1
                vr = instances[i.config.vrid]
                # Is this instance the lower-priority side of its pair?
                if (vr[0].config.priority > vr[1].config.priority and
                        i.instance_name == vr[1].instance_name) or \
                   (vr[0].config.priority < vr[1].config.priority and
                        i.instance_name == vr[0].instance_name):
                    if i.state == vrrp_event.VRRP_STATE_MASTER:
                        print "bad master:"
                        print d[vr[0].instance_name].state, \
                            d[vr[0].instance_name].config.priority
                        print d[vr[1].instance_name].state, \
                            d[vr[1].instance_name].config.priority
                        bad += 1
                        # assert i.state != vrrp_event.VRRP_STATE_MASTER
            if bad > 0:
                # this could be a transient state
                print bad, "bad masters"
                time.sleep(1)
                continue
            if num_of_master >= len(instances):
                assert num_of_master == len(instances)
                break
            print num_of_master, '/', len(instances)
            time.sleep(1)
            continue

    def _main_version_priority_sleep(self, vrrp_version, priority, do_sleep):
        """Configure one router pair per vrid (plus the special _VRID pair at
        the given priority), optionally verify elections (do_sleep=True),
        shuffle priorities, then shut everything down cleanly."""
        app_mgr = app_manager.AppManager.get_instance()
        self.logger.debug('%s', app_mgr.applications)
        vrrp_mgr = app_mgr.applications['VRRPManager']

        step = 5
        instances = {}
        # One pair of routers per vrid; priorities derived from the vrid so
        # the two sides of each pair differ (vrid vs 256 - vrid), clamped to
        # the valid backup-priority range.
        for vrid in xrange(1, 256, step):
            if vrid == _VRID:
                continue
            print "vrid", vrid
            l = {}
            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN,
                       min(vrrp.VRRP_PRIORITY_BACKUP_MAX, vrid))
            rep0 = self._configure_vrrp_router(vrrp_version,
                                               prio,
                                               _PRIMARY_IP_ADDRESS0,
                                               self._IFNAME0,
                                               vrid)
            assert not rep0.instance_name is None
            l[0] = rep0
            prio = max(vrrp.VRRP_PRIORITY_BACKUP_MIN,
                       min(vrrp.VRRP_PRIORITY_BACKUP_MAX, 256 - vrid))
            rep1 = self._configure_vrrp_router(vrrp_version,
                                               prio,
                                               _PRIMARY_IP_ADDRESS1,
                                               self._IFNAME1,
                                               vrid)
            assert not rep1.instance_name is None
            l[1] = rep1
            instances[vrid] = l

        # The special _VRID pair uses the priority under test vs the default.
        print "vrid", _VRID
        l = {}
        rep0 = self._configure_vrrp_router(vrrp_version, priority,
                                           _PRIMARY_IP_ADDRESS0,
                                           self._IFNAME0, _VRID)
        assert not rep0.instance_name is None
        l[0] = rep0
        rep1 = self._configure_vrrp_router(
            vrrp_version, vrrp.VRRP_PRIORITY_BACKUP_DEFAULT,
            _PRIMARY_IP_ADDRESS1, self._IFNAME1, _VRID)
        assert not rep1.instance_name is None
        l[1] = rep1
        instances[_VRID] = l

        self.logger.debug('%s', vrrp_mgr._instances)

        if do_sleep:
            print "priority", priority
            print "waiting for instances starting"
            self._check(vrrp_api, instances)

        # Randomize one side of every ordinary pair and re-verify elections.
        for vrid in instances.keys():
            if vrid == _VRID:
                continue
            which = vrid & 1
            new_priority = int(random.uniform(vrrp.VRRP_PRIORITY_BACKUP_MIN,
                                              vrrp.VRRP_PRIORITY_BACKUP_MAX))
            i = instances[vrid][which]
            vrrp_api.vrrp_config_change(self, i.instance_name,
                                        priority=new_priority)
            i.config.priority = new_priority

        if do_sleep:
            print "priority shuffled"
            self._check(vrrp_api, instances)

        # Shut down one router of each pair; the survivor should take over.
        for vrid in instances.keys():
            if vrid == _VRID:
                continue
            which = vrid & 1
            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)
        vrrp_api.vrrp_shutdown(self, instances[_VRID][0].instance_name)

        if do_sleep:
            print "shutting down instances"
            while True:
                rep = vrrp_api.vrrp_list(self)
                if len(rep.instance_list) <= len(instances):
                    break
                print "left", len(rep.instance_list)
                time.sleep(1)
            assert len(rep.instance_list) == len(instances)
            print "waiting for the rest becoming master"
            while True:
                rep = vrrp_api.vrrp_list(self)
                if all(i.state == vrrp_event.VRRP_STATE_MASTER
                       for i in rep.instance_list):
                    break
                time.sleep(1)

        # Shut down the remaining routers and wait for a clean teardown.
        vrrp_api.vrrp_shutdown(self, instances[_VRID][1].instance_name)
        for vrid in instances.keys():
            if vrid == _VRID:
                continue
            which = 1 - (vrid & 1)
            vrrp_api.vrrp_shutdown(self, instances[vrid][which].instance_name)

        print "waiting for instances shutting down"
        while True:
            rep = vrrp_api.vrrp_list(self)
            if not rep.instance_list:
                break
            print "left", len(rep.instance_list)
            time.sleep(1)
| 7,392 | 218 | 23 |
7d266afa6ca16a8b9eba3310dd82268527f64aa4 | 12,926 | py | Python | flsuite/parLaser.py | sfeister/flsuite | 348c207f72f3bea3877afef46ab11cc472722f57 | [
"MIT"
] | 4 | 2018-12-01T18:07:39.000Z | 2019-12-16T12:55:26.000Z | flsuite/parLaser.py | sfeister/flsuite | 348c207f72f3bea3877afef46ab11cc472722f57 | [
"MIT"
] | null | null | null | flsuite/parLaser.py | sfeister/flsuite | 348c207f72f3bea3877afef46ab11cc472722f57 | [
"MIT"
] | 2 | 2019-03-10T05:22:28.000Z | 2019-12-16T12:57:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
parLaser.py: Defines a class for writing laser strings into a flash.par file.
Created by Scott Feister on Wed Feb 14 13:39:38 2018
See two examples towards the bottom of this document.
Note: Make sure to use FLASH setup flags to increase beam and pulse count as needed.
E.g. ./setup LaserSlab -auto ed_maxPulses=60 ed_maxBeams=20 ed_maxPulseSections=60
############# EXAMPLE SCRIPT #############
from flsuite.parLaser import parLaser, parLasers
import numpy as np
# Example 1: Three lasers, each with different parameters
las1 = parLaser(1, laslbl="Second Laser, 808 nm")
las1.lens = [20, 20, 30]
las1.targ = [20, 30, 40]
las1.powers = np.array([1,2,3,4,5])
las1.times = np.array([10,11,12,13,14])
las1.wavelength = 0.808
las2 = parLaser(2, laslbl="Second Laser, Many rays")
las2.lens = [15, 15, 23]
las2.targ = [22, 22, 41]
las2.powers = np.array([1,2,3,4,5,6,7])
las2.times = np.array([10,11,12,13,14,15,16])
las2.numberOfRays = 10000
las3 = parLaser(3, laslbl="Third Laser, Gaussian profile")
las3.lens = [14, 14, 16]
las3.targ = [40, 50, 52]
las3.powers = np.array([2,2.5,3])
las3.times = np.array([10,11,12])
las3.crossSectionFunctionType = "gaussian2D" # 2D Gaussian Beam
las3.gaussianExponent = 4.0 # 4.0 for supergaussian profile
las3.gaussianRadiusMajor = 0.048
las3.gaussianRadiusMinor = 0.048
las1.write('laser1.txt', 'w')
las2.write('laser2.txt', 'w')
las3.write('laser3.txt', 'w')
print("Three lasers written to 'laser1.txt', 'laser2.txt', 'laser3.txt'")
## Example 2: Make a whole bunch of the same laser, but with lens at varying x value
# Uses the "parlasers" class
l = parLasers(10) # Laser list
for i in range(len(l)):
l[i].lens = [i*10, 0, 0] # This is the only thing changing between the ten lasers!
l[i].targ = [5, 5, 5]
l[i].powers = np.array([1,2,3,4,5])
l[i].times = np.array([10,11,12,13,14])
l[i].numberOfRays = 10000
l[i].crossSectionFunctionType = "gaussian2D" # 2D Gaussian Beam
l[i].gaussianExponent = 4.0 # 4.0 for supergaussian profile
l[i].gaussianRadiusMajor = 0.048
l[i].gaussianRadiusMinor = 0.048
l.write('tenlasers.txt', 'w')
print("Ten lasers written to 'tenlasers.txt'")
######### END OF EXAMPLE SCRIPT #########
"""
import numpy as np
class parLasers(list):
    """A list of parLaser objects, writable as a single flash.par block."""

    def __str__(self):
        """Render every contained laser as one flash.par string."""
        return self.makePar()

    def maxPulseSections(self):
        """Return the largest number of pulse sections among the lasers
        (0 for an empty list), validating each laser first."""
        self.validate()
        return max((len(laser.powers) for laser in self), default=0)

    def validate(self):
        """Check every contained parLaser for simple mistakes, raising an
        Exception on the first problem found."""
        for laser in self:
            laser.validate()

    def write(self, file, mode="a"):
        """Open file and write the par string for all lasers.

        Input parameters:
            file    String, filename to be written
            mode    String, file open mode passed to Python's open() call:
                    'a' append, 'w' truncate/overwrite, 'x' exclusive
                    creation (fails if file exists)
        """
        with open(file, mode) as f:
            f.write(str(self))
class parLaser:
""" A class containing the parameters of a single beam and pulse flash.par (runtime parameters) input """
def __init__(self, lasnum, laslbl=None):
    """ Initialize a parLaser object.

    Input parameters:
       lasnum       Integer, any number >0 to identify this pulse/beam combination in the flash.par file
       laslbl       Optional string, a human-readable label for the laser (e.g. 'Quad24') placed in the par string title

    Almost all items available to define a laser pulse and beam in a flash.par file are replicated here.
    'None' values are initialized for most parameters. This is deliberate.
    Change values from 'None' and they be written into the flash.par file string.
    For values that are left as 'None', we avoid writing these parameters to the flash.par file string.
    """
    # Special variables for this class
    self.lasnum = int(lasnum)  # A number for the laser, considered both the beam-number and pulse-number
    self.laslbl = laslbl  # A label for the laser, e.g. 'Quad24', which is put into the title
    self.lens = None  # A 3-element list or array with values for [lensx, lensy, lensz]
    self.targ = None  # A 3-element list or array with values for [targetx, targety, targetz]
    self.times = None  # Will become an array of times for the laser pulse
    self.powers = None  # Will become an array of powers for the laser pulse

    # Basically initializes everything else found under the "ed_XXXX_1" header
    # (FLASH EnergyDeposition beam runtime parameters; any left as None is
    # simply omitted from the generated flash.par string).
    self.gridNAngularTics = None
    self.gridNSemiAxisMajorTics = None
    self.gridNSemiAxisMinorTics = None
    self.numberOfRays = None
    self.gaussianCenterMajor = None
    self.gaussianCenterMinor = None
    self.gaussianExponent = None
    self.gaussianRadiusMajor = None
    self.gaussianRadiusMinor = None
    self.gridDeltaSemiAxisMajor = None
    self.gridDeltaSemiAxisMinor = None
    self.initialRaySpeed = None
    self.lensSemiAxisMajor = None
    self.wavelength = None
    self.semiAxisMajorTorsionAngle = None
    self.targetSemiAxisMajor = None
    self.targetSemiAxisMinor = None
    self.crossSectionFunctionType = None
    self.gridType = None
    self.semiAxisMajorTorsionAxis = None
    self.ignoreBoundaryCondition = None
def __str__(self):
""" String representation of the parLaser object """
return self.makePar()
def validate(self):
""" Check the parLaser object for simple mistakes, raising Exceptions.
These mistakes would otherwise result in invalid flash.par strings.
Mistakes checked for include:
* Using a laser number less than 1 (flash.par requires pulse/beam numbers to be 1 or greater)
* Specifying unequal numbers of powers and times
* Leaving out x,y, or z position values in the lens and target definitions
"""
# Check for valid laser number (Should be an integer greater than 0)
if self.lasnum < 1:
raise Exception("Problem with pulse or beam with 'lasnum' of {}: 'lasnum' must be an integer greater than zero.".format(self.lasnum))
# Check that variables 'powers' and 'times' were both set as 1D arrays (or lists)
if not (hasattr(self.powers, '__len__') and (not isinstance(self.powers, str))):
raise Exception('Problem with pulse {}: Powers are not specified as a list or 1D array.'.format(self.lasnum))
if not (hasattr(self.times, '__len__') and (not isinstance(self.times, str))):
raise Exception('Problem with pulse {}: Times are not specified as a list or 1D array.'.format(self.lasnum))
# Check that equal numbers of powers and times are specified (one-to-one between powers and times)
if len(self.powers) != len(self.times):
raise Exception("Problem with pulse {}: Powers and times have different numbers of elements.".format(self.lasnum))
# Do some checks for lens and target arrays
if not (hasattr(self.lens, '__len__') and (not isinstance(self.lens, str))):
raise Exception('Problem with beam: Lens is not specified as a 3-element list or array.')
if not (hasattr(self.targ, '__len__') and (not isinstance(self.targ, str))):
raise Exception('Problem with beam: Targ is not specified as a 3-element list or array.')
if len(self.lens) != 3:
raise Exception('Problem with beam: Lens has less or more than 3 elements.')
if len(self.targ) != 3:
raise Exception('Problem with beam: Targ has less or more than 3 elements.')
def makePar(self):
""" Generate a string which can be copied and pasted into a flash.par file. """
## PERFORM CHECK
self.validate() # Don't move forward without validating the laser beam and pulse parameters
## INITIALIZE PAR STRING
par = ''
par += "\n"
if self.laslbl is not None:
par += "###### BEAM/PULSE COMBO #{}: {}\n".format(self.lasnum, self.laslbl)
else:
par += "###### BEAM/PULSE COMBO #{}\n".format(self.lasnum)
par += "#### Automatically-generated by parLaser.py version 0.0.1\n"
## ADD PULSE TO PAR STRING
# Write a pulse header for human readability
if self.laslbl is not None:
par += "## Begin pulse {} ({}):\n".format(self.lasnum, self.laslbl)
else:
par += "## Begin pulse {}:\n".format(self.lasnum)
# Write powers and times
par += "ed_numberOfSections_{} = {}\n".format(self.lasnum, len(self.powers))
for i, power in enumerate(self.powers, start=1):
par += "ed_power_{}_{} = {}\n".format(self.lasnum, i, power)
for i, time in enumerate(self.times, start=1):
par += "ed_time_{}_{} = {}\n".format(self.lasnum, i, time)
## ADD BEAM TO PAR STRING
# Write a beam header for human readability
par += "\n"
if self.laslbl is not None:
par += "## Begin beam {} ({}):\n".format(self.lasnum, self.laslbl)
else:
par += "## Begin beam {}:\n".format(self.lasnum)
# Associate the pulse with this beam
par += "ed_pulseNumber_{} = {}\n".format(self.lasnum, self.lasnum)
# Write lens and target parameters
for i, dim in enumerate(["X", "Y", "Z"]):
par += "ed_lens{}_{} = {}\n".format(dim, self.lasnum, self.lens[i])
for i, dim in enumerate(["X", "Y", "Z"]):
par += "ed_target{}_{} = {}\n".format(dim, self.lasnum, self.targ[i])
# Write all remaining beam parameters (anything not set to 'None')
keys_remaining = set(self.__dict__.keys()) - set(["lasnum", "laslbl", "lens", "targ", "powers", "times"]) # A list of properties of the parLaser object, excluding those items that we just wrote into the par string
for key in keys_remaining:
if getattr(self, key) is not None:
if isinstance(getattr(self,key), str):
par += 'ed_{}_{} = "{}"\n'.format(key, self.lasnum, getattr(self, key))
else:
par += 'ed_{}_{} = {}\n'.format(key, self.lasnum, getattr(self, key))
return par
def write(self, file, mode="a"):
""" Open file and write the par string from this parLaser.
Input parameters:
filename String, filename of to be written
mode String, file open mode to be passed into Python's open() call. Options include 'a' for append (if file exists), 'w' for truncate/overwrite, 'x' for exclusive creation, failing if file exists.
"""
with open(file, mode) as f:
f.write(str(self))
if __name__ == "__main__":
pass
| 44.116041 | 221 | 0.607071 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
parLaser.py: Defines a class for writing laser strings into a flash.par file.
Created by Scott Feister on Wed Feb 14 13:39:38 2018
See two examples towards the bottom of this document.
Note: Make sure to use FLASH setup flags to increase beam and pulse count as needed.
E.g. ./setup LaserSlab -auto ed_maxPulses=60 ed_maxBeams=20 ed_maxPulseSections=60
############# EXAMPLE SCRIPT #############
from flsuite.parLaser import parLaser, parLasers
import numpy as np
# Example 1: Three lasers, each with different parameters
las1 = parLaser(1, laslbl="Second Laser, 808 nm")
las1.lens = [20, 20, 30]
las1.targ = [20, 30, 40]
las1.powers = np.array([1,2,3,4,5])
las1.times = np.array([10,11,12,13,14])
las1.wavelength = 0.808
las2 = parLaser(2, laslbl="Second Laser, Many rays")
las2.lens = [15, 15, 23]
las2.targ = [22, 22, 41]
las2.powers = np.array([1,2,3,4,5,6,7])
las2.times = np.array([10,11,12,13,14,15,16])
las2.numberOfRays = 10000
las3 = parLaser(3, laslbl="Third Laser, Gaussian profile")
las3.lens = [14, 14, 16]
las3.targ = [40, 50, 52]
las3.powers = np.array([2,2.5,3])
las3.times = np.array([10,11,12])
las3.crossSectionFunctionType = "gaussian2D" # 2D Gaussian Beam
las3.gaussianExponent = 4.0 # 4.0 for supergaussian profile
las3.gaussianRadiusMajor = 0.048
las3.gaussianRadiusMinor = 0.048
las1.write('laser1.txt', 'w')
las2.write('laser2.txt', 'w')
las3.write('laser3.txt', 'w')
print("Three lasers written to 'laser1.txt', 'laser2.txt', 'laser3.txt'")
## Example 2: Make a whole bunch of the same laser, but with lens at varying x value
# Uses the "parlasers" class
l = parLasers(10) # Laser list
for i in range(len(l)):
l[i].lens = [i*10, 0, 0] # This is the only thing changing between the ten lasers!
l[i].targ = [5, 5, 5]
l[i].powers = np.array([1,2,3,4,5])
l[i].times = np.array([10,11,12,13,14])
l[i].numberOfRays = 10000
l[i].crossSectionFunctionType = "gaussian2D" # 2D Gaussian Beam
l[i].gaussianExponent = 4.0 # 4.0 for supergaussian profile
l[i].gaussianRadiusMajor = 0.048
l[i].gaussianRadiusMinor = 0.048
l.write('tenlasers.txt', 'w')
print("Ten lasers written to 'tenlasers.txt'")
######### END OF EXAMPLE SCRIPT #########
"""
import numpy as np
class parLasers(list):
""" A list of parLaser objects """
def __init__(self, size=1):
if size < 1 or type(size) is not int:
raise Exception("Size (the number of lasers) was set to {}, but must be an integer greater or equal to 1.".format(self.size))
super(parLasers, self).__init__([None]*size) # Inherit from the list class
for i in range(size):
self[i] = parLaser(i + 1)
def __str__(self):
""" String representation of the parLaser object """
return self.makePar()
def maxPulseSections(self):
""" Compute the maximum number of pulse sections among the lasers """
self.validate()
maxPS = 0
for i in range(len(self)):
maxPS = max(maxPS, len(self[i].powers))
return maxPS
def validate(self):
""" Check the parLasers object for simple mistakes, raising Exceptions.
"""
for i in range(len(self)):
self[i].validate()
def makePar(self):
par = ''
par += "# ~~~~~~~~~~~~~~~ PYTHON-GENERATED LASERS SECTION ~~~~~~~~~~~~~\n"
par += "# ~~~~ Automatically-generated by parLaser.py version 0.0.1 ~~~\n"
par += "# ~~ Note: Make sure to use FLASH setup flags to increase beam and pulse count as needed.\n"
par += "# ~~ E.g.:\n"
par += "# ./setup LaserSlab -auto ed_maxPulses={} ed_maxBeams={} ed_maxPulseSections={}\n\n".format(len(self), len(self), self.maxPulseSections())
par += 'ed_numberOfPulses = ' + str(len(self)) + "\n"
par += 'ed_numberOfBeams = ' + str(len(self)) + "\n"
# Iterate over the pulse/beam combos
for i in range(len(self)):
par += str(self[i])
par += "# ~~~~~~~~~~~ END OF PYTHON-GENERATED LASERS SECTION ~~~~~~~~~~~~~\n"
return par
def write(self, file, mode="a"):
""" Open file and write the par string from this parLaser.
Input parameters:
filename String, filename of to be written
mode String, file open mode to be passed into Python's open() call. Options include 'a' for append (if file exists), 'w' for truncate/overwrite, 'x' for exclusive creation, failing if file exists.
"""
with open(file, mode) as f:
f.write(str(self))
class parLaser:
""" A class containing the parameters of a single beam and pulse flash.par (runtime parameters) input """
def __init__(self, lasnum, laslbl=None):
""" Initialize a parLaser object.
Input parameters:
lasnum Integer, any number >0 to identify this pulse/beam combination in the flash.par file
mode String, file open mode to be passed into Python's open() call. Options include 'a' for append (if file exists), 'w' for truncate/overwrite, 'x' for exclusive creation, failing if file exists.
Almost all items available to define a laser pulse and beam in a flash.par file are replicated here.
'None' values are initialized for most parameters. This is deliberate.
Change values from 'None' and they be written into the flash.par file string.
For values that are left as 'None', we avoid writing these parameters to the flash.par file string.
"""
# Special variables for this class
self.lasnum = int(lasnum) # A number for the laser, considered both the beam-number and pulse-number
self.laslbl = laslbl # A label for the laser, e.g. 'Quad24', which is put into the title
self.lens = None # A 3-element list or array with values for [lensx, lensy, lensz]
self.targ = None # A 3-element list or array with values for [targetx, targety, targetz]
self.times = None # Will become an array of times for the laser pulse
self.powers = None # Will become an array of powers for the laser pulse
# Basically initializes everything else found under the "ed_XXXX_1" header
self.gridNAngularTics = None
self.gridNSemiAxisMajorTics = None
self.gridNSemiAxisMinorTics = None
self.numberOfRays = None
self.gaussianCenterMajor = None
self.gaussianCenterMinor = None
self.gaussianExponent = None
self.gaussianRadiusMajor = None
self.gaussianRadiusMinor = None
self.gridDeltaSemiAxisMajor = None
self.gridDeltaSemiAxisMinor = None
self.initialRaySpeed = None
self.lensSemiAxisMajor = None
self.wavelength = None
self.semiAxisMajorTorsionAngle = None
self.targetSemiAxisMajor = None
self.targetSemiAxisMinor = None
self.crossSectionFunctionType = None
self.gridType = None
self.semiAxisMajorTorsionAxis = None
self.ignoreBoundaryCondition = None
def __str__(self):
""" String representation of the parLaser object """
return self.makePar()
def validate(self):
""" Check the parLaser object for simple mistakes, raising Exceptions.
These mistakes would otherwise result in invalid flash.par strings.
Mistakes checked for include:
* Using a laser number less than 1 (flash.par requires pulse/beam numbers to be 1 or greater)
* Specifying unequal numbers of powers and times
* Leaving out x,y, or z position values in the lens and target definitions
"""
# Check for valid laser number (Should be an integer greater than 0)
if self.lasnum < 1:
raise Exception("Problem with pulse or beam with 'lasnum' of {}: 'lasnum' must be an integer greater than zero.".format(self.lasnum))
# Check that variables 'powers' and 'times' were both set as 1D arrays (or lists)
if not (hasattr(self.powers, '__len__') and (not isinstance(self.powers, str))):
raise Exception('Problem with pulse {}: Powers are not specified as a list or 1D array.'.format(self.lasnum))
if not (hasattr(self.times, '__len__') and (not isinstance(self.times, str))):
raise Exception('Problem with pulse {}: Times are not specified as a list or 1D array.'.format(self.lasnum))
# Check that equal numbers of powers and times are specified (one-to-one between powers and times)
if len(self.powers) != len(self.times):
raise Exception("Problem with pulse {}: Powers and times have different numbers of elements.".format(self.lasnum))
# Do some checks for lens and target arrays
if not (hasattr(self.lens, '__len__') and (not isinstance(self.lens, str))):
raise Exception('Problem with beam: Lens is not specified as a 3-element list or array.')
if not (hasattr(self.targ, '__len__') and (not isinstance(self.targ, str))):
raise Exception('Problem with beam: Targ is not specified as a 3-element list or array.')
if len(self.lens) != 3:
raise Exception('Problem with beam: Lens has less or more than 3 elements.')
if len(self.targ) != 3:
raise Exception('Problem with beam: Targ has less or more than 3 elements.')
def makePar(self):
""" Generate a string which can be copied and pasted into a flash.par file. """
## PERFORM CHECK
self.validate() # Don't move forward without validating the laser beam and pulse parameters
## INITIALIZE PAR STRING
par = ''
par += "\n"
if self.laslbl is not None:
par += "###### BEAM/PULSE COMBO #{}: {}\n".format(self.lasnum, self.laslbl)
else:
par += "###### BEAM/PULSE COMBO #{}\n".format(self.lasnum)
par += "#### Automatically-generated by parLaser.py version 0.0.1\n"
## ADD PULSE TO PAR STRING
# Write a pulse header for human readability
if self.laslbl is not None:
par += "## Begin pulse {} ({}):\n".format(self.lasnum, self.laslbl)
else:
par += "## Begin pulse {}:\n".format(self.lasnum)
# Write powers and times
par += "ed_numberOfSections_{} = {}\n".format(self.lasnum, len(self.powers))
for i, power in enumerate(self.powers, start=1):
par += "ed_power_{}_{} = {}\n".format(self.lasnum, i, power)
for i, time in enumerate(self.times, start=1):
par += "ed_time_{}_{} = {}\n".format(self.lasnum, i, time)
## ADD BEAM TO PAR STRING
# Write a beam header for human readability
par += "\n"
if self.laslbl is not None:
par += "## Begin beam {} ({}):\n".format(self.lasnum, self.laslbl)
else:
par += "## Begin beam {}:\n".format(self.lasnum)
# Associate the pulse with this beam
par += "ed_pulseNumber_{} = {}\n".format(self.lasnum, self.lasnum)
# Write lens and target parameters
for i, dim in enumerate(["X", "Y", "Z"]):
par += "ed_lens{}_{} = {}\n".format(dim, self.lasnum, self.lens[i])
for i, dim in enumerate(["X", "Y", "Z"]):
par += "ed_target{}_{} = {}\n".format(dim, self.lasnum, self.targ[i])
# Write all remaining beam parameters (anything not set to 'None')
keys_remaining = set(self.__dict__.keys()) - set(["lasnum", "laslbl", "lens", "targ", "powers", "times"]) # A list of properties of the parLaser object, excluding those items that we just wrote into the par string
for key in keys_remaining:
if getattr(self, key) is not None:
if isinstance(getattr(self,key), str):
par += 'ed_{}_{} = "{}"\n'.format(key, self.lasnum, getattr(self, key))
else:
par += 'ed_{}_{} = {}\n'.format(key, self.lasnum, getattr(self, key))
return par
def write(self, file, mode="a"):
""" Open file and write the par string from this parLaser.
Input parameters:
filename String, filename of to be written
mode String, file open mode to be passed into Python's open() call. Options include 'a' for append (if file exists), 'w' for truncate/overwrite, 'x' for exclusive creation, failing if file exists.
"""
with open(file, mode) as f:
f.write(str(self))
if __name__ == "__main__":
pass
| 1,205 | 0 | 57 |
f706c0c3e8cc27ca3edb6a7b17a579cd796d7cc7 | 6,962 | py | Python | tests/test_integration.py | prototypefund/lazycluster | e6fbd69dbd73ec9bf101a502f25f7afdf0579f66 | [
"Apache-2.0"
] | 44 | 2019-08-07T12:01:07.000Z | 2021-09-02T16:50:51.000Z | tests/test_integration.py | prototypefund/lazycluster | e6fbd69dbd73ec9bf101a502f25f7afdf0579f66 | [
"Apache-2.0"
] | 9 | 2020-10-26T13:08:32.000Z | 2021-09-16T02:13:58.000Z | tests/test_integration.py | prototypefund/lazycluster | e6fbd69dbd73ec9bf101a502f25f7afdf0579f66 | [
"Apache-2.0"
] | 9 | 2019-09-18T07:52:09.000Z | 2022-02-11T13:48:19.000Z | import os
import re
import sys
import time
from subprocess import PIPE, run
from types import ModuleType
from typing import Union
import docker
import requests
import storm.__main__ as storm
from lazycluster import Runtime, RuntimeGroup, RuntimeManager, RuntimeTask
from .config import RUNTIME_DOCKER_IMAGE, RUNTIME_NAMES, WORKSPACE_PORT
def setup_module(module: ModuleType) -> None:
""" setup any state specific to the execution of the given module."""
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
_start_runtime_container(runtime_name, docker_client)
# Sleep a moment to give all processes time to start within the Workspace containers
time.sleep(15)
for runtime_name in RUNTIME_NAMES:
_setup_ssh_connection_to_runtime(runtime_name)
def teardown_module(module: ModuleType) -> None:
"""teardown any state that was previously setup with a setup_module
method.
"""
_remove_runtimes()
# -------------------------------------------------------------------------
| 32.685446 | 195 | 0.657282 | import os
import re
import sys
import time
from subprocess import PIPE, run
from types import ModuleType
from typing import Union
import docker
import requests
import storm.__main__ as storm
from lazycluster import Runtime, RuntimeGroup, RuntimeManager, RuntimeTask
from .config import RUNTIME_DOCKER_IMAGE, RUNTIME_NAMES, WORKSPACE_PORT
def setup_module(module: ModuleType) -> None:
""" setup any state specific to the execution of the given module."""
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
_start_runtime_container(runtime_name, docker_client)
# Sleep a moment to give all processes time to start within the Workspace containers
time.sleep(15)
for runtime_name in RUNTIME_NAMES:
_setup_ssh_connection_to_runtime(runtime_name)
def teardown_module(module: ModuleType) -> None:
"""teardown any state that was previously setup with a setup_module
method.
"""
_remove_runtimes()
class TestRuntime:
def test_setup(self) -> None:
for runtime_name in RUNTIME_NAMES:
completed_process = run(
f"ssh {runtime_name} 'echo $WORKSPACE_NAME'",
shell=True,
stdout=PIPE,
stderr=PIPE,
)
assert completed_process.stderr == b"", "The stderr is not emtpy"
stdout = completed_process.stdout.decode("UTF-8").replace("\n", "")
assert stdout == runtime_name, "Stdout is not equal to the runtime_name"
if not RUNTIME_NAMES:
raise RuntimeError("No runtime names in integration/config.py configured")
Runtime(RUNTIME_NAMES[0])
def test_echo(self) -> None:
runtime_name = RUNTIME_NAMES[len(RUNTIME_NAMES) - 1]
rt = Runtime(runtime_name)
msg = f"Hello Runtime {runtime_name}"
assert rt.echo(msg).rstrip("\n") == msg
def test_working(self) -> None:
runtime_name = RUNTIME_NAMES[0]
exp_working_dir = "/etc"
rt = Runtime(runtime_name, working_dir=exp_working_dir)
act_working_dir = rt.echo("${PWD}").rstrip("\n")
assert exp_working_dir == act_working_dir
task = RuntimeTask("get-working-dir").run_command("echo ${PWD}")
rt.execute_task(task, execute_async=False)
assert exp_working_dir == rt.execution_log(task.name)[0].rstrip("\n").rstrip(
"\r"
)
class TestRuntimeGroup:
def test_creation(self) -> None:
runtime_group = RuntimeGroup(hosts=RUNTIME_NAMES)
for runtime_name in RUNTIME_NAMES:
assert runtime_name in runtime_group._runtimes
assert isinstance(runtime_group._runtimes[runtime_name], Runtime)
class TestRuntimeManager:
def test_create_group(self) -> None:
runtime_group = RuntimeManager().create_group()
for runtime_name in RUNTIME_NAMES:
assert runtime_name in runtime_group._runtimes
assert isinstance(runtime_group._runtimes[runtime_name], Runtime)
# -------------------------------------------------------------------------
def _remove_runtimes() -> None:
docker_client = docker.from_env()
for runtime_name in RUNTIME_NAMES:
try:
runtime_container = docker_client.containers.get(runtime_name)
runtime_container.remove(force=True)
except docker.errors.NotFound:
# TODO: handle create a docker container if not running as containerized test
print(f"Conatiner {runtime_name} not found")
# Delete ssh config as well, because the ssh setup fails
# when testing against multiple python versions
storm.delete(runtime_name)
def _get_current_container_id() -> str:
return run(
"awk -F/ '{ print $NF }' /proc/1/cpuset",
shell=True,
stdout=PIPE,
stderr=PIPE,
encoding="UTF-8",
).stdout.rstrip("\n")
def _start_runtime_container(name: str, client: docker.DockerClient) -> None:
try:
container = client.containers.run(
RUNTIME_DOCKER_IMAGE,
name=name,
environment={"WORKSPACE_NAME": name},
detach=True,
)
except docker.errors.APIError:
_remove_runtimes()
raise
container.reload()
ip_address = container.attrs["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
os.environ[name] = ip_address
_wait_until_started(ip_address, WORKSPACE_PORT)
def _setup_ssh_connection_to_runtime(runtime_name: str) -> None:
runtime_host = os.getenv(runtime_name, "localhost")
response = requests.get(
f"http://{runtime_host}:{WORKSPACE_PORT}/tooling/ssh/setup-command?origin=http://{runtime_host}:{WORKSPACE_PORT}"
)
ssh_script_runner_regex = rf'^\/bin\/bash <\(curl -s --insecure "(http:\/\/{runtime_host}:{WORKSPACE_PORT}\/shared\/ssh\/setup\?token=[a-z0-9]+&host={runtime_host}&port={WORKSPACE_PORT})"\)$'
pattern = re.compile(ssh_script_runner_regex)
match = pattern.match(response.text)
assert match, "SSH setup script url not found"
# Execute the ssh setup script and automatically pass an ssh connection name to the script
script_url = match.groups()[0]
r = requests.get(script_url)
setup_script_path = "./setup-ssh.sh"
_remove_file_if_exists(setup_script_path)
with open(setup_script_path, "w") as file:
file.write(r.text)
# make the file executable for the user
os.chmod(setup_script_path, 0o744)
completed_process = run(
[f'/bin/bash -c "{setup_script_path}"'],
input=runtime_name,
encoding="ascii",
shell=True,
stdout=PIPE,
stderr=PIPE,
)
# child = pexpect.spawn(f"/bin/bash {setup_script_path}", encoding="UTF-8")
# child.expect("Provide a name .*")
# child.sendline(runtime_name)
# child.expect("remote_ikernel was detected .*")
# child.sendline("no")
# child.expect("Do you want to add this connection as mountable SFTP storage .*")
# child.sendline("no")
# child.close()
_remove_file_if_exists(setup_script_path)
assert completed_process.stderr == ""
assert "Connection successful!" in completed_process.stdout
def _wait_until_started(ip_address: str, workspace_port: Union[str, int]) -> None:
index = 0
health_url = f"http://{ip_address}:{str(workspace_port)}/healthy"
response = None
while response is None or (response.status_code != 200 and index < 15):
index += 1
time.sleep(1)
try:
response = requests.get(health_url, allow_redirects=False, timeout=2)
except requests.ConnectionError:
# Catch error that is raised when the workspace container is not reachable yet
pass
if index == 15:
print("The workspace did not start")
sys.exit(-1)
def _remove_file_if_exists(path: str) -> None:
try:
os.remove(path)
except OSError:
pass
| 5,560 | 3 | 339 |
b4898730c1b69db642035dbd15ca66760a4419c7 | 17,407 | py | Python | pylayers/antprop/channelc.py | ArtashesH/PylayersWith3D | e4c35279a7da121d9a68282a6a0c3decfba696b4 | [
"MIT"
] | null | null | null | pylayers/antprop/channelc.py | ArtashesH/PylayersWith3D | e4c35279a7da121d9a68282a6a0c3decfba696b4 | [
"MIT"
] | null | null | null | pylayers/antprop/channelc.py | ArtashesH/PylayersWith3D | e4c35279a7da121d9a68282a6a0c3decfba696b4 | [
"MIT"
] | null | null | null | # -*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.channelc
VectChannel Class
=================
.. autosummary::
:toctree: generated/
VectChannel.__init__
VectChannel.show3_old
VectChannel.show3
ScalChannel Class
=================
.. autosummary::
:toctree: generated/
ScalChannel.__init__
ScalChannel.info
ScalChannel.imshow
ScalChannel.apply
ScalChannel.applywavC
ScalChannel.applywavB
ScalChannel.applywavA
ScalChannel.doddoa
ScalChannel.wavefig
ScalChannel.rayfig
VectLOS Class
=============
.. autosummary::
:toctree: generated/
VectLOS.__init__
VectLOS.cir
"""
import doctest
import pdb
import numpy as np
import scipy as sp
import pylab as plt
import struct as stru
from pylayers.antprop.channel import *
import pylayers.util.pyutil as pyu
import pylayers.signal.bsignal as bs
import pylayers.util.geomutil as geu
from pylayers.antprop.raysc import GrRay3D
from pylayers.util.project import *
class VectChannel(Ctilde):
    """ Vector (polarimetric) representation of the propagation channel.

    Specializes :class:`Ctilde` by loading, for one (tx, rx) link of a
    simulation, the ray transfer functions produced off-line by the ray
    tracing tool (the .field / .tauk / .tang / .rang file family).

    Attributes
    ----------
    Ctt, Cpp, Cpt, Ctp : FUsignal
        polarimetric channel terms (Nray x Nf), filled by Ctilde.load
    Frt, Frp, Ftt, Ftp : FUsignal
        antenna transfer functions (built in vec2scal1)
    fGHz : array
        frequency base (GHz)
    tauk : array
        ray delays (ns)
    tang : array
        direction of departure (DoD) angles
    rang : array
        direction of arrival (DoA) angles
    filetra : str
        name of the ray file (kept only for show3 / show3_old)
    L : Layout
        propagation structure (kept only for show3 / show3_old)

    Methods
    -------
    __init__(S,itx,irx)
        S is a simulation object, itx and irx are index of tx and rx
    show3_old()
        geomview ray visualization (old version)
    show3()
        geomview ray visualization
    """
    def __init__(self, S, itx, irx, transpose=False):
        """ Load the vector channel of link (itx, irx) from simulation S.

        Parameters
        ----------
        S : Simulation
            simulation object holding the per-link file name dictionaries
        itx : int
            tx number (index into the simulation dictionaries)
        irx : int
            rx number (index into the simulation dictionaries)
        transpose : bool
            antenna transposition indicator, forwarded to Ctilde.load
        """
        # .. todo::
        #
        #  to be checked -ravel-
        self.fail = False
        # Resolve the long paths of the four per-link files produced by
        # the ray-tracing tool (field / delays / DoD angles / DoA angles).
        # Only filefield is actually decoded below; the other three are
        # resolved but their decoding is currently commented out.
        _filefield = S.dfield[itx][irx]
        filefield = pyu.getlong(_filefield,pstruc['DIRTUD'])
        _filetauk = S.dtauk[itx][irx]
        filetauk = pyu.getlong(_filetauk,pstruc['DIRTUD'])
        _filetang = S.dtang[itx][irx]
        filetang = pyu.getlong(_filetang,pstruc['DIRTUD'])
        _filerang = S.drang[itx][irx]
        filerang = pyu.getlong(_filerang,pstruc['DIRTUD'])
        """
        .. todo::
            Revoir Freq
        """
        # old version
        #freq = S.freq()
        #self.freq = freq
        self.fGHz = S.fGHz
        #
        # show3 of gr needs filetra and the layout
        # (not pretty: kept on the instance only for visualization)
        #
        self.filetra = S.dtra[itx][irx]
        self.L = S.L
        # -- legacy decoding of the .tauk file, kept for reference --
        #try:
        #    fo = open(filetauk, "rb")
        #except:
        #    self.fail=True
        #    print "file ",filetauk, " is unreachable"
        # decode filetauk
        #if not self.fail:
        #    nray_tauk = unpack('i',fo.read(4))[0]
        #    print "nb rayons dans .tauk : ",nray_tauk
        #    buf = fo.read()
        #    fo.close()
        #    nray = len(buf)/8
        #    print "nb rayons 2: ",nray
        #    self.tauk = ndarray(shape=nray,buffer=buf)
        #    if nray_tauk != nray:
        #        print itx , irx
        #        print nray_tauk - nray
        #self.tauk = self.tauk
        # Delegate the actual decoding of the .field file to Ctilde
        Ctilde.__init__(self)
        self.load(filefield, transpose)
        # -- legacy decoding of the angular files, kept for reference --
        # decode the angular files (.tang and .rang)
        # #try:
        #    fo = open(filetang, "rb")
        # except:
        #     self.fail=True
        #     print "file ",filetang, " is unreachable"
        # if not self.fail:
        #     nray_tang = unpack('i',fo.read(4))[0]
        #     buf = fo.read()
        #     fo.close()
        #     # workaround for evalfield bug
        #     tmp = ndarray(shape=(nray_tang,2),buffer=buf)
        #     self.tang = tmp[0:nray,:]
        # try:
        #     fo = open(filerang, "rb")
        # except:
        #     self.fail=True
        #     print "file ",filerang, " is unreachable"
        #
        # if not self.fail:
        #     nray_rang = stru.unpack('i',fo.read(4))[0]
        #     buf = fo.read()
        #     fo.close()
        #     # workaround for evalfield bug
        #     tmp = ndarray(shape=(nray_rang,2),buffer=buf)
        #     self.rang = tmp[0:nray,:]
        #sh = shape(self.Ctt.y)
        """
        .. todo::
            Express Ftt and Ftp in global frame from Tt and ant_tx
            Express Frt and Frp in global frame from Tt and ant_tx
        """
        #self.Ftt = FUsignal(fGHz,np.ones(sh))
        #self.Ftp = FUsignal(fGHz,np.zeros(sh))
        #self.Frt = FUsignal(fGHz,np.ones(sh))
        #self.Frp = FUsignal(fGHz,np.zeros(sh))
    def show3_old(self, id=0):
        """ geomview visualization old version

        This function provides a complete ray tracing visualization
        of the channel structure. The rays are color coded as a function
        of their energy (red / blue / cyan / green / yellow by
        decreasing energy decade).

        Parameters
        ----------
        id : int
            index used in the generated geomview list file name
        """
        # Total ray energy summed over the 4 polarimetric terms
        E = self.Ctt.energy() + self.Ctp.energy() + \
            self.Cpt.energy() + self.Cpp.energy()
        # Sort rays by decreasing energy.
        # NOTE(review): `argsort` and `where` are unqualified here --
        # presumably provided by the star import of
        # pylayers.antprop.channel (numpy names); verify.
        u = argsort(E)
        v = u[-1::-1]
        Es = E[v]
        gr = GrRay3D()
        gr.load(self.filetra, self.L)
        filename = pyu.getlong("grRay" + str(id) + "_col.list",pstruc['DIRGEOM'])
        fo = open(filename, "w")
        fo.write("LIST\n")
        fo.write("{<strucTxRx.off}\n")
        Emax = Es[0]
        # NOTE(review): Es[0] is a scalar, so len(Emax) raises TypeError;
        # this probably should count the rays with energy >= 0.5*Emax,
        # matching the bands below -- confirm before relying on this method.
        rayset = len(Emax)
        # strongest rays: red
        for i in range(rayset):
            j = v[i]
            r = gr.ray3d[j]
            col = np.array([1, 0, 0])  # red
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # rays with 10% .. 50% of the maximum energy: blue
        rayset = len(where((Es >= 0.1 * Emax) & (Es < 0.5 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 0, 1])  # blue
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # rays with 1% .. 10% of the maximum energy: cyan
        rayset = len(where((Es >= 0.01 * Emax) & (Es < 0.1 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 1, 1])  # cyan
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # rays with 0.1% .. 1% of the maximum energy: green
        rayset = len(where((Es >= 0.001 * Emax) & (Es < 0.01 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 1, 0])  # green
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        # remaining weak rays: yellow
        rayset = len(where(Es < 0.001 * Emax)[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([1, 1, 0])  # yellow
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        fo.close()
        # launch geomview on the generated list file, in the background
        chaine = "geomview " + filename + " 2>/dev/null &"
        os.system(chaine)
    def show3(self, seuildb=100):
        """ geomview vizualization

        This function provides a complete ray tracing visualization
        of the radio channel. Rays are color coded (gray level) and
        line-width coded as a function of their energy.

        Parameters
        ----------
        seuildb : float
            dynamic range threshold in dB below the strongest ray
            (default 100) : weaker rays saturate to white
        """
        # Total ray energy summed over the 4 polarimetric terms
        E = self.Ctt.energy() + self.Ctp.energy() + \
            self.Cpt.energy() + self.Cpp.energy()
        # NOTE(review): `argsort` is unqualified -- presumably a numpy
        # name re-exported by pylayers.antprop.channel; verify.
        u = argsort(E)
        v = u[-1::-1]
        Es = E[v]
        gr = GrRay3D()
        gr.load(self.filetra, self.L)
        filename = pyu.getlong("grRay" + str(seuildb) + "_col.list", pstruc['DIRGEOM'])
        fo = open(filename, "w")
        fo.write("LIST\n")
        fo.write("{<strucTxRx.off}\n")
        Emax = Es[0]
        rayset = len(v)
        # gray level: 0 (black) for the strongest ray, 1 (white) at the
        # seuildb threshold and below
        db = 20 * np.log10(Es)
        c = 1 - (db > -seuildb) * (db + seuildb) / seuildb
        # line width: one step per energy decade below the maximum.
        # NOTE(review): builtin round() on an ndarray fails on Python 3;
        # np.round is probably intended here -- confirm.
        app = round(np.log10(Es / Emax))
        lw = app - min(app)
        for i in range(rayset):
            j = v[i]
            r = gr.ray3d[j]
            col = np.array([c[i], c[i], c[i]])
            l = int(lw[i])
            fileray = r.show3(False, False, col, j, l)
            #fileray =r.show3(False,False,col,j)
            fo.write("{< " + fileray + " }\n")
        fo.close()
        # launch geomview (no panel, white background) in the background
        chaine = "geomview -nopanel -b 1 1 1 " + filename + " 2>/dev/null &"
        os.system(chaine)
class ScalChannel(object):
"""
DEPRECATED
ScalChannel Class :
The ScalChannel is obtained from combination of the propagation
channel and the antenna transfer function from both transmitting
and receiving antennas
Members
-------
H : FUDSignal
ray transfer functions (nray,nfreq)
dod :
        direction of departure (rad) [theta_t,phi_t] nray x 2
doa :
direction of arrival (rad) [theta_r,phi_r] nray x 2
tauk :
delay ray k in ns
"""
def info(self):
""" display information
"""
#print 'Ftt,Ftp,Frt,Frp'
#print 'dod,doa,tau'
#print 'H - FUDsignal '
print ('tau min , tau max :', min(self.tau), max(self.tau))
self.H.info()
def imshow(self):
""" imshow vizualization of H
"""
self.H
sh = np.shape(self.H.y)
itau = np.arange(len(self.tau))
plt.imshow(abs(self.H.y))
plt.show()
def apply(self, W):
""" Apply a FUsignal W to the ScalChannel.
Parameters
----------
W : Bsignal.FUsignal
It exploits multigrid convolution from Bsignal.
Notes
-----
+ W may have a more important number of points and a smaller frequency band.
+ If the frequency band of the waveform exceeds the one of the ScalChannei, a warning is sent.
+ W is a FUsignal whose shape doesn't need to be homogeneous with FUDsignal H
"""
H = self.H
U = H * W
V = bs.FUDsignal(U.x, U.y, H.tau0)
return(V)
def applywavC(self, w, dxw):
""" apply waveform method C
Parameters
----------
w :
waveform
dxw
Notes
-----
The overall received signal is built in time domain
w is apply on the overall CIR
"""
H = self.H
h = H.ft1(500, 1)
dxh = h.dx()
if (abs(dxh - dxw) > 1e-10):
if (dxh < dxw):
# reinterpolate w
f = interp1d(w.x, w.y)
x_new = arange(w.x[0], w.x[-1], dxh)[0:-1]
y_new = f(x_new)
w = TUsignal(x_new, y_new)
else:
# reinterpolate h
f = interp1d(h.x, h.y)
x_new = arange(h.x[0], h.x[-1], dxw)[0:-1]
y_new = f(x_new)
h = TUsignal(x_new, y_new)
ri = h.convolve(w)
return(ri)
def applywavB(self, Wgam):
""" apply waveform method B (time domain )
Parameters
----------
Wgam :
waveform including gamma factor
Returns
-------
ri : TUDsignal
impulse response for each ray separately
Notes
------
The overall received signal is built in time domain
Wgam is applied on each Ray Transfer function
See Also
--------
pylayers.signal.bsignal.TUDsignal.ft1
"""
#
# return a FUDsignal
#
Y = self.apply(Wgam)
#ri = Y.ft1(500,0)
# Le fftshift est activé
ri = Y.ft1(500, 1)
return(ri)
def applywavA(self, Wgam, Tw):
""" apply waveform method A
Parameters
----------
Wgam :
Tw :
The overall received signal is built in frequency domain
"""
Hab = self.H.ft2(0.001)
HabW = Hab * Wgam
RI = HabW.symHz(10000)
ri = RI.ifft(0, 'natural')
ri.translate(-Tw)
return(ri)
def doddoa(self):
""" doddoa() : DoD / DoA diagram
"""
dod = self.dod
doa = self.doa
#
#col = 1 - (10*np.log10(Etot)-Emin)/(Emax-Emin)
Etot = self.H.energy()
Etot = Etot / max(Etot)
al = 180 / np.pi
col = 10 * np.log10(Etot)
print (len(dod[:, 0]), len(dod[:, 1]), len(col[:]))
plt.subplot(121)
plt.scatter(dod[:, 0] * al, dod[:, 1] * al, s=15, c=col,
cmap=plt.cm.gray_r, edgecolors='none')
a = colorbar()
#a.set_label('dB')
plt.xlabel("$\\theta_t(\degree)$", fontsize=18)
plt.ylabel('$\phi_t(\degree)$', fontsize=18)
title('DoD')
plt.subplot(122)
plt.scatter(doa[:, 0] * al, doa[:, 1] * al, s=15, c=col,
cmap=plt.cm.gray_r, edgecolors='none')
b = colorbar()
b.set_label('dB')
plt.title('DoA')
plt.xlabel("$\\theta_r(\degree)$", fontsize=18)
plt.ylabel("$\phi_r (\degree)$", fontsize=18)
plt.show()
def wavefig(self, w, Nray=5):
""" display
Parameters
----------
w : waveform
Nray : int
number of rays to be displayed
"""
# Construire W
W = w.ft()
# Appliquer W
Y = self.apply(W)
#r.require('graphics')
#r.postscript('fig.eps')
#r('par(mfrow=c(2,2))')
#Y.fig(Nray)
y = Y.iftd(100, 0, 50, 0)
y.fig(Nray)
#r.dev_off()
#os.system("gv fig.eps ")
#y.fidec()
# Sur le FUsignal retourn
# A gauche afficher le signal sur chaque rayon
# A droite le meme signal decal
# En bas a droite le signal resultant
def rayfig(self, k, W, col='red'):
""" build a figure with rays
Parameters
----------
k : ray index
W : waveform (FUsignal)
Notes
-----
W is apply on k-th ray and the received signal is built in time domain
"""
# get the kth Ray Transfer function
Hk = bs.FUDsignal(self.H.x, self.H.y[k, :])
dxh = Hk.dx()
dxw = W.dx()
w0 = W.x[0] # fmin W
hk0 = Hk.x[0] # fmin Hk
# on s'arrange pour que hk0 soit egal a w0 (ou hk0 soit legerement inferieur a w0)
if w0 < hk0:
np = ceil((hk0 - w0) / dxh)
hk0_new = hk0 - np * dxh
x = arange(hk0_new, hk0 + dxh, dxh)[0:-1]
Hk.x = hstack((x, Hk.x))
Hk.y = hstack((zeros(np), Hk.y))
if (abs(dxh - dxw) > 1e-10):
if (dxh < dxw):
# reinterpolate w
print (" resampling w")
x_new = arange(W.x[0], W.x[-1] + dxh, dxh)[0:-1]
Wk = W.resample(x_new)
dx = dxh
else:
# reinterpolate h
print (" resampling h")
x_new = arange(Hk.x[0], Hk.x[-1] + dxw, dxw)[0:-1]
Hk = Hk.resample(x_new)
dx = dxw
Wk = W
# on s'arrange que Hk.x[0]==Wk.x[0]
# if Wk.x[0]!=Hk.x[0]:
# x=arange(Wk.x[0],Hk.x[0],dx)
# if Hk.x[0]!=x[0]:
# Hk.x=hstack((x,Hk.x[1:]))
# nz=len(x)
# Hk.y=hstack((zeros(nz),Hk.y))
# else:
# Hk.x=hstack((x,Hk.x[0:]))
# nz=len(x)
# Hk.y=hstack((zeros(nz),Hk.y))
#
self.Hk = Hk
self.Wk = Wk
Rk = Hk * Wk
self.Rk = Rk
rk = Rk.iftshift()
plot(rk.x, rk.y, col)
return(rk)
| 26.097451 | 106 | 0.481128 | # -*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.channelc
VectChannel Class
=================
.. autosummary::
:toctree: generated/
VectChannel.__init__
VectChannel.show3_old
VectChannel.show3
ScalChannel Class
=================
.. autosummary::
:toctree: generated/
ScalChannel.__init__
ScalChannel.info
ScalChannel.imshow
ScalChannel.apply
ScalChannel.applywavC
ScalChannel.applywavB
ScalChannel.applywavA
ScalChannel.doddoa
ScalChannel.wavefig
ScalChannel.rayfig
VectLOS Class
=============
.. autosummary::
:toctree: generated/
VectLOS.__init__
VectLOS.cir
"""
import doctest
import pdb
import numpy as np
import scipy as sp
import pylab as plt
import struct as stru
from pylayers.antprop.channel import *
import pylayers.util.pyutil as pyu
import pylayers.signal.bsignal as bs
import pylayers.util.geomutil as geu
from pylayers.antprop.raysc import GrRay3D
from pylayers.util.project import *
class VectChannel(Ctilde):
""" container for a vector representation of the propagation channel
Attributes
-----------
Ctt FUsignal (Nray x Nf canal )
Cpp
Cpt
Ctp
built in vec2scal1
Frt Fusignal (Nray x Nf antenna )
Frp
Ftt
Ftp
fGHz : frequency
tauk : delay
tang : dod
rang : doa
Methods
-------
init(S,itx,irx)
S is a simulation object, itx and irx are index of tx and rx
show(display=False,mode='linear')
display vect channel
doadod()
scatter plot DoA - DoD
vec2scal(fGHz)
build scal channel without antenna
vec2scal1(fGHz)
build scal channel with antenna
"""
def __init__(self, S, itx, irx, transpose=False):
"""
Parameters
----------
S
Simulation
itx
tx number
irx
rx number
transpose
antenna transposition indicator
"""
# .. todo::
#
# a verifier -ravel-
self.fail = False
_filefield = S.dfield[itx][irx]
filefield = pyu.getlong(_filefield,pstruc['DIRTUD'])
_filetauk = S.dtauk[itx][irx]
filetauk = pyu.getlong(_filetauk,pstruc['DIRTUD'])
_filetang = S.dtang[itx][irx]
filetang = pyu.getlong(_filetang,pstruc['DIRTUD'])
_filerang = S.drang[itx][irx]
filerang = pyu.getlong(_filerang,pstruc['DIRTUD'])
"""
.. todo::
Revoir Freq
"""
# old version
#freq = S.freq()
#self.freq = freq
self.fGHz = S.fGHz
#
# pour show3 de gr on a besoin de filetra et indoor
# pas beau
#
self.filetra = S.dtra[itx][irx]
self.L = S.L
#try:
# fo = open(filetauk, "rb")
#except:
# self.fail=True
# print "file ",filetauk, " is unreachable"
# decode filetauk
#if not self.fail:
# nray_tauk = unpack('i',fo.read(4))[0]
# print "nb rayons dans .tauk : ",nray_tauk
# buf = fo.read()
# fo.close()
# nray = len(buf)/8
# print "nb rayons 2: ",nray
# self.tauk = ndarray(shape=nray,buffer=buf)
# if nray_tauk != nray:
# print itx , irx
# print nray_tauk - nray
#self.tauk = self.tauk
Ctilde.__init__(self)
self.load(filefield, transpose)
# decode the angular files (.tang and .rang)
# #try:
# fo = open(filetang, "rb")
# except:
# self.fail=True
# print "file ",filetang, " is unreachable"
# if not self.fail:
# nray_tang = unpack('i',fo.read(4))[0]
# buf = fo.read()
# fo.close()
# # coorectif Bug evalfield
# tmp = ndarray(shape=(nray_tang,2),buffer=buf)
# self.tang = tmp[0:nray,:]
# try:
# fo = open(filerang, "rb")
# except:
# self.fail=True
# print "file ",filerang, " is unreachable"
#
# if not self.fail:
# nray_rang = stru.unpack('i',fo.read(4))[0]
# buf = fo.read()
# fo.close()
# # correctif Bug evalfield
# tmp = ndarray(shape=(nray_rang,2),buffer=buf)
# self.rang = tmp[0:nray,:]
#sh = shape(self.Ctt.y)
"""
.. todo::
Express Ftt and Ftp in global frame from Tt and ant_tx
Express Frt and Frp in global frame from Tt and ant_tx
"""
#self.Ftt = FUsignal(fGHz,np.ones(sh))
#self.Ftp = FUsignal(fGHz,np.zeros(sh))
#self.Frt = FUsignal(fGHz,np.ones(sh))
#self.Frp = FUsignal(fGHz,np.zeros(sh))
def show3_old(self, id=0):
""" geomview visualization old version
This function provides a complete ray tracing vsualization
of the channel structure. The rays are color coded as a fonction
of their energy.
Parameters
----------
id : int
index of filetra
"""
E = self.Ctt.energy() + self.Ctp.energy() + \
self.Cpt.energy() + self.Cpp.energy()
u = argsort(E)
v = u[-1::-1]
Es = E[v]
gr = GrRay3D()
gr.load(self.filetra, self.L)
filename = pyu.getlong("grRay" + str(id) + "_col.list",pstruc['DIRGEOM'])
fo = open(filename, "w")
fo.write("LIST\n")
fo.write("{<strucTxRx.off}\n")
Emax = Es[0]
rayset = len(Emax)
for i in range(rayset):
j = v[i]
r = gr.ray3d[j]
col = np.array([1, 0, 0]) # red
fileray = r.show3(False, False, col, j)
fo.write("{< " + fileray + " }\n")
k = i + 1
rayset = len(where((Es >= 0.1 * Emax) & (Es < 0.5 * Emax))[0])
for i in range(rayset):
j = v[i + k]
r = gr.ray3d[j]
col = np.array([0, 0, 1]) # blue
fileray = r.show3(False, False, col, j)
fo.write("{< " + fileray + " }\n")
k = i + 1
rayset = len(where((Es >= 0.01 * Emax) & (Es < 0.1 * Emax))[0])
for i in range(rayset):
j = v[i + k]
r = gr.ray3d[j]
col = np.array([0, 1, 1]) # cyan
fileray = r.show3(False, False, col, j)
fo.write("{< " + fileray + " }\n")
k = i + 1
rayset = len(where((Es >= 0.001 * Emax) & (Es < 0.01 * Emax))[0])
for i in range(rayset):
j = v[i + k]
r = gr.ray3d[j]
col = np.array([0, 1, 0]) # green
fileray = r.show3(False, False, col, j)
fo.write("{< " + fileray + " }\n")
k = i + 1
rayset = len(where(Es < 0.001 * Emax)[0])
for i in range(rayset):
j = v[i + k]
r = gr.ray3d[j]
col = np.array([1, 1, 0]) # yellow
fileray = r.show3(False, False, col, j)
fo.write("{< " + fileray + " }\n")
fo.close()
chaine = "geomview " + filename + " 2>/dev/null &"
os.system(chaine)
def show3(self, seuildb=100):
""" geomview vizualization
This function provides a complete ray tracing visualization
of the radio channel. Rays are color coded as a fonction of
their energy.
Parameters
----------
seuildb : float
default 100
"""
E = self.Ctt.energy() + self.Ctp.energy() + \
self.Cpt.energy() + self.Cpp.energy()
u = argsort(E)
v = u[-1::-1]
Es = E[v]
gr = GrRay3D()
gr.load(self.filetra, self.L)
filename = pyu.getlong("grRay" + str(seuildb) + "_col.list", pstruc['DIRGEOM'])
fo = open(filename, "w")
fo.write("LIST\n")
fo.write("{<strucTxRx.off}\n")
Emax = Es[0]
rayset = len(v)
db = 20 * np.log10(Es)
c = 1 - (db > -seuildb) * (db + seuildb) / seuildb
app = round(np.log10(Es / Emax))
lw = app - min(app)
for i in range(rayset):
j = v[i]
r = gr.ray3d[j]
col = np.array([c[i], c[i], c[i]])
l = int(lw[i])
fileray = r.show3(False, False, col, j, l)
#fileray =r.show3(False,False,col,j)
fo.write("{< " + fileray + " }\n")
fo.close()
chaine = "geomview -nopanel -b 1 1 1 " + filename + " 2>/dev/null &"
os.system(chaine)
class ScalChannel(object):
"""
DEPRECATED
ScalChannel Class :
The ScalChannel is obtained from combination of the propagation
channel and the antenna transfer function from both transmitting
and receiving antennas
Members
-------
H : FUDSignal
ray transfer functions (nray,nfreq)
dod :
direction of depature (rad) [theta_t,phi_t] nray x 2
doa :
direction of arrival (rad) [theta_r,phi_r] nray x 2
tauk :
delay ray k in ns
"""
def __init__(self, VC, Ftt, Ftp, Frt, Frp):
self.Ftt = Ftt
self.Ftp = Ftp
self.Frt = Frt
self.Frp = Frp
t1 = VC.Ctt * Frt + VC.Cpt * Frp
t2 = VC.Ctp * Frt + VC.Cpp * Frp
t3 = t1 * Ftt + t2 * Ftp
self.dod = VC.tang
self.doa = VC.rang
self.tau = VC.tauk
self.H = bs.FUDsignal(t3.x, t3.y, VC.tauk)
# thresholding of rays
if (VC.nray > 1):
indices = self.H.enthrsh()
self.dod = self.dod[indices, :]
self.doa = self.doa[indices, :]
self.tau = self.tau[indices, :]
def info(self):
""" display information
"""
#print 'Ftt,Ftp,Frt,Frp'
#print 'dod,doa,tau'
#print 'H - FUDsignal '
print ('tau min , tau max :', min(self.tau), max(self.tau))
self.H.info()
def imshow(self):
""" imshow vizualization of H
"""
self.H
sh = np.shape(self.H.y)
itau = np.arange(len(self.tau))
plt.imshow(abs(self.H.y))
plt.show()
def apply(self, W):
""" Apply a FUsignal W to the ScalChannel.
Parameters
----------
W : Bsignal.FUsignal
It exploits multigrid convolution from Bsignal.
Notes
-----
+ W may have a more important number of points and a smaller frequency band.
+ If the frequency band of the waveform exceeds the one of the ScalChannei, a warning is sent.
+ W is a FUsignal whose shape doesn't need to be homogeneous with FUDsignal H
"""
H = self.H
U = H * W
V = bs.FUDsignal(U.x, U.y, H.tau0)
return(V)
def applywavC(self, w, dxw):
""" apply waveform method C
Parameters
----------
w :
waveform
dxw
Notes
-----
The overall received signal is built in time domain
w is apply on the overall CIR
"""
H = self.H
h = H.ft1(500, 1)
dxh = h.dx()
if (abs(dxh - dxw) > 1e-10):
if (dxh < dxw):
# reinterpolate w
f = interp1d(w.x, w.y)
x_new = arange(w.x[0], w.x[-1], dxh)[0:-1]
y_new = f(x_new)
w = TUsignal(x_new, y_new)
else:
# reinterpolate h
f = interp1d(h.x, h.y)
x_new = arange(h.x[0], h.x[-1], dxw)[0:-1]
y_new = f(x_new)
h = TUsignal(x_new, y_new)
ri = h.convolve(w)
return(ri)
def applywavB(self, Wgam):
""" apply waveform method B (time domain )
Parameters
----------
Wgam :
waveform including gamma factor
Returns
-------
ri : TUDsignal
impulse response for each ray separately
Notes
------
The overall received signal is built in time domain
Wgam is applied on each Ray Transfer function
See Also
--------
pylayers.signal.bsignal.TUDsignal.ft1
"""
#
# return a FUDsignal
#
Y = self.apply(Wgam)
#ri = Y.ft1(500,0)
# Le fftshift est activé
ri = Y.ft1(500, 1)
return(ri)
def applywavA(self, Wgam, Tw):
""" apply waveform method A
Parameters
----------
Wgam :
Tw :
The overall received signal is built in frequency domain
"""
Hab = self.H.ft2(0.001)
HabW = Hab * Wgam
RI = HabW.symHz(10000)
ri = RI.ifft(0, 'natural')
ri.translate(-Tw)
return(ri)
def doddoa(self):
""" doddoa() : DoD / DoA diagram
"""
dod = self.dod
doa = self.doa
#
#col = 1 - (10*np.log10(Etot)-Emin)/(Emax-Emin)
Etot = self.H.energy()
Etot = Etot / max(Etot)
al = 180 / np.pi
col = 10 * np.log10(Etot)
print (len(dod[:, 0]), len(dod[:, 1]), len(col[:]))
plt.subplot(121)
plt.scatter(dod[:, 0] * al, dod[:, 1] * al, s=15, c=col,
cmap=plt.cm.gray_r, edgecolors='none')
a = colorbar()
#a.set_label('dB')
plt.xlabel("$\\theta_t(\degree)$", fontsize=18)
plt.ylabel('$\phi_t(\degree)$', fontsize=18)
title('DoD')
plt.subplot(122)
plt.scatter(doa[:, 0] * al, doa[:, 1] * al, s=15, c=col,
cmap=plt.cm.gray_r, edgecolors='none')
b = colorbar()
b.set_label('dB')
plt.title('DoA')
plt.xlabel("$\\theta_r(\degree)$", fontsize=18)
plt.ylabel("$\phi_r (\degree)$", fontsize=18)
plt.show()
def wavefig(self, w, Nray=5):
""" display
Parameters
----------
w : waveform
Nray : int
number of rays to be displayed
"""
# Construire W
W = w.ft()
# Appliquer W
Y = self.apply(W)
#r.require('graphics')
#r.postscript('fig.eps')
#r('par(mfrow=c(2,2))')
#Y.fig(Nray)
y = Y.iftd(100, 0, 50, 0)
y.fig(Nray)
#r.dev_off()
#os.system("gv fig.eps ")
#y.fidec()
# Sur le FUsignal retourn
# A gauche afficher le signal sur chaque rayon
# A droite le meme signal decal
# En bas a droite le signal resultant
def rayfig(self, k, W, col='red'):
""" build a figure with rays
Parameters
----------
k : ray index
W : waveform (FUsignal)
Notes
-----
W is apply on k-th ray and the received signal is built in time domain
"""
# get the kth Ray Transfer function
Hk = bs.FUDsignal(self.H.x, self.H.y[k, :])
dxh = Hk.dx()
dxw = W.dx()
w0 = W.x[0] # fmin W
hk0 = Hk.x[0] # fmin Hk
# on s'arrange pour que hk0 soit egal a w0 (ou hk0 soit legerement inferieur a w0)
if w0 < hk0:
np = ceil((hk0 - w0) / dxh)
hk0_new = hk0 - np * dxh
x = arange(hk0_new, hk0 + dxh, dxh)[0:-1]
Hk.x = hstack((x, Hk.x))
Hk.y = hstack((zeros(np), Hk.y))
if (abs(dxh - dxw) > 1e-10):
if (dxh < dxw):
# reinterpolate w
print (" resampling w")
x_new = arange(W.x[0], W.x[-1] + dxh, dxh)[0:-1]
Wk = W.resample(x_new)
dx = dxh
else:
# reinterpolate h
print (" resampling h")
x_new = arange(Hk.x[0], Hk.x[-1] + dxw, dxw)[0:-1]
Hk = Hk.resample(x_new)
dx = dxw
Wk = W
# on s'arrange que Hk.x[0]==Wk.x[0]
# if Wk.x[0]!=Hk.x[0]:
# x=arange(Wk.x[0],Hk.x[0],dx)
# if Hk.x[0]!=x[0]:
# Hk.x=hstack((x,Hk.x[1:]))
# nz=len(x)
# Hk.y=hstack((zeros(nz),Hk.y))
# else:
# Hk.x=hstack((x,Hk.x[0:]))
# nz=len(x)
# Hk.y=hstack((zeros(nz),Hk.y))
#
self.Hk = Hk
self.Wk = Wk
Rk = Hk * Wk
self.Rk = Rk
rk = Rk.iftshift()
plot(rk.x, rk.y, col)
return(rk)
class VectLOS(Ctilde):
def __init__(self, d, fmin=2, fmax=11, Nf=180):
self.tauk = np.array([d / 0.3])
fGHz = np.linspace(fmin, fmax, Nf)
c1 = 1.0 / d * np.ones(len(fGHz))
c2 = zeros(len(fGHz))
c1.reshape(1, Nf)
c2.reshape(1, Nf)
self.freq = freq
self.Ctt = bs.FUsignal(fGHz, c1)
self.Ctp = bs.FUsignal(fGHz, c2)
self.Cpt = bs.FUsignal(fGHz, c2)
self.Cpp = bs.FUsignal(fGHz, c1)
self.tang = array([0])
self.rang = array([0])
self.nray = 1
def cir(self, wav):
""" Channel Impulse Response
Parameters
----------
wav :
"""
SCO = self.vec2scal()
ciro = SCO.applywavB(wav.sfg)
return(ciro)
| 1,100 | 245 | 49 |
a1aefec19956aaaf90961f63cc4d8d7af92c3124 | 3,653 | py | Python | src/exceptions.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
] | null | null | null | src/exceptions.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
] | 7 | 2016-08-12T15:12:43.000Z | 2020-06-07T03:19:13.000Z | src/exceptions.py | 211tbc/synthesis | 55b4dcb85b7a2ed5fbc46b1740c8ca0ab80248a4 | [
"Unlicense"
] | null | null | null | class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE. | 40.142857 | 93 | 0.667944 | class Error(Exception):
"""Base class for exceptions in this module."""
pass
class DuplicateXMLDocumentError(Exception):
def __init__(self, *args):
message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2])
print(message)
self.message = message
class UndefinedXMLWriter(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2]))
class DatabaseAuthenticationError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2]))
class SoftwareCompatibilityError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class XSDError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class DBLayerNotFoundError(Exception):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class VPNFailure(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class FTPUploadFailureError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class KeyboardInterrupt(Error):
def __init__(self, *args):
print("Intercepted Keyboard Interupt")
class FileNotFoundError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class DataFormatError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class InvalidSSNError(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class EthnicityPickNotFound(Error):
def __init__(self, *args):
print("Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2]))
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE. | 1,340 | 173 | 720 |
102dc5f9ab25eb9842c8b7062094e4b118970be6 | 1,575 | py | Python | ledgertools/cli.py | point8/ledgertools | a9d5bdd928b9aefbf5aa920810a1c92630e8e694 | [
"MIT"
] | 1 | 2017-12-11T14:28:45.000Z | 2017-12-11T14:28:45.000Z | ledgertools/cli.py | point8/ledgertools | a9d5bdd928b9aefbf5aa920810a1c92630e8e694 | [
"MIT"
] | null | null | null | ledgertools/cli.py | point8/ledgertools | a9d5bdd928b9aefbf5aa920810a1c92630e8e694 | [
"MIT"
] | null | null | null | import json
import click
import pickle
from ledgertools.read import read_file
from ledgertools.version import __version__
@click.group(help='CLI tools for working with ledger')
@click.version_option(version=__version__, prog_name='Ledger Tools')
@cli.command(help='Import ledger style file')
@click.option('-f', '--file', 'in_file', help='Input file name', prompt='Input file name')
@click.option('-n', '--name', default='transactions.json', help='Output file name')
@click.option('-p', '--pickle', 'as_pickle', is_flag=True, help='Output as pickle file')
@click.option('--run-checks', 'run_checks', is_flag=True, help='Run standard checks on data')
@click.option('--stdout', is_flag=True, help='Output to stdout, supresses output files')
| 37.5 | 93 | 0.688889 | import json
import click
import pickle
from ledgertools.read import read_file
from ledgertools.version import __version__
@click.group(help='CLI tools for working with ledger')
@click.version_option(version=__version__, prog_name='Ledger Tools')
def cli():
pass
@cli.command(help='Import ledger style file')
@click.option('-f', '--file', 'in_file', help='Input file name', prompt='Input file name')
@click.option('-n', '--name', default='transactions.json', help='Output file name')
@click.option('-p', '--pickle', 'as_pickle', is_flag=True, help='Output as pickle file')
@click.option('--run-checks', 'run_checks', is_flag=True, help='Run standard checks on data')
@click.option('--stdout', is_flag=True, help='Output to stdout, supresses output files')
def read(in_file, name, as_pickle, run_checks, stdout):
click.secho(f'Reading input file: {in_file}', fg='green')
transactions = read_file(in_file, run_checks)
if stdout:
print(json.dumps(transactions, sort_keys=True, indent=4, ensure_ascii=False))
return 0
if as_pickle:
name = name.replace('json', 'pkl')
with open(name, 'wb') as out_file:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(transactions, out_file, pickle.HIGHEST_PROTOCOL)
else:
with open(name, 'w', encoding='utf-8') as out_file:
json.dump(transactions, out_file, sort_keys=True, indent=4, ensure_ascii=False)
if not stdout:
click.secho(f'Saving output to: {name}', fg='green')
def main():
cli()
| 764 | 0 | 67 |
9b41f598b01bc9858f8f7f6429f240ca5155b6dd | 2,258 | py | Python | reporting_tool.py | puneetjain-/Log-Analysis-Project | f71c947e02dac9238a9724c8046b90d47f86aaed | [
"MIT"
] | null | null | null | reporting_tool.py | puneetjain-/Log-Analysis-Project | f71c947e02dac9238a9724c8046b90d47f86aaed | [
"MIT"
] | null | null | null | reporting_tool.py | puneetjain-/Log-Analysis-Project | f71c947e02dac9238a9724c8046b90d47f86aaed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import psycopg2
import os
# Database Name
DB_NAME = "news"
# Filename
FILENAME = "log_analysis.txt"
# queries
first_query = "select title,views from view_article limit 3"
second_query = "select * from view_author"
third_query = "select * from view_error_log where percent_error > 1"
# to store results
first_query_dict = dict()
first_query_dict['title'] = """\n1. The 3 most popular articles \
of all time are:\n"""
second_query_dict = dict()
second_query_dict['title'] = """\n2. The most popular article \
authors of all time are:\n"""
third_query_dict = dict()
third_query_dict['title'] = """"\n3. Days with more than 1% of \
request that lead to an error:\n"""
def connect_db_get_query_result(query):
"""connects to DB and gets query results"""
db = psycopg2.connect(database=DB_NAME)
c = db.cursor()
c.execute(query)
results = c.fetchall()
db.close()
return results
def display_query_result(query_result):
"""prints reports generated from query"""
print(query_result['title'])
f = open(FILENAME, 'a')
f.write(query_result['title'])
for result in query_result['results']:
output = ('\t'+str(result[0])+' ---> '+str(result[1])+' views'+'\n')
f.write(output)
print(output, end='')
f.close()
def display_request_error_result(query_result):
"""displays % of requests lead to errors"""
print(query_result['title'])
f = open(FILENAME, 'a')
f.write(query_result['title'])
for result in query_result['results']:
output = ('\t'+str(result[0])+' ---> '+str(result[1])+' %'+'\n')
f.write(output)
print(output, end='')
f.close()
# main starts
if __name__ == "__main__":
print("Fetching the data from the Database...")
if os.path.isfile(FILENAME):
os.remove(FILENAME)
# stores query result
first_query_dict['results'] = connect_db_get_query_result(first_query)
second_query_dict['results'] = connect_db_get_query_result(second_query)
third_query_dict['results'] = connect_db_get_query_result(third_query)
# print formatted output
display_query_result(first_query_dict)
display_query_result(second_query_dict)
display_request_error_result(third_query_dict)
| 28.225 | 76 | 0.681577 | #!/usr/bin/env python3
import psycopg2
import os
# Database Name
DB_NAME = "news"
# Filename
FILENAME = "log_analysis.txt"
# queries
first_query = "select title,views from view_article limit 3"
second_query = "select * from view_author"
third_query = "select * from view_error_log where percent_error > 1"
# to store results
first_query_dict = dict()
first_query_dict['title'] = """\n1. The 3 most popular articles \
of all time are:\n"""
second_query_dict = dict()
second_query_dict['title'] = """\n2. The most popular article \
authors of all time are:\n"""
third_query_dict = dict()
third_query_dict['title'] = """"\n3. Days with more than 1% of \
request that lead to an error:\n"""
def connect_db_get_query_result(query):
"""connects to DB and gets query results"""
db = psycopg2.connect(database=DB_NAME)
c = db.cursor()
c.execute(query)
results = c.fetchall()
db.close()
return results
def display_query_result(query_result):
"""prints reports generated from query"""
print(query_result['title'])
f = open(FILENAME, 'a')
f.write(query_result['title'])
for result in query_result['results']:
output = ('\t'+str(result[0])+' ---> '+str(result[1])+' views'+'\n')
f.write(output)
print(output, end='')
f.close()
def display_request_error_result(query_result):
"""displays % of requests lead to errors"""
print(query_result['title'])
f = open(FILENAME, 'a')
f.write(query_result['title'])
for result in query_result['results']:
output = ('\t'+str(result[0])+' ---> '+str(result[1])+' %'+'\n')
f.write(output)
print(output, end='')
f.close()
# main starts
if __name__ == "__main__":
print("Fetching the data from the Database...")
if os.path.isfile(FILENAME):
os.remove(FILENAME)
# stores query result
first_query_dict['results'] = connect_db_get_query_result(first_query)
second_query_dict['results'] = connect_db_get_query_result(second_query)
third_query_dict['results'] = connect_db_get_query_result(third_query)
# print formatted output
display_query_result(first_query_dict)
display_query_result(second_query_dict)
display_request_error_result(third_query_dict)
| 0 | 0 | 0 |
8ec17677a09da10d2e29124c3d63a04b022b71ba | 6,946 | py | Python | app/candidates/import_candidates.py | ACTtaiwan/CongressionalGuide | 36067ee16973a2adaf8450b827012c148ee65376 | [
"MIT"
] | 8 | 2016-05-19T03:19:02.000Z | 2019-03-12T18:53:58.000Z | app/candidates/import_candidates.py | ACTtaiwan/CongressionalGuide | 36067ee16973a2adaf8450b827012c148ee65376 | [
"MIT"
] | 49 | 2016-05-14T06:50:28.000Z | 2016-09-30T23:57:01.000Z | app/candidates/import_candidates.py | ACTtaiwan/CongressionalGuide | 36067ee16973a2adaf8450b827012c148ee65376 | [
"MIT"
] | 6 | 2016-07-23T19:20:58.000Z | 2019-08-13T22:12:10.000Z | #!/usr/bin/python
import sqlite3, json, os
import logging, sys
from collections import defaultdict
from nameparser import HumanName
#
# This script moves candidate information from filename.json into the sqlite3 database
#
# !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
#
# The order matter when we want to insert the value, current schema:
# CREATE TABLE candidates (firstName TEXT, lastName TEXT, prefix TEXT, suffix TEXT, party TEXT, chamber TEXT, state TEXT, district INTEGER, incumbent INTEGER, source TEXT, bioguideId TEXT PRIMARY KEY UNIQUE, fecId TEXT UNIQUE, website TEXT, email TEXT UNIQUE, facebook TEXT UNIQUE, twitter TEXT UNIQUE, youtube TEXT UNIQUE, img_src TEXT, questionnaire_response TEXT, gen_election_candidate INTEGER DEFAULT (0), duplicate INTEGER, candidate_url TEXT UNIQUE);
logging.basicConfig(stream=sys.stderr,level=logging.DEBUG)
dbpath = '../../db/db.sqlite3'
if not (dbpath and os.path.isfile(dbpath)):
print 'db file not found'
exit()
try:
db = sqlite3.connect(dbpath)
c = db.cursor()
except sqlite3.Error:
print 'sqlite3 error'
db.close()
#jsonpath = '/root/CongressionalGuide/app/candidates/import.json'
jsonpath = str(sys.argv[1])
if not (jsonpath and os.path.isfile(jsonpath)):
print 'candidates json file not found'
exit()
congressman = json.load(open(jsonpath))
# check first/last name pair
# if exists, update_query
# else insert_query
update_query = 'UPDATE candidates SET candidate_url = ?, img_src = ?, facebook = ?, twitter = ?, website = ?, youtube = ?, gen_election_candidate = ?, incumbent = ?, district = ? where firstName like ? and lastName like ? and state = ?'
#update_query = 'UPDATE candidates SET candidate_url = ?, img_src = ?, facebook = ?, twitter = ?, website = ?, youtube = ?, source = ?, gen_election_candidate = ?, incumbent = ? where firstName like ? and lastName like ? and state = ? and district = ?'
# !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
insert_query = 'INSERT INTO candidates VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'
# Import loop: each scraped candidate record either UPDATEs an existing row
# (matched on first/last-name LIKE patterns plus state) or is INSERTed anew.
for human in congressman:
    # Every column value is held as a 1-tuple so the tuples can simply be
    # concatenated with `+` below to build the positional parameter
    # sequences passed to c.execute().
    firstName=(None,)
    lastName=(None,)
    prefix=(None,)
    suffix=(None,)
    party=(None,)
    chamber=(None,)
    state=(None,)
    district=(0,)
    incumbent=(None,)
    bioguideId=(None,)
    fecId=(None,)
    # All rows imported by this script are tagged with source 'ballotpedia'.
    source=('ballotpedia',)
    website=(None,)
    email=(None,)
    facebook=(None,)
    twitter=(None,)
    youtube=(None,)
    img_src=(None,)
    questionnaire_response=(None,)
    #TODO: NH primary election 9/13, their candidate will have null value here
    gen_election_candidate=(None,)
    candidate_url=(None,)
    duplicate=(None,)
    # Accumulates a (key, value) dump of this record for the debug log below.
    mesg=''
    for k,v in human.iteritems():
        mesg += '(k,v)=(' + k + ' ,' + str(v) + ')\n'
        if k == 'name':
            v = v.replace('%27','\'') #clean up scraped single quote issue
            if v.endswith(')'): #handle name like 'Bill Otto (Missouri)'
                lp = v.find('(')
                v = v[:lp-1] # lp-1 also drops the space before '('
            v = v.replace('%22','\"') #change nickname parenthesis to quotes
            # Let nameparser split the cleaned string into its components.
            fullName = HumanName(v)
            prefix = fullName.title,
            if len(fullName.first) < 3: # if only 1st initial, then need to include middle name
                firstName = fullName.first + ' ' + fullName.middle,
            else:
                firstName = fullName.first,
            lastName = fullName.last,
            suffix = fullName.suffix,
        elif k == 'party':
            # First element only — presumably the leading party entry (or the
            # one-letter abbreviation when v is a plain string); TODO confirm.
            party = v[0],
        elif k == 'dist':
            # Keep only the digit characters of the district string
            # (e.g. 'District 12' -> 12); default (0,) stands if none found.
            dl = [int(d) for d in v if d.isdigit()]
            if len(dl) != 0:
                district = int(''.join(map(str, dl))),
        elif k == 'camp':
            website = v,
        elif k == 'twtr':
            # Strip everything through 'twitter.com/' leaving just the handle.
            twitter = v[v.find('twitter.com')+len('twitter.com')+1:],
        elif k == 'fb':
            facebook = v,
        elif k == 'state':
            # Convert the full state name to its two-letter abbreviation.
            state = getStateAbbr(v),
        elif k == 'pic':
            img_src = v,
        elif k == 'chamber':
            chamber = v,
        elif k == 'youtube':
            youtube = v,
        elif k == 'incumbent':
            incumbent = v,
        elif k == 'gen_election_candidate':
            gen_election_candidate = v,
        elif k == 'url':
            candidate_url = v,
    logging.debug(mesg)
    # Wrap the parsed names in % wildcards so the SQL LIKE match also hits
    # rows where the stored name is a superstring (middle names, initials).
    match_firstName = '%'+firstName[0]+'%',
    match_lastName = '%'+lastName[0]+'%',
    # !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
    # The order of these concatenated 1-tuples must match the candidates
    # table column order (insert) and the ?-placeholders of each query.
    insert_values = (firstName + lastName + prefix + suffix + party + chamber + state + district + incumbent + source + bioguideId + fecId + website + email + facebook + twitter + youtube + img_src + questionnaire_response + gen_election_candidate + duplicate + candidate_url)
    update_values = (candidate_url + img_src + facebook + twitter + website + youtube + gen_election_candidate + incumbent + district + match_firstName + match_lastName + state)
    #update_values = (candidate_url + img_src + facebook + twitter + website + youtube + source + gen_election_candidate + incumbent + match_firstName + match_lastName + state + district)
    # Match with existing Sunlight data: lastName, first word of firstName, state and district
    # no district for senate
    c.execute('SELECT count(*) FROM candidates where firstName like ? and lastName like ? and state = ? ;', match_firstName + match_lastName + state )
    #c.execute('SELECT count(*) FROM candidates where firstName like ? and lastName like ? and state = ? and district = ?;', match_firstName + match_lastName + state + district)
    obj = c.fetchone()
    # Non-zero count -> a matching row exists, so update it; otherwise insert.
    if obj[0]:
        logging.info('update_values: %s', update_values)
        c.execute(update_query, update_values)
    else:
        logging.info('insert_values: %s', insert_values)
        c.execute(insert_query, insert_values)
    logging.info('[OK]\n\n')
# Persist all inserts/updates in one transaction, then release the db.
db.commit()
db.close()
| 34.216749 | 457 | 0.613591 | #!/usr/bin/python
import sqlite3, json, os
import logging, sys
from collections import defaultdict
from nameparser import HumanName
#
# This script moves candidate information from filename.json into the sqlite3 database
#
# !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
#
# The order matter when we want to insert the value, current schema:
# CREATE TABLE candidates (firstName TEXT, lastName TEXT, prefix TEXT, suffix TEXT, party TEXT, chamber TEXT, state TEXT, district INTEGER, incumbent INTEGER, source TEXT, bioguideId TEXT PRIMARY KEY UNIQUE, fecId TEXT UNIQUE, website TEXT, email TEXT UNIQUE, facebook TEXT UNIQUE, twitter TEXT UNIQUE, youtube TEXT UNIQUE, img_src TEXT, questionnaire_response TEXT, gen_election_candidate INTEGER DEFAULT (0), duplicate INTEGER, candidate_url TEXT UNIQUE);
logging.basicConfig(stream=sys.stderr,level=logging.DEBUG)
def getStateAbbr(s):
try:
return {
'Alabama' : 'AL',
'Montana' : 'MT',
'Alaska' : 'AK',
'Nebraska' : 'NE',
'Arizona' : 'AZ' ,
'Nevada' : 'NV',
'Arkansas' : 'AR' ,
'New Hampshire' : 'NH',
'California' : 'CA' ,
'New Jersey' : 'NJ',
'Colorado' : 'CO' ,
'New Mexico' : 'NM',
'Connecticut' : 'CT',
'New York' : 'NY',
'Delaware' : 'DE' ,
'North Carolina' : 'NC',
'Florida' : 'FL' ,
'North Dakota' : 'ND',
'Georgia' : 'GA' ,
'Ohio' : 'OH',
'Hawaii' : 'HI' ,
'Oklahoma' : 'OK',
'Idaho' : 'ID' ,
'Oregon' : 'OR',
'Illinois' : 'IL' ,
'Pennsylvania' : 'PA',
'Indiana' : 'IN' ,
'Rhode Island' : 'RI',
'Iowa' : 'IA' ,
'South Carolina' : 'SC',
'Kansas' : 'KS' ,
'South Dakota' : 'SD',
'Kentucky' : 'KY' ,
'Tennessee' : 'TN',
'Louisiana' : 'LA' ,
'Texas' : 'TX',
'Maine' : 'ME' ,
'Utah' : 'UT',
'Maryland' : 'MD' ,
'Vermont' : 'VT',
'Massachusetts' : 'MA' ,
'Virginia' : 'VA',
'Michigan' : 'MI' ,
'Washington' : 'WA',
'Minnesota' : 'MN' ,
'West Virginia' : 'WV',
'Mississippi' : 'MS' ,
'Wisconsin' : 'WI',
'Missouri' : 'MO' ,
'Wyoming' : 'WY',
}[s]
except:
print 'key ' + s + ' not found!'
return None
dbpath = '../../db/db.sqlite3'
if not (dbpath and os.path.isfile(dbpath)):
print 'db file not found'
exit()
try:
db = sqlite3.connect(dbpath)
c = db.cursor()
except sqlite3.Error:
print 'sqlite3 error'
db.close()
#jsonpath = '/root/CongressionalGuide/app/candidates/import.json'
jsonpath = str(sys.argv[1])
if not (jsonpath and os.path.isfile(jsonpath)):
print 'candidates json file not found'
exit()
congressman = json.load(open(jsonpath))
# check first/last name pair
# if exists, update_query
# else insert_query
update_query = 'UPDATE candidates SET candidate_url = ?, img_src = ?, facebook = ?, twitter = ?, website = ?, youtube = ?, gen_election_candidate = ?, incumbent = ?, district = ? where firstName like ? and lastName like ? and state = ?'
#update_query = 'UPDATE candidates SET candidate_url = ?, img_src = ?, facebook = ?, twitter = ?, website = ?, youtube = ?, source = ?, gen_election_candidate = ?, incumbent = ? where firstName like ? and lastName like ? and state = ? and district = ?'
# !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
insert_query = 'INSERT INTO candidates VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'
for human in congressman:
firstName=(None,)
lastName=(None,)
prefix=(None,)
suffix=(None,)
party=(None,)
chamber=(None,)
state=(None,)
district=(0,)
incumbent=(None,)
bioguideId=(None,)
fecId=(None,)
source=('ballotpedia',)
website=(None,)
email=(None,)
facebook=(None,)
twitter=(None,)
youtube=(None,)
img_src=(None,)
questionnaire_response=(None,)
#TODO: NH primary election 9/13, their candidate will have null value here
gen_election_candidate=(None,)
candidate_url=(None,)
duplicate=(None,)
mesg=''
for k,v in human.iteritems():
mesg += '(k,v)=(' + k + ' ,' + str(v) + ')\n'
if k == 'name':
v = v.replace('%27','\'') #clean up scraped single quote issue
if v.endswith(')'): #handle name like 'Bill Otto (Missouri)'
lp = v.find('(')
v = v[:lp-1]
v = v.replace('%22','\"') #change nickname parenthesis to quotes
fullName = HumanName(v)
prefix = fullName.title,
if len(fullName.first) < 3: # if only 1st initial, then need to include middle name
firstName = fullName.first + ' ' + fullName.middle,
else:
firstName = fullName.first,
lastName = fullName.last,
suffix = fullName.suffix,
elif k == 'party':
party = v[0],
elif k == 'dist':
dl = [int(d) for d in v if d.isdigit()]
if len(dl) != 0:
district = int(''.join(map(str, dl))),
elif k == 'camp':
website = v,
elif k == 'twtr':
twitter = v[v.find('twitter.com')+len('twitter.com')+1:],
elif k == 'fb':
facebook = v,
elif k == 'state':
state = getStateAbbr(v),
elif k == 'pic':
img_src = v,
elif k == 'chamber':
chamber = v,
elif k == 'youtube':
youtube = v,
elif k == 'incumbent':
incumbent = v,
elif k == 'gen_election_candidate':
gen_election_candidate = v,
elif k == 'url':
candidate_url = v,
logging.debug(mesg)
match_firstName = '%'+firstName[0]+'%',
match_lastName = '%'+lastName[0]+'%',
# !!! UPDATE HERE WHENEVER THE DATABASE TABLE SCHEMA CHANGE !!!
insert_values = (firstName + lastName + prefix + suffix + party + chamber + state + district + incumbent + source + bioguideId + fecId + website + email + facebook + twitter + youtube + img_src + questionnaire_response + gen_election_candidate + duplicate + candidate_url)
update_values = (candidate_url + img_src + facebook + twitter + website + youtube + gen_election_candidate + incumbent + district + match_firstName + match_lastName + state)
#update_values = (candidate_url + img_src + facebook + twitter + website + youtube + source + gen_election_candidate + incumbent + match_firstName + match_lastName + state + district)
# Match with existing Sunlight data: lastName, first word of firstName, state and district
# no district for senate
c.execute('SELECT count(*) FROM candidates where firstName like ? and lastName like ? and state = ? ;', match_firstName + match_lastName + state )
#c.execute('SELECT count(*) FROM candidates where firstName like ? and lastName like ? and state = ? and district = ?;', match_firstName + match_lastName + state + district)
obj = c.fetchone()
if obj[0]:
logging.info('update_values: %s', update_values)
c.execute(update_query, update_values)
else:
logging.info('insert_values: %s', insert_values)
c.execute(insert_query, insert_values)
logging.info('[OK]\n\n')
db.commit()
db.close()
| 1,386 | 0 | 23 |
14c8d4b4b416904d5134728fa537b9d0cee1c26c | 1,078 | py | Python | Python/seven_kyu/all_non_consecutive.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | 1 | 2019-12-20T04:09:56.000Z | 2019-12-20T04:09:56.000Z | Python/seven_kyu/all_non_consecutive.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | Python/seven_kyu/all_non_consecutive.py | Brokenshire/codewars-projects | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | [
"Apache-2.0"
] | null | null | null | # Python solution for 'Find all non-consecutive numbers' codewars question.
# Level: 7 kyu
# Tags: FUNDAMENTALS AND ARRAYS.
# Author: Jack Brokenshire
# Date: 05/08/2020
import unittest
def all_non_consecutive(arr):
"""
Find all the elements of an array that are non consecutive. A number is non consecutive if it is not exactly one
larger than the previous element in the array. The first element gets a pass and is never considered non consecutive.
:param arr: An array of integers.
:return: The results as an array of objects with two values i: <the index of the non-consecutive number> and n:
<the non-consecutive number>.
"""
return [{'i': i + 1, 'n': arr[i + 1]} for i in range(len(arr) - 1) if arr[i] + 1 != arr[i + 1]]
class TestAllNonConsecutive(unittest.TestCase):
"""Class to test 'all_non_consecutive' function"""
if __name__ == "__main__":
unittest.main()
| 35.933333 | 121 | 0.671614 | # Python solution for 'Find all non-consecutive numbers' codewars question.
# Level: 7 kyu
# Tags: FUNDAMENTALS AND ARRAYS.
# Author: Jack Brokenshire
# Date: 05/08/2020
import unittest
def all_non_consecutive(arr):
"""
Find all the elements of an array that are non consecutive. A number is non consecutive if it is not exactly one
larger than the previous element in the array. The first element gets a pass and is never considered non consecutive.
:param arr: An array of integers.
:return: The results as an array of objects with two values i: <the index of the non-consecutive number> and n:
<the non-consecutive number>.
"""
return [{'i': i + 1, 'n': arr[i + 1]} for i in range(len(arr) - 1) if arr[i] + 1 != arr[i + 1]]
class TestAllNonConsecutive(unittest.TestCase):
"""Class to test 'all_non_consecutive' function"""
def test_all_non_consecutive(self):
self.assertEqual(all_non_consecutive([1, 2, 3, 4, 6, 7, 8, 10]), [{'i': 4, 'n': 6}, {'i': 7, 'n': 10}])
if __name__ == "__main__":
unittest.main()
| 126 | 0 | 27 |
4a3fcf726ba789d5e6951d4c26522134096eb07d | 10,931 | py | Python | neutron/tests/unit/agent/linux/test_tc_lib.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 1 | 2017-09-10T09:57:35.000Z | 2017-09-10T09:57:35.000Z | neutron/tests/unit/agent/linux/test_tc_lib.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-27T00:48:55.000Z | 2015-04-21T05:29:37.000Z | neutron/tests/unit/agent/linux/test_tc_lib.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-26T00:55:17.000Z | 2020-03-01T17:05:40.000Z | # Copyright 2016 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import tc_lib
from neutron.services.qos import qos_consts
from neutron.tests import base
DEVICE_NAME = "tap_device"
KERNEL_HZ_VALUE = 1000
BW_LIMIT = 2000 # [kbps]
BURST = 100 # [kbit]
LATENCY = 50 # [ms]
TC_QDISC_OUTPUT = (
'qdisc tbf 8011: root refcnt 2 rate %(bw)skbit burst %(burst)skbit '
'lat 50.0ms \n') % {'bw': BW_LIMIT, 'burst': BURST}
TC_FILTERS_OUTPUT = (
'filter protocol all pref 49152 u32 \nfilter protocol all pref '
'49152 u32 fh 800: ht divisor 1 \nfilter protocol all pref 49152 u32 fh '
'800::800 order 2048 key ht 800 \n match 00000000/00000000 at 0\n '
'police 0x1e rate %(bw)skbit burst %(burst)skbit mtu 2Kb action \n'
'drop overhead 0b \n ref 1 bind 1'
) % {'bw': BW_LIMIT, 'burst': BURST}
| 35.839344 | 79 | 0.620986 | # Copyright 2016 OVH SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import tc_lib
from neutron.services.qos import qos_consts
from neutron.tests import base
DEVICE_NAME = "tap_device"
KERNEL_HZ_VALUE = 1000
BW_LIMIT = 2000 # [kbps]
BURST = 100 # [kbit]
LATENCY = 50 # [ms]
TC_QDISC_OUTPUT = (
'qdisc tbf 8011: root refcnt 2 rate %(bw)skbit burst %(burst)skbit '
'lat 50.0ms \n') % {'bw': BW_LIMIT, 'burst': BURST}
TC_FILTERS_OUTPUT = (
'filter protocol all pref 49152 u32 \nfilter protocol all pref '
'49152 u32 fh 800: ht divisor 1 \nfilter protocol all pref 49152 u32 fh '
'800::800 order 2048 key ht 800 \n match 00000000/00000000 at 0\n '
'police 0x1e rate %(bw)skbit burst %(burst)skbit mtu 2Kb action \n'
'drop overhead 0b \n ref 1 bind 1'
) % {'bw': BW_LIMIT, 'burst': BURST}
class BaseUnitConversionTest(object):
def test_convert_to_kilobits_bare_value(self):
value = "1000"
expected_value = 8 # kbit
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_bytes_value(self):
value = "1000b"
expected_value = 8 # kbit
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_bits_value(self):
value = "1000bit"
expected_value = tc_lib.bits_to_kilobits(1000, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_megabytes_value(self):
value = "1m"
expected_value = tc_lib.bits_to_kilobits(
self.base_unit ** 2 * 8, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_kilobits_megabits_value(self):
value = "1mbit"
expected_value = tc_lib.bits_to_kilobits(
self.base_unit ** 2, self.base_unit)
self.assertEqual(
expected_value,
tc_lib.convert_to_kilobits(value, self.base_unit)
)
def test_convert_to_bytes_wrong_unit(self):
value = "1Zbit"
self.assertRaises(
tc_lib.InvalidUnit,
tc_lib.convert_to_kilobits, value, self.base_unit
)
def test_bytes_to_bits(self):
test_values = [
(0, 0), # 0 bytes should be 0 bits
(1, 8) # 1 byte should be 8 bits
]
for input_bytes, expected_bits in test_values:
self.assertEqual(
expected_bits, tc_lib.bytes_to_bits(input_bytes)
)
class TestSIUnitConversions(BaseUnitConversionTest, base.BaseTestCase):
base_unit = tc_lib.SI_BASE
def test_bits_to_kilobits(self):
test_values = [
(0, 0), # 0 bites should be 0 kilobites
(1, 1), # 1 bit should be 1 kilobit
(999, 1), # 999 bits should be 1 kilobit
(1000, 1), # 1000 bits should be 1 kilobit
(1001, 2) # 1001 bits should be 2 kilobits
]
for input_bits, expected_kilobits in test_values:
self.assertEqual(
expected_kilobits,
tc_lib.bits_to_kilobits(input_bits, self.base_unit)
)
class TestIECUnitConversions(BaseUnitConversionTest, base.BaseTestCase):
base_unit = tc_lib.IEC_BASE
def test_bits_to_kilobits(self):
test_values = [
(0, 0), # 0 bites should be 0 kilobites
(1, 1), # 1 bit should be 1 kilobit
(1023, 1), # 1023 bits should be 1 kilobit
(1024, 1), # 1024 bits should be 1 kilobit
(1025, 2) # 1025 bits should be 2 kilobits
]
for input_bits, expected_kilobits in test_values:
self.assertEqual(
expected_kilobits,
tc_lib.bits_to_kilobits(input_bits, self.base_unit)
)
class TestTcCommand(base.BaseTestCase):
def setUp(self):
super(TestTcCommand, self).setUp()
self.tc = tc_lib.TcCommand(DEVICE_NAME, KERNEL_HZ_VALUE)
self.bw_limit = "%s%s" % (BW_LIMIT, tc_lib.BW_LIMIT_UNIT)
self.burst = "%s%s" % (BURST, tc_lib.BURST_UNIT)
self.latency = "%s%s" % (LATENCY, tc_lib.LATENCY_UNIT)
self.execute = mock.patch('neutron.agent.common.utils.execute').start()
def test_check_kernel_hz_lower_then_zero(self):
self.assertRaises(
tc_lib.InvalidKernelHzValue,
tc_lib.TcCommand, DEVICE_NAME, 0
)
self.assertRaises(
tc_lib.InvalidKernelHzValue,
tc_lib.TcCommand, DEVICE_NAME, -100
)
def test_get_filters_bw_limits(self):
self.execute.return_value = TC_FILTERS_OUTPUT
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertEqual(BW_LIMIT, bw_limit)
self.assertEqual(BURST, burst_limit)
def test_get_filters_bw_limits_when_output_not_match(self):
output = (
"Some different "
"output from command:"
"tc filters show dev XXX parent ffff:"
)
self.execute.return_value = output
bw_limit, burst_limit = self.tc.get_filters_bw_limits()
self.assertIsNone(bw_limit)
self.assertIsNone(burst_limit)
def test_get_filters_bw_limits_when_wrong_units(self):
output = TC_FILTERS_OUTPUT.replace("kbit", "Xbit")
self.execute.return_value = output
self.assertRaises(tc_lib.InvalidUnit, self.tc.get_filters_bw_limits)
def test_get_tbf_bw_limits(self):
self.execute.return_value = TC_QDISC_OUTPUT
bw_limit, burst_limit = self.tc.get_tbf_bw_limits()
self.assertEqual(BW_LIMIT, bw_limit)
self.assertEqual(BURST, burst_limit)
def test_get_tbf_bw_limits_when_wrong_qdisc(self):
output = TC_QDISC_OUTPUT.replace("tbf", "different_qdisc")
self.execute.return_value = output
bw_limit, burst_limit = self.tc.get_tbf_bw_limits()
self.assertIsNone(bw_limit)
self.assertIsNone(burst_limit)
def test_get_tbf_bw_limits_when_wrong_units(self):
output = TC_QDISC_OUTPUT.replace("kbit", "Xbit")
self.execute.return_value = output
self.assertRaises(tc_lib.InvalidUnit, self.tc.get_tbf_bw_limits)
def test_set_tbf_bw_limit(self):
self.tc.set_tbf_bw_limit(BW_LIMIT, BURST, LATENCY)
self.execute.assert_called_once_with(
["tc", "qdisc", "replace", "dev", DEVICE_NAME,
"root", "tbf", "rate", self.bw_limit,
"latency", self.latency,
"burst", self.burst],
run_as_root=True,
check_exit_code=True,
log_fail_as_error=True,
extra_ok_codes=None
)
def test_update_filters_bw_limit(self):
self.tc.update_filters_bw_limit(BW_LIMIT, BURST)
self.execute.assert_has_calls([
mock.call(
["tc", "qdisc", "del", "dev", DEVICE_NAME, "ingress"],
run_as_root=True,
check_exit_code=True,
log_fail_as_error=True,
extra_ok_codes=[2]
),
mock.call(
['tc', 'qdisc', 'add', 'dev', DEVICE_NAME, "ingress",
"handle", tc_lib.INGRESS_QDISC_ID],
run_as_root=True,
check_exit_code=True,
log_fail_as_error=True,
extra_ok_codes=None
),
mock.call(
['tc', 'filter', 'add', 'dev', DEVICE_NAME,
'parent', tc_lib.INGRESS_QDISC_ID, 'protocol', 'all',
'prio', '49', 'basic', 'police',
'rate', self.bw_limit,
'burst', self.burst,
'mtu', tc_lib.MAX_MTU_VALUE,
'drop'],
run_as_root=True,
check_exit_code=True,
log_fail_as_error=True,
extra_ok_codes=None
)]
)
def test_update_tbf_bw_limit(self):
self.tc.update_tbf_bw_limit(BW_LIMIT, BURST, LATENCY)
self.execute.assert_called_once_with(
["tc", "qdisc", "replace", "dev", DEVICE_NAME,
"root", "tbf", "rate", self.bw_limit,
"latency", self.latency,
"burst", self.burst],
run_as_root=True,
check_exit_code=True,
log_fail_as_error=True,
extra_ok_codes=None
)
def test_delete_filters_bw_limit(self):
self.tc.delete_filters_bw_limit()
self.execute.assert_called_once_with(
["tc", "qdisc", "del", "dev", DEVICE_NAME, "ingress"],
run_as_root=True,
check_exit_code=True,
log_fail_as_error=True,
extra_ok_codes=[2]
)
def test_delete_tbf_bw_limit(self):
self.tc.delete_tbf_bw_limit()
self.execute.assert_called_once_with(
["tc", "qdisc", "del", "dev", DEVICE_NAME, "root"],
run_as_root=True,
check_exit_code=True,
log_fail_as_error=True,
extra_ok_codes=[2]
)
def test_get_ingress_qdisc_burst_value_burst_not_none(self):
self.assertEqual(
BURST, self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, BURST)
)
def test_get_ingress_qdisc_burst_no_burst_value_given(self):
expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
self.assertEqual(
expected_burst,
self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, None)
)
def test_get_ingress_qdisc_burst_burst_value_zero(self):
expected_burst = BW_LIMIT * qos_consts.DEFAULT_BURST_RATE
self.assertEqual(
expected_burst,
self.tc.get_ingress_qdisc_burst_value(BW_LIMIT, 0)
)
def test__get_tbf_burst_value_when_burst_bigger_then_minimal(self):
result = self.tc._get_tbf_burst_value(BW_LIMIT, BURST)
self.assertEqual(BURST, result)
def test__get_tbf_burst_value_when_burst_smaller_then_minimal(self):
result = self.tc._get_tbf_burst_value(BW_LIMIT, 0)
self.assertEqual(2, result)
| 8,494 | 254 | 766 |
0d6bd2a989a8554332881f3ec9c0c17bb5907c5b | 1,996 | py | Python | study_tool/entities/card_attribute_box.py | cubeman99/russian-study-tool | b073df4694f1ad064a780088cdcb1436e1bde7e9 | [
"MIT"
] | null | null | null | study_tool/entities/card_attribute_box.py | cubeman99/russian-study-tool | b073df4694f1ad064a780088cdcb1436e1bde7e9 | [
"MIT"
] | null | null | null | study_tool/entities/card_attribute_box.py | cubeman99/russian-study-tool | b073df4694f1ad064a780088cdcb1436e1bde7e9 | [
"MIT"
] | null | null | null | import pygame
import cmg
from cmg.color import Colors
from cmg.color import Color
from cmg import math
from study_tool import card_attributes
from study_tool.card import Card
from study_tool.card_attributes import *
from study_tool.entities.entity import Entity
from study_tool.russian.word import AccentedText
from study_tool.russian.word import Word
class CardAttributeBox(Entity):
"""
A box with a word in it that can reference a card.
"""
def __init__(self, attribute, short=False, font=None):
"""Entity constructor."""
super().__init__()
self.__attribute = CardAttributes(attribute)
self.__short = short
self.__text = ""
self.__font = font
if self.__font is None:
self.__font = cmg.Font(24)
self.__padding = cmg.Vec2(8, 6)
def on_create(self):
"""Called when the entity is created."""
if self.__short:
self.__text = self.__attribute.value
else:
self.__text = card_attributes.get_card_attribute_display_name(
self.__attribute)
self.set_size(self.__font.measure(self.__text) + (self.__padding * 2))
def update(self, dt):
"""Updates the entity."""
def draw(self, g):
"""Draws the entity."""
# Determine colors
text_color = Colors.WHITE
background_color = Colors.BLACK
if self.__attribute in card_attributes.ATTRIBUTE_COLORS:
background_color = card_attributes.ATTRIBUTE_COLORS[self.__attribute]
# Draw the background
g.fill_rect(self.get_rect(),
color=background_color)
# Draw the text
g.draw_accented_text(self.get_center().x,
self.get_center().y,
text=self.__text,
font=self.__font,
color=text_color,
align=cmg.Align.Centered)
| 32.721311 | 81 | 0.604208 | import pygame
import cmg
from cmg.color import Colors
from cmg.color import Color
from cmg import math
from study_tool import card_attributes
from study_tool.card import Card
from study_tool.card_attributes import *
from study_tool.entities.entity import Entity
from study_tool.russian.word import AccentedText
from study_tool.russian.word import Word
class CardAttributeBox(Entity):
"""
A box with a word in it that can reference a card.
"""
def __init__(self, attribute, short=False, font=None):
"""Entity constructor."""
super().__init__()
self.__attribute = CardAttributes(attribute)
self.__short = short
self.__text = ""
self.__font = font
if self.__font is None:
self.__font = cmg.Font(24)
self.__padding = cmg.Vec2(8, 6)
def on_create(self):
"""Called when the entity is created."""
if self.__short:
self.__text = self.__attribute.value
else:
self.__text = card_attributes.get_card_attribute_display_name(
self.__attribute)
self.set_size(self.__font.measure(self.__text) + (self.__padding * 2))
def update(self, dt):
"""Updates the entity."""
def draw(self, g):
"""Draws the entity."""
# Determine colors
text_color = Colors.WHITE
background_color = Colors.BLACK
if self.__attribute in card_attributes.ATTRIBUTE_COLORS:
background_color = card_attributes.ATTRIBUTE_COLORS[self.__attribute]
# Draw the background
g.fill_rect(self.get_rect(),
color=background_color)
# Draw the text
g.draw_accented_text(self.get_center().x,
self.get_center().y,
text=self.__text,
font=self.__font,
color=text_color,
align=cmg.Align.Centered)
| 0 | 0 | 0 |
3813dda62f9c23200cb306784d23d1c106fcdddf | 3,299 | py | Python | nas4candle/nasapi/search/nas/ppo_a3c_sync.py | scrlnas2019/nas4candle | 318959424cc66819c816054a87bd1cb5d426e2e7 | [
"BSD-3-Clause"
] | 1 | 2021-01-22T04:03:00.000Z | 2021-01-22T04:03:00.000Z | nas4candle/nasapi/search/nas/ppo_a3c_sync.py | scrlnas2019/nas4candle | 318959424cc66819c816054a87bd1cb5d426e2e7 | [
"BSD-3-Clause"
] | 1 | 2021-01-23T00:14:17.000Z | 2021-01-23T00:14:17.000Z | nas4candle/nasapi/search/nas/ppo_a3c_sync.py | scrlnas2019/nas4candle | 318959424cc66819c816054a87bd1cb5d426e2e7 | [
"BSD-3-Clause"
] | 2 | 2019-11-27T04:42:00.000Z | 2021-01-22T04:06:59.000Z | import os
import json
from pprint import pprint, pformat
from mpi4py import MPI
import math
from nas4candle.nasapi.evaluator import Evaluator
from nas4candle.nasapi.search import util, Search
from nas4candle.nasapi.search.nas.agent import nas_ppo_sync_a3c
logger = util.conf_logger('nas4candle.nasapi.search.nas.ppo_a3c_sync')
LAUNCHER_NODES = int(os.environ.get('BALSAM_LAUNCHER_NODES', 1))
WORKERS_PER_NODE = int(os.environ.get('nas4candle.nasapi_WORKERS_PER_NODE', 1))
class NasPPOSyncA3C(Search):
"""Neural Architecture search using proximal policy gradient with synchronous optimization.
"""
@staticmethod
if __name__ == "__main__":
args = NasPPOSyncA3C.parse_args()
search = NasPPOSyncA3C(**vars(args))
search.main()
| 35.473118 | 126 | 0.656259 | import os
import json
from pprint import pprint, pformat
from mpi4py import MPI
import math
from nas4candle.nasapi.evaluator import Evaluator
from nas4candle.nasapi.search import util, Search
from nas4candle.nasapi.search.nas.agent import nas_ppo_sync_a3c
logger = util.conf_logger('nas4candle.nasapi.search.nas.ppo_a3c_sync')
def print_logs(runner):
logger.debug('num_episodes = {}'.format(runner.global_episode))
logger.debug(' workers = {}'.format(runner.workers))
def key(d):
return json.dumps(dict(arch_seq=d['arch_seq']))
LAUNCHER_NODES = int(os.environ.get('BALSAM_LAUNCHER_NODES', 1))
WORKERS_PER_NODE = int(os.environ.get('nas4candle.nasapi_WORKERS_PER_NODE', 1))
class NasPPOSyncA3C(Search):
"""Neural Architecture search using proximal policy gradient with synchronous optimization.
"""
def __init__(self, problem, run, evaluator, **kwargs):
self.rank = MPI.COMM_WORLD.Get_rank()
if self.rank == 0:
super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
MPI.COMM_WORLD.Barrier()
if self.rank != 0:
super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
# set in super : self.problem
# set in super : self.run_func
# set in super : self.evaluator
self.num_episodes = kwargs.get('num_episodes')
if self.num_episodes is None:
self.num_episodes = math.inf
self.reward_rule = util.load_attr_from('nas4candle.nasapi.search.nas.agent.utils.'+kwargs['reward_rule'])
self.space = self.problem.space
logger.debug(f'evaluator: {type(self.evaluator)}')
self.num_agents = MPI.COMM_WORLD.Get_size()
logger.debug(f'num_agents: {self.num_agents}')
logger.debug(f'rank: {self.rank}')
@staticmethod
def _extend_parser(parser):
parser.add_argument('--num-episodes', type=int, default=None,
help='maximum number of episodes')
parser.add_argument('--reward-rule', type=str,
default='reward_for_final_timestep',
choices=[
'reward_for_all_timesteps',
'reward_for_final_timestep'
],
help='A function which describe how to spread the episodic reward on all timesteps of the corresponding episode.')
return parser
def main(self):
# Settings
num_nodes = LAUNCHER_NODES * WORKERS_PER_NODE
if num_nodes > self.num_agents:
num_episodes_per_batch = (num_nodes-self.num_agents)//self.num_agents
else:
num_episodes_per_batch = 1
if self.rank == 0:
logger.debug(f'<Rank={self.rank}> num_nodes: {num_nodes}')
logger.debug(f'<Rank={self.rank}> num_episodes_per_batch: {num_episodes_per_batch}')
logger.debug(f'<Rank={self.rank}> starting training...')
nas_ppo_sync_a3c.train(
num_episodes=self.num_episodes,
seed=2018,
space=self.problem.space,
evaluator=self.evaluator,
num_episodes_per_batch=num_episodes_per_batch,
reward_rule=self.reward_rule
)
if __name__ == "__main__":
args = NasPPOSyncA3C.parse_args()
search = NasPPOSyncA3C(**vars(args))
search.main()
| 2,419 | 0 | 126 |
a0ab1f198b124e98f2a002b281b03ad14a7212be | 2,591 | py | Python | lxmltest.py | kendalled/PythonScripts | 9c2747fa27066e5f02c98e3cb7aca578e9e51ff1 | [
"MIT"
] | null | null | null | lxmltest.py | kendalled/PythonScripts | 9c2747fa27066e5f02c98e3cb7aca578e9e51ff1 | [
"MIT"
] | null | null | null | lxmltest.py | kendalled/PythonScripts | 9c2747fa27066e5f02c98e3cb7aca578e9e51ff1 | [
"MIT"
] | null | null | null | # GetScraped V2.5.1
# github.com/kendalled
### possible regexp: [^\s@<>]+@[^\s@<>]+\.[^\s@<>]+
### Backup regexp: '[\w.]+@[\w.]+'
import requests
import re
import unicodecsv as csv
import pandas as pd
# Negative Email Endings
#TODO: remove %20 from beginning
negatives = ['domain.net','group.calendar.google','youremail.com','sample.com','yoursite.com','internet.com','companysite.com','sentry.io','domain.xxx','sentry.wixpress.com', 'example.com', 'domain.com', 'address.com', 'xxx.xxx', 'email.com', 'yourdomain.com']
# Reads website column, initializes counter variable
df = pd.read_csv('./Arab.csv')
urls = list(dict.fromkeys(df['website']))
counter = 0
final_list = []
print_list = []
# Set Response Headers
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
if __name__ == "__main__":
for link in urls:
print(link)
email = get_email(link)
if(email):
for mail in [elem.lower() for elem in email]:
final_list.append(mail)
counter += len(email)
if(counter >= 2001):
break
print('------------------------')
print(str(counter) + ' Email(s) found so far.')
print('------------------------')
with open('Anaheim-CA-Emails.csv', 'wb') as csvfile:
final_list = list(set(final_list))
for i in final_list:
print_list.append({'email': i})
fieldnames = ['email']
writer = csv.DictWriter(csvfile, fieldnames = fieldnames, quoting=csv.QUOTE_ALL)
writer.writeheader()
for data in print_list:
writer.writerow(data)
print('File written!')
| 28.163043 | 260 | 0.580471 | # GetScraped V2.5.1
# github.com/kendalled
### possible regexp: [^\s@<>]+@[^\s@<>]+\.[^\s@<>]+
### Backup regexp: '[\w.]+@[\w.]+'
import requests
import re
import unicodecsv as csv
import pandas as pd
# Negative Email Endings
#TODO: remove %20 from beginning
negatives = ['domain.net','group.calendar.google','youremail.com','sample.com','yoursite.com','internet.com','companysite.com','sentry.io','domain.xxx','sentry.wixpress.com', 'example.com', 'domain.com', 'address.com', 'xxx.xxx', 'email.com', 'yourdomain.com']
# Reads website column, initializes counter variable
df = pd.read_csv('./Arab.csv')
urls = list(dict.fromkeys(df['website']))
counter = 0
final_list = []
print_list = []
# Set Response Headers
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
def get_email(url):
    """Scrape ``url`` and return a de-duplicated list of e-mail addresses.

    Returns an empty list when the page cannot be fetched/decoded or when
    no address survives the domain blacklist filter.
    """
    # Filtering Function
    def filter_func(candidate):
        # Keep only addresses whose domain part is not on the module-level
        # ``negatives`` blacklist of placeholder/sample domains.
        ind = candidate.find('@') + 1
        print('filtering...')
        return not (candidate[ind:] in negatives)
    # Get HTML, regexp match, filter out bad emails
    try:
        site = requests.get(url, verify=True, headers=headers, timeout=(2, 2)).content.decode()
        possible_emails = re.findall(r'[A-Za-z0-9._%+-]{3,}@[a-z]{3,}\.[a-z]{2,}(?:\.[a-z]{2,})?', site)
        print('Fetched Web Page.\n')
        res = list(set(filter(filter_func, possible_emails)))
    # Fail case 1: any request/decode/parsing error is treated as "not found".
    # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit still propagate.
    except Exception:
        print('Web Page Not Found. Deleting...')
        return []
    # Fail case 2: page fetched but nothing usable found.
    if not res:
        print('No Emails Found. Deleting...')
        return []
    # Success
    print('Emails:\n')
    print(res)
    return res
if __name__ == "__main__":
    # Crawl every unique website and accumulate scraped addresses until the
    # hard cap of 2001 addresses (pre-deduplication count) is reached.
    for link in urls:
        print(link)
        email = get_email(link)
        if(email):
            # Normalise addresses to lower case before collecting.
            for mail in [elem.lower() for elem in email]:
                final_list.append(mail)
            counter += len(email)
        if(counter >= 2001):
            break
    print('------------------------')
    print(str(counter) + ' Email(s) found so far.')
    print('------------------------')
    # Write the de-duplicated addresses out as a one-column CSV.
    # NOTE(review): opened in binary mode — unicodecsv writes byte streams.
    with open('Anaheim-CA-Emails.csv', 'wb') as csvfile:
        final_list = list(set(final_list))
        for i in final_list:
            print_list.append({'email': i})
        fieldnames = ['email']
        writer = csv.DictWriter(csvfile, fieldnames = fieldnames, quoting=csv.QUOTE_ALL)
        writer.writeheader()
        for data in print_list:
            writer.writerow(data)
        print('File written!')
| 837 | 0 | 23 |
4c375fcbd231d24a6a5f99e8b64c2cecd0001ebc | 18,909 | py | Python | SHADE_Constraints_Optimization/constrained_problem_set.py | Xuyang-Huang/SHADE-python | 0be096e8d420e71c37ae9ceb458d2b7d0d6ba436 | [
"MIT"
] | 5 | 2021-03-21T10:34:10.000Z | 2021-06-09T03:59:58.000Z | SHADE_Constraints_Optimization/constrained_problem_set.py | Xuyang-Huang/SHADE-python | 0be096e8d420e71c37ae9ceb458d2b7d0d6ba436 | [
"MIT"
] | null | null | null | SHADE_Constraints_Optimization/constrained_problem_set.py | Xuyang-Huang/SHADE-python | 0be096e8d420e71c37ae9ceb458d2b7d0d6ba436 | [
"MIT"
] | null | null | null | import numpy as np
| 33.826476 | 243 | 0.432017 | import numpy as np
class Problem:
    """Facade over the implemented RC real-world constrained benchmark problems.

    ``problem_num`` selects one RC problem; the wrapper re-exposes its
    dimensions, bounds, objective and constraint functions uniformly.
    """
    def __init__(self, problem_num):
        assert problem_num != 0, 'Please start from problem 1'
        # Map RC problem number -> implementing class. Built lazily here
        # because the problem classes are defined after this class.
        dispatch = {
            1: HeatExchangerNetworkDesign1,                    # RC01
            2: HeatExchangerNetworkDesign2,                    # RC02
            3: OptimalOperationOfAlkylationUnit,               # RC03
            4: ReactorNetworkDesign,                           # RC04
            5: HaverlysPoolingProblem,                         # RC05
            11: TwoReactorProblem,                             # RC11
            12: ProcessSynthesisProblem,                       # RC12
            14: MultiProductBatchPlant,                        # RC14
            15: WeightMinimizationOfASpeedReducer,             # RC15
            22: PlanetaryGearTrainDesignOptimizationProblem,   # RC22
            28: RollingElementBearing,                         # RC28
        }
        try:
            self.problem = dispatch[problem_num]()
        except KeyError:
            # Previously an unsupported number only surfaced later as an
            # opaque AttributeError on ``self.problem``; fail fast instead.
            raise ValueError('Problem %s is not implemented' % problem_num) from None
        self.dim = self.problem.dim
        self.h_num = self.problem.h_num
        self.g_num = self.problem.g_num
        self.low_bounds = self.problem.low_bounds
        self.up_bounds = self.problem.up_bounds
    def objective_function(self, x):
        """Evaluate the wrapped problem's objective on population ``x``."""
        return self.problem.objctive_function(x)
    def constrain_h(self, x):
        """Equality constraints of the wrapped problem (list of arrays)."""
        return self.problem.constrain_h(x)
    def constrain_g(self, x):
        """Inequality constraints of the wrapped problem (list of arrays)."""
        return self.problem.constrain_g(x)
class ReactorNetworkDesign:  # RC04
    """RC04: reactor network design.

    Six decision variables, four equality and one inequality constraint.
    """
    def __init__(self):
        self.dim = 6
        self.h_num = 4
        self.g_num = 1
        self.up_bounds = [1, 1, 1, 1, 16, 16]
        self.low_bounds = [0, 0, 0, 0, 0.00001, 0.00001]
        # Reaction rate constants of the benchmark formulation.
        self.k1 = 0.09755988
        self.k2 = 0.99 * self.k1
        self.k3 = 0.0391908
        self.k4 = 0.9 * self.k3
    def objctive_function(self, x):
        """Maximise x4, expressed as a minimisation objective (negated)."""
        return -x[:, 3]
    def constrain_h(self, x):
        """Equality constraints h(x) = 0, one (n,) array per constraint."""
        c1, c2, c3, c4, v1, v2 = (x[:, j] for j in range(6))
        return [
            self.k1 * v1 * c2 + c1 - 1,
            self.k3 * v1 * c3 + c3 + c1 - 1,
            self.k2 * v2 * c2 - c1 + c2,
            self.k4 * v2 * c4 + c2 - c1 + c4 - c3,
        ]
    def constrain_g(self, x):
        """Single inequality constraint on the two volume variables."""
        return [x[:, 4] ** 0.5 + x[:, 5] ** 0.5 - 4]
class TwoReactorProblem:  # RC11
    """RC11: two-reactor process selection.

    Columns 4 and 5 are relaxed binary selectors, rounded before use.
    """
    def __init__(self):
        self.dim = 7
        self.h_num = 4
        self.g_num = 4
        self.up_bounds = [20, 20, 10, 10, 1.49, 1.49, 40]
        self.low_bounds = [0, 0, 0, 0, -0.51, -0.51, 0]
    @staticmethod
    def _selectors(x):
        """Rounded binary reactor-selection columns (y4, y5)."""
        return np.round(x[:, 4]), np.round(x[:, 5])
    def objctive_function(self, x):
        """Total cost of the selected reactor configuration."""
        y4, y5 = self._selectors(x)
        return 7.5 * y4 + 5.5 * y5 + 7 * x[:, 2] + 6 * x[:, 3] + 5 * x[:, 6]
    def constrain_h(self, x):
        """Conversion and flow-balance equality constraints."""
        y4, y5 = self._selectors(x)
        z1 = 0.9 * (1 - np.exp(-0.5 * x[:, 2])) * x[:, 0]
        z2 = 0.8 * (1 - np.exp(-0.4 * x[:, 3])) * x[:, 1]
        return [
            y4 + y5 - 1,
            z1 + z2 - 10,
            x[:, 0] + x[:, 1] - x[:, 6],
            z1 * y4 + z2 * y5 - 10,
        ]
    def constrain_g(self, x):
        """Coupling inequalities tying flows to the rounded selectors."""
        y4, y5 = self._selectors(x)
        return [
            x[:, 2] - 10 * y4,
            x[:, 3] - 10 * y5,
            x[:, 0] - 20 * y4,
            x[:, 1] - 20 * y5,
        ]
class HeatExchangerNetworkDesign1:  # RC01
    """RC01 benchmark: heat exchanger network design (case 1).

    Nine variables, eight equality constraints, no inequality constraints.
    """
    def __init__(self):
        self.dim = 9
        self.h_num = 8
        self.g_num = 0
        self.up_bounds = [10,200,100,200,2000000,600,600,600,900]
        self.low_bounds = [0,0,0,0,1000,0,100,100,100]
    def objctive_function(self, x):
        """Exchanger cost term; ``x`` is an (n, 9) array of candidates."""
        y = 35 * x[:, 0] ** 0.6 + 35 * x[:, 1] ** 0.6
        return y
    def constrain_h(self, x):
        """Equality constraints h(x) = 0, returned as a list of (n,) arrays."""
        h = []
        h.append(200 * x[:, 0] * x[:, 3] - x[:, 2])
        h.append(200 * x[:, 1] * x[:, 5] - x[:, 4])
        h.append(x[:, 2] - 10000 * (x[:, 6] - 100))
        h.append(x[:, 4] - 10000 * (300 - x[:, 6]))
        h.append(x[:, 2] - 10000 * (600 - x[:, 7]))
        h.append(x[:, 4] - 10000 * (900 - x[:, 8]))
        # 1e-8 offsets keep np.log defined on the boundary of the domain.
        h.append(x[:, 3] * np.log(np.abs(x[:, 7] - 100) + 1e-8) - x[:, 3] * np.log((600 - x[:, 6]) + 1e-8) - x[:, 7] + x[:, 6] + 500)
        h.append(x[:, 5] * np.log(np.abs(x[:, 8] - x[:, 6]) + 1e-8) - x[:, 5] * np.log(600) - x[:, 8] + x[:, 6] + 600)
        return h
    def constrain_g(self, x):
        """No inequality constraints for this problem."""
        g = []
        return g
class HaverlysPoolingProblem:  # RC05
    """RC05: Haverly's pooling problem (negated profit, minimisation form)."""
    def __init__(self):
        self.dim = 9
        self.h_num = 4
        self.g_num = 2
        self.up_bounds = [100, 200, 100, 100, 100, 100, 200, 100, 200]
        self.low_bounds = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    def objctive_function(self, x):
        """Cost minus revenue (the negated profit of the original model)."""
        revenue = 9 * x[:, 0] + 15 * x[:, 1]
        cost = 6 * x[:, 2] + 16 * x[:, 3] + 10 * (x[:, 4] + x[:, 5])
        return cost - revenue
    def constrain_h(self, x):
        """Pool mass- and quality-balance equality constraints."""
        x1, x2, x3, x4, x5, x6, x7, x8, x9 = (x[:, j] for j in range(9))
        return [
            x7 + x8 - x3 - x4,
            x1 - x7 - x5,
            x2 - x8 - x6,
            x9 * x7 + x9 * x8 - 3 * x3 - x4,
        ]
    def constrain_g(self, x):
        """Product-quality inequality constraints."""
        x1, x2, x5, x6, x7, x8, x9 = (x[:, j] for j in (0, 1, 4, 5, 6, 7, 8))
        return [
            x9 * x7 + 2 * x5 - 2.5 * x1,
            x9 * x8 + 2 * x6 - 1.5 * x2,
        ]
class HeatExchangerNetworkDesign2:  # RC02
    """RC02 benchmark: heat exchanger network design (case 2).

    Eleven variables, nine equality constraints, no inequality constraints.
    """
    def __init__(self):
        self.dim = 11
        self.h_num = 9
        self.g_num = 0
        self.up_bounds = [0.819*10**6, 1.131*10**6, 2.05*10**6,0.05074,0.05074,0.05074,200,300,300,300,400]
        self.low_bounds = [10**4,10**4,10**4,0,0,0,100,100,100,100,100]
    def objctive_function(self, x):
        """Cost objective; 1e-10 offsets guard against division by zero."""
        y = (x[:, 0] / (120 * x[:, 3] + 1e-10)) ** 0.6 + (x[:, 1] / (80 * x[:, 4] + 1e-10)) ** 0.6 + (
                x[:, 2] / (40 * x[:, 5] + 1e-10)) ** 0.6
        return y
    def constrain_h(self, x):
        """Equality constraints h(x) = 0; 1e-8 offsets keep np.log defined."""
        h = []
        h.append(x[:,0]-1e4*(x[:,6]-100))
        h.append(x[:,1]-1e4*(x[:,7]-x[:,6]))
        h.append(x[:,2]-1e4*(500-x[:,7]))
        h.append(x[:,0]-1e4*(300-x[:,8]))
        h.append(x[:,1]-1e4*(400-x[:,9]))
        h.append(x[:,2]-1e4*(600-x[:,10]))
        h.append(x[:,3]*np.log(np.abs(x[:,8]-100)+1e-8)-x[:,3]*np.log(300-x[:,6]+1e-8)-x[:,8]-x[:,6]+400)
        h.append(x[:,4]*np.log(np.abs(x[:,9]-x[:,6])+1e-8)-x[:,4]*np.log(np.abs(400-x[:,7])+1e-8)-x[:,9]+x[:,6]-x[:,7]+400)
        h.append(x[:,5]*np.log(np.abs(x[:,10]-x[:,7])+1e-8)-x[:,5]*np.log(100)-x[:,10]+x[:,7]+100)
        return h
    def constrain_g(self, x):
        """No inequality constraints for this problem."""
        g = []
        return g
class OptimalOperationOfAlkylationUnit:  # RC03
    """RC03 benchmark: optimal operation of an alkylation unit.

    Seven variables, fourteen inequality constraints, no equalities.
    """
    def __init__(self):
        self.dim = 7
        self.h_num = 0
        self.g_num = 14
        self.up_bounds = [2000,100,4000,100,100,20,200]
        self.low_bounds = [1000,0,2000,0,0,0,0]
    def objctive_function(self, x):
        """Negated profit of the unit (minimisation form)."""
        y = -1.715*x[:,0]-0.035*x[:,0]*x[:,5]-4.0565*x[:,2]-10.0*x[:,1]+0.063*x[:,2]*x[:,4]
        return y
    def constrain_h(self, x):
        """No equality constraints for this problem."""
        h = []
        return h
    def constrain_g(self, x):
        """Fourteen inequality constraints g(x) <= 0 with benchmark constants."""
        g = []
        g.append(0.0059553571*x[:,5]**2*x[:,0]+0.88392857*x[:,2]-0.1175625*x[:,5]*x[:,0]-x[:,0])
        g.append(1.1088*x[:,0]+0.1303533*x[:,0]*x[:,5]-0.0066033*x[:,0]*x[:,5]**2-x[:,2])
        g.append(6.66173269*x[:,5]**2+172.39878*x[:,4]-56.596669*x[:,3]-191.20592*x[:,5]-10000)
        g.append(1.08702*x[:,5]+0.32175*x[:,3]-0.03762*x[:,5]**2-x[:,4]+56.85075)
        g.append(0.006198*x[:,6]*x[:,3]*x[:,2]+2462.3121*x[:,1]-25.125634*x[:,1]*x[:,3]-x[:,2]*x[:,3])
        g.append(161.18996*x[:,2]*x[:,3]+5000.0*x[:,1]*x[:,3]-489510.0*x[:,1]-x[:,2]*x[:,3]*x[:,6])
        g.append(0.33*x[:,6]-x[:,4]+44.333333)
        g.append(0.022556*x[:,4]-0.007595*x[:,6]-1.0)
        g.append(0.00061*x[:,2]-0.0005*x[:,0]-1.0)
        g.append(0.819672*x[:,0]-x[:,2]+0.819672)
        g.append(24500.0*x[:,1]-250.0*x[:,1]*x[:,3]-x[:,2]*x[:,3])
        g.append(1020.4082*x[:,3]*x[:,1]+1.2244898*x[:,2]*x[:,3]-100000*x[:,1])
        g.append(6.25*x[:,0]*x[:,5]+6.25*x[:,0]-7.625*x[:,2]-100000)
        g.append(1.22*x[:,2]-x[:,5]*x[:,0]-x[:,0]+1.0)
        return g
class MultiProductBatchPlant:  # RC14
    """RC14 benchmark: multi-product batch plant design.

    Columns 0-2 are relaxed integers (unit counts, rounded before use),
    3-5 are stage volumes, 6-7 cycle times, 8-9 batch sizes.
    """
    def __init__(self):
        self.dim = 10
        self.h_num = 0
        self.g_num = 10
        self.up_bounds = [3.49,3.49,3.49,2500,2500,2500,20,16,700,450]
        self.low_bounds = [0.51,0.51,0.51,250,250,250,6,4,40,10]
        # Problem constants of the RC14 formulation.
        self.S = np.array([[2, 3, 4], [4, 6, 3]])
        self.t = np.array([[8, 20, 8], [16, 4, 4]])
        self.H = 6000
        self.alp = 250
        self.beta = 0.6
        self.Q1 = 40000
        self.Q2 = 20000
    def objctive_function(self, x):
        """Plant cost: alp * sum over stages of Nj * Vj**beta."""
        # decision variables
        N1 = np.round(x[:, 0])
        N2 = np.round(x[:, 1])
        N3 = np.round(x[:, 2])
        V1 = x[:, 3]
        V2 = x[:, 4]
        V3 = x[:, 5]
        y = self.alp*(N1*V1**self.beta+N2*V2**self.beta+N3*V3**self.beta)
        return y
    def constrain_h(self, x):
        """No equality constraints for this problem."""
        h = []
        return h
    def constrain_g(self, x):
        """Horizon, volume and cycle-time inequality constraints g(x) <= 0."""
        N1 = np.round(x[:, 0])
        N2 = np.round(x[:, 1])
        N3 = np.round(x[:, 2])
        V1 = x[:, 3]
        V2 = x[:, 4]
        V3 = x[:, 5]
        TL1 = x[:, 6]
        TL2 = x[:, 7]
        B1 = x[:, 8]
        B2 = x[:, 9]
        g = []
        g.append(self.Q1 * TL1 / B1 + self.Q2 * TL2 / B2 - self.H)
        g.append(self.S[0, 0] * B1 + self.S[1, 0] * B2 - V1)
        g.append(self.S[0, 1] * B1 + self.S[1, 1] * B2 - V2)
        g.append(self.S[0, 2] * B1 + self.S[1, 2] * B2 - V3)
        g.append(self.t[0, 0] - N1 * TL1)
        g.append(self.t[0, 1] - N2 * TL1)
        g.append(self.t[0, 2] - N3 * TL1)
        g.append(self.t[1, 0] - N1 * TL2)
        g.append(self.t[1, 1] - N2 * TL2)
        g.append(self.t[1, 2] - N3 * TL2)
        return g
class WeightMinimizationOfASpeedReducer:  # RC15
    """RC15 benchmark: weight minimisation of a speed reducer.

    Seven design variables, eleven inequality constraints, no equalities.
    Fix: the exponents in g3/g5/g6 were written as the one-element list
    ``[-3]`` (accidentally relying on NumPy broadcasting); they are now
    plain scalar exponents with identical numeric results.
    """
    def __init__(self):
        self.dim = 7
        self.h_num = 0
        self.g_num = 11
        self.up_bounds = [3.6, 0.8, 28, 8.3, 8.3, 3.9, 5.5]
        self.low_bounds = [2.6, 0.7, 17, 7.3, 7.3, 2.9, 5]
    def objctive_function(self, x):
        """Gearbox weight as a function of the seven design variables."""
        y = 0.7854*x[:,0]*x[:,1]**2*(3.3333*x[:,2]**2+14.9334*x[:,2]-43.0934)-1.508*x[:,0]*(x[:,5]**2+x[:,6]**2)\
            +7.477*(x[:,5]**3+x[:,6]**3)+0.7854*(x[:,3]*x[:,5]**2+x[:,4]*x[:,6]**2)
        return y
    def constrain_h(self, x):
        """No equality constraints for this problem."""
        h = []
        return h
    def constrain_g(self, x):
        """Stress and geometry inequality constraints g(x) <= 0."""
        g = []
        g.append(-x[:, 0] * x[:, 1] ** 2 * x[:, 2] + 27)
        g.append(-x[:, 0] * x[:, 1] ** 2 * x[:, 2] ** 2 + 397.5)
        g.append(-x[:, 1] * x[:, 5] ** 4 * x[:, 2] * x[:, 3] ** -3 + 1.93)
        g.append(-x[:, 1] * x[:, 6] ** 4 * x[:, 2] / x[:, 4] ** 3 + 1.93)
        g.append(10 * x[:, 5] ** -3 * np.sqrt(16.91e6 + (745 * x[:, 3] / (x[:, 1] * x[:, 2])) ** 2) - 1100)
        g.append(10 * x[:, 6] ** -3 * np.sqrt(157.5e6 + (745 * x[:, 4] / (x[:, 1] * x[:, 2])) ** 2) - 850)
        g.append(x[:, 1] * x[:, 2] - 40)
        g.append(-x[:, 0] / x[:, 1] + 5)
        g.append(x[:, 0] / x[:, 1] - 12)
        g.append(1.5 * x[:, 5] - x[:, 3] + 1.9)
        g.append(1.1 * x[:, 6] - x[:, 4] + 1.9)
        return g
class RollingElementBearing:  # RC28
    """RC28 benchmark: rolling element bearing design.

    Ten variables; the ball count Z (column 2) is rounded to an integer.
    """
    def __init__(self):
        self.dim = 10
        self.h_num = 0
        self.g_num = 9
        self.up_bounds = [150,31.5,50.49,0.6,0.6,0.5,0.7,0.4,0.1,0.85]
        self.low_bounds = [125,10.5,4.51,0.515,0.515,0.4,0.6,0.3,0.02,0.6]
    def objctive_function(self, x):
        """Dynamic load capacity; a different formula applies when Db > 25.4."""
        Dm = x[:, 0]
        Db = x[:, 1]
        Z = np.round(x[:, 2])
        fi = x[:, 3]
        fo = x[:, 4]
        gamma = Db / Dm
        fc = 37.91 * (1 + (1.04 * ((1 - gamma) / (1 + gamma)) ** 1.72 * (fi * (2 * fo - 1) / (fo * (2 * fi - 1))) ** 0.41) ** (10 / 3)) ** (-0.3) * (gamma ** 0.3 * (1 - gamma) ** 1.39 / (1 + gamma) ** (1 / 3)) * (2 * fi / (2 * fi - 1)) ** 0.41
        fc = np.array(fc)
        Z = np.array(Z)
        Db = np.array(Db)
        # Rows with large ball diameter use the alternative capacity formula.
        ind = np.where(Db > 25.4)
        y = fc * Z ** (2 / 3) * Db ** (1.8)
        y[ind] = 3.647 * fc[ind] * Z[ind] ** (2 / 3) * Db[ind] ** 1.4
        return y
    def constrain_h(self, x):
        """No equality constraints for this problem."""
        h = []
        return h
    def constrain_g(self, x):
        """Geometry inequality constraints g(x) <= 0 (fixed D, d, Bw)."""
        Dm = x[:, 0]
        Db = x[:, 1]
        Z = np.round(x[:, 2])
        fi = x[:, 3]
        fo = x[:, 4]
        KDmin = x[:, 5]
        KDmax = x[:, 6]
        eps = x[:, 7]
        e = x[:, 8]
        chi = x[:, 9]
        # Fixed bearing envelope constants of the benchmark.
        D = 160
        d = 90
        Bw = 30
        T = D - d - 2 * Db
        phi_o = 2 * np.pi - 2 * np.arccos((((D - d) * 0.5 - 0.75 * T) ** 2 + (0.5 * D - 0.25 * T - Db) ** 2 - (0.5 * d + 0.25 * T) ** 2) / (2 * (0.5 * (D - d) - 0.75 * T) * (0.5 * D - 0.25 * T - Db)))
        Z = np.array(Z)
        Db = np.array(Db)
        g = []
        g.append(Z - 1 - phi_o / (2 * np.arcsin(Db / Dm)))
        g.append(KDmin * (D - d) - 2 * Db)
        g.append(2 * Db - KDmax * (D - d))
        g.append(chi * Bw - Db)
        g.append(0.5 * (D + d) - Dm)
        g.append(Dm - (0.5 + e) * (D + d))
        g.append(eps * Db - 0.5 * (D - Dm - Db))
        g.append(0.515 - fi)
        g.append(0.515 - fo)
        return g
class ProcessSynthesisProblem:  # RC12
    """RC12 benchmark: process synthesis MINLP.

    Three continuous variables (columns 0-2) and four relaxed binaries
    (columns 3-6, rounded before use).
    Fix: the ``(x1 - 1) ** 22`` term in the objective was a typo for the
    quadratic ``(x1 - 1) ** 2`` of the standard RC12 formulation.
    """
    def __init__(self):
        self.dim = 7
        self.h_num = 0
        self.g_num = 9
        self.up_bounds = [100,100,100,1.49,1.49,1.49,1.49]
        self.low_bounds = [0,0,0,-0.51,-0.50,-0.50,-0.50]
    def objctive_function(self, x):
        """Objective value; the 1e-10 offset guards np.log against zero."""
        x1 = x[:,0]
        x2 = x[:,1]
        x3 = x[:,2]
        y1 = np.round(x[:,3])
        y2 = np.round(x[:,4])
        y3 = np.round(x[:,5])
        y4 = np.round(x[:,6])
        y = (y1-1)**2 + (y2-1)**2 + (y3-1)**2 - np.log(y4+1+1e-10) + (x1-1)**2 + (x2-2)**2 + (x3-3)**2
        return y
    def constrain_h(self, x):
        """No equality constraints for this problem."""
        h = []
        return h
    def constrain_g(self, x):
        """Inequality constraints g(x) <= 0 on the mixed variables."""
        x1 = x[:,0]
        x2 = x[:,1]
        x3 = x[:,2]
        y1 = np.round(x[:,3])
        y2 = np.round(x[:,4])
        y3 = np.round(x[:,5])
        y4 = np.round(x[:,6])
        g = []
        g.append(x1 + x2 + x3 + y1 + y2 + y3 - 5)
        g.append(y3**2 + x1**2 + x2**2 + x3**2 - 5.5)
        g.append(x1 + y1 - 1.2)
        g.append(x2 + y2 - 1.8)
        g.append(x3 + y3 - 2.5)
        g.append(x1 + y4 - 1.2)
        g.append(y2**2 + x2**2 - 1.64)
        g.append(y3**2 + x3**2 - 4.25)
        g.append(y2**2 + x3**2 - 4.64)
        return g
class PlanetaryGearTrainDesignOptimizationProblem:  # RC22
    """RC22 benchmark: planetary gear train design.

    Nine variables: six tooth counts, one planet-count index and two
    module indices (columns 6-8 index into lookup tables).
    NOTE(review): ``objctive_function`` rounds its input but the two
    constraint functions use the raw columns — confirm against the
    reference formulation.
    """
    def __init__(self):
        self.dim = 9
        self.h_num = 1
        self.g_num = 10
        self.up_bounds = [96.49,54.49,51.49,46.49,51.49,124.49,3.49,6.49,6.49]
        self.low_bounds = [16.51,13.51,13.51,16.51,13.51,47.51,0.51,0.51,0.51]
    def objctive_function(self, x):
        """Maximum deviation of the three gear ratios from their targets."""
        x = np.round(np.abs(x))
        N1 = x[:, 0]
        N2 = x[:, 1]
        N3 = x[:, 2]
        N4 = x[:, 3]
        N6 = x[:, 5]
        i1 = N6 / N4
        i01 = 3.11
        i2 = N6 * (N1 * N3 + N2 * N4) / ((N1 * N3 * (N6 - N4)))
        i02 = 1.84
        iR = -(N2 * N6 / (N1 * N3))
        i0R = -3.11
        # Element-wise maximum over the three ratio deviations (axis 0).
        y = np.max([i1-i01,i2-i02,iR-i0R], 0)
        return y
    def constrain_h(self, x):
        """Equality constraint: N6 - N4 must be divisible by the planet count."""
        Pind = np.array([3, 4, 5])
        N4 = x[:, 3]
        N6 = x[:, 5]
        # Column 6 is a 1-based index into the planet-count table Pind.
        p = Pind[(x[:, 6]-1).astype(np.int64)].T
        h = []
        h.append(np.remainder(N6-N4, p))
        return h
    def constrain_g(self, x):
        """Geometry inequality constraints g(x) <= 0."""
        Pind = np.array([3, 4, 5])
        mind = np.array([1.75, 2, 2.25, 2.5, 2.75, 3.0])
        N1 = x[:, 0]
        N2 = x[:, 1]
        N3 = x[:, 2]
        N4 = x[:, 3]
        N5 = x[:, 4]
        N6 = x[:, 5]
        # Columns 6-8 are 1-based indices into the planet/module tables.
        p = Pind[(x[:, 6]-1).astype(np.int64)].T
        m1 = mind[(x[:, 7]-1).astype(np.int64)].T
        m2 = mind[(x[:, 8]-1).astype(np.int64)].T
        Dmax = 220
        dlt22 = 0.5
        dlt33 = 0.5
        dlt55 = 0.5
        dlt35 = 0.5
        dlt34 = 0.5
        dlt56 = 0.5
        beta = np.arccos(((N6 - N3) ** 2 + (N4 + N5) ** 2 - (N3 + N5) ** 2) / (2 * (N6 - N3) * (N4 + N5)))
        g = []
        g.append(m2 * (N6 + 2.5) - Dmax)
        g.append(m1 * (N1 + N2) + m1 * (N2 + 2) - Dmax)
        g.append(m2 * (N4 + N5) + m2 * (N5 + 2) - Dmax)
        g.append(np.abs(m1 * (N1 + N2) - m2 * (N6 - N3)) - m1 - m2)
        g.append(-((N1 + N2) * np.sin(np.pi / p) - N2 - 2 - dlt22))
        g.append(-((N6 - N3) * np.sin(np.pi / p) - N3 - 2 - dlt33))
        g.append(-((N4 + N5) * np.sin(np.pi / p) - N5 - 2 - dlt55))
        # arccos returns NaN outside [-1, 1]; NaN != NaN makes the check fail
        # and a large penalty value is appended instead of the real constraint.
        if (beta == np.real(beta)).all():
            g.append((N3 + N5 + 2 + dlt35) ** 2 - (
                    (N6 - N3) ** 2 + (N4 + N5) ** 2 - 2 * (N6 - N3) * (N4 + N5) * np.cos(2 * np.pi / p - beta)))
        else:
            g.append(1e6)
        g.append(-(N6 - 2 * N3 - N4 - 4 - 2 * dlt34))
        g.append(-(N6 - N4 - 2 * N5 - 4 - 2 * dlt56))
        return g
class RobotGripperProblem:  # RC24
    """RC24 benchmark: robot gripper problem (INCOMPLETE implementation).

    NOTE(review): this class is not reachable through ``Problem`` (no
    dispatch entry for 24) and its objective is broken — see the notes
    inside ``objctive_function``.
    """
    def __init__(self):
        self.dim = 7
        self.h_num = 0
        self.g_num = 7
        self.up_bounds = [150,150,200,50,150,300,3.14]
        self.low_bounds = [10,10,100,0,10,100,1]
    def objctive_function(self, x):
        """BROKEN: references undefined names and would raise before returning.

        NOTE(review): ``OBJ11``, ``f`` and ``y`` are never defined, and
        ``len(x.shape[0])`` calls len() on an int (TypeError). The computed
        angles alpha_0/beta_0/alpha_m/beta_m are unused.
        """
        a = x[:, 0]
        b = x[:, 1]
        c = x[:, 2]
        e = x[:, 3]
        ff = x[:, 4]
        l = x[:, 5]
        delta = x[:, 6]
        Ymin = 50
        Ymax = 100
        YG = 150
        Zmax = 99.9999
        P = 100
        alpha_0 = np.arccos((a ** 2 + l ** 2 + e ** 2 - b ** 2) / (2 * a * np.sqrt(l ** 2 + e ** 2))) + np.arctan(e / l)
        beta_0 = np.arccos((b ** 2 + l ** 2 + e ** 2 - a ** 2) / (2 * b * np.sqrt(l ** 2 + e ** 2))) - np.arctan(e / l)
        alpha_m = np.arccos((a ** 2 + (l - Zmax) ** 2 + e ** 2 - b ** 2) / (2 * a * np.sqrt((l - Zmax) ** 2 + e ** 2))) + np.arctan(
            e / (l - Zmax))
        beta_m = np.arccos((b ** 2 + (l - Zmax) ** 2 + e ** 2 - a ** 2) / (2 * b * np.sqrt((l - Zmax) ** 2 + e ** 2))) - np.arctan(
            e / (l - Zmax))
        for i in range(len(x.shape[0])):
            f[i, 1] = -OBJ11[x[i, :], 2] - OBJ11[x[i, :], 1]
        return y
    def constrain_h(self, x):
        """No equality constraints for this problem."""
        h = []
        return h
    def constrain_g(self, x):
        """NOTE(review): these 9 constraints duplicate ProcessSynthesisProblem
        (RC12) and contradict ``g_num = 7`` above — looks like a copy-paste
        placeholder; confirm against the RC24 reference formulation.
        """
        x1 = x[:,0]
        x2 = x[:,1]
        x3 = x[:,2]
        y1 = np.round(x[:,3])
        y2 = np.round(x[:,4])
        y3 = np.round(x[:,5])
        y4 = np.round(x[:,6])
        g = []
        g.append(x1 + x2 + x3 + y1 + y2 + y3 - 5)
        g.append(y3**2 + x1**2 + x2**2 + x3**2 - 5.5)
        g.append(x1 + y1 - 1.2)
        g.append(x2 + y2 - 1.8)
        g.append(x3 + y3 - 2.5)
        g.append(x1 + y4 - 1.2)
        g.append(y2**2 + x2**2 - 1.64)
        g.append(y3**2 + x3**2 - 4.25)
        g.append(y2**2 + x3**2 - 4.64)
        return g
| 16,955 | 227 | 1,690 |
ab44b5ddf439c1e25658c0a5aa2ce5774c0a05ad | 480 | py | Python | tests/common/test_code_status.py | trickeydan/astoria | ef08ed4be4d5997751846b0cadce9aa8261ae151 | [
"MIT"
] | null | null | null | tests/common/test_code_status.py | trickeydan/astoria | ef08ed4be4d5997751846b0cadce9aa8261ae151 | [
"MIT"
] | 7 | 2020-11-29T13:03:10.000Z | 2020-12-09T17:20:25.000Z | tests/common/test_code_status.py | trickeydan/astoria | ef08ed4be4d5997751846b0cadce9aa8261ae151 | [
"MIT"
] | null | null | null | """Test the code status enum."""
from astoria.common.code_status import CodeStatus
def test_code_status_enum() -> None:
    """Test that CodeStatus has the right values."""
    expected = {
        CodeStatus.STARTING: "code_starting",
        CodeStatus.RUNNING: "code_running",
        CodeStatus.KILLED: "code_killed",
        CodeStatus.FINISHED: "code_finished",
        CodeStatus.CRASHED: "code_crashed",
    }
    for member, value in expected.items():
        assert member.value == value
    # No members beyond the five listed above.
    assert len(CodeStatus) == 5
| 34.285714 | 55 | 0.729167 | """Test the code status enum."""
from astoria.common.code_status import CodeStatus
def test_code_status_enum() -> None:
    """Test that CodeStatus has the right values.

    Checks each member's wire value and that no extra members exist.
    """
    assert CodeStatus.STARTING.value == "code_starting"
    assert CodeStatus.RUNNING.value == "code_running"
    assert CodeStatus.KILLED.value == "code_killed"
    assert CodeStatus.FINISHED.value == "code_finished"
    assert CodeStatus.CRASHED.value == "code_crashed"
    # Guards against members being added without updating this test.
    assert len(CodeStatus) == 5
| 0 | 0 | 0 |
5b165f5b9cea0c1e043ac7c55b5a57059b7fe758 | 496 | py | Python | tests/cpp/tests/arv/service_status/test.py | VVCAS-Sean/OpenUxAS | dcd7be29d182d278a5387908f568d6f8a06b79ee | [
"NASA-1.3"
] | 88 | 2017-08-24T07:02:01.000Z | 2022-03-18T04:34:17.000Z | tests/cpp/tests/arv/service_status/test.py | VVCAS-Sean/OpenUxAS | dcd7be29d182d278a5387908f568d6f8a06b79ee | [
"NASA-1.3"
] | 46 | 2017-06-08T18:18:08.000Z | 2022-03-15T18:24:43.000Z | tests/cpp/tests/arv/service_status/test.py | VVCAS-Sean/OpenUxAS | dcd7be29d182d278a5387908f568d6f8a06b79ee | [
"NASA-1.3"
] | 53 | 2017-06-22T14:48:05.000Z | 2022-02-15T16:59:38.000Z | import time
from pylmcp import Object
from pylmcp.server import Server
from pylmcp.uxas import AutomationRequestValidator, UxASConfig
# Create bridge configuration
bridge_cfg = UxASConfig()
bridge_cfg += AutomationRequestValidator()
with Server(bridge_cfg=bridge_cfg) as server:
    try:
        # Send one randomised ServiceStatus message through the bridge.
        obj = Object(class_name='ServiceStatus', StatusType=2,
                     randomize=True)
        server.send_msg(obj)
        # Give the bridge a moment to process the message before shutdown.
        time.sleep(1)
        print("OK")
    finally:
        print("Here")
| 26.105263 | 62 | 0.693548 | import time
from pylmcp import Object
from pylmcp.server import Server
from pylmcp.uxas import AutomationRequestValidator, UxASConfig
# Create bridge configuration
bridge_cfg = UxASConfig()
bridge_cfg += AutomationRequestValidator()
with Server(bridge_cfg=bridge_cfg) as server:
try:
obj = Object(class_name='ServiceStatus', StatusType=2,
randomize=True)
server.send_msg(obj)
time.sleep(1)
print("OK")
finally:
print("Here")
| 0 | 0 | 0 |
bdd867bc0a3d4b8646695f6c27b0957d0c83b2a2 | 3,666 | py | Python | test/test_Likelihood/test_LensLikelihood/test_td_mag_magnitude_likelihood.py | LBJ-Wade/hierarc_SGL | 1dc2be90f44f99e82ab7014f2027fbb077b14f98 | [
"BSD-3-Clause"
] | 5 | 2020-07-08T00:53:04.000Z | 2021-08-03T08:20:31.000Z | test/test_Likelihood/test_LensLikelihood/test_td_mag_magnitude_likelihood.py | LBJ-Wade/hierarc_SGL | 1dc2be90f44f99e82ab7014f2027fbb077b14f98 | [
"BSD-3-Clause"
] | 4 | 2020-03-30T22:12:57.000Z | 2021-04-03T06:20:52.000Z | test/test_Likelihood/test_LensLikelihood/test_td_mag_magnitude_likelihood.py | LBJ-Wade/hierarc_SGL | 1dc2be90f44f99e82ab7014f2027fbb077b14f98 | [
"BSD-3-Clause"
] | 5 | 2020-03-30T21:20:08.000Z | 2021-03-03T17:08:42.000Z | import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.Util import constants as const
from hierarc.Likelihood.LensLikelihood.td_mag_likelihood import TDMagLikelihood
from hierarc.Likelihood.LensLikelihood.td_mag_magnitude_likelihood import TDMagMagnitudeLikelihood
from lenstronomy.Util.data_util import magnitude2cps
if __name__ == '__main__':
pytest.main()
| 49.540541 | 155 | 0.694217 | import pytest
import numpy as np
import numpy.testing as npt
from lenstronomy.Util import constants as const
from hierarc.Likelihood.LensLikelihood.td_mag_likelihood import TDMagLikelihood
from hierarc.Likelihood.LensLikelihood.td_mag_magnitude_likelihood import TDMagMagnitudeLikelihood
from lenstronomy.Util.data_util import magnitude2cps
class TestMagnificationLikelihood(object):
    """Tests comparing linear-flux and astronomical-magnitude likelihoods."""
    def setup(self):
        pass
    def test_log_likelihood(self):
        """Check TDMagLikelihood and TDMagMagnitudeLikelihood agree at the peak."""
        ddt = 1000  # time-delay distance in units Mpc
        num = 4
        magnification_model = np.ones(num) * 4
        magnitude_intrinsic = 19
        magnitude_zero_point = 20
        amp_int = magnitude2cps(magnitude=magnitude_intrinsic, magnitude_zero_point=magnitude_zero_point)
        amp_measured = magnification_model * amp_int
        rel_sigma = 0.1
        cov_amp_measured = np.diag((amp_measured*rel_sigma)**2)
        magnification_model_magnitude = - 2.5 * np.log10(magnification_model)
        magnitude_measured = magnitude_intrinsic - 2.5 * np.log10(magnification_model)
        cov_magnitude_measured = np.diag(rel_sigma * 1.086 * np.ones(num))  # translating relative scatter in linear flux units to astronomical magnitudes
        time_delay_measured = np.ones(num - 1) * 10
        cov_td_measured = np.ones((num-1, num-1))
        fermat_unit_conversion = const.Mpc / const.c / const.day_s * const.arcsec ** 2
        fermat_diff = time_delay_measured / fermat_unit_conversion / ddt
        model_vector = np.append(fermat_diff, magnification_model)
        cov_model = np.diag((model_vector/10)**2)
        cov_model_magnitude = np.diag(np.append(fermat_diff * rel_sigma, rel_sigma * 1.086 * np.ones(num)))
        # un-normalized likelihood, comparing linear flux likelihood and magnification likelihood
        # linear flux likelihood: at the true parameters the residual term
        # vanishes, so only the normalisation remains.
        likelihood = TDMagLikelihood(time_delay_measured, cov_td_measured, amp_measured, cov_amp_measured,
                                     fermat_diff, magnification_model, cov_model, magnitude_zero_point=magnitude_zero_point)
        logl = likelihood.log_likelihood(ddt=ddt, mu_intrinsic=magnitude_intrinsic)
        model_vector, cov_tot = likelihood._model_cov(ddt, mu_intrinsic=magnitude_intrinsic)
        sign_det, lndet = np.linalg.slogdet(cov_tot)
        logl_norm = -1 / 2. * (likelihood.num_data * np.log(2 * np.pi) + lndet)
        npt.assert_almost_equal(logl, logl_norm, decimal=6)
        # astronomical magnitude likelihood: should roughly match the
        # linear-flux result (loose tolerance, decimal=1).
        likelihood_magnitude = TDMagMagnitudeLikelihood(time_delay_measured, cov_td_measured, magnitude_measured,
                                                        cov_magnitude_measured, fermat_diff, magnification_model_magnitude,
                                                        cov_model_magnitude)
        logl_magnitude = likelihood_magnitude.log_likelihood(ddt=ddt, mu_intrinsic=magnitude_intrinsic)
        npt.assert_almost_equal(logl_magnitude - logl, 0, decimal=1)
        # Degenerate case: all-zero covariances must yield -inf log-likelihood.
        num = 4
        magnification_model = np.ones(num)
        cov_td_measured = np.zeros((num - 1, num - 1))
        cov_magnitude_measured = np.zeros((num, num))
        magnitude_measured = magnitude_intrinsic - 2.5 * np.log10(magnification_model)
        cov_model = np.zeros((num + num - 1, num + num - 1))
        likelihood = TDMagMagnitudeLikelihood(time_delay_measured, cov_td_measured, magnitude_measured,
                                              cov_magnitude_measured, fermat_diff, magnification_model_magnitude,
                                              cov_model)
        logl = likelihood.log_likelihood(ddt=ddt, mu_intrinsic=1)
        assert logl == -np.inf
if __name__ == '__main__':
pytest.main()
| 3,179 | 21 | 77 |
9bd0e9754dc7f44d7a73a64df56bfd6fb5b82968 | 436 | py | Python | util.py | drgrhm/alg_conf | 7e10033366e50e0c58e6b014672fc995e69e8c46 | [
"Apache-2.0"
] | 1 | 2021-01-17T20:48:08.000Z | 2021-01-17T20:48:08.000Z | util.py | drgrhm/alg_config | 7e10033366e50e0c58e6b014672fc995e69e8c46 | [
"Apache-2.0"
] | null | null | null | util.py | drgrhm/alg_config | 7e10033366e50e0c58e6b014672fc995e69e8c46 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from math import log
day_in_seconds = 24. * 60. * 60.
def ecdf(vals, x, eps=1e-12):
    """
    Compute empirical cdf: P(X <= x) over the values vals.
    ``eps`` keeps the division defined when ``vals`` is empty.
    """
    return np.sum(vals <= x, dtype=np.float32) / (np.shape(vals)[0] + eps)
def format_runtime(runtime):
""" """
return '{}s = {}m = {}h = {}d'.format(runtime, runtime / 60, runtime / 3600, runtime / (3600 * 24)) | 21.8 | 103 | 0.582569 | import numpy as np
from math import log
day_in_seconds = 24. * 60. * 60.
def log2(x):
    """Base-2 logarithm of ``x``."""
    return log(x, 2)
def ecdf(vals, x, eps=1e-12):
    """Empirical CDF: fraction of entries in ``vals`` that are <= ``x``.

    ``eps`` keeps the division defined when ``vals`` is empty.
    """
    count = np.sum(vals <= x, dtype=np.float32)
    return count / (np.shape(vals)[0] + eps)
def format_runtime(runtime):
    """Render a duration given in seconds as seconds/minutes/hours/days."""
    minutes = runtime / 60
    hours = runtime / 3600
    days = runtime / (3600 * 24)
    return '{}s = {}m = {}h = {}d'.format(runtime, minutes, hours, days)
d431da3386890b3dbc9d0d5c42793aff8a2e51f9 | 20,280 | py | Python | main.py | leonhofmann/StatsToPopulation | a2ed60eaf89fe61dc31abfaf6bb30fd7cbfeb8bb | [
"Apache-2.0"
] | null | null | null | main.py | leonhofmann/StatsToPopulation | a2ed60eaf89fe61dc31abfaf6bb30fd7cbfeb8bb | [
"Apache-2.0"
] | null | null | null | main.py | leonhofmann/StatsToPopulation | a2ed60eaf89fe61dc31abfaf6bb30fd7cbfeb8bb | [
"Apache-2.0"
] | null | null | null | import collections
import pandas
import numpy
import itertools
if __name__ == "__main__":
main() | 68.282828 | 168 | 0.63856 | import collections
import pandas
import numpy
import itertools
def main():
    def TestingOfSheets():
        """Validate the structure of the currently loaded filter sheet.

        NOTE(review): relies on ``currentDataframe``, ``resultDataframe`` and
        ``strValueHeader`` from the enclosing ``main`` scope (defined outside
        this function).
        """
        # Check if at least two columns
        assert len(currentDataframe.columns) >= 2, "The sheet %s has not two but %s columns" % (
            currentDataframe, len(currentDataframe.columns))
        # Check if value is included
        assert strValueHeader in currentDataframe.columns.values, "The sheet %s does not contain column %s" % (
            currentDataframe, strValueHeader)
        # Assert that values are in last column
        assert currentDataframe.columns.get_loc(strValueHeader) == len(
            currentDataframe.columns) - 1, "The last columns of the sheets %s are not column %s" % (
            currentDataframe, strValueHeader)
        # Check if values sum up to 1 (disabled)
        #assert abs(currentDataframe[strValueHeader].sum() - 1) <= float(0.01) , "The column %s of sheet %s does not sum to 1" % (
        #    currentDataframe, strValueHeader)
        # Check that at least one new attribute column is included
        newColumn = False
        for colHeader in currentDataframe.columns.values:
            if not colHeader in resultDataframe:
                newColumn = True
        # print(currentDataframe.columns.values)
        assert newColumn, "No new column in the sheet was found"
        # So every sheet contains AT LEAST one column with new characteristics.
        # TODO: Ensure that every header within a table is unique.
        # TODO: Ensure that each sheet adds only one new characteristic.
        # TODO: Check that each entry in every sheet is unique — e.g. "EFH + Kohle" must not appear twice.
    def LadenDerSheets():
        """Load every worksheet of the filter workbook into ``dictOfSheets``.

        NOTE(review): relies on ``strFilename`` and ``dictOfSheets`` from the
        enclosing ``main`` scope.
        """
        # Load the spreadsheet and parse each worksheet into a DataFrame.
        xlsxFilterFile = pandas.ExcelFile(strFilename)
        listOfSheetNames = xlsxFilterFile.sheet_names
        for sheetName in listOfSheetNames:
            dfSheet = xlsxFilterFile.parse(sheetName)
            dictOfSheets[sheetName] = dfSheet
        # TODO: It should not be necessary to enter relative values in the Excel
        # sheets; the conversion can happen internally. The output should be
        # absolute values rather than relative ones.
    def InsertNewFilterWithoutIntersection():
        """Build the cross product of the previous result rows with the
        current sheet's rows, multiplying their probability values.

        NOTE(review): relies on ``previousResultDataframe``, ``currentDataframe``,
        ``setOfNewDataframeHeaders``, ``setOfPreviousDataframeHeaders``,
        ``setOfNewHeaders`` and ``strValueHeader`` from the enclosing scope.
        NOTE(review): ``DataFrame.set_value`` is deprecated/removed in modern
        pandas — confirm the pinned pandas version; ``takeable=True`` means
        positional (integer) indexing is used, which is why the all-zero
        ``index=numpy.zeros(...)`` labels are harmless here.
        """
        numberOfRows = len(previousResultDataframe.index) * len(currentDataframe.index)
        newResultDataframe = pandas.DataFrame(columns=setOfNewDataframeHeaders, index=numpy.zeros(numberOfRows))
        indexNewResultDataframe = 0
        for indexPreviousResultDataframe, rowPreviousResultDataframe in previousResultDataframe.iterrows():
            valuePrevious = rowPreviousResultDataframe[strValueHeader]
            for indexCurrentDataframe, rowCurrentDataframe in currentDataframe.iterrows():
                valueCurrent = rowCurrentDataframe[strValueHeader]
                # Copy entries from previous Dataframe
                for header in setOfPreviousDataframeHeaders:
                    # newResultDataframe[header][indexNewResultDataframe] = rowPreviousResultDataframe[header]
                    icol = newResultDataframe.columns.get_loc(header)
                    newResultDataframe.set_value(index=indexNewResultDataframe, col=icol, takeable=True, value=rowPreviousResultDataframe[header])
                # Copy entries from current Dataframe
                for header in setOfNewHeaders:
                    #newResultDataframe[header][indexNewResultDataframe] = rowCurrentDataframe[header]
                    icol = newResultDataframe.columns.get_loc(header)
                    newResultDataframe.set_value(index=indexNewResultDataframe, col=icol, takeable=True, value=rowCurrentDataframe[header])
                # Combined probability of the two filter rows.
                newValue = valuePrevious * valueCurrent
                # newResultDataframe[strValueHeader][indexNewResultDataframe] = newValue
                icol = newResultDataframe.columns.get_loc(strValueHeader)
                newResultDataframe.set_value(index=indexNewResultDataframe, col=icol, takeable=True, value=newValue)
                indexNewResultDataframe += 1
        return newResultDataframe
def InsertNewFilterWithIntersection():
    """Combine the current sheet with the previous result via shared columns.

    For every row of ``previousResultDataframe`` the matching subset of
    ``currentDataframe`` (same values in the intersecting columns) is used to
    distribute that row's value over the new attribute combinations.

    Only one or two intersecting columns are handled; with more, the `else`
    branch leaves ``sumValueCurrent`` at 0 and the division below raises
    ZeroDivisionError (unchanged from the original).

    Returns:
        The combined DataFrame.
    """
    listOfNewAttributes = list()
    for header in setOfNewHeaders:
        listOfNewAttributes.append(list(set(currentDataframe[header])))
    # Transposing the list so each inner list is one combination of new attributes.
    listOfNewAttributes = list(map(list, zip(*listOfNewAttributes)))
    # BUGFIX: list(...) because newer pandas rejects a raw set for `columns`.
    newAttributesDataframe = pandas.DataFrame(listOfNewAttributes, columns=list(setOfNewHeaders))
    numberOfVariationsOfIntersectingColumns = 1
    for header in setOfIntersectingHeaders:
        setOfItems = set(currentDataframe[header])
        numberOfVariationsOfIntersectingColumns += len(setOfItems)
    lengthOfCurrentDataframe = len(currentDataframe.index)
    numberOfNewVariants = len(listOfNewAttributes)
    numberOfRowsWhichAreAddedPerEntry = numberOfNewVariants # lengthOfCurrentDataframe / numberOfVariationsOfIntersectingColumns
    numberOfRows = len(previousResultDataframe.index) * numberOfRowsWhichAreAddedPerEntry
    newResultDataframe = pandas.DataFrame(columns=list(setOfNewDataframeHeaders), index=numpy.zeros(int(numberOfRows)))
    adjustingValuesRequired = CheckIfValuesNeedToBeAdjusted()
    if adjustingValuesRequired:
        pass #AdjustValuesOfCurrentDataframe()
    indexNewResultDataframe = 0
    index = 0
    for indexPreviousResultDataframe, rowPreviousResultDataframe in previousResultDataframe.iterrows():
        index += 1
        valuePrevious = rowPreviousResultDataframe[strValueHeader]
        for indexOfNewAttributeDataframe, rowOfNewAttributeDataframe in newAttributesDataframe.iterrows():
            indexNewResultDataframe = (index-1) * numberOfRowsWhichAreAddedPerEntry + indexOfNewAttributeDataframe
            # Copy entries from the previous result row.
            # BUGFIX: .iat[] replaces DataFrame.set_value(), which was removed
            # in pandas 1.0 (takeable=True maps to positional .iat access).
            for header in setOfPreviousDataframeHeaders:
                ivalue = rowPreviousResultDataframe[header]
                icol = newResultDataframe.columns.get_loc(header)
                newResultDataframe.iat[indexNewResultDataframe, icol] = ivalue
            # Insert the new attribute combination.
            for header in newAttributesDataframe:
                ivalue = newAttributesDataframe[header][indexOfNewAttributeDataframe]
                icol = newResultDataframe.columns.get_loc(header)
                newResultDataframe.iat[indexNewResultDataframe, icol] = ivalue
            # Now the matching values have to be read out of the current sheet.
            sumValueCurrent = 0
            numberOfColumns = len(setOfIntersectingHeaders)
            if numberOfColumns == 1:
                headerSearchedFor_1 = setOfIntersectingHeaders[0]
                valueSearchedFor_1 = rowPreviousResultDataframe[headerSearchedFor_1]
                sumValueCurrent = currentDataframe.loc[currentDataframe[headerSearchedFor_1] == valueSearchedFor_1, strValueHeader].sum()
                newHeader = next(iter(setOfNewHeaders))
                newAttribute = rowOfNewAttributeDataframe[newHeader]
                valueCurrent = currentDataframe.loc[(currentDataframe[headerSearchedFor_1] == valueSearchedFor_1) &
                                                    (currentDataframe[newHeader] == newAttribute),
                                                    strValueHeader].sum()
            elif numberOfColumns == 2:
                headerSearchedFor_1 = setOfIntersectingHeaders[0]
                valueSearchedFor_1 = rowPreviousResultDataframe[headerSearchedFor_1]
                headerSearchedFor_2 = setOfIntersectingHeaders[1]
                valueSearchedFor_2 = rowPreviousResultDataframe[headerSearchedFor_2]
                sumValueCurrent = currentDataframe.loc[(currentDataframe[headerSearchedFor_1] == valueSearchedFor_1) &
                                                       (currentDataframe[headerSearchedFor_2] == valueSearchedFor_2),
                                                       strValueHeader].sum()
                newHeader = next(iter(setOfNewHeaders))
                newAttribute = rowOfNewAttributeDataframe[newHeader]
                valueCurrent = currentDataframe.loc[(currentDataframe[headerSearchedFor_1] == valueSearchedFor_1) &
                                                    (currentDataframe[headerSearchedFor_2] == valueSearchedFor_2) &
                                                    (currentDataframe[newHeader] == newAttribute),
                                                    strValueHeader].sum()
            #elif numberOfColumns == 3:
            #    sumvalue = currentDataframe.loc[currentDataframe[0] == row[0], currentDataframe[1] == row[1], currentDataframe[3] == row[3], strValueHeader].sum()
            else:
                valueCurrent = 0
            if valueCurrent == 0:
                #print(valueCurrent)
                pass
            if sumValueCurrent == 0:
                print(sumValueCurrent)
            finalValueCurrent = valueCurrent/sumValueCurrent
            newValue = valuePrevious * finalValueCurrent
            icol = newResultDataframe.columns.get_loc(strValueHeader)
            newResultDataframe.iat[indexNewResultDataframe, icol] = newValue
            indexNewResultDataframe += 1
    return newResultDataframe
def CheckIfValuesNeedToBeAdjusted():
    """Check whether the current sheet's values have to be rescaled.

    Per combination of the intersecting attribute columns, compares the
    summed value in ``previousResultDataframe`` (2-column case) or the row's
    own value (3-column case) against the matching total in
    ``currentDataframe``. Returns True as soon as one combination deviates
    by more than 1e-7, False otherwise.

    Raises:
        ValueError: for more than three intersecting columns.
    """
    needsAdjustment = False
    # If more than one attribute defines the distribution, the values in the
    # current sheet have to match the accumulated result.
    intersectCount = len(setOfIntersectingHeaders)
    if intersectCount == 1:
        # A single shared column never needs rescaling.
        return False
    elif intersectCount == 2:
        col1 = setOfIntersectingHeaders[0]
        col2 = setOfIntersectingHeaders[1]
        for _, prevRow in previousResultDataframe.iterrows():
            want1 = prevRow[col1]
            want2 = prevRow[col2]
            prevMask = ((previousResultDataframe[col1] == want1) &
                        (previousResultDataframe[col2] == want2))
            target = previousResultDataframe.loc[prevMask, strValueHeader].sum()
            curMask = ((currentDataframe[col1] == want1) &
                       (currentDataframe[col2] == want2))
            actual = currentDataframe.loc[curMask, strValueHeader].sum()
            if abs(target - actual) > 0.0000001:
                return True
    elif intersectCount == 3:
        col1 = setOfIntersectingHeaders[0]
        col2 = setOfIntersectingHeaders[1]
        col3 = setOfIntersectingHeaders[2]
        for _, curRow in currentDataframe.iterrows():
            target = curRow[strValueHeader]
            want1 = curRow[col1]
            want2 = curRow[col2]
            want3 = curRow[col3]
            # NOTE(review): the original computed this sum and immediately
            # overwrote it below; kept because it can raise if the columns
            # are missing from previousResultDataframe. Presumably it was
            # meant to be the target value -- TODO confirm.
            actual = previousResultDataframe.loc[(previousResultDataframe[col1] == want1) &
                                                 (previousResultDataframe[col2] == want2) &
                                                 (previousResultDataframe[col3] == want3),
                                                 strValueHeader].sum()
            actual = currentDataframe.loc[(currentDataframe[col1] == want1) &
                                          (currentDataframe[col2] == want2) &
                                          (currentDataframe[col3] == want3),
                                          strValueHeader].sum()
            if abs(target - actual) > 0.0000001:
                return True
    elif intersectCount > 3:
        raise ValueError('There are more than 3 columns intersecting, this is not yet implemented')
    return needsAdjustment
def AdjustValuesOfCurrentDataframe():
    """Rescale ``currentDataframe`` so its group totals match the target.

    For each combination of the intersecting attribute columns a correction
    factor (target total / actual total) is computed and the matching rows of
    the module-level ``currentDataframe`` are multiplied in place.

    Currently not invoked: the call site in InsertNewFilterWithIntersection()
    is commented out.

    Raises:
        ValueError: for one intersecting column (no adjustment expected) or
        for more than three intersecting columns (not implemented).
    """
    # If more than one attribute defines the distribution, the values in the
    # current sheet have to be matched against the accumulated result.
    numberOfIntersectingAttributes = len(setOfIntersectingHeaders)
    if numberOfIntersectingAttributes == 1:
        raise ValueError('There should be no adjustment required...')
    elif numberOfIntersectingAttributes == 2:
        headerSearchedFor_1 = setOfIntersectingHeaders[0]
        headerSearchedFor_2 = setOfIntersectingHeaders[1]
        for indexOfPreviousDataframe, rowOfPreviousDataframe in previousResultDataframe.iterrows():
            valueSearchedFor_1 = rowOfPreviousDataframe[headerSearchedFor_1]
            valueSearchedFor_2 = rowOfPreviousDataframe[headerSearchedFor_2]
            previousMask = ((previousResultDataframe[headerSearchedFor_1] == valueSearchedFor_1) &
                            (previousResultDataframe[headerSearchedFor_2] == valueSearchedFor_2))
            targetValue = previousResultDataframe.loc[previousMask, strValueHeader].sum()
            currentMask = ((currentDataframe[headerSearchedFor_1] == valueSearchedFor_1) &
                           (currentDataframe[headerSearchedFor_2] == valueSearchedFor_2))
            actualValue = currentDataframe.loc[currentMask, strValueHeader].sum()
            correctionFactor = targetValue / actualValue
            # BUGFIX: the old code assigned into the row copies produced by
            # iterrows(), which never writes back into the DataFrame. A
            # boolean-mask .loc assignment mutates it for real. Repeated
            # (value1, value2) combinations are harmless: once adjusted, the
            # recomputed factor is 1.
            currentDataframe.loc[currentMask, strValueHeader] *= correctionFactor
    elif numberOfIntersectingAttributes == 3:
        headerSearchedFor_1 = setOfIntersectingHeaders[0]
        headerSearchedFor_2 = setOfIntersectingHeaders[1]
        headerSearchedFor_3 = setOfIntersectingHeaders[2]
        for indexOfCurrentDataframe, rowOfCurrentDataframe in currentDataframe.iterrows():
            # NOTE(review): the target is the row's own value, as in the
            # original; the original also computed (and discarded) the
            # matching total from previousResultDataframe, which may have
            # been the intended target -- TODO confirm before relying on
            # this branch.
            targetValue = rowOfCurrentDataframe[strValueHeader]
            valueSearchedFor_1 = rowOfCurrentDataframe[headerSearchedFor_1]
            valueSearchedFor_2 = rowOfCurrentDataframe[headerSearchedFor_2]
            valueSearchedFor_3 = rowOfCurrentDataframe[headerSearchedFor_3]
            currentMask = ((currentDataframe[headerSearchedFor_1] == valueSearchedFor_1) &
                           (currentDataframe[headerSearchedFor_2] == valueSearchedFor_2) &
                           (currentDataframe[headerSearchedFor_3] == valueSearchedFor_3))
            actualValue = currentDataframe.loc[currentMask, strValueHeader].sum()
            correctionFactor = targetValue / actualValue
            # BUGFIX: replaces the old inner loop that (a) evaluated a whole
            # boolean Series in an `if` (a runtime ValueError) and (b) only
            # mutated iterrows() row copies.
            currentDataframe.loc[currentMask, strValueHeader] *= correctionFactor
    elif numberOfIntersectingAttributes > 3:
        raise ValueError('There are more than 3 columns intersecting, this is not yet implemented')
# ==================================================================================================================
# Definition of files
# ==================================================================================================================
# Input workbook holding the statistical distributions, and the CSV output path.
strFilename = 'StatistischeVerteilungC.xlsx' # 'filter1.xlsx'
strFilenameCSV = 'output.csv'
# NOTE(review): `global` at module level is a no-op; strValueHeader is simply
# a module-level name read by the helper functions above.
global strValueHeader
strValueHeader = 'value'
# Collection/Set of spreadsheets, keyed by sheet name (filled by LadenDerSheets).
dictOfSheets = dict()
LadenDerSheets()
# Result Dataframe, seeded empty and replaced by the first sheet below.
resultDataframe = {}
# NOTE(review): TestingOfSheets is not defined in this part of the file --
# presumably a per-sheet sanity check defined earlier. TODO confirm.
for sheets in dictOfSheets:
    currentDataframe = dictOfSheets[sheets]
    TestingOfSheets()
# ==================================================================================================================
# Working with sheets
# ==================================================================================================================
# Fold the sheets one by one into resultDataframe: the first sheet seeds the
# result; every further sheet is merged either as a pure cross product (no
# shared attribute columns) or distributed over the shared columns.
for sheets in dictOfSheets:
    currentDataframe = dictOfSheets[sheets]
    if not len(resultDataframe):
        # resultDataframe is empty
        resultDataframe = currentDataframe
    else:
        previousResultDataframe = resultDataframe
        # Determining the dimensions: which columns this sheet shares with
        # the accumulated result and which ones are new ('value' excluded).
        setOfIntersectingHeaders = set(previousResultDataframe.columns).intersection(set(currentDataframe.columns))
        setOfIntersectingHeaders = list(filter(strValueHeader.__ne__, setOfIntersectingHeaders))
        setOfPreviousDataframeHeaders = list(filter(strValueHeader.__ne__, previousResultDataframe.columns))
        setOfNewHeaders = set(currentDataframe.columns) - set(previousResultDataframe.columns)
        setOfNewDataframeHeaders = set(filter(strValueHeader.__ne__, set(previousResultDataframe.columns))).union(setOfNewHeaders)
        setOfNewDataframeHeaders.add(strValueHeader)
        dictOfAttributesOfIntersectingHeaders = {}
        for header in setOfIntersectingHeaders:
            dictOfAttributesOfIntersectingHeaders[header]=set(currentDataframe[header])
        if len(setOfIntersectingHeaders) == 0:
            newResultDataframe = InsertNewFilterWithoutIntersection()
        else:
            newResultDataframe = InsertNewFilterWithIntersection()
        resultDataframe = newResultDataframe
resultDataframe.to_csv(strFilenameCSV) #, header=1)
print(resultDataframe)
print("The sum of all relative values is {sum}".format(sum = resultDataframe[strValueHeader].sum()))
if __name__ == "__main__":
    main()
1acb6e2efb2c12fe594d45893e469809f93f6d6a | 73 | py | Python | DownloadRunner/__main__.py | StanSwanborn/research_runner | d9573f6623c72f30d3525fb671394d3bb00d9911 | [
"MIT"
] | null | null | null | DownloadRunner/__main__.py | StanSwanborn/research_runner | d9573f6623c72f30d3525fb671394d3bb00d9911 | [
"MIT"
] | null | null | null | DownloadRunner/__main__.py | StanSwanborn/research_runner | d9573f6623c72f30d3525fb671394d3bb00d9911 | [
"MIT"
] | null | null | null | # This part will be responsible for downloading the zotero collected PDFs | 73 | 73 | 0.835616 | # This part will be responsible for downloading the zotero collected PDFs | 0 | 0 | 0 |
216808ea84e18ffe15a88d3fbbf74ccda2d7b6d4 | 7,358 | py | Python | app.py | muhammadwaqasmbd/bot-scrape-python | e6c0a411838ddb097a75f4810b557a245628a2d2 | [
"Apache-2.0"
] | null | null | null | app.py | muhammadwaqasmbd/bot-scrape-python | e6c0a411838ddb097a75f4810b557a245628a2d2 | [
"Apache-2.0"
] | null | null | null | app.py | muhammadwaqasmbd/bot-scrape-python | e6c0a411838ddb097a75f4810b557a245628a2d2 | [
"Apache-2.0"
] | null | null | null | from flask import Flask
import requests, json
from flask import render_template
from RepeatedTimer import RepeatedTimer
from flask_socketio import SocketIO, emit
from threading import Thread
from gevent import monkey as curious_george
import redis
import datetime as dt
from rejson import Client, Path
from SetEncoder import SetEncoder
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
r1 = redis.StrictRedis(host='localhost', port=6379, db=1, charset="utf-8", decode_responses=True)
r2 = redis.StrictRedis(host='localhost', port=6379, db=2, charset="utf-8", decode_responses=True)
r3 = redis.StrictRedis(host='localhost', port=6379, db=3, charset="utf-8", decode_responses=True)
r4 = redis.StrictRedis(host='localhost', port=6379, db=4, charset="utf-8", decode_responses=True)
async_mode = "gevent"
curious_george.patch_all(ssl=False)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socket_ = SocketIO(app,async_mode=async_mode)
thread = None
@app.route("/")
@socket_.on('start_process', namespace='/start')
if __name__ == "__main__":
socket_.run(app, debug=True) | 40.651934 | 161 | 0.638625 | from flask import Flask
import requests, json
from flask import render_template
from RepeatedTimer import RepeatedTimer
from flask_socketio import SocketIO, emit
from threading import Thread
from gevent import monkey as curious_george
import redis
import datetime as dt
from rejson import Client, Path
from SetEncoder import SetEncoder
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
r1 = redis.StrictRedis(host='localhost', port=6379, db=1, charset="utf-8", decode_responses=True)
r2 = redis.StrictRedis(host='localhost', port=6379, db=2, charset="utf-8", decode_responses=True)
r3 = redis.StrictRedis(host='localhost', port=6379, db=3, charset="utf-8", decode_responses=True)
r4 = redis.StrictRedis(host='localhost', port=6379, db=4, charset="utf-8", decode_responses=True)
async_mode = "gevent"
curious_george.patch_all(ssl=False)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socket_ = SocketIO(app,async_mode=async_mode)
thread = None
@app.route("/")
def scrape():
    """Render the index page (the client UI that talks to the '/start' namespace)."""
    return render_template('index.html')
@socket_.on('start_process', namespace='/start')
def start_process(message):
    """Start the periodic scrape jobs when a client emits 'start_process'.

    Creates one RepeatedTimer per source, each firing every 1 second.
    NOTE(review): the timers are bound to locals and dropped when this
    handler returns; presumably RepeatedTimer keeps itself alive internally
    -- confirm, otherwise the jobs could be garbage collected.
    """
    rt_huobi = RepeatedTimer(1, process_huobi_thread)
    rt_okex = RepeatedTimer(1, process_okex_thread)
    rt_binance = RepeatedTimer(1, process_binance_thread)
    rt_medium = RepeatedTimer(1, process_medium_thread)
def process_huobi_thread():
    """Run one Huobi scrape pass on a background daemon thread."""
    Thread(target=process_huobi_articles, daemon=True).start()
def process_okex_thread():
    """Run one OKEx scrape pass on a background daemon thread."""
    Thread(target=process_okex_articles, daemon=True).start()
def process_binance_thread():
    """Run one Binance scrape pass on a background daemon thread."""
    Thread(target=process_binance_articles, daemon=True).start()
def process_medium_thread():
    """Run one Medium scrape pass on a background daemon thread."""
    Thread(target=process_medium_articles, daemon=True).start()
def process_huobi_articles():
    """Scrape the latest Huobi announcements and broadcast them to clients.

    Fetches the announcement list, caches each item as JSON in Redis db 1
    keyed by its publish time, then emits the whole cache on the '/start'
    Socket.IO namespace.
    """
    start_time = dt.datetime.today()
    with app.test_request_context('/'):
        data = get_records("https://www.huobi.com/support/public/getList/v2?page=1&limit=20&oneLevelId=360000031902&twoLevelId=360000039481&language=en-us",True)
        articles_data = data["data"]["list"]
        for article in articles_data:
            json_data = json.dumps([{"title":article["title"]}],cls=SetEncoder)
            r1.set(article["showTime"], json_data)
        articles = get_all_records(r1)
        emit('get_huobi',{'articles': articles},broadcast=True,namespace='/start')
    # BUGFIX: the original printed (start - now).total_seconds(), which is
    # always negative; elapsed time is now - start.
    print("Total Time in seconds huobi :", (dt.datetime.today() - start_time).total_seconds())
def process_okex_articles():
    """Scrape the latest OKEx activities and broadcast them to clients.

    Fetches the activity list, caches each item as JSON in Redis db 2 keyed
    by its timestamp, then emits the whole cache on the '/start' namespace.
    """
    start_time = dt.datetime.today()
    with app.test_request_context('/'):
        data = get_records("https://www.okex.com/support/hc/api/internal/recent_activities?locale=en-us&page=1&per_page=20&locale=en-us", False)
        articles_data = data["activities"]
        for article in articles_data:
            json_data = json.dumps([{"title":article["title"]}, {"source":article["url"]}],cls=SetEncoder)
            r2.set(article["timestamp"], json_data)
        articles = get_all_records(r2)
        emit('get_okex',{'articles': articles},broadcast=True,namespace='/start')
    # BUGFIX: elapsed time is now - start, not start - now (was negative).
    print("Total Time in seconds okex :", (dt.datetime.today() - start_time).total_seconds())
def process_binance_articles():
    """Scrape the latest Binance articles and broadcast them to clients.

    Fetches the article catalog, caches each item as JSON in Redis db 3
    keyed by its article code, then emits the whole cache on '/start'.
    """
    start_time = dt.datetime.today()
    with app.test_request_context('/'):
        data = get_records("https://www.binance.com/bapi/composite/v1/public/cms/article/catalog/list/query?catalogId=49&pageNo=1&pageSize=20", False)
        articles_data = data["data"]["articles"]
        for article in articles_data:
            json_data = json.dumps([{"title":article["title"]}, {"catalogName":article["catalogName"]},{"publishDate":article["publishDate"]}],cls=SetEncoder)
            r3.set(article["code"], json_data)
        articles = get_all_records(r3)
        emit('get_binance',{'articles': articles},broadcast=True,namespace='/start')
    # BUGFIX: elapsed time is now - start, not start - now (was negative).
    print("Total Time in seconds binance :", (dt.datetime.today() - start_time).total_seconds())
def process_medium_articles():
    """Scrape the Coinbase Medium feed and broadcast the parsed posts.

    Parses title/author/image/likes/comments out of the page markup (the
    class names are Medium's obfuscated CSS classes), caches each post as
    JSON in Redis db 4 keyed by title, then emits the cache on '/start'.
    """
    start_time = dt.datetime.today()
    with app.test_request_context('/'):
        data = get_medium_records("https://medium.com/@coinbaseblog")
        for article in data:
            title = _first_content(article.find("a", class_="eh bw"))
            author = _first_content(article.find("em", class_="jo"))
            image = article.find("img", class_="v gx ir")
            image = image['src'] if image is not None else "None"
            likes = _first_content(article.find("button", class_="eh ei bz ca cb cc cd ce cf bl ej ek cg el em"))
            comments = _first_content(article.find("span", class_="lj li"))
            json_data = json.dumps([{"title":title}, {"author":author},{"image":image},{"likes":likes},{"comments":comments}],cls=SetEncoder)
            r4.set(title, json_data)
        articles = get_all_records(r4)
        emit('get_medium',{'articles': articles},broadcast=True,namespace='/start')
    # BUGFIX: elapsed time is now - start, not start - now (was negative).
    print("Total Time in seconds medium :", (dt.datetime.today() - start_time).total_seconds())

def _first_content(tag):
    """Return the first child of a BeautifulSoup tag, or the string "None".

    Mirrors the original inline pattern: missing elements produce the literal
    string "None" (kept for backward compatibility with the cached JSON).
    """
    return tag.contents[0] if tag is not None else "None"
def get_records(url,huobi):
    """Fetch a JSON document from `url` and return the decoded object.

    When `huobi` is truthy the request goes through urllib with a plain
    'Mozilla/5.0' User-Agent (the `headers` dict below is NOT used on that
    path); otherwise `requests` is used with the full browser-like headers.
    """
    url = f'{url}'  # no-op re-formatting of the URL; kept as-is
    headers = {
        'accept': '*/*',
        'accept-encoding': 'gzip,deflate,br',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'cookie': 'machine_cookie=5356356749135',
        'pragma': 'no-cache',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
    }
    if huobi:
        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        data = urlopen(req)
        # Decode with the charset advertised by the server, UTF-8 otherwise.
        json_data = json.loads(data.read().decode(data.info().get_param('charset') or 'utf-8'))
        return json_data
    else:
        # SECURITY(review): verify=False disables TLS certificate checking,
        # and the auth tuple is a literal placeholder -- both should be
        # revisited before production use.
        data = requests.get(url, headers=headers,auth=('[username]','[password]'), verify=False).json()
        return data
def get_medium_records(url):
    """Download a Medium page and return its article wrapper <div> tags."""
    markup = requests.get(url, allow_redirects=True).content
    parsed = BeautifulSoup(markup, 'html.parser')
    return parsed.find_all("div", class_="ap aq ar as at fz av v")
def get_all_records(r):
    """Snapshot every key of a Redis database.

    Parameters:
     - r: a Redis client (anything exposing keys/type/get/hgetall/zrange/
       lrange/smembers).

    Returns a list of single-entry dicts {key: value}; keys of an
    unrecognised type map to None (unchanged from the original).
    """
    articles = []
    for key in r.keys('*'):
        vals = None
        # BUGFIX(idiom): the original bound the result to `type`, shadowing
        # the builtin, and used independent `if`s where only one can match.
        key_type = r.type(key)
        if key_type == "string":
            vals = r.get(key)
        elif key_type == "hash":
            vals = r.hgetall(key)
        elif key_type == "zset":
            vals = r.zrange(key, 0, -1)
        elif key_type == "list":
            vals = r.lrange(key, 0, -1)
        elif key_type == "set":
            vals = r.smembers(key)
        articles.append({key: vals})
    return articles
if __name__ == "__main__":
socket_.run(app, debug=True) | 5,953 | 0 | 298 |
148c10f070ecea9d40ab90099bfceb42aca290c6 | 241 | py | Python | pysif/__init__.py | houkensjtu/pysif | f5abb3e308394c97d5ace0b90e27fd044a4f77d6 | [
"MIT"
] | 4 | 2019-07-08T18:04:27.000Z | 2021-09-29T14:39:20.000Z | pysif/__init__.py | houkensjtu/pysif | f5abb3e308394c97d5ace0b90e27fd044a4f77d6 | [
"MIT"
] | 1 | 2020-03-28T03:41:01.000Z | 2020-03-28T17:28:33.000Z | pysif/__init__.py | houkensjtu/pysif | f5abb3e308394c97d5ace0b90e27fd044a4f77d6 | [
"MIT"
] | 2 | 2019-07-08T18:04:28.000Z | 2020-02-17T20:19:40.000Z | '''Navier-Stokes and Euler equations solver implemented in Python
Triple periodic spectral method in space
4th order Runge–Kutta method in time
Author: Jia Cheng Hu (University of Waterloo, Canada)
'''
from pysif.spectral_method import *
| 24.1 | 65 | 0.792531 | '''Navier-Stokes and Euler equations solver implemented in Python
Triple periodic spectral method in space
4th order Runge–Kutta method in time
Author: Jia Cheng Hu (University of Waterloo, Canada)
'''
from pysif.spectral_method import *
| 0 | 0 | 0 |
a581f46abd7ca6c7bbab42c2036ec74a1cb77d69 | 543 | wsgi | Python | flaskapp.wsgi | Anonymous78/Registration-System | 967c8a1c28f5c344663c5b27e0087a70c6e9f193 | [
"MIT"
] | null | null | null | flaskapp.wsgi | Anonymous78/Registration-System | 967c8a1c28f5c344663c5b27e0087a70c6e9f193 | [
"MIT"
] | null | null | null | flaskapp.wsgi | Anonymous78/Registration-System | 967c8a1c28f5c344663c5b27e0087a70c6e9f193 | [
"MIT"
] | null | null | null | import os
import sys
import site
# Add virtualenv site packages
site.addsitedir('/home/attendance/.virtualenvs/attendance/lib/python3.5/site-packages')
sys.path.insert(0, '/var/www/html/class-list')
# Fire up the virtualenv before importing the application
activate_this = '/home/attendance/.virtualenvs/attendance/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from app import create_app
config_name = os.getenv('FLASK_CONFIG')
if not config_name:
config_name = 'development'
application = create_app(config_name)
| 24.681818 | 87 | 0.799263 | import os
import sys
import site
# Add virtualenv site packages
site.addsitedir('/home/attendance/.virtualenvs/attendance/lib/python3.5/site-packages')
sys.path.insert(0, '/var/www/html/class-list')
# Fired up virtualenv before include application
activate_this = '/home/attendance/.virtualenvs/attendance/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
from app import create_app
config_name = os.getenv('FLASK_CONFIG')
if not config_name:
config_name = 'development'
application = create_app(config_name)
| 0 | 0 | 0 |
823d0480ca28ef6abef98534fd95dead95023179 | 19,024 | py | Python | mediaMicroservices/gen-py/media_service/ReviewStorageService.py | rodrigo-bruno/DeathStarBench | c9ce09aaf7c1298a7c88efacd1010a71db0fa59d | [
"Apache-2.0"
] | 364 | 2019-04-28T01:45:37.000Z | 2022-03-31T15:08:03.000Z | mediaMicroservices/gen-py/media_service/ReviewStorageService.py | rodrigo-bruno/DeathStarBench | c9ce09aaf7c1298a7c88efacd1010a71db0fa59d | [
"Apache-2.0"
] | 111 | 2019-04-15T11:08:49.000Z | 2022-03-31T17:39:16.000Z | mediaMicroservices/gen-py/media_service/ReviewStorageService.py | rodrigo-bruno/DeathStarBench | c9ce09aaf7c1298a7c88efacd1010a71db0fa59d | [
"Apache-2.0"
] | 229 | 2019-05-14T08:55:57.000Z | 2022-03-31T03:14:55.000Z | #
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
# HELPER FUNCTIONS AND STRUCTURES
class StoreReview_args(object):
"""
Attributes:
- req_id
- review
- carrier
"""
all_structs.append(StoreReview_args)
StoreReview_args.thrift_spec = (
None, # 0
(1, TType.I64, 'req_id', None, None, ), # 1
(2, TType.STRUCT, 'review', [Review, None], None, ), # 2
(3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
class StoreReview_result(object):
"""
Attributes:
- se
"""
all_structs.append(StoreReview_result)
StoreReview_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'se', [ServiceException, None], None, ), # 1
)
class ReadReviews_args(object):
"""
Attributes:
- req_id
- review_ids
- carrier
"""
all_structs.append(ReadReviews_args)
ReadReviews_args.thrift_spec = (
None, # 0
(1, TType.I64, 'req_id', None, None, ), # 1
(2, TType.LIST, 'review_ids', (TType.I64, None, False), None, ), # 2
(3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3
)
class ReadReviews_result(object):
"""
Attributes:
- success
- se
"""
all_structs.append(ReadReviews_result)
ReadReviews_result.thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, [Review, None], False), None, ), # 0
(1, TType.STRUCT, 'se', [ServiceException, None], None, ), # 1
)
fix_spec(all_structs)
del all_structs
| 34.589091 | 134 | 0.576693 | #
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
    """Service interface for ReviewStorageService.

    Server handlers implement these methods with the real logic; the
    generated Client below implements them as blocking RPC calls.
    """
    def StoreReview(self, req_id, review, carrier):
        """Persist one review.

        Parameters:
         - req_id: i64 request id accompanying the call
         - review: the Review struct to store
         - carrier: map<string, string> context forwarded with the request
           (presumably a distributed-tracing carrier -- see callers)
        """
        pass
    def ReadReviews(self, req_id, review_ids, carrier):
        """Fetch the reviews with the given ids.

        Parameters:
         - req_id: i64 request id accompanying the call
         - review_ids: list<i64> of review ids to look up
         - carrier: map<string, string> context forwarded with the request

        Returns: list<Review>
        """
        pass
class Client(Iface):
    """Synchronous RPC client for ReviewStorageService.

    Wraps an input and an output Thrift protocol. Each call serializes a
    *_args struct as a CALL message, flushes the transport, then blocks
    reading the matching *_result struct.
    """
    def __init__(self, iprot, oprot=None):
        # With a single protocol the same object serves both directions.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        # Sequence id sent with every message (set once, never incremented
        # by this generated code).
        self._seqid = 0
    def StoreReview(self, req_id, review, carrier):
        """Store one review; raises ServiceException on a declared failure.

        Parameters:
         - req_id
         - review
         - carrier
        """
        self.send_StoreReview(req_id, review, carrier)
        self.recv_StoreReview()
    def send_StoreReview(self, req_id, review, carrier):
        # Serialize the CALL message: header, args struct, end-of-message.
        self._oprot.writeMessageBegin('StoreReview', TMessageType.CALL, self._seqid)
        args = StoreReview_args()
        args.req_id = req_id
        args.review = review
        args.carrier = carrier
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_StoreReview(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            # Peer reported an application-level failure.
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = StoreReview_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.se is not None:
            # Declared service exception, re-raised locally.
            raise result.se
        return
    def ReadReviews(self, req_id, review_ids, carrier):
        """Fetch the reviews with the given ids; returns list<Review>.

        Parameters:
         - req_id
         - review_ids
         - carrier
        """
        self.send_ReadReviews(req_id, review_ids, carrier)
        return self.recv_ReadReviews()
    def send_ReadReviews(self, req_id, review_ids, carrier):
        self._oprot.writeMessageBegin('ReadReviews', TMessageType.CALL, self._seqid)
        args = ReadReviews_args()
        args.req_id = req_id
        args.review_ids = review_ids
        args.carrier = carrier
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_ReadReviews(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = ReadReviews_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.se is not None:
            raise result.se
        # A non-void call with neither a result nor a declared exception is
        # a protocol error.
        raise TApplicationException(TApplicationException.MISSING_RESULT, "ReadReviews failed: unknown result")
class Processor(Iface, TProcessor):
    """Server-side dispatcher for ReviewStorageService.

    Reads one message from the input protocol, looks the method name up in
    ``_processMap`` and invokes the matching process_* method, which calls
    the user-supplied handler and writes the reply to the output protocol.
    """
    def __init__(self, handler):
        # `handler` implements Iface with the actual business logic.
        self._handler = handler
        self._processMap = {}
        self._processMap["StoreReview"] = Processor.process_StoreReview
        self._processMap["ReadReviews"] = Processor.process_ReadReviews
    def process(self, iprot, oprot):
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown method: drain the args struct off the wire and answer
            # with an UNKNOWN_METHOD application exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True
    def process_StoreReview(self, seqid, iprot, oprot):
        args = StoreReview_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = StoreReview_result()
        try:
            self._handler.StoreReview(args.req_id, args.review, args.carrier)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport problems must propagate to the server loop.
            raise
        except ServiceException as se:
            # Declared exception: returned inside the result struct.
            msg_type = TMessageType.REPLY
            result.se = se
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            # Any undeclared error is reported to the caller as INTERNAL_ERROR.
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("StoreReview", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
    def process_ReadReviews(self, seqid, iprot, oprot):
        args = ReadReviews_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = ReadReviews_result()
        try:
            result.success = self._handler.ReadReviews(args.req_id, args.review_ids, args.carrier)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except ServiceException as se:
            msg_type = TMessageType.REPLY
            result.se = se
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("ReadReviews", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class StoreReview_args(object):
    """Wire representation of the StoreReview call arguments.

    Attributes:
     - req_id: i64 request id
     - review: Review struct to persist
     - carrier: map<string, string> context forwarded with the request
    """
    def __init__(self, req_id=None, review=None, carrier=None,):
        self.req_id = req_id
        self.review = review
        self.carrier = carrier
    def read(self, iprot):
        # Fast path: the C-accelerated decoder consumes the whole struct.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        # Slow path: walk fields until STOP, dispatching on field id and
        # skipping unexpected types (forward compatibility).
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.req_id = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.review = Review()
                    self.review.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.MAP:
                    self.carrier = {}
                    (_ktype178, _vtype179, _size177) = iprot.readMapBegin()
                    for _i181 in range(_size177):
                        _key182 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val183 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.carrier[_key182] = _val183
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Fast path via the accelerated encoder when available.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        # Slow path: fields that are None are simply omitted from the wire.
        oprot.writeStructBegin('StoreReview_args')
        if self.req_id is not None:
            oprot.writeFieldBegin('req_id', TType.I64, 1)
            oprot.writeI64(self.req_id)
            oprot.writeFieldEnd()
        if self.review is not None:
            oprot.writeFieldBegin('review', TType.STRUCT, 2)
            self.review.write(oprot)
            oprot.writeFieldEnd()
        if self.carrier is not None:
            oprot.writeFieldBegin('carrier', TType.MAP, 3)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.carrier))
            for kiter184, viter185 in self.carrier.items():
                oprot.writeString(kiter184.encode('utf-8') if sys.version_info[0] == 2 else kiter184)
                oprot.writeString(viter185.encode('utf-8') if sys.version_info[0] == 2 else viter185)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # No required fields declared, so nothing to check.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Register the struct for import-time spec fix-up and attach its field spec:
# one entry per field id -> (id, wire type, name, nested type info, default).
all_structs.append(StoreReview_args)
StoreReview_args.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'req_id', None, None, ),  # 1
    (2, TType.STRUCT, 'review', [Review, None], None, ),  # 2
    (3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 3
)
class StoreReview_result(object):
    """
    Thrift-generated result struct for the StoreReview RPC.

    Attributes:
     - se: ServiceException raised by the handler, if any (no success payload
       — StoreReview is presumably a void method; confirm against the IDL)
    """
    def __init__(self, se=None,):
        self.se = se
    def read(self, iprot):
        # Deserialize from *iprot*; fast-path decoder when available,
        # otherwise field-by-field with unknown fields skipped.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.se = ServiceException()
                    self.se.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; a None exception field is omitted.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('StoreReview_result')
        if self.se is not None:
            oprot.writeFieldBegin('se', TType.STRUCT, 1)
            self.se.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Generated structs perform no validation by default.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Register the result struct and attach its field spec (field id 1 = se).
all_structs.append(StoreReview_result)
StoreReview_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'se', [ServiceException, None], None, ),  # 1
)
class ReadReviews_args(object):
    """
    Thrift-generated argument struct for the ReadReviews RPC.

    Attributes:
     - req_id: 64-bit request identifier (i64)
     - review_ids: list<i64> of review ids to fetch
     - carrier: map<string, string> of context/tracing headers
    """
    def __init__(self, req_id=None, review_ids=None, carrier=None,):
        self.req_id = req_id
        self.review_ids = review_ids
        self.carrier = carrier
    def read(self, iprot):
        # Deserialize from *iprot*; fast-path decoder when available,
        # otherwise field-by-field with unknown fields skipped.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.req_id = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.review_ids = []
                    (_etype189, _size186) = iprot.readListBegin()
                    for _i190 in range(_size186):
                        _elem191 = iprot.readI64()
                        self.review_ids.append(_elem191)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.MAP:
                    self.carrier = {}
                    (_ktype193, _vtype194, _size192) = iprot.readMapBegin()
                    for _i196 in range(_size192):
                        # On Python 2 strings arrive as bytes and must be decoded.
                        _key197 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val198 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.carrier[_key197] = _val198
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ReadReviews_args')
        if self.req_id is not None:
            oprot.writeFieldBegin('req_id', TType.I64, 1)
            oprot.writeI64(self.req_id)
            oprot.writeFieldEnd()
        if self.review_ids is not None:
            oprot.writeFieldBegin('review_ids', TType.LIST, 2)
            oprot.writeListBegin(TType.I64, len(self.review_ids))
            for iter199 in self.review_ids:
                oprot.writeI64(iter199)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.carrier is not None:
            oprot.writeFieldBegin('carrier', TType.MAP, 3)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.carrier))
            for kiter200, viter201 in self.carrier.items():
                oprot.writeString(kiter200.encode('utf-8') if sys.version_info[0] == 2 else kiter200)
                oprot.writeString(viter201.encode('utf-8') if sys.version_info[0] == 2 else viter201)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Generated structs perform no validation by default.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Register the struct and attach its field spec:
# (id, wire type, name, nested type info, default), indexed by field id.
all_structs.append(ReadReviews_args)
ReadReviews_args.thrift_spec = (
    None,  # 0
    (1, TType.I64, 'req_id', None, None, ),  # 1
    (2, TType.LIST, 'review_ids', (TType.I64, None, False), None, ),  # 2
    (3, TType.MAP, 'carrier', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),  # 3
)
class ReadReviews_result(object):
    """
    Thrift-generated result struct for the ReadReviews RPC.

    Attributes:
     - success: list<Review> returned by the handler on success (field id 0)
     - se: ServiceException raised by the handler, if any (field id 1)
    """
    def __init__(self, success=None, se=None,):
        self.success = success
        self.se = se
    def read(self, iprot):
        # Deserialize from *iprot*; fast-path decoder when available,
        # otherwise field-by-field with unknown fields skipped.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype205, _size202) = iprot.readListBegin()
                    for _i206 in range(_size202):
                        _elem207 = Review()
                        _elem207.read(iprot)
                        self.success.append(_elem207)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.se = ServiceException()
                    self.se.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
    def write(self, oprot):
        # Serialize to *oprot*; None fields are omitted from the wire.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('ReadReviews_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter208 in self.success:
                iter208.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.se is not None:
            oprot.writeFieldBegin('se', TType.STRUCT, 1)
            self.se.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
    def validate(self):
        # Generated structs perform no validation by default.
        return
    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not (self == other)
# Register the last struct, attach its field spec, then resolve every spec
# recorded in all_structs; the list is only needed at import time and is
# deleted afterwards.
all_structs.append(ReadReviews_result)
ReadReviews_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT, [Review, None], False), None, ),  # 0
    (1, TType.STRUCT, 'se', [ServiceException, None], None, ),  # 1
)
fix_spec(all_structs)
del all_structs
| 15,166 | 984 | 932 |
27b627105390986b0814b395a31ddd3fd665191f | 16,245 | py | Python | Project 3 - Neural Network and Tensor Flow/main.py | JruvikaBhimani/CSE-547---Introduction-to-Machine-Learning | 3a8196cf9372cfbecbb043fc7ea94bcce665b097 | [
"Apache-2.0"
] | null | null | null | Project 3 - Neural Network and Tensor Flow/main.py | JruvikaBhimani/CSE-547---Introduction-to-Machine-Learning | 3a8196cf9372cfbecbb043fc7ea94bcce665b097 | [
"Apache-2.0"
] | null | null | null | Project 3 - Neural Network and Tensor Flow/main.py | JruvikaBhimani/CSE-547---Introduction-to-Machine-Learning | 3a8196cf9372cfbecbb043fc7ea94bcce665b097 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import cPickle
import numpy as np
import math
import random
import os as os
from scipy import misc
from skimage import color
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
#import matplotlib.pyplot as plot1
#def graph_plot(x, y, xlab, ylab):
#plot1.figure(num = 1, figsize =(15,10), dpi = 72)
#plot1.subplot(321)
#plot1.scatter(CS_Score,Res_OH)
# plot1.plot(x, y, 'g^')
# plot1.xlabel(xlab)
# plot1.ylabel(ylab)
# plot1.show()
if __name__ == "__main__":
print ("UBitName = jruvikam")
print ("personNumber = 50207613")
pickleFile = open('mnist.pkl','rb')
train_set_MNIST, valid_set_MNIST, test_set_MNIST = cPickle.load(pickleFile)
train_x_MNIST = train_set_MNIST[0]
train_target_MNIST = train_set_MNIST[1]
train_t_MNIST = oneHotEncoding(train_target_MNIST)
valid_x_MNIST = valid_set_MNIST[0]
valid_target_MNIST = valid_set_MNIST[1]
test_x_MNIST = test_set_MNIST[0]
test_target_MNIST = test_set_MNIST[1]
b = 1
# TUNE HYPERPARAMETER ETA
w_logRegress_MNIST = logRegression(train_x_MNIST, train_t_MNIST, b)
yOneHot_validate_MNIST, y_value_validate_MNIST, accuracy_validate_MNIST = logRegressionValidate(valid_x_MNIST, valid_target_MNIST, w_logRegress_MNIST, b)
yOneHot_test_MNIST, y_value_test_MNIST = logRegressionTest(test_x_MNIST, w_logRegress_MNIST, b)
print ("accuracy MNIST validation:")
print (accuracy_validate_MNIST)
path = "USPSdata/Numerals/"
count = 0
validate_x_USPS = np.zeros((1,784))
target_set_USPS = np.zeros((1,1))
print (np.shape(validate_x_USPS))
for i in range(10):
new_path = path
new_path = new_path + str(i) + "/"
for name in os.listdir(new_path):
final_path = new_path
final_path = final_path + name
# print count
#print final_path
if ".list" not in name:
if (name != "Thumbs.db"):
# if count < 5:
img = misc.imread(final_path)
gray_img = color.rgb2gray(img)
resized_img = misc.imresize(gray_img,(28,28))
# print "resized img:"
# print len(resized_img)
# print np.shape(resized_img)
flat_img = np.ravel(resized_img)
validate_x_USPS = np.insert(validate_x_USPS,len(validate_x_USPS),flat_img,axis=0)
target_set_USPS = np.insert(target_set_USPS,len(target_set_USPS),int(i),axis=0)
#print "resized img:"
#print len(flat_img)
#print np.shape(flat_img)
count = count + 1
if((count%1000) == 0):
print (count)
# else:
# break
print ("count:")
print (count)
validate_x_USPS = np.delete(validate_x_USPS,0,axis=0)
target_set_USPS = np.delete(target_set_USPS,0,axis=0)
yOneHot_validate_USPS, y_value_validate_USPS, accuracy_validate_USPS = logRegressionValidate(validate_x_USPS, target_set_USPS, w_logRegress_MNIST, b)
path = "USPSdata/Test/"
count = 0
test_x_USPS = np.zeros((1,784))
for i in range(10):
new_path = path
for name in os.listdir(new_path):
final_path = new_path
final_path = final_path + name
# print count
#print final_path
if ".list" not in name:
if (name != "Thumbs.db"):
# if count < 5:
img = misc.imread(final_path)
gray_img = color.rgb2gray(img)
resized_img = misc.imresize(gray_img,(28,28))
# print "resized img:"
# print len(resized_img)
# print np.shape(resized_img)
flat_img = np.ravel(resized_img)
test_x_USPS = np.insert(test_x_USPS,len(validate_x_USPS),flat_img,axis=0)
#print "resized img:"
#print len(flat_img)
#print np.shape(flat_img)
count = count + 1
if((count%1000) == 0):
print (count)
# else:
# break
print ("count:")
print (count)
test_x_USPS = np.delete(test_x_USPS,0,axis=0)
yOneHot_test_USPS, y_value_test_USPS = logRegressionTest(test_x_USPS, w_logRegress_MNIST, b)
cnn()
print ("accuracy USPS validation:")
print (accuracy_validate_USPS)
print ("accuracy MNIST validation:")
print (accuracy_validate_MNIST)
# w1_nn_MNIST, w2_nn_MNIST = neuralnetwork(train_x_MNIST, train_t_MNIST, b)
# yOneHot_nn_MNIST, y_value_nn_MNIST, accuracy_nn_MNIST = neuralnetwork(valid_x_MNIST, valid_target_MNIST, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_test_nn_MNIST, y_value_test_nn_MNIST = neuralnetwork(test_x_MNIST, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_nn_USPS, y_value_nn_USPS, accuracy_nn_USPS = neuralnetwork(validate_x_USPS, target_set_USPS, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_test_nn_USPS, y_value_test_nn_USPS = neuralnetwork(test_x_USPS, w1_nn_MNIST, w2_nn_MNIST, b)
print ("PROGRAM COMPLETED")
| 29.323105 | 157 | 0.566205 | from __future__ import division
import cPickle
import numpy as np
import math
import random
import os as os
from scipy import misc
from skimage import color
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
#import matplotlib.pyplot as plot1
#def graph_plot(x, y, xlab, ylab):
#plot1.figure(num = 1, figsize =(15,10), dpi = 72)
#plot1.subplot(321)
#plot1.scatter(CS_Score,Res_OH)
# plot1.plot(x, y, 'g^')
# plot1.xlabel(xlab)
# plot1.ylabel(ylab)
# plot1.show()
def oneHotEncoding(target):
    """Return an (N, 10) one-hot matrix for the integer labels in *target*.

    Row i has a 1 in column target[i] and zeros elsewhere.
    """
    print ("oneHotEncoding")
    print (np.shape(target))
    encoded = np.zeros((len(target), 10))
    print (np.shape(encoded))
    for row, label in enumerate(target):
        encoded[row][label] = 1
    return encoded
def gradientErrorFunction(x, t, y):
    """Gradient of the softmax cross-entropy loss wrt the weights for one
    sample: (y - t)^T · x, returned as a (classes, features) matrix."""
    print ("gradientErrorFunction:")
    print (np.shape(y))
    print (np.shape(t))
    print (np.shape(x))
    residual = np.matrix(y - t)
    features = np.matrix(x)
    deltaE = np.dot(residual.transpose(), features)
    print (np.shape(deltaE))
    return deltaE
def SGD_w(deltaE, eta, w):
    """One vanilla gradient-descent step: return w - eta * deltaE.

    *w* itself is not mutated; the stepped weights are returned.
    """
    print ("SGD_w:")
    print (np.shape(deltaE))
    print (np.shape(w))
    stepped = w - (eta * deltaE)
    print (np.shape(stepped))
    print (len(stepped))
    print (len(stepped[0]))
    return stepped
def activationfn(x, w, b):
    """Return the pre-softmax activations a = w · x^T for one sample.

    NOTE(review): the bias *b* is accepted but never used in the product —
    confirm whether the omission is intentional.
    """
    print ("activation:")
    print (np.shape(w))
    print (np.shape(x))
    sample = np.matrix(x)
    logits = np.dot(w, sample.transpose())
    print (np.shape(logits))
    return logits
def calculate_y(a):
    """Numerically stable softmax of the activation vector *a*.

    The maximum activation is subtracted before exponentiation so that
    exp() cannot overflow; the result sums to 1.
    """
    print ("calculate_y:")
    print (len(a))
    print (np.shape(a))
    peak = max(a)
    exps = [math.exp(v - peak) for v in a]
    denom = sum(exps)
    y = np.zeros(len(a))
    for idx, e in enumerate(exps):
        y[idx] = e / denom
    print (sum(y))
    return y
def hiddenLayerActivation(x, w, b):
    """Affine layer: z[i] = sum_j w[i][j] * x[j] + b for every unit i.

    The explicit accumulation loop is kept (rather than np.dot) so the
    floating-point summation order matches the original exactly.
    """
    print ("hiddenLayerActivation:")
    print (np.shape(w))
    print (np.shape(x))
    print (len(w))
    print (np.shape(w[0]))
    n_units, n_inputs = np.shape(w)
    z = np.zeros(n_units)
    for unit in range(n_units):
        for j in range(n_inputs):
            z[unit] = z[unit] + (w[unit][j] * x[j])
        z[unit] = z[unit] + b
    return z
def hFunction(z):
    """Elementwise logistic sigmoid h(z) and its derivative h'(z) = h(1-h).

    Returns the pair (hz, hdashA) as float arrays the same length as *z*.
    """
    size = len(z)
    hz = np.zeros(size)
    hdashA = np.zeros(size)
    for i, v in enumerate(z):
        hz[i] = 1/(1 + math.exp(-v))
        hdashA[i] = hz[i] * (1 - hz[i])
    return hz,hdashA
def gradientErrorFunctionNNLayer2(y, t, z):
    """Output-layer backprop: deltas dk = (y - t) and the weight gradient
    dk^T · z. Returns (dk as a column matrix, gradient matrix)."""
    print ("gradientErrorFunctionNNLayer2:")
    delta = np.matrix(y - t)
    print (np.shape(delta))
    hidden = np.matrix(z)
    print (np.shape(hidden))
    grad = np.dot(delta.transpose(), hidden)
    print (np.shape(grad))
    print (len(grad))
    print (len(grad[0]))
    return delta.transpose(), grad
def gradientErrorFunctionNNLayer1(hdashA, w, dk, x):
    """Hidden-layer backprop: dj[j] = h'(a_j) * sum_k w[k][j] * dk[k], then
    the input-weight gradient dj^T · x. Returns (dj column, gradient)."""
    print ("gradientErrorFunctionNNLayer1:")
    print (np.shape(hdashA))
    print (np.shape(w))
    print (len(w))
    print (len(w[0]))
    print (np.shape(dk))
    dj = np.zeros(len(hdashA))
    for j in range(len(dj)):
        # Weighted sum of downstream deltas feeding back into unit j.
        back = sum(w[k][j] * dk[k] for k in range(len(w)))
        dj[j] = hdashA[j] * back
    print (np.shape(dj))
    xmat = np.matrix(x)
    print (np.shape(xmat))
    djmat = np.matrix(dj)
    print (np.shape(djmat))
    error_dj = np.dot(djmat.transpose(), xmat)
    print (np.shape(error_dj))
    print (len(error_dj))
    print (len(error_dj[0]))
    return djmat.transpose(), error_dj
def softmax(y):
    """Return the index of the largest entry of *y* (argmax; first wins on
    ties). Despite the name this does not recompute softmax probabilities."""
    print ("SoftMax:")
    print (np.shape(y))
    best_val = -1.0
    best_idx = -1
    for idx in range(len(y)):
        if y[idx] > best_val:
            best_val = y[idx]
            best_idx = idx
    return best_idx
def logRegression(x, t, b):
    """Train multinomial logistic regression by per-sample SGD.

    x: training samples (rows are feature vectors); t: one-hot targets;
    b: bias passed through to activationfn. Runs 5 epochs with a fixed
    learning rate of 0.01 and returns the (10, n_features) weight matrix.
    """
    print ("logregression:")
    print (len(x[0]))
    w = np.ones((10,len(x[0])))
    eta = 0.01
    count = 0
    for j in range(5):
        # One full pass over the training set per epoch, one SGD step per sample.
        for i in range(len(x)):
            a = activationfn(x[i][:], w, b)
            y = calculate_y(a)
            deltaE = gradientErrorFunction(x[i][:],t[i][:],y)
            w = SGD_w(deltaE, eta, w)
            count = count + 1
    print ("count:")
    print (count)
    return w
def logRegressionValidate(x, t, w, b):
    """Score trained weights *w* on labelled data (x, t).

    Returns (y, y_value, accuracy) where y is the class-probability vector
    of the LAST sample only, y_value holds the predicted label per sample,
    and accuracy is the percentage of correct predictions.
    """
    print ("logRegressionValidate:")
    found = 0.0
    y_value = np.zeros(len(x))
    for i in range(len(x)):
        a = activationfn(x[i][:], w, b)
        y = calculate_y(a)
        value = softmax(y)
        # print t[i]
        y_value[i] = value
        if(value==t[i]):
            found = found + 1.0
    print ("found:")
    print (found)
    accuracy = (found/len(t))*100
    print ("accuracy:")
    print (accuracy)
    return y, y_value, accuracy
def logRegressionTest(x, w, b):
    """Predict labels for unlabelled data *x* with trained weights *w*.

    Returns (y, y_value): y is the probability vector of the LAST sample
    only; y_value holds the predicted label per sample. NOTE(review):
    raises NameError if *x* is empty, since y is only bound inside the loop.
    """
    print ("logRegressionTest:")
    y_value = np.zeros(len(x))
    for i in range(len(x)):
        a = activationfn(x[i][:], w, b)
        y = calculate_y(a)
        value = softmax(y)
        # print t[i]
        y_value[i] = value
    return y, y_value
def neuralnetwork(x, t, b):
    """Train a single-hidden-layer network (784 -> 100 -> 10) by SGD.

    Weights are initialized to small random values in [0, 0.0099]; one
    forward/backward pass is made per training sample (single epoch).
    Returns the trained weight matrices (w1, w2).
    """
    print ("neuralnetwork:")
    eta = 0.01
    # x = np.insert(input_x,0,0,axis =1)
    print (np.shape(x))
    w1 = np.ones((100,len(x[0])))
    print (np.shape(w1))
    print (len(w1[0]))
    # Random small initialization of the input-to-hidden weights.
    for i in range(len(w1)):
        for j in range(len(w1[0])):
            w1[i][j] = random.randrange(0,100,1)
            w1[i][j] = w1[i][j] / 10000
    w2 = np.ones((10,100))
    print (np.shape(w2))
    # Random small initialization of the hidden-to-output weights.
    for i in range(len(w2)):
        for j in range(len(w2[0])):
            w2[i][j] = random.randrange(0,100,1)
            w2[i][j] = w2[i][j] / 10000
    for i in range(len(x)):
        # Forward pass: hidden activations, sigmoid, output activations, softmax.
        z = hiddenLayerActivation(x[i][:], w1, b)
        #z = np.insert(z,0,0)
        hz, hdashA = hFunction(z)
        a = hiddenLayerActivation(hz, w2, b)
        y = calculate_y(a)
        # Backward pass: output-layer then hidden-layer gradients, SGD step.
        dk, error_dk = gradientErrorFunctionNNLayer2(y, t[i], z)
        dj, error_dj = gradientErrorFunctionNNLayer1(hdashA, w2, dk, x[i][:])
        w1 = SGD_w(error_dj, eta, w1)
        w2 = SGD_w(error_dk, eta, w2)
    return w1, w2
def neuralnetworkValidate(input_x, t, w1, w2, b):
    """Score the trained network (w1, w2) on labelled data (input_x, t).

    Returns (y, y_value, accuracy): y is the probability vector of the
    LAST sample only, y_value the predicted label per sample, accuracy the
    percentage of correct predictions.
    """
    print ("neuralnetworkValidate:")
    # x = np.insert(input_x,0,0,axis =1)
    x = np.matrix(input_x)
    print (np.shape(x))
    found = 0.0
    y_value = np.zeros(len(x))
    for i in range(len(x)):
        # Forward pass only; no weight updates during validation.
        z = hiddenLayerActivation(x[i][:], w1, b)
        #z = np.insert(z,0,0)
        hz, hdashA = hFunction(z)
        a = hiddenLayerActivation(hz, w2, b)
        y = calculate_y(a)
        value = softmax(y)
        y_value[i] = value
        if(value==t[i]):
            found = found + 1.0
    accuracy = (found/len(t))*100
    print ("accuracy NN:")
    print (accuracy)
    return y, y_value, accuracy
def neuralnetworkTest(input_x, w1, w2, b):
    """Predict labels for unlabelled data with the trained network (w1, w2).

    Returns (y, y_value): y is the probability vector of the LAST sample
    only; y_value holds the predicted label per sample.
    """
    print ("neuralnetworkTest:")
    # x = np.insert(input_x,0,0,axis =1)
    x = np.matrix(input_x)
    print (np.shape(x))
    y_value = np.zeros(len(x))
    for i in range(len(x)):
        # Forward pass only.
        z = hiddenLayerActivation(x[i][:], w1, b)
        #z = np.insert(z,0,0)
        hz, hdashA = hFunction(z)
        a = hiddenLayerActivation(hz, w2, b)
        y = calculate_y(a)
        value = softmax(y)
        y_value[i] = value
    return y, y_value
def weight_variable(shape):
    """Create a TF weight Variable of *shape*, truncated-normal init (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Create a TF bias Variable of *shape*, initialized to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    """2-D convolution of *x* with kernel *W*, stride 1, SAME padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 and SAME padding (halves H and W)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def cnn():
    """Train and evaluate two MNIST models with TensorFlow 1.x.

    First a plain softmax regression (1000 SGD steps), then a LeNet-style
    CNN (two conv+pool layers, a 1024-unit FC layer with dropout, and a
    10-way output) trained for 20000 Adam steps. Prints test accuracy for
    both. Downloads MNIST into 'MNIST_data' on first use.
    """
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    sess = tf.InteractiveSession()
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    W = tf.Variable(tf.zeros([784,10]))
    b = tf.Variable(tf.zeros([10]))
    sess.run(tf.global_variables_initializer())
    # Baseline: single-layer softmax regression.
    y = tf.matmul(x,W) + b
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    for i in range(1000):
        batch = mnist.train.next_batch(100)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
    # CNN: conv1 (5x5, 32 filters) + 2x2 max-pool.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1,28,28,1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # conv2 (5x5, 64 filters) + 2x2 max-pool -> 7x7x64 feature maps.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Fully-connected layer with dropout.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # 10-way readout layer.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i%1000 == 0:
            # Evaluate training accuracy without dropout (keep_prob = 1.0).
            train_accuracy = accuracy.eval(feed_dict={
                x:batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    print("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == "__main__":
print ("UBitName = jruvikam")
print ("personNumber = 50207613")
pickleFile = open('mnist.pkl','rb')
train_set_MNIST, valid_set_MNIST, test_set_MNIST = cPickle.load(pickleFile)
train_x_MNIST = train_set_MNIST[0]
train_target_MNIST = train_set_MNIST[1]
train_t_MNIST = oneHotEncoding(train_target_MNIST)
valid_x_MNIST = valid_set_MNIST[0]
valid_target_MNIST = valid_set_MNIST[1]
test_x_MNIST = test_set_MNIST[0]
test_target_MNIST = test_set_MNIST[1]
b = 1
# TUNE HYPERPARAMETER ETA
w_logRegress_MNIST = logRegression(train_x_MNIST, train_t_MNIST, b)
yOneHot_validate_MNIST, y_value_validate_MNIST, accuracy_validate_MNIST = logRegressionValidate(valid_x_MNIST, valid_target_MNIST, w_logRegress_MNIST, b)
yOneHot_test_MNIST, y_value_test_MNIST = logRegressionTest(test_x_MNIST, w_logRegress_MNIST, b)
print ("accuracy MNIST validation:")
print (accuracy_validate_MNIST)
path = "USPSdata/Numerals/"
count = 0
validate_x_USPS = np.zeros((1,784))
target_set_USPS = np.zeros((1,1))
print (np.shape(validate_x_USPS))
for i in range(10):
new_path = path
new_path = new_path + str(i) + "/"
for name in os.listdir(new_path):
final_path = new_path
final_path = final_path + name
# print count
#print final_path
if ".list" not in name:
if (name != "Thumbs.db"):
# if count < 5:
img = misc.imread(final_path)
gray_img = color.rgb2gray(img)
resized_img = misc.imresize(gray_img,(28,28))
# print "resized img:"
# print len(resized_img)
# print np.shape(resized_img)
flat_img = np.ravel(resized_img)
validate_x_USPS = np.insert(validate_x_USPS,len(validate_x_USPS),flat_img,axis=0)
target_set_USPS = np.insert(target_set_USPS,len(target_set_USPS),int(i),axis=0)
#print "resized img:"
#print len(flat_img)
#print np.shape(flat_img)
count = count + 1
if((count%1000) == 0):
print (count)
# else:
# break
print ("count:")
print (count)
validate_x_USPS = np.delete(validate_x_USPS,0,axis=0)
target_set_USPS = np.delete(target_set_USPS,0,axis=0)
yOneHot_validate_USPS, y_value_validate_USPS, accuracy_validate_USPS = logRegressionValidate(validate_x_USPS, target_set_USPS, w_logRegress_MNIST, b)
path = "USPSdata/Test/"
count = 0
test_x_USPS = np.zeros((1,784))
for i in range(10):
new_path = path
for name in os.listdir(new_path):
final_path = new_path
final_path = final_path + name
# print count
#print final_path
if ".list" not in name:
if (name != "Thumbs.db"):
# if count < 5:
img = misc.imread(final_path)
gray_img = color.rgb2gray(img)
resized_img = misc.imresize(gray_img,(28,28))
# print "resized img:"
# print len(resized_img)
# print np.shape(resized_img)
flat_img = np.ravel(resized_img)
test_x_USPS = np.insert(test_x_USPS,len(validate_x_USPS),flat_img,axis=0)
#print "resized img:"
#print len(flat_img)
#print np.shape(flat_img)
count = count + 1
if((count%1000) == 0):
print (count)
# else:
# break
print ("count:")
print (count)
test_x_USPS = np.delete(test_x_USPS,0,axis=0)
yOneHot_test_USPS, y_value_test_USPS = logRegressionTest(test_x_USPS, w_logRegress_MNIST, b)
cnn()
print ("accuracy USPS validation:")
print (accuracy_validate_USPS)
print ("accuracy MNIST validation:")
print (accuracy_validate_MNIST)
# w1_nn_MNIST, w2_nn_MNIST = neuralnetwork(train_x_MNIST, train_t_MNIST, b)
# yOneHot_nn_MNIST, y_value_nn_MNIST, accuracy_nn_MNIST = neuralnetwork(valid_x_MNIST, valid_target_MNIST, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_test_nn_MNIST, y_value_test_nn_MNIST = neuralnetwork(test_x_MNIST, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_nn_USPS, y_value_nn_USPS, accuracy_nn_USPS = neuralnetwork(validate_x_USPS, target_set_USPS, w1_nn_MNIST, w2_nn_MNIST, b)
# yOneHot_test_nn_USPS, y_value_test_nn_USPS = neuralnetwork(test_x_USPS, w1_nn_MNIST, w2_nn_MNIST, b)
print ("PROGRAM COMPLETED")
| 10,197 | 0 | 543 |
beb50e5f1326b0f528a82c90ee9a7f29e090eb04 | 3,602 | py | Python | pyspark/pysparkTJ/pyspark08_df.py | mayi140611/mayiexamples | 221cf9e8916d81198df7355894ec59dc334ae0af | [
"Apache-2.0"
] | null | null | null | pyspark/pysparkTJ/pyspark08_df.py | mayi140611/mayiexamples | 221cf9e8916d81198df7355894ec59dc334ae0af | [
"Apache-2.0"
] | null | null | null | pyspark/pysparkTJ/pyspark08_df.py | mayi140611/mayiexamples | 221cf9e8916d81198df7355894ec59dc334ae0af | [
"Apache-2.0"
] | 2 | 2020-03-09T12:48:07.000Z | 2020-04-19T11:43:22.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: 'Administrator'
@contact:
@time:
"""
#!/usr/bin/python
# encoding: utf-8
# ================ 直接创建DataFrame
from pyspark.sql import SparkSession
from pyspark.sql import Row
spark = SparkSession.builder.appName('test').getOrCreate()
sc = spark.sparkContext
# spark.conf.set("spark.sql.shuffle.partitions", 6)
# ================直接创建==========================
l = [('Ankit',25),('Jalfaizy',22),('saurabh',20),('Bala',26)]
rdd = sc.parallelize(l)
people = rdd.map(lambda x: Row(name=x[0], age=int(x[1])))
# schemaPeople = sqlContext.createDataFrame(people)
schemaPeople = spark.createDataFrame(people)
# ==================从csv读取======================
df = spark.read.format("csv"). \
option("header", "true") \
.load("iris.csv")
df.printSchema()
df.show(10)
df.count()
df.columns
# ===============增加一列(或者替换) withColumn===========
# Column name which we want add /replace.
# Expression on column.
df.withColumn('newWidth',df.SepalWidth * 2).show()
# ==========删除一列 drop=========================
df.drop('Name').show()
#================ 统计信息 describe================
df.describe().show()
df.describe('Name').show() #分类变量
# ===============提取部分列 select==============
df.select('Name','SepalLength').show()
# ==================基本统计功能 distinct count=====
df.select('Name').distinct().count()
# 分组统计 groupby(colname).agg({'col':'fun','col2':'fun2'})
df.groupby('Name').agg({'SepalWidth':'mean','SepalLength':'max'}).show()
# avg(), count(), countDistinct(), first(), kurtosis(),
# max(), mean(), min(), skewness(), stddev(), stddev_pop(),
# stddev_samp(), sum(), sumDistinct(), var_pop(), var_samp() and variance()
# 自定义的汇总方法
import pyspark.sql.functions as fn
df.agg(fn.count('SepalWidth').alias('width_count'),
fn.countDistinct('id').alias('distinct_id_count')).collect()
#====================数据集拆成两部分 randomSplit ===========
trainDF, testDF = df.randomSplit([0.6, 0.4])
# ================采样数据 sample===========
# withReplacement = True or False to select a observation with or without replacement.
# fraction = x, where x = .5 shows that we want to have 50% data in sample DataFrame.
# seed for reproduce the result
sdf = df.sample(False,0.2,100)
#查看两个数据集在类别上的差异 subtract,确保训练数据集覆盖了所有分类
diff_in_train_test = testDF.select('Name').subtract(trainDF.select('Name'))
diff_in_train_test.distinct().count()
# ================交叉表 crosstab=============
df.crosstab('Name','SepalLength').show()
# ===============sql 功能 ==============
df.registerAsTable('train_table')
spark.sql("").show()
#================== 综合案例,+ udf================
# 测试数据集中有些类别在训练集中是不存在的,把这些数据集应该从测试集中删除
trainDF,testDF = df.randomSplit([0.01,0.98])
diff_in_train_test = testDF.select('Name').subtract(trainDF.select('Name')).distinct().show()
# First, collect the categories present in the test set but missing from the training set into a list.
not_exist_cats = testDF.select('Name').subtract(trainDF.select('Name')).distinct().rdd.map(lambda x :x[0]).collect()
# Define a helper used to detect those unseen categories.
# Create a udf; a udf needs two arguments:
# Function
# Return type (in my case StringType())
from pyspark.sql.types import StringType
from pyspark.sql.functions import udf
check = udf(should_remove,StringType())
testDF2 = testDF.withColumn('New_name',check(testDF['Name'])).filter('New_name <> -1')
testDF2.show()
# ==================过滤行 filter ==================
import pyspark.sql.functions as fun
estDF2 = df.withColumn('New_name',check(testDF['Name'])).filter('New_name <> -1')
df.withColumn('New_name',check(testDF['Name'])).filter(fun.col('Name')<>-1).show() | 25.728571 | 116 | 0.616047 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: 'Administrator'
@contact:
@time:
"""
#!/usr/bin/python
# encoding: utf-8
# ================ 直接创建DataFrame
# Demo of common PySpark DataFrame operations.
# Fixes: bare (non-comment) text lines that were SyntaxErrors, the
# Python-2-only "<>" operator, an empty spark.sql("") call that raises
# ParseException, and a misspelled variable; comments translated to English.
from pyspark.sql import SparkSession
from pyspark.sql import Row
spark = SparkSession.builder.appName('test').getOrCreate()
sc = spark.sparkContext
# spark.conf.set("spark.sql.shuffle.partitions", 6)
# ================ Create a DataFrame directly ==========================
l = [('Ankit',25),('Jalfaizy',22),('saurabh',20),('Bala',26)]
rdd = sc.parallelize(l)
people = rdd.map(lambda x: Row(name=x[0], age=int(x[1])))
# schemaPeople = sqlContext.createDataFrame(people)
schemaPeople = spark.createDataFrame(people)
# ================== Read from a csv file ======================
df = spark.read.format("csv"). \
    option("header", "true") \
    .load("iris.csv")
df.printSchema()
df.show(10)
df.count()
df.columns
# =============== Add (or replace) a column: withColumn ===========
# Column name which we want add /replace.
# Expression on column.
df.withColumn('newWidth', df.SepalWidth * 2).show()
# ========== Drop a column: drop =========================
df.drop('Name').show()
# ================ Summary statistics: describe ================
df.describe().show()
df.describe('Name').show()  # categorical variable
# =============== Select a subset of columns: select ==============
df.select('Name', 'SepalLength').show()
# ================== Basic statistics: distinct count =====
df.select('Name').distinct().count()
# Grouped statistics: groupby(colname).agg({'col':'fun','col2':'fun2'})
df.groupby('Name').agg({'SepalWidth': 'mean', 'SepalLength': 'max'}).show()
# avg(), count(), countDistinct(), first(), kurtosis(),
# max(), mean(), min(), skewness(), stddev(), stddev_pop(),
# stddev_samp(), sum(), sumDistinct(), var_pop(), var_samp() and variance()
# Custom aggregation functions
import pyspark.sql.functions as fn
df.agg(fn.count('SepalWidth').alias('width_count'),
       fn.countDistinct('id').alias('distinct_id_count')).collect()
# ==================== Split the data in two parts: randomSplit ===========
trainDF, testDF = df.randomSplit([0.6, 0.4])
# ================ Sample data: sample ===========
# withReplacement = True or False to select a observation with or without replacement.
# fraction = x, where x = .5 shows that we want to have 50% data in sample DataFrame.
# seed for reproduce the result
sdf = df.sample(False, 0.2, 100)
# Compare the categories of the two sets with subtract, to make sure the
# training set covers every category.
diff_in_train_test = testDF.select('Name').subtract(trainDF.select('Name'))
diff_in_train_test.distinct().count()
# ================ Cross tabulation: crosstab =============
df.crosstab('Name', 'SepalLength').show()
# =============== SQL interface ==============
df.registerAsTable('train_table')
# Fixed: the original ran spark.sql("") which raises a ParseException.
spark.sql("select * from train_table").show()
# ================== Full example with a udf ================
# Some categories in the test set never occur in the training set; those
# rows should be removed from the test set.
trainDF, testDF = df.randomSplit([0.01, 0.98])
diff_in_train_test = testDF.select('Name').subtract(trainDF.select('Name')).distinct().show()
# First collect those categories into a list.
not_exist_cats = testDF.select('Name').subtract(trainDF.select('Name')).distinct().rdd.map(lambda x: x[0]).collect()
# Define a helper used for the check.
def should_remove(x):
    """Map categories unseen during training to the sentinel -1."""
    if x in not_exist_cats:
        return -1
    else:
        return x
# Create the udf; udf() needs two arguments:
# Function
# Return type (in my case StringType())
from pyspark.sql.types import StringType
from pyspark.sql.functions import udf
check = udf(should_remove, StringType())
testDF2 = testDF.withColumn('New_name', check(testDF['Name'])).filter('New_name <> -1')
testDF2.show()
# ================== Filter rows: filter ==================
import pyspark.sql.functions as fun
# Fixed: variable was misspelled "estDF2"; the last line used the
# Python-2-only "<>" operator (SyntaxError on Python 3).
testDF2 = df.withColumn('New_name', check(testDF['Name'])).filter('New_name <> -1')
df.withColumn('New_name', check(testDF['Name'])).filter(fun.col('Name') != -1).show()
08ca175fdea8c1373cfc1bb4d1565ec2bb58c4d4 | 193 | py | Python | resources/routes.py | rezaabdullah/Cramstack | 066b185c612185a6b942197355adbe42c88a170a | [
"MIT"
] | null | null | null | resources/routes.py | rezaabdullah/Cramstack | 066b185c612185a6b942197355adbe42c88a170a | [
"MIT"
] | null | null | null | resources/routes.py | rezaabdullah/Cramstack | 066b185c612185a6b942197355adbe42c88a170a | [
"MIT"
] | null | null | null | from .pgcb import Pgcb, PgcbHome, PgcbFilter | 32.166667 | 44 | 0.725389 | from .pgcb import Pgcb, PgcbHome, PgcbFilter
def initialize_routes(api):
    """Register every PGCB REST resource on the given Api instance."""
    routes = ((PgcbHome, "/"),
              (Pgcb, "/show_all"),
              (PgcbFilter, "/filter"))
    for resource, path in routes:
        api.add_resource(resource, path)
9f0709244ffaa202439950676c6a42f9bf286dfd | 29,567 | py | Python | src/analyze.py | quanxianwang/wr-graph | ef6d705161b41f85f5ea8102e653e14b66b77d7c | [
"Intel"
] | 1 | 2016-02-14T06:04:44.000Z | 2016-02-14T06:04:44.000Z | src/analyze.py | quanxianwang/wr-graph | ef6d705161b41f85f5ea8102e653e14b66b77d7c | [
"Intel"
] | null | null | null | src/analyze.py | quanxianwang/wr-graph | ef6d705161b41f85f5ea8102e653e14b66b77d7c | [
"Intel"
] | null | null | null | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# analyze.py
#
# Copyright © 214 Intel Corporation
#
# Author: Quanxian Wang <quanxian.wang@intel.com>
# Zhang Xiaoyan <zhang.xiaoyanx@intel.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import re
import math
import xml.etree.ElementTree as ET
import collections
import copy
import sys
import os
from cairographic import Graphic
#Define macro
START_TIME = 999999999
TOTAL_INTERVAL = 0
GROUP_NUM = 3
X_AXIS_INTERVAL = 120
MAX_LEN = 1000000
class Analyzer:
"""
Profile Analyzer
It's used to read from log file and visualize the record.
"""
    def draw_smooth(self, name, show_start, show_end, width, height, output_dir=None):
        """
        Render the frame-summary ("smooth") chart for the active client.

        Averages each configured smooth event's duration, interleaves the
        inter-event communication gaps, writes the derived total frame
        time and FPS to <output_dir>/fps.txt, and returns the chart.

        Args:
            show_start: the start time to show (unused in this method)
            show_end: the end time to show (unused in this method)
            output_dir: the output directory of fps.txt (default: cwd)
        Input: self.smooth_events, self.comm_events
        Output: Graphic object, or None when no smooth data exists
        """
        if len(self.smooth_events.keys()) == 0:
            return None
        st_dic = collections.OrderedDict()
        for cid in self.client_id_list:
            # Only the client currently marked active is drawn.
            if 'client'+'_'+cid not in self.client_activate \
               or self.client_activate['client'+'_'+cid] != True:
                continue
            st_dic[cid] = collections.OrderedDict()
            data = []
            color_index = 0
            colors = []
            x_labels = []
            sum_total = 0
            se_len = len(self.smooth_event_list)
            for i in range(se_len):
                total = 0
                ename = self.smooth_event_list[i]
                data_len = len(self.smooth_events[cid][ename])
                for number in self.smooth_events[cid][ename]:
                    total += number
                # Mean duration of this event over all sampled frames.
                st_dic[cid][ename] = total/data_len
                if i < se_len - 1:
                    # Interleave the gap to the next event, but only when
                    # it is large enough (> 0.1 ms) to be visible.
                    cname = 'comm' + str(i)
                    comm_val = self.comm_events[cid][cname]
                    if comm_val > 0.1:
                        st_dic[cid][cname] = comm_val
            # get sum_total
            for ename in st_dic[cid]:
                sum_total += st_dic[cid][ename]
            sum_total = float("{0:.2f}".format(sum_total))
            fps = float("{0:.2f}".format(1000 / sum_total))
            str1 = 'total_time = ' + str(sum_total) + 'ms'\
                   + '\n' + 'fps = ' + str(fps) + 'fps'
            if output_dir == None:
                output_dir = '.'
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
            fd = open(output_dir + '/fps.txt', 'w')
            fd.write(str1)
            fd.close()
            for ename in st_dic[cid].keys():
                x_labels.append(ename)
            for i in range(len(x_labels)):
                # Cycle through the predefined colors, one per bar.
                color_index = color_index % (len(self.color_table))
                colors.append(self.color_table.values()[color_index])
                color_index += 1
            data = st_dic[cid]
            smooth_chart = Graphic(name, data, width,
                                   height, x_labels=x_labels,
                                   axis=True, grid=True,
                                   background="white", series_colors=colors)
            smooth_chart.render()
            smooth_chart.render_smooth()
            # NOTE: returns inside the loop -- only the first active
            # client produces a chart.
            return smooth_chart
    def draw_fps(self, name, show_start, show_end, width, height, output_dir=None):
        """
        Render the FPS line chart for the active client from self.time_dic.

        Args:
            show_start: the (absolute) start time of the window to show
            show_end: the (absolute) end time of the window to show
        Input: self.time_dic
        Output: Graphic object, or None when no timing data exists
        """
        if len(self.time_dic) == 0:
            return None
        # change to relative time
        rel_start = show_start - self.start_time
        rel_end = show_end - self.start_time
        for cid in self.client_id_list:
            # Only the client currently marked active is drawn.
            if 'client' + '_' + cid not in self.client_activate or \
               self.client_activate['client' + '_' + cid] == False:
                continue
            client_color = []
            time_list = []
            x_labels = []
            time_list = self.time_dic[cid]
            FPS = collections.OrderedDict()
            # One x-axis label every X_AXIS_INTERVAL pixels.
            x_axis_num = int(math.floor(width / X_AXIS_INTERVAL))
            x_interval = int(math.floor((rel_end - rel_start)
                             / x_axis_num))
            for i in range(x_axis_num + 1):
                x_labels.append("{0}ms".format(rel_start + i * x_interval))
            for i in range(len(time_list)):
                # Clip intervals to the requested window.
                if time_list[i].start < rel_start:
                    continue
                if time_list[i].end > rel_end:
                    break
                if time_list[i].end == -1:
                    # end == -1 marks a segmentation point, kept as-is.
                    FPS[time_list[i].start] = -1
                    continue
                # change ms value to FPS value
                FPS[time_list[i].start] = 1000/time_list[i].end
            client_color.append(self.color_table["blue"])
            # FPS is defined for every client id
            # lets calculate start, end, interval and labels.
            fps_chart = Graphic(name, FPS, width, height, rel_end,
                                x_labels=x_labels, axis=True, grid=True,
                                background="white", series_colors=client_color)
            fps_chart.render()
            fps_chart.render_fps()
            # NOTE: returns inside the loop -- only the first active
            # client produces a chart.
            return fps_chart
    def draw_fps_media(self, name, show_start, show_end, width, height, output_dir=None):
        """
        Render a media-style FPS chart: count complete fps-event cycles in
        each one-second bucket and write the mean FPS to <output_dir>/fps.txt.

        Args:
            show_start: the (absolute) start time of the window to show
            show_end: the (absolute) end time of the window to show
        Input: self.new_events
        Output: Graphic object, or None when no timing data exists
        """
        if len(self.time_dic) == 0:
            return None
        # change to relative time
        rel_start = show_start - self.start_time
        rel_end = show_end - self.start_time
        for cid in self.client_id_list:
            # Only the client currently marked active is drawn.
            if 'client' + '_' + cid not in self.client_activate or \
               self.client_activate['client' + '_' + cid] == False:
                continue
            client_color = []
            x_labels = []
            FPS = collections.OrderedDict()
            offset = 0
            time_old = 0
            sum_total = 0
            fps_len = len(self.fps_event_list)
            event_len = len(self.new_events[cid])
            event_name = self.fps_event_list[0]
            x_axis_num = int(math.floor(width / X_AXIS_INTERVAL))
            x_interval = int(math.floor((rel_end - rel_start)
                             / x_axis_num))
            for i in range(x_axis_num + 1):
                x_labels.append("{0}ms".format(rel_start + i * x_interval))
            # Count frames fully completed inside each 1000 ms bucket,
            # stepping through the event list one cycle at a time.
            for time in range(1000, int(rel_end) + 1000)[::1000]:
                count = 0
                for i in range(offset, event_len)[::fps_len]:
                    event1 = self.new_events[cid][i]
                    if event1[0] == event_name and time_old <= event1[1] < time:
                        if (i + fps_len - 1) < event_len:
                            event2 = self.new_events[cid][i + fps_len - 1]
                            if event2[2] < time:
                                count += 1
                    else:
                        break
                # Resume the scan where this bucket stopped.
                # NOTE(review): if the inner loop body never runs, "i"
                # keeps its value from the previous bucket -- verify.
                offset = i
                time_old = time
                if count >= 1:
                    sum_total += count
                    FPS[time] = count
            if sum_total > 0:
                fps = int("{0:.0f}".format(sum_total / len(FPS)))
                str1 = 'fps = ' + str(fps) + 'fps'
                if output_dir == None:
                    output_dir = '.'
                if not os.path.exists(output_dir):
                    os.mkdir(output_dir)
                fd = open(output_dir + '/fps.txt', 'w')
                fd.write(str1)
                fd.close()
            client_color.append(self.color_table["blue"])
            # FPS is defined for every client id
            # lets calculate start, end, interval and labels.
            fps_chart = Graphic(name, FPS, width, height, rel_end,
                                x_labels=x_labels, axis=True, grid=True,
                                background="white", series_colors=client_color)
            fps_chart.render()
            fps_chart.render_fps()
            # NOTE: returns inside the loop -- only the first active
            # client produces a chart.
            return fps_chart
    def calculate_fps(self):
        """
        Build per-client frame interval lists into self.time_dic.

        Each span from the first fps event to the last fps event becomes
        one interval (interval.end stores the DURATION, not the end
        time).  Each segmentation point is inserted as a marker interval
        with end == -1, and the data between markers is outlier-trimmed
        via sample_data().

        Input: self.new_events, self.fps_event_list, self.seg_point_time
        Output: self.time_dic = {client_id: [interval]}
        """
        for cid in self.client_id_list:
            time_list = []
            number = 0
            index = 0
            offset = 0
            rate = 0
            event1 = self.fps_event_list[0]
            event2 = self.fps_event_list[-1]
            seg_len = len(self.seg_point_time)
            for event in self.new_events[cid]:
                if event[0] == event1:
                    # Frame begins: remember its start time.
                    start = event[1]
                    continue
                if event[0] == event2:
                    # NOTE(review): assumes a start event always precedes
                    # the first end event; a log that begins mid-frame
                    # would raise NameError on "start".
                    end = event[1]
                    itv = interval()
                    itv.start = start
                    itv.end = end - start
                    number += 1
                    if seg_len > 0 and seg_len > index:
                        seg_time = self.seg_point_time[index]
                        if start >= seg_time:
                            """
                            Before insert segment point, sample the time data
                            """
                            new_list = sorted(time_list[offset:len(time_list)], key=lambda e:e.end)
                            self.sample_data(new_list, 0, len(new_list))
                            new_list.sort(key=lambda e:e.start)
                            if offset > 0:
                                time_list = time_list[0:offset] + new_list
                            else:
                                time_list = new_list
                            # Marker interval (end == -1) for the point.
                            itv2 = interval()
                            itv2.start = seg_time
                            itv2.end = -1
                            index += 1
                            time_list.append(itv2)
                            offset = len(time_list)
                    time_list.append(itv)
            if seg_len == 0:
                # No segmentation points: trim the whole list once.
                time_list.sort(key=lambda e:e.end)
                self.sample_data(time_list, 0, len(time_list))
                time_list.sort(key=lambda e:e.start)
            if seg_len > 0 and self.seg_point_time[-1] not in [e.start for e in time_list]:
                # Trim the tail and append the final segment marker.
                new_list = sorted(time_list[offset:len(time_list)], key=lambda e:e.end)
                self.sample_data(new_list, 0, len(new_list))
                new_list.sort(key=lambda e:e.start)
                if offset > 0:
                    time_list = time_list[0:offset] + new_list
                else:
                    time_list = new_list
                itv = interval()
                itv.start = self.seg_point_time[-1]
                itv.end = -1
                time_list.append(itv)
            self.time_dic[cid] = time_list
def parse_log_file(self):
"""
parse log file.
Return:self.events_dic
"""
color_index = 0
for debug_file in self.log_files:
with open(debug_file) as inf:
for line in inf:
# Find the match
match = self.idregex.match(line)
if match is not None:
self.process_id(match)
continue
match = self.pregex.match(line)
if match is not None:
self.process_point(match)
continue
match = self.sregex.match(line)
if match is not None:
self.process_timestr(match, True)
continue
match = self.eregex.match(line)
if match is not None:
self.process_timestr(match, False)
continue
    def parse_config_file(self, configfile, logfile):
        """
        Parse the XML config that drives log parsing.

        Populates self.seg_point, the event/fps/smooth lists,
        self.sample_rate, self.action_type and self.log_files.

        Args:
            configfile: path to config.xml (default ../config/config.xml)
            logfile: optional log file that overrides <profile> entries
        """
        if configfile == None:
            configfile = '../config/config.xml'
        if not os.path.exists(configfile):
            return
        self.root = ET.parse(configfile).getroot()
        # Maps top-level tag -> (expected child tag, collected values).
        config_tags = {"segmentation_point":("point", []),
                       "event_item":("event", []),
                       "fps_item":("fps", []),
                       "smooth_item":("smooth", []),
                       "sample_rate":("rate", []),
                       "action_type":("type", []),
                       "profile":("file", [])}
        for key in config_tags.keys():
            debug = self.root.find(key)
            if debug is None:
                continue
            subitems = debug.getchildren()
            for item in subitems:
                if item.tag == config_tags[key][0]:
                    config_tags[key][1].append(item.text)
        # convert config to global values
        if len(config_tags["segmentation_point"][1]) > 0:
            self.seg_point = config_tags["segmentation_point"][1][0]
        self.event_list.extend(config_tags["event_item"][1])
        self.fps_event_list.extend(config_tags["fps_item"][1])
        self.smooth_event_list.extend(config_tags["smooth_item"][1])
        if len(config_tags["sample_rate"][1]) == 0:
            self.sample_rate = 0
        else:
            self.sample_rate = config_tags["sample_rate"][1][0]
        if logfile != None:
            # A log file given explicitly wins over the config entries.
            self.log_files.append(logfile)
        else:
            self.log_files.extend(config_tags["profile"][1])
        if len(config_tags["action_type"][1]) != 0:
            self.action_type = config_tags["action_type"][1][0]
    def get_smooth_time(self):
        """
        Collect per-event durations for the frame-summary chart.

        Input: self.new_events (paired (name, start, end) tuples)
        Return: self.smooth_events
        Data Format: self.smooth_events = {client_id:{event_name:[duration]}}
        """
        event_len = MAX_LEN  # NOTE(review): unused local -- verify
        for cid in self.client_id_list:
            self.smooth_events[cid] = {}
            for event in self.new_events[cid]:
                name = event[0]
                # Duration of this occurrence: end - start.
                number = event[2] - event[1]
                if name not in self.smooth_events[cid].keys():
                    self.smooth_events[cid][name] = []
                self.smooth_events[cid][name].append(number)
            # merge the data based on the sample rate
            for name in self.smooth_events[cid].keys():
                self.smooth_events[cid][name].sort()
                self.sample_data(self.smooth_events[cid][name], 0, \
                                 len(self.smooth_events[cid][name]))
    def get_comm_time(self):
        """
        Compute the average communication (gap) time between each pair of
        consecutive smooth events for every client.

        Input: self.new_events, self.smooth_event_list
        Return: self.comm_events
        Data Format: self.comm_events = {client_id:{event_name:time}}
        """
        for cid in self.client_id_list:
            self.comm_events[cid] = collections.OrderedDict()
            total = 0
            comm_time = 0
            comm_len = 0
            for i in range(0, len(self.smooth_event_list) - 1):
                fname = self.smooth_event_list[i]
                sname = self.smooth_event_list[i+1]
                # End times of the first event / start times of the next.
                fst_end = [e[2] for e in self.new_events[cid] \
                           if e[0] == fname]
                sec_start = [e[1] for e in self.new_events[cid] \
                             if e[0] == sname]
                comm_list = []
                if len(fst_end) == 0 or len(sec_start) == 0:
                    print 'smooth invalid data!'
                    sys.exit(-1)
                # Pair as many occurrences as both lists share
                # (py2 and/or idiom for min(len(a), len(b))).
                comm_len = len(fst_end) > len(sec_start) and \
                           len(sec_start) or len(fst_end)
                for j in range(comm_len):
                    number = sec_start[j] - fst_end[j]
                    comm_list.append(number)
                comm_list.sort()
                self.sample_data(comm_list, 0, len(comm_list))
                # NOTE(review): "total" is never reset between event
                # pairs, so later averages include earlier gaps -- verify
                # that this accumulation is intended.
                for number in comm_list:
                    total += number
                if len(comm_list) > 0:
                    comm_time = total / len(comm_list)
                self.comm_events[cid]['comm' + str(i)] = comm_time
    def form_new_dic(self):
        """
        Pair start/end records from self.events_dic into
        (name, start_time, end_time) tuples in self.new_events.
        A start that has not been matched yet keeps end_time == -1.
        """
        for cid in self.client_id_list:
            self.new_events[cid] = []
        for cid in self.client_id_list:
            for i in range(len(self.events_dic[cid])):
                event = self.events_dic[cid][i]
                if event['start'] == True:
                    # Open a new (name, start, -1) record.
                    new_event = (event['name'], event['time'], -1)
                    self.new_events[cid].append(new_event)
                    continue
                if event['start'] == False:
                    # find the last event which end is -1
                    event_len = len(self.new_events[cid])
                    if event_len == 0:
                        continue
                    # NOTE(review): the loops below reuse the outer index
                    # "i" as a reverse offset; harmless in Python since
                    # the for-loop reassigns it, but fragile to edit.
                    i = 1
                    while i < (event_len - 1):
                        e1 = self.new_events[cid][-i]
                        if e1[0] == event['name'] and e1[2] != -1:
                            break
                        i += 1
                    # Walk back towards the newest entries, closing every
                    # still-open record of this name with the end time.
                    while i > 0:
                        e1 = self.new_events[cid][-i]
                        if e1[0] == event['name'] and e1[2] == -1:
                            new_event = (e1[0], e1[1], event['time'])
                            del self.new_events[cid][-i]
                            self.new_events[cid].append(new_event)
                        i -= 1
            # sort self.new_events by start time
            self.new_events[cid].sort(key=lambda e:e[1])
    def build_complete_dic(self):
        """
        Keep only event sequences that follow the configured event order
        (self.event_list, round-robin) and drop clients that do not have
        at least one full cycle.
        """
        elen = len(self.event_list)
        for cid in self.client_id_list:
            ecount = len(self.new_events[cid])
            j = 0
            index = 0
            while j < ecount:
                # Accept the event only when it matches the next expected
                # name in the cycle; otherwise discard it.
                if self.new_events[cid][j][0] == self.event_list[index]:
                    index += 1
                    index = index % elen
                    j += 1
                else:
                    del self.new_events[cid][j]
                    ecount -= 1
        # NOTE: deleting entries while iterating .keys() is only safe on
        # Python 2, where keys() returns a list copy.
        for cid in self.new_events.keys():
            if len(self.new_events[cid]) < elen:
                # Not even one full cycle: drop the client entirely.
                del self.new_events[cid]
                index = self.client_id_list.index(cid)
                del self.client_id_list[index]
                continue
            for i in range(len(self.new_events[cid])):
                event = self.new_events[cid][i]
                if event[2] == -1:
                    # Truncate at the first unfinished event.
                    del self.new_events[cid][i:]
                    break
    def get_valid_data(self):
        """
        Filter the raw event data down to valid, complete event cycles.

        Pipeline: clean_up()/merge_server() (defined elsewhere in this
        file, not visible in this chunk), then pair events into
        self.new_events, drop incomplete cycles, initialise per-client
        activation flags and compute the global start/end time.

        Input: original data self.events_dic = {id:[event]} where
        event = {'name':event_name, 'time':timestamp, 'start':bool}
        (see process_timestr)
        Return: valid data in self.new_events
        """
        self.clean_up()
        self.merge_server()
        self.form_new_dic()
        # build a complete event dictionary
        self.build_complete_dic()
        self.init_client_activate()
        self.get_startend_time()
def get_startend_time(self):
"""
Note:get the start time of log files.
Input:self.events_dic
Output:self.start_time
"""
for cid in self.client_id_list:
if len(self.new_events[cid]) <= 0:
continue
start_time = self.new_events[cid][0][1]
end_time = self.new_events[cid][-1][2]
if self.start_time > start_time:
self.start_time = start_time
if self.end_time < end_time:
self.end_time = end_time
for time in self.seg_point_time:
if time < self.start_time:
self.start_time = time
for time in self.seg_point_time:
if time > self.end_time:
self.end_time = time
self.total_interval = self.end_time
def update2rel(self):
"""
all event time is decreased by start time
"""
for cid in self.client_id_list:
time_list = self.new_events[cid]
for i in range(len(time_list)):
event = time_list[i]
event_new = (event[0], event[1] - self.start_time, \
event[2] - self.start_time)
time_list[i] = event_new
for i in range(len(self.seg_point_time)):
self.seg_point_time[i] -= self.start_time
    def init(self, configfile, logfile):
        """
        Parse config and logs, validate the data, and precompute the
        smooth / communication / FPS series used by the draw_* methods.
        Exits the process when the logs contain no usable data.
        """
        self.parse_config_file(configfile, logfile)
        self.parse_log_file()
        if len(self.client_id_list) == 0:
            # No perf_id lines: fall back to ids seen in the events.
            # self.client_id_list.append('0')
            self.client_id_list.extend(self.events_dic.keys())
        # filter all data so that only valid cycles remain
        if len(self.events_dic.keys()) == 0:
            print 'logfile do not have valid data!'
            sys.exit(-1)
        self.get_valid_data()
        self.update2rel()
        if len(self.smooth_event_list) > 0:
            self.get_smooth_time()
            self.get_comm_time()
        if len(self.fps_event_list) != 0:
            self.time_dic = {}
            self.calculate_fps()
| 36.278528 | 124 | 0.509893 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# analyze.py
#
# Copyright © 214 Intel Corporation
#
# Author: Quanxian Wang <quanxian.wang@intel.com>
# Zhang Xiaoyan <zhang.xiaoyanx@intel.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import re
import math
import xml.etree.ElementTree as ET
import collections
import copy
import sys
import os
from cairographic import Graphic
#Define macro
START_TIME = 999999999
TOTAL_INTERVAL = 0
GROUP_NUM = 3
X_AXIS_INTERVAL = 120
MAX_LEN = 1000000
class interval:
    """A time interval with the sentinel value -1 meaning "unset".

    ``start`` holds the (relative) begin time.  ``end`` holds either a
    duration/end value or -1, which Analyzer uses as a segmentation-point
    marker.
    """

    def __init__(self, start=-1, end=-1):
        # Backward compatible: interval() still yields start == end == -1;
        # optionally both fields can now be set at construction time.
        self.start = start
        self.end = end

    def __repr__(self):
        # Deliberately shows only .start -- existing debug output and
        # membership checks rely on this representation.
        return repr(self.start)
class Analyzer:
"""
Profile Analyzer
It's used to read from log file and visualize the record.
"""
    def __init__(self):
        """Initialise parsing state, event containers, configuration
        fields and the drawing color table."""
        # log file's start time, will be changed (absolute real world time)
        self.start_time = START_TIME
        # total interval
        self.total_interval = TOTAL_INTERVAL
        # log file's end time
        self.end_time = TOTAL_INTERVAL
        # predefined match patterns for the four perf_* log line kinds
        self.pregex = re.compile('\[\ *(?P<hour>[0-9]+):(?P<min>[0-9]+):(?P<sec>[0-9]+)\.(?P<msec>[0-9]+)\] perf_point:' + \
                                 '(?P<name>.*)')
        self.sregex = re.compile('\[\ *(?P<hour>[0-9]+):(?P<min>[0-9]+):(?P<sec>[0-9]+)\.(?P<msec>[0-9]+)\] perf_start:' + \
                                 '(?P<name>.*)')
        self.eregex = re.compile('\[\ *(?P<hour>[0-9]+):(?P<min>[0-9]+):(?P<sec>[0-9]+)\.(?P<msec>[0-9]+)\] perf_end:' + \
                                 '(?P<name>.*)')
        self.idregex = re.compile('\[\ *(?P<hour>[0-9]+):(?P<min>[0-9]+):(?P<sec>[0-9]+)\.(?P<msec>[0-9]+)\] perf_id:' + \
                                  '(?P<name>.*)')
        # dic of events data generated from log file analysis
        self.events_dic = {}
        # dic of valid events data generated from self.events_dic
        self.new_events = {}
        # dic of client's activation (decides which client gets drawn)
        self.client_activate = {}
        # dic of different data used to represent different functions
        # the amount of time of each event in order to draw summary frame chart
        self.smooth_events = collections.OrderedDict()
        # the communication time between each event, for the summary frame chart
        self.comm_events = collections.OrderedDict()
        # the whole time of each cycle in order to draw fps chart.
        self.time_dic = collections.OrderedDict()
        # list of all events
        self.event_list = []
        # event list to calculate fps
        self.fps_event_list = []
        # event list to calculate frame summary
        self.smooth_event_list = []
        # client_id list generated from log file
        self.client_id_list = []
        # log files list
        self.log_files = []
        # series colors handed to the chart widget
        self.client_color = []
        # segmentation point
        self.seg_point = None
        # time list of segmentation point
        self.seg_point_time = []
        # sample rate
        self.sample_rate = None
        # action type parsed from config.xml
        self.action_type = None
        # predefined color table (name -> RGB triple in [0, 1])
        self.color_table = {"blue": (0.0, 0.0, 1.0),
                            "cyan": (0.0, 1.0, 1.0),
                            "magenta": (1.0, 0.0, 1.0),
                            "orange": (1.0, 0.5, 0.0),
                            "maroon": (0.5, 0.0, 0.0),
                            "purple": (1.0, 0.2, 1.0),
                            "green": (0.0, 0.5, 0.0),
                            "red": (1.0, 0.0, 0.0),
                            "lime": (0.0, 1.0, 0.0),
                            "navy": (0.0, 0.0, 0.5),
                            "yellow": (1.0, 1.0, 0.0),
                            "black": (0.0, 0.0, 0.0)}
    def get_client_activate(self):
        # Accessor: mapping 'client_<id>' -> bool activation flag.
        return self.client_activate
    def get_client_color(self):
        # NOTE(review): appends "blue" on every call, so repeated calls
        # grow the list -- looks unintended; verify against callers.
        self.client_color.append(self.color_table["blue"])
        return self.client_color
    def updateClient(self, clients):
        """Copy the activation flag of every known client id from the
        given 'client_<id>' -> bool mapping."""
        for id in self.client_id_list:
            self.client_activate['client'+'_'+id] \
                = clients['client'+'_'+id]
    def get_action_type(self):
        # Accessor for the <action_type> value parsed from config.xml.
        return self.action_type
def draw_smooth(self, name, show_start, show_end, width, height, output_dir=None):
"""
Note:draw frame summary graph
Args:
show_start: the start time to show
show_end: the end time to show
output_dir: the output directory of fps.txt
Input:self.smooth_events, self.comm_events
Output:Graphic object
"""
if len(self.smooth_events.keys()) == 0:
return None
st_dic = collections.OrderedDict()
for cid in self.client_id_list:
if 'client'+'_'+cid not in self.client_activate \
or self.client_activate['client'+'_'+cid] != True:
continue
st_dic[cid] = collections.OrderedDict()
data = []
color_index = 0
colors = []
x_labels = []
sum_total = 0
se_len = len(self.smooth_event_list)
for i in range(se_len):
total = 0
ename = self.smooth_event_list[i]
data_len = len(self.smooth_events[cid][ename])
for number in self.smooth_events[cid][ename]:
total += number
st_dic[cid][ename] = total/data_len
if i < se_len - 1:
cname = 'comm' + str(i)
comm_val = self.comm_events[cid][cname]
if comm_val > 0.1:
st_dic[cid][cname] = comm_val
# get sum_total
for ename in st_dic[cid]:
sum_total += st_dic[cid][ename]
sum_total = float("{0:.2f}".format(sum_total))
fps = float("{0:.2f}".format(1000 / sum_total))
str1 = 'total_time = ' + str(sum_total) + 'ms'\
+ '\n' + 'fps = ' + str(fps) + 'fps'
if output_dir == None:
output_dir = '.'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fd = open(output_dir + '/fps.txt', 'w')
fd.write(str1)
fd.close()
for ename in st_dic[cid].keys():
x_labels.append(ename)
for i in range(len(x_labels)):
color_index = color_index % (len(self.color_table))
colors.append(self.color_table.values()[color_index])
color_index += 1
data = st_dic[cid]
smooth_chart = Graphic(name, data, width,
height, x_labels=x_labels,
axis=True, grid=True,
background="white", series_colors=colors)
smooth_chart.render()
smooth_chart.render_smooth()
return smooth_chart
def draw_fps(self, name, show_start, show_end, width, height, output_dir=None):
"""
Note:draw fps graph
Args:
show_start: the start time to show
show_end: the end time to show
Input:self.time_dic
Output:Graphic object
"""
if len(self.time_dic) == 0:
return None
# change to relative time
rel_start = show_start - self.start_time
rel_end = show_end - self.start_time
for cid in self.client_id_list:
if 'client' + '_' + cid not in self.client_activate or \
self.client_activate['client' + '_' + cid] == False:
continue
client_color = []
time_list = []
x_labels = []
time_list = self.time_dic[cid]
FPS = collections.OrderedDict()
x_axis_num = int(math.floor(width / X_AXIS_INTERVAL))
x_interval = int(math.floor((rel_end - rel_start)
/ x_axis_num))
for i in range(x_axis_num + 1):
x_labels.append("{0}ms".format(rel_start + i * x_interval))
for i in range(len(time_list)):
if time_list[i].start < rel_start:
continue
if time_list[i].end > rel_end:
break
if time_list[i].end == -1:
FPS[time_list[i].start] = -1
continue
# change ms value to FPS value
FPS[time_list[i].start] = 1000/time_list[i].end
client_color.append(self.color_table["blue"])
# FPS is defined for every client id
# lets calculate start, end, interval and labels.
fps_chart = Graphic(name, FPS, width, height, rel_end,
x_labels=x_labels, axis=True, grid=True,
background="white", series_colors=client_color)
fps_chart.render()
fps_chart.render_fps()
return fps_chart
def draw_fps_media(self, name, show_start, show_end, width, height, output_dir=None):
"""
Note:draw fps graph
Args:
show_start: the start time to show
show_end: the end time to show
Input:self.new_events
Output:Graphic object
"""
if len(self.time_dic) == 0:
return None
# change to relative time
rel_start = show_start - self.start_time
rel_end = show_end - self.start_time
for cid in self.client_id_list:
if 'client' + '_' + cid not in self.client_activate or \
self.client_activate['client' + '_' + cid] == False:
continue
client_color = []
x_labels = []
FPS = collections.OrderedDict()
offset = 0
time_old = 0
sum_total = 0
fps_len = len(self.fps_event_list)
event_len = len(self.new_events[cid])
event_name = self.fps_event_list[0]
x_axis_num = int(math.floor(width / X_AXIS_INTERVAL))
x_interval = int(math.floor((rel_end - rel_start)
/ x_axis_num))
for i in range(x_axis_num + 1):
x_labels.append("{0}ms".format(rel_start + i * x_interval))
for time in range(1000, int(rel_end) + 1000)[::1000]:
count = 0
for i in range(offset, event_len)[::fps_len]:
event1 = self.new_events[cid][i]
if event1[0] == event_name and time_old <= event1[1] < time:
if (i + fps_len - 1) < event_len:
event2 = self.new_events[cid][i + fps_len - 1]
if event2[2] < time:
count += 1
else:
break
offset = i
time_old = time
if count >= 1:
sum_total += count
FPS[time] = count
if sum_total > 0:
fps = int("{0:.0f}".format(sum_total / len(FPS)))
str1 = 'fps = ' + str(fps) + 'fps'
if output_dir == None:
output_dir = '.'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
fd = open(output_dir + '/fps.txt', 'w')
fd.write(str1)
fd.close()
client_color.append(self.color_table["blue"])
# FPS is defined for every client id
# lets calculate start, end, interval and labels.
fps_chart = Graphic(name, FPS, width, height, rel_end,
x_labels=x_labels, axis=True, grid=True,
background="white", series_colors=client_color)
fps_chart.render()
fps_chart.render_fps()
return fps_chart
def create_interval(self, start, end):
itv = interval()
itv.start = start
itv.end = cycle
    def sample_data(self, time_list = None, start = 0, end = 0):
        """Trim outliers from the slice [start, end) of *time_list* in
        place, removing sample_rate * (end - start) items (rounded) from
        each extreme of the (pre-sorted) slice."""
        if not time_list:
            return []
        rate = float(self.sample_rate) * (end-start)
        rate = int("{0:.0f}".format(rate))
        if rate > 0:
            del time_list[start:start+rate]
            # After the deletion above the list is shorter by ``rate``
            # items, so this slice maps to the original [end-rate, end)
            # range -- i.e. the last ``rate`` items of the slice.
            del time_list[end-rate*2:end-rate]
def calculate_fps(self):
"""
Input:self.time_dic = {event_name:{id:time}}
Output:self.time_dic
Data Formate:
"""
for cid in self.client_id_list:
time_list = []
number = 0
index = 0
offset = 0
rate = 0
event1 = self.fps_event_list[0]
event2 = self.fps_event_list[-1]
seg_len = len(self.seg_point_time)
for event in self.new_events[cid]:
if event[0] == event1:
start = event[1]
continue
if event[0] == event2:
end = event[1]
itv = interval()
itv.start = start
itv.end = end - start
number += 1
if seg_len > 0 and seg_len > index:
seg_time = self.seg_point_time[index]
if start >= seg_time:
"""
Before insert segment point, sample the time data
"""
new_list = sorted(time_list[offset:len(time_list)], key=lambda e:e.end)
self.sample_data(new_list, 0, len(new_list))
new_list.sort(key=lambda e:e.start)
if offset > 0:
time_list = time_list[0:offset] + new_list
else:
time_list = new_list
itv2 = interval()
itv2.start = seg_time
itv2.end = -1
index += 1
time_list.append(itv2)
offset = len(time_list)
time_list.append(itv)
if seg_len == 0:
time_list.sort(key=lambda e:e.end)
self.sample_data(time_list, 0, len(time_list))
time_list.sort(key=lambda e:e.start)
if seg_len > 0 and self.seg_point_time[-1] not in [e.start for e in time_list]:
new_list = sorted(time_list[offset:len(time_list)], key=lambda e:e.end)
self.sample_data(new_list, 0, len(new_list))
new_list.sort(key=lambda e:e.start)
if offset > 0:
time_list = time_list[0:offset] + new_list
else:
time_list = new_list
itv = interval()
itv.start = self.seg_point_time[-1]
itv.end = -1
time_list.append(itv)
self.time_dic[cid] = time_list
def process_id(self, match):
if not match:
return
ename = match.group('name')
if ename not in self.client_id_list:
self.client_id_list.append(ename)
def process_point(self, match):
if not match:
return
ename = match.group('name')
etime = float(match.group('hour')) * 60 * 60 * 1000 +\
float(match.group('min')) * 60 * 1000 +\
float(match.group('sec')) * 1000 +\
float(match.group('msec'))/1000
if self.seg_point == ename:
self.seg_point_time.append(float(etime))
    def process_timestr(self, match=None, start=True):
        """Record one perf_start/perf_end event into self.events_dic.

        The event name may carry a client id suffix ("name_id"); without
        one, the event is booked under client id '0'.  A start event
        whose name equals the configured segmentation point also records
        a segmentation timestamp.
        """
        if not match:
            return
        ename_ori = match.group('name')
        # Timestamp converted to milliseconds (with fractional part).
        etime = float(match.group('hour')) * 60 * 60 * 1000 +\
                float(match.group('min')) * 60 * 1000 +\
                float(match.group('sec')) * 1000 +\
                float(match.group('msec'))/1000
        id_index = ename_ori.find('_')
        if id_index == -1:
            eid = '0'
            ename = ename_ori
        else:
            # Split "name_id" into event name and client id.
            ename = ename_ori[:id_index]
            eid = ename_ori[id_index+1:]
        if start:
            if self.seg_point == ename:
                self.seg_point_time.append(float(etime))
        if eid not in self.events_dic:
            self.events_dic[eid] = []
        if start:
            new_event = {'name':ename, 'time':float(etime), 'start':True}
        else:
            new_event = {'name':ename, 'time':float(etime), 'start':False}
        self.events_dic[eid].append(new_event)
def parse_log_file(self):
"""
parse log file.
Return:self.events_dic
"""
color_index = 0
for debug_file in self.log_files:
with open(debug_file) as inf:
for line in inf:
# Find the match
match = self.idregex.match(line)
if match is not None:
self.process_id(match)
continue
match = self.pregex.match(line)
if match is not None:
self.process_point(match)
continue
match = self.sregex.match(line)
if match is not None:
self.process_timestr(match, True)
continue
match = self.eregex.match(line)
if match is not None:
self.process_timestr(match, False)
continue
def parse_config_file(self, configfile, logfile):
"""
parse config xml file, it shows how to parse log file.
parse log file then according to the xml instruction.
"""
if configfile == None:
configfile = '../config/config.xml'
if not os.path.exists(configfile):
return
self.root = ET.parse(configfile).getroot()
config_tags = {"segmentation_point":("point", []),
"event_item":("event", []),
"fps_item":("fps", []),
"smooth_item":("smooth", []),
"sample_rate":("rate", []),
"action_type":("type", []),
"profile":("file", [])}
for key in config_tags.keys():
debug = self.root.find(key)
if debug is None:
continue
subitems = debug.getchildren()
for item in subitems:
if item.tag == config_tags[key][0]:
config_tags[key][1].append(item.text)
# convert config to global values
if len(config_tags["segmentation_point"][1]) > 0:
self.seg_point = config_tags["segmentation_point"][1][0]
self.event_list.extend(config_tags["event_item"][1])
self.fps_event_list.extend(config_tags["fps_item"][1])
self.smooth_event_list.extend(config_tags["smooth_item"][1])
if len(config_tags["sample_rate"][1]) == 0:
self.sample_rate = 0
else:
self.sample_rate = config_tags["sample_rate"][1][0]
if logfile != None:
self.log_files.append(logfile)
else:
self.log_files.extend(config_tags["profile"][1])
if len(config_tags["action_type"][1]) != 0:
self.action_type = config_tags["action_type"][1][0]
def init_client_activate(self):
first = self.client_id_list[0]
self.client_activate['client' + '_' + first] = True
for cid in self.client_id_list[1:]:
self.client_activate['client' + '_' + cid] = False
    def get_smooth_time(self):
        """Collect per-client event durations used for the "smooth" data.

        For every client id, the duration (end - start) of each event in
        self.new_events[cid] is grouped under the event name, sorted, and
        then down-sampled in place via self.sample_data() (defined
        elsewhere in this class).

        Input : self.new_events = {cid: [(name, start, end)]}
        Return: self.smooth_events
        Actual layout: self.smooth_events = {client_id: {event_name: [durations]}}
        (the docstring in the original claimed {event_name:{client_id:time}},
        which is inverted relative to the code below).
        """
        # NOTE(review): event_len is never used in this method; MAX_LEN is a
        # module-level constant defined outside this view.
        event_len = MAX_LEN
        for cid in self.client_id_list:
            self.smooth_events[cid] = {}
            for event in self.new_events[cid]:
                name = event[0]
                # Duration of one occurrence of this event.
                number = event[2] - event[1]
                if name not in self.smooth_events[cid].keys():
                    self.smooth_events[cid][name] = []
                self.smooth_events[cid][name].append(number)
            # merge the data based on the sample rate
            for name in self.smooth_events[cid].keys():
                self.smooth_events[cid][name].sort()
                self.sample_data(self.smooth_events[cid][name], 0, \
                        len(self.smooth_events[cid][name]))
    def get_comm_time(self):
        """Measure the gap ("communication time") between consecutive
        smooth events for every client.

        For each adjacent pair (fname, sname) in self.smooth_event_list,
        the gap is sname's start minus fname's end, paired occurrence by
        occurrence, down-sampled with self.sample_data(), then averaged.
        Exits the process when a pair has no occurrences at all.  (This
        file targets Python 2 -- note the print statement below.)

        Return: self.comm_events = {client_id: OrderedDict('comm<i>': avg)}
        """
        for cid in self.client_id_list:
            self.comm_events[cid] = collections.OrderedDict()
            # NOTE(review): `total` is never reset between pairs, so the
            # average stored for 'comm<i>' includes the gaps of all earlier
            # pairs as well -- confirm whether this is intended.
            total = 0
            comm_time = 0
            comm_len = 0
            for i in range(0, len(self.smooth_event_list) - 1):
                fname = self.smooth_event_list[i]
                sname = self.smooth_event_list[i+1]
                # End times of every occurrence of the first event ...
                fst_end = [e[2] for e in self.new_events[cid] \
                        if e[0] == fname]
                # ... and start times of every occurrence of the second.
                sec_start = [e[1] for e in self.new_events[cid] \
                        if e[0] == sname]
                comm_list = []
                if len(fst_end) == 0 or len(sec_start) == 0:
                    print 'smooth invalid data!'
                    sys.exit(-1)
                # Pre-ternary idiom: min(len(fst_end), len(sec_start)).
                comm_len = len(fst_end) > len(sec_start) and \
                        len(sec_start) or len(fst_end)
                for j in range(comm_len):
                    number = sec_start[j] - fst_end[j]
                    comm_list.append(number)
                comm_list.sort()
                self.sample_data(comm_list, 0, len(comm_list))
                for number in comm_list:
                    total += number
                if len(comm_list) > 0:
                    comm_time = total / len(comm_list)
                self.comm_events[cid]['comm' + str(i)] = comm_time
    def clean_up(self):
        """Drop events and client ids that are not referenced by the config.

        Removes from self.events_dic every client id that is neither in
        self.client_id_list nor the server id '0', and every event whose
        name is not in self.event_list; finally removes from
        self.client_id_list any id that produced no events at all.

        NOTE(review): both loops below delete from the container they are
        iterating (dict .keys() / the id list).  This is safe under
        Python 2, where keys() returns a list snapshot, but would raise
        or skip entries under Python 3.
        """
        # Clean up data unused
        for cid in self.events_dic.keys():
            # clean up unsed client id
            if cid not in self.client_id_list and cid != '0':
                del self.events_dic[cid]
                continue
            # clean up unused event
            event_len = len(self.events_dic[cid])
            i = 0
            # Manual index walk because entries are deleted during the scan.
            while i < event_len:
                event = self.events_dic[cid][i]
                if event['name'] not in self.event_list:
                    del self.events_dic[cid][i]
                    event_len -= 1
                    continue
                i += 1
        for cid in self.client_id_list:
            if cid not in self.events_dic.keys():
                index = self.client_id_list.index(cid)
                del self.client_id_list[index]
def merge_server(self):
# merge weston server data with client data
events = self.events_dic['0']
for cid in self.client_id_list:
if cid == '0':
continue
self.events_dic[cid].extend(events)
self.events_dic[cid].sort(key=lambda e: e['time'])
    def form_new_dic(self):
        """Pair start/end records into (name, start, end) tuples.

        Walks self.events_dic[cid] in order: a start record opens a tuple
        with end == -1; an end record closes a still-open tuple of the
        same name, searched from the tail of self.new_events[cid].  After
        each client is processed the list is sorted by start time.

        Output: self.new_events = {client_id: [(name, start, end)]}
        """
        for cid in self.client_id_list:
            self.new_events[cid] = []
        for cid in self.client_id_list:
            for i in range(len(self.events_dic[cid])):
                event = self.events_dic[cid][i]
                if event['start'] == True:
                    # Open a new tuple; -1 marks "no end seen yet".
                    new_event = (event['name'], event['time'], -1)
                    self.new_events[cid].append(new_event)
                    continue
                if event['start'] == False:
                    # find the last event which end is -1
                    event_len = len(self.new_events[cid])
                    if event_len == 0:
                        continue
                    # NOTE(review): this `i` shadows the outer for-loop
                    # index; harmless (the for loop rebinds it on the next
                    # iteration) but confusing to read.
                    i = 1
                    # Walk backwards until a closed tuple of this name is
                    # found (or the scan window is exhausted) ...
                    while i < (event_len - 1):
                        e1 = self.new_events[cid][-i]
                        if e1[0] == event['name'] and e1[2] != -1:
                            break
                        i += 1
                    # ... then close every still-open tuple of this name
                    # within that window, moving it to the tail.
                    while i > 0:
                        e1 = self.new_events[cid][-i]
                        if e1[0] == event['name'] and e1[2] == -1:
                            new_event = (e1[0], e1[1], event['time'])
                            del self.new_events[cid][-i]
                            self.new_events[cid].append(new_event)
                        i -= 1
            # sort self.new_events
            self.new_events[cid].sort(key=lambda e:e[1])
    def build_complete_dic(self):
        """Keep only complete, correctly ordered rounds of events.

        First pass: per client, keep only events that follow the cyclic
        order given by self.event_list, deleting out-of-order entries.
        Second pass: drop clients with fewer events than one full round,
        and truncate a client's list at the first event that never
        received an end time (end == -1).

        NOTE(review): the second pass deletes from self.new_events while
        iterating .keys(), and edits self.client_id_list -- safe under
        Python 2 (keys() is a list snapshot), breaks under Python 3.
        """
        elen = len(self.event_list)
        for cid in self.client_id_list:
            ecount = len(self.new_events[cid])
            j = 0
            index = 0
            while j < ecount:
                if self.new_events[cid][j][0] == self.event_list[index]:
                    # Matches the expected position in the cycle: advance.
                    index += 1
                    index = index % elen
                    j += 1
                else:
                    # Out-of-sequence event: discard and rescan this slot.
                    del self.new_events[cid][j]
                    ecount -= 1
        for cid in self.new_events.keys():
            if len(self.new_events[cid]) < elen:
                # Not even one full round of events: drop the client.
                del self.new_events[cid]
                index = self.client_id_list.index(cid)
                del self.client_id_list[index]
                continue
            for i in range(len(self.new_events[cid])):
                event = self.new_events[cid][i]
                if event[2] == -1:
                    # Unfinished event: drop it and everything after it.
                    del self.new_events[cid][i:]
                    break
    def get_valid_data(self):
        """Run the full filtering pipeline on the raw parsed events.

        The order of the calls matters: unused ids/events are removed,
        server ('0') events are merged into each client, start/end records
        are paired into tuples, incomplete rounds are discarded, the
        active-client map is seeded, and finally the global start/end
        times are derived.

        Input : raw self.events_dic = {id: [event]} produced by
                parse_log_file(), where each event is a dict of the form
                {'name': ..., 'time': ..., 'start': bool}
        Output: filtered self.new_events plus the timing attributes set
                by get_startend_time().
        """
        self.clean_up()
        self.merge_server()
        self.form_new_dic()
        # build a complate event dic
        self.build_complete_dic()
        self.init_client_activate()
        self.get_startend_time()
def get_startend_time(self):
"""
Note:get the start time of log files.
Input:self.events_dic
Output:self.start_time
"""
for cid in self.client_id_list:
if len(self.new_events[cid]) <= 0:
continue
start_time = self.new_events[cid][0][1]
end_time = self.new_events[cid][-1][2]
if self.start_time > start_time:
self.start_time = start_time
if self.end_time < end_time:
self.end_time = end_time
for time in self.seg_point_time:
if time < self.start_time:
self.start_time = time
for time in self.seg_point_time:
if time > self.end_time:
self.end_time = time
self.total_interval = self.end_time
def update2rel(self):
"""
all event time is decreased by start time
"""
for cid in self.client_id_list:
time_list = self.new_events[cid]
for i in range(len(time_list)):
event = time_list[i]
event_new = (event[0], event[1] - self.start_time, \
event[2] - self.start_time)
time_list[i] = event_new
for i in range(len(self.seg_point_time)):
self.seg_point_time[i] -= self.start_time
    def init(self, configfile, logfile):
        """Parse config + logs, validate the data, and derive plot data.

        configfile/logfile are forwarded to parse_config_file().  Exits
        the process (sys.exit(-1)) when the logs contain no usable data.
        This file targets Python 2 (note the print statement below).
        """
        self.parse_config_file(configfile, logfile)
        self.parse_log_file()
        if len(self.client_id_list) == 0:
            # self.client_id_list.append('0')
            # No ids configured: fall back to every id seen in the logs.
            self.client_id_list.extend(self.events_dic.keys())
        # filer the all data to be valid
        if len(self.events_dic.keys()) == 0:
            print 'logfile do not have valid data!'
            sys.exit(-1)
        self.get_valid_data()
        self.update2rel()
        if len(self.smooth_event_list) > 0:
            self.get_smooth_time()
            self.get_comm_time()
        if len(self.fps_event_list) != 0:
            # calculate_fps() is defined elsewhere in this class.
            self.time_dic = {}
            self.calculate_fps()
| 6,544 | -6 | 432 |
88c70644ec5578d95f592c82f4959f388a8fe2ee | 7,600 | py | Python | shopping_cart.py | richiebubbs/shopping-cart | 0b0054b2179c39d6a0f6cf011c52023f90aee2c5 | [
"MIT"
] | 1 | 2019-06-17T21:03:35.000Z | 2019-06-17T21:03:35.000Z | shopping_cart.py | richiebubbs/shopping-cart | 0b0054b2179c39d6a0f6cf011c52023f90aee2c5 | [
"MIT"
] | null | null | null | shopping_cart.py | richiebubbs/shopping-cart | 0b0054b2179c39d6a0f6cf011c52023f90aee2c5 | [
"MIT"
] | null | null | null | # shopping_cart.py
#from pprint import pprint
import pandas as pd
import datetime
import time
products = [
{"id":1, "name": "Chocolate Sandwich Cookies", "department": "snacks", "aisle": "cookies cakes", "price": 3.50},
{"id":2, "name": "All-Seasons Salt", "department": "pantry", "aisle": "spices seasonings", "price": 4.99},
{"id":3, "name": "Robust Golden Unsweetened Oolong Tea", "department": "beverages", "aisle": "tea", "price": 2.49},
{"id":4, "name": "Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce", "department": "frozen", "aisle": "frozen meals", "price": 6.99},
{"id":5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
{"id":6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
{"id":7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
{"id":8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25},
{"id":9, "name": "Light Strawberry Blueberry Yogurt", "department": "dairy eggs", "aisle": "yogurt", "price": 6.50},
{"id":10, "name": "Sparkling Orange Juice & Prickly Pear Beverage", "department": "beverages", "aisle": "water seltzer sparkling water", "price": 2.99},
{"id":11, "name": "Peach Mango Juice", "department": "beverages", "aisle": "refrigerated", "price": 1.99},
{"id":12, "name": "Chocolate Fudge Layer Cake", "department": "frozen", "aisle": "frozen dessert", "price": 18.50},
{"id":13, "name": "Saline Nasal Mist", "department": "personal care", "aisle": "cold flu allergy", "price": 16.00},
{"id":14, "name": "Fresh Scent Dishwasher Cleaner", "department": "household", "aisle": "dish detergents", "price": 4.99},
{"id":15, "name": "Overnight Diapers Size 6", "department": "babies", "aisle": "diapers wipes", "price": 25.50},
{"id":16, "name": "Mint Chocolate Flavored Syrup", "department": "snacks", "aisle": "ice cream toppings", "price": 4.50},
{"id":17, "name": "Rendered Duck Fat", "department": "meat seafood", "aisle": "poultry counter", "price": 9.99},
{"id":18, "name": "Pizza for One Suprema Frozen Pizza", "department": "frozen", "aisle": "frozen pizza", "price": 12.50},
{"id":19, "name": "Gluten Free Quinoa Three Cheese & Mushroom Blend", "department": "dry goods pasta", "aisle": "grains rice dried goods", "price": 3.99},
{"id":20, "name": "Pomegranate Cranberry & Aloe Vera Enrich Drink", "department": "beverages", "aisle": "juice nectars", "price": 4.25}
] # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017
#print(products)
# pprint(products)
# TODO: write some Python code here to produce the desired output
products_list_csv = pd.read_csv('/Users/richiebubbs/Downloads/GitHub/shopping-cart/data/products.csv')
acceptable_inputs = [str(i["id"]) for i in products]
selected_products = []
#print(acceptable_inputs)
#I constructed this while loop with help from https://realpython.com/python-while-loop/
# I reconstructed the loop with some help form your screencast when I got stuck...
#https://www.youtube.com/watch?v=3BaGb-1cIr0&feature=youtu.be
# Running total (accumulated later at receipt time) and the raw ids the
# user picked, in selection order (duplicates allowed).
total_price = 0
selected_ids = []
# Sentinel flag: the loop runs until the user types DONE.
a = False
while not a:
    print("Please enter a product identifier (or enter 'DONE' to exit): ")
    x = input()
    if x != "DONE" and x in acceptable_inputs:
        a = False
        #matching_products = [p for p in products if str(p["id"])==x]
        #matching_product = matching_products[0]
        #total_price = total_price + matching_product["price"]
        # Only record the id here; prices are resolved at receipt time.
        selected_ids.append(x)
        #print("..." + matching_product["name"] + "(" + str(matching_product["price"])+ ")")
        #print(type(x))
    elif x == "DONE":
        a = True
    else:
        print("I'm sorry, that is not a valid selection, please try again")
#print("Total Price: ", total_price)
#print(selected_products) i did this to make sure that the list was being properly appended
#breakpoint()
# time delay help from https://www.cyberciti.biz/faq/python-sleep-command-syntax-example/
time.sleep(1)
print(" ")
print("Here is your receipt")
time.sleep(1)
print(".")
time.sleep(1)
print("..")
time.sleep(1)
print("...")
time.sleep(1)
print(" ")
print("--------------------------------------")
print(" ")
print("RichieBubbs Grocery Emporium")
print("WWW.RICHIEBUBBS-GROCERY-EMPORIUM.COM")
print(" ")
print("--------------------------------------")
# for date time I got some help from https://www.saltycrane.com/blog/2008/06/how-to-get-current-date-and-time-in/
# and for formatting: https://stackoverflow.com/questions/415511/how-to-get-the-current-time-in-python
# https://stackoverflow.com/questions/31487732/simple-way-to-drop-milliseconds-from-python-datetime-datetime-object
now = datetime.datetime.now().replace(microsecond=0)
print("CHECKOUT AT: ", now)
print(" ")
print("--------------------------------------")
print(" ")
print("SELECTED PRODUCTS:")
# Compute subtotal/tax/total.  For an empty cart the figures are zeroed
# as floats, so the receipt prints e.g. "$0.0" rather than "$0.00" --
# NOTE(review): likely cosmetic bug; also ttl_price_usd is assigned twice.
if selected_ids == []:
    total_price = 0.00
    tax = 0.00
    grand_ttl_price_usd = 0.00
    ttl_price_usd = 0.00
    tax_price_usd = 0.00
    ttl_price_usd = 0.00
else:
    for y in selected_ids:
        # Look the id back up in the product catalog (ids are unique).
        matching_products = [p for p in products if str(p["id"])==y]
        matching_product = matching_products[0]
        #price_usd = "{0:.2f}".format(matching_product["price"])
        price_usd = "{0:.2f}".format(matching_product["price"])
        total_price = total_price + matching_product["price"]
        ttl_price_usd = "{0:.2f}".format(total_price)
        print("..." + matching_product["name"] + "($" + str(price_usd)+ ")")
    # 8.875% tax rate -- presumably the NYC combined sales tax; confirm.
    tax = total_price * 0.08875
    tax_price_usd = "{0:.2f}".format(tax)
    grand_ttl = total_price + tax
    grand_ttl_price_usd = "{0:.2f}".format(grand_ttl)
print("--------------------------------------")
print(" ")
print("SUBTOTAL: $" + str(ttl_price_usd))
print("TAX: $" + str(tax_price_usd))
print("TOTAL: $" + str(grand_ttl_price_usd))
print(" ")
print("--------------------------------------")
print("THANK YOU, COME AGAIN!")
print("--------------------------------------")
#for y in selected_products:
# matching_products_name = [p["name"] for p in products if p["id"]==y]
# matching_products_price =[p['price'] for p in products if p['id']==y]
#print(final_product_selection, final_product_price)
#for p in selected_products:
# print("..." + products["id"] == p)
#> ---------------------------------
#> GREEN FOODS GROCERY
#> WWW.GREEN-FOODS-GROCERY.COM
#> ---------------------------------
#> CHECKOUT AT: 2019-06-06 11:31 AM
#> ---------------------------------
#> SELECTED PRODUCTS:
#> ... Chocolate Sandwich Cookies ($3.50)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Dry Nose Oil ($21.99)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Mint Chocolate Flavored Syrup ($4.50)
#> ... Chocolate Fudge Layer Cake ($18.50)
#> ---------------------------------
#> SUBTOTAL: $61.24
#> TAX: $5.35
#> TOTAL: $66.59
#> ---------------------------------
#> THANKS, SEE YOU AGAIN SOON!
#> ---------------------------------
#print(products_list_csv)
| 44.186047 | 158 | 0.586579 | # shopping_cart.py
#from pprint import pprint
import pandas as pd
import datetime
import time
products = [
{"id":1, "name": "Chocolate Sandwich Cookies", "department": "snacks", "aisle": "cookies cakes", "price": 3.50},
{"id":2, "name": "All-Seasons Salt", "department": "pantry", "aisle": "spices seasonings", "price": 4.99},
{"id":3, "name": "Robust Golden Unsweetened Oolong Tea", "department": "beverages", "aisle": "tea", "price": 2.49},
{"id":4, "name": "Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce", "department": "frozen", "aisle": "frozen meals", "price": 6.99},
{"id":5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
{"id":6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
{"id":7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
{"id":8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25},
{"id":9, "name": "Light Strawberry Blueberry Yogurt", "department": "dairy eggs", "aisle": "yogurt", "price": 6.50},
{"id":10, "name": "Sparkling Orange Juice & Prickly Pear Beverage", "department": "beverages", "aisle": "water seltzer sparkling water", "price": 2.99},
{"id":11, "name": "Peach Mango Juice", "department": "beverages", "aisle": "refrigerated", "price": 1.99},
{"id":12, "name": "Chocolate Fudge Layer Cake", "department": "frozen", "aisle": "frozen dessert", "price": 18.50},
{"id":13, "name": "Saline Nasal Mist", "department": "personal care", "aisle": "cold flu allergy", "price": 16.00},
{"id":14, "name": "Fresh Scent Dishwasher Cleaner", "department": "household", "aisle": "dish detergents", "price": 4.99},
{"id":15, "name": "Overnight Diapers Size 6", "department": "babies", "aisle": "diapers wipes", "price": 25.50},
{"id":16, "name": "Mint Chocolate Flavored Syrup", "department": "snacks", "aisle": "ice cream toppings", "price": 4.50},
{"id":17, "name": "Rendered Duck Fat", "department": "meat seafood", "aisle": "poultry counter", "price": 9.99},
{"id":18, "name": "Pizza for One Suprema Frozen Pizza", "department": "frozen", "aisle": "frozen pizza", "price": 12.50},
{"id":19, "name": "Gluten Free Quinoa Three Cheese & Mushroom Blend", "department": "dry goods pasta", "aisle": "grains rice dried goods", "price": 3.99},
{"id":20, "name": "Pomegranate Cranberry & Aloe Vera Enrich Drink", "department": "beverages", "aisle": "juice nectars", "price": 4.25}
] # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017
#print(products)
# pprint(products)
# TODO: write some Python code here to produce the desired output
products_list_csv = pd.read_csv('/Users/richiebubbs/Downloads/GitHub/shopping-cart/data/products.csv')
acceptable_inputs = [str(i["id"]) for i in products]
selected_products = []
#print(acceptable_inputs)
#I constructed this while loop with help from https://realpython.com/python-while-loop/
# I reconstructed the loop with some help form your screencast when I got stuck...
#https://www.youtube.com/watch?v=3BaGb-1cIr0&feature=youtu.be
total_price = 0
selected_ids = []
a = False
while not a:
print("Please enter a product identifier (or enter 'DONE' to exit): ")
x = input()
if x != "DONE" and x in acceptable_inputs:
a = False
#matching_products = [p for p in products if str(p["id"])==x]
#matching_product = matching_products[0]
#total_price = total_price + matching_product["price"]
selected_ids.append(x)
#print("..." + matching_product["name"] + "(" + str(matching_product["price"])+ ")")
#print(type(x))
elif x == "DONE":
a = True
else:
print("I'm sorry, that is not a valid selection, please try again")
#print("Total Price: ", total_price)
#print(selected_products) i did this to make sure that the list was being properly appended
#breakpoint()
# time delay help from https://www.cyberciti.biz/faq/python-sleep-command-syntax-example/
time.sleep(1)
print(" ")
print("Here is your receipt")
time.sleep(1)
print(".")
time.sleep(1)
print("..")
time.sleep(1)
print("...")
time.sleep(1)
print(" ")
print("--------------------------------------")
print(" ")
print("RichieBubbs Grocery Emporium")
print("WWW.RICHIEBUBBS-GROCERY-EMPORIUM.COM")
print(" ")
print("--------------------------------------")
# for date time I got some help from https://www.saltycrane.com/blog/2008/06/how-to-get-current-date-and-time-in/
# and for formatting: https://stackoverflow.com/questions/415511/how-to-get-the-current-time-in-python
# https://stackoverflow.com/questions/31487732/simple-way-to-drop-milliseconds-from-python-datetime-datetime-object
now = datetime.datetime.now().replace(microsecond=0)
print("CHECKOUT AT: ", now)
print(" ")
print("--------------------------------------")
print(" ")
print("SELECTED PRODUCTS:")
if selected_ids == []:
total_price = 0.00
tax = 0.00
grand_ttl_price_usd = 0.00
ttl_price_usd = 0.00
tax_price_usd = 0.00
ttl_price_usd = 0.00
else:
for y in selected_ids:
matching_products = [p for p in products if str(p["id"])==y]
matching_product = matching_products[0]
#price_usd = "{0:.2f}".format(matching_product["price"])
price_usd = "{0:.2f}".format(matching_product["price"])
total_price = total_price + matching_product["price"]
ttl_price_usd = "{0:.2f}".format(total_price)
print("..." + matching_product["name"] + "($" + str(price_usd)+ ")")
tax = total_price * 0.08875
tax_price_usd = "{0:.2f}".format(tax)
grand_ttl = total_price + tax
grand_ttl_price_usd = "{0:.2f}".format(grand_ttl)
print("--------------------------------------")
print(" ")
print("SUBTOTAL: $" + str(ttl_price_usd))
print("TAX: $" + str(tax_price_usd))
print("TOTAL: $" + str(grand_ttl_price_usd))
print(" ")
print("--------------------------------------")
print("THANK YOU, COME AGAIN!")
print("--------------------------------------")
#for y in selected_products:
# matching_products_name = [p["name"] for p in products if p["id"]==y]
# matching_products_price =[p['price'] for p in products if p['id']==y]
#print(final_product_selection, final_product_price)
#for p in selected_products:
# print("..." + products["id"] == p)
#> ---------------------------------
#> GREEN FOODS GROCERY
#> WWW.GREEN-FOODS-GROCERY.COM
#> ---------------------------------
#> CHECKOUT AT: 2019-06-06 11:31 AM
#> ---------------------------------
#> SELECTED PRODUCTS:
#> ... Chocolate Sandwich Cookies ($3.50)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Dry Nose Oil ($21.99)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Cut Russet Potatoes Steam N' Mash ($4.25)
#> ... Mint Chocolate Flavored Syrup ($4.50)
#> ... Chocolate Fudge Layer Cake ($18.50)
#> ---------------------------------
#> SUBTOTAL: $61.24
#> TAX: $5.35
#> TOTAL: $66.59
#> ---------------------------------
#> THANKS, SEE YOU AGAIN SOON!
#> ---------------------------------
#print(products_list_csv)
| 0 | 0 | 0 |
cff5ac77360e298988b3ae7a3c20502f8a1aeb34 | 6,168 | py | Python | setup.py | ptrbortolotti/WEIS | 1e4dbf6728050f75cee08cd483fe57c5614488fe | [
"Apache-2.0"
] | 26 | 2020-08-25T16:16:21.000Z | 2022-03-10T08:23:57.000Z | setup.py | ptrbortolotti/WEIS | 1e4dbf6728050f75cee08cd483fe57c5614488fe | [
"Apache-2.0"
] | 90 | 2020-08-24T23:02:47.000Z | 2022-03-29T13:48:15.000Z | setup.py | ptrbortolotti/WEIS | 1e4dbf6728050f75cee08cd483fe57c5614488fe | [
"Apache-2.0"
] | 25 | 2020-08-24T19:28:24.000Z | 2022-01-27T21:17:37.000Z | import os
import sys
import platform
import multiprocessing
from distutils.core import run_setup
from setuptools import find_packages
from numpy.distutils.command.build_ext import build_ext
from numpy.distutils.core import setup, Extension
from io import open
# Global constants
ncpus = multiprocessing.cpu_count()
this_directory = os.path.abspath(os.path.dirname(__file__))
# Eagle environment
eagle_nodes = ['el'+str(m) for m in range(10)] + ['ed'+str(m) for m in range(10)]
eagle_flag = platform.node() in eagle_nodes
ci_flag = platform.node().find('fv-az') >= 0
if eagle_flag:
os.environ["FC"] = "ifort"
os.environ["CC"] = "icc"
os.environ["CXX"] = "icpc"
os.environ["LDSHARED"] = "icc -pthread -shared"
# For the CMake Extensions
# All of the extensions
fastExt = CMakeExtension('openfast','OpenFAST')
roscoExt = CMakeExtension('rosco','ROSCO')
extList = [roscoExt] if platform.system() == "Windows" else [roscoExt, fastExt]
# Setup content
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
CLASSIFIERS = '''
Development Status :: 1 - Planning
Intended Audience :: Science/Research
Intended Audience :: Developers
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: Unix
Operating System :: MacOS
'''
weis_pkgs = find_packages()
# Install the python sub-packages
print(sys.argv)
for pkg in ['WISDEM','ROSCO_toolbox','pCrunch','pyHAMS','MoorPy','RAFT','pyoptsparse']:
os.chdir(pkg)
if pkg == 'pyoptsparse':
# Build pyOptSparse specially
run_setup('setup.py', script_args=['install'])
else:
run_setup('setup.py', script_args=sys.argv[1:], stop_after='run')
# subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."]) # This option runs `pip install -e .` on each package
os.chdir('..')
# Now install WEIS and the Fortran packages
metadata = dict(
name = 'WEIS',
version = '0.2',
description = 'Wind Energy with Integrated Servo-control',
long_description = long_description,
long_description_content_type = 'text/markdown',
author = 'NREL',
url = 'https://github.com/WISDEM/WEIS',
install_requires = ['openmdao>=3.4','numpy','scipy','nlopt','dill','smt','control','jsonmerge','fatpack'],
classifiers = [_f for _f in CLASSIFIERS.split('\n') if _f],
packages = weis_pkgs,
package_data = {'':['*.yaml','*.xlsx']},
python_requires = '>=3.6',
license = 'Apache License, Version 2.0',
ext_modules = extList,
cmdclass = {'build_ext': CMakeBuildExt},
zip_safe = False,
)
setup(**metadata)
| 38.074074 | 135 | 0.577335 | import os
import sys
import platform
import multiprocessing
from distutils.core import run_setup
from setuptools import find_packages
from numpy.distutils.command.build_ext import build_ext
from numpy.distutils.core import setup, Extension
from io import open
# Global constants
ncpus = multiprocessing.cpu_count()
this_directory = os.path.abspath(os.path.dirname(__file__))
# Eagle environment
eagle_nodes = ['el'+str(m) for m in range(10)] + ['ed'+str(m) for m in range(10)]
eagle_flag = platform.node() in eagle_nodes
ci_flag = platform.node().find('fv-az') >= 0
if eagle_flag:
os.environ["FC"] = "ifort"
os.environ["CC"] = "icc"
os.environ["CXX"] = "icpc"
os.environ["LDSHARED"] = "icc -pthread -shared"
# For the CMake Extensions
class CMakeExtension(Extension):
    """An Extension placeholder whose build is delegated to CMake (see
    CMakeBuildExt): it carries no source files, only the absolute path of
    the CMake project directory."""
    def __init__(self, name, sourcedir='', **kwa):
        # No sources: the real compilation happens via `cmake --build`.
        Extension.__init__(self, name, sources=[], **kwa)
        # Absolute path so the build works regardless of the current cwd.
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuildExt(build_ext):
    """build_ext subclass that hands CMakeExtension instances over to an
    external `cmake` invocation and builds every other extension the
    normal way."""
    def copy_extensions_to_source(self):
        """Exclude CMake-built extensions from the copy-back step (their
        artifacts are installed by `cmake --target install`, not by
        setuptools)."""
        newext = []
        for ext in self.extensions:
            if isinstance(ext, CMakeExtension): continue
            newext.append( ext )
        self.extensions = newext
        super().copy_extensions_to_source()
    def build_extension(self, ext):
        """Configure and build a CMakeExtension with cmake; defer to the
        parent class for ordinary extensions.

        Uses the module-level globals this_directory, ci_flag, eagle_flag
        and ncpus (defined near the top of this file)."""
        if isinstance(ext, CMakeExtension):
            # Ensure that CMake is present and working
            try:
                self.spawn(['cmake', '--version'])
            except OSError:
                raise RuntimeError('Cannot find CMake executable')
            # Everything installs under <repo>/local.
            localdir = os.path.join(this_directory, 'local')
            # CMAKE profiles default for all
            buildtype = 'RelWithDebInfo' # Hydrodyn has issues with Debug
            cmake_args = ['-DBUILD_SHARED_LIBS=OFF',
                          '-DDOUBLE_PRECISION:BOOL=OFF',
                          '-DCMAKE_POSITION_INDEPENDENT_CODE=ON',
                          '-DCMAKE_INSTALL_PREFIX='+localdir,
                          '-DCMAKE_BUILD_TYPE='+buildtype]
            # Upper-cased form feeds the per-config FLAGS_<CONFIG> vars below.
            buildtype = buildtype.upper()
            # Custom tuning
            mycompiler = self.compiler.compiler[0]
            if ci_flag:
                # CI: no optimization, keep debug info.
                tune = '-O0 -g' #-ffpe-trap=invalid,zero,overflow,underflow
            elif eagle_flag:
                # NREL Eagle cluster: Intel toolchain + OpenMP expected.
                tune = '-xSKYLAKE-AVX512'
                cmake_args += ['-DOPENMP=ON']
                try:
                    self.spawn(['ifort', '--version'])
                except OSError:
                    raise RuntimeError('Recommend loading intel compiler modules on Eagle (comp-intel, intel-mpi, mkl)')
            elif (mycompiler.find('ifort') >= 0 or mycompiler.find('icc') >= 0 or
                  mycompiler.find('icpc') >= 0):
                tune = '-xHost'
            else:
                tune = '-march=native -mtune=native'
            cmake_args += ['-DCMAKE_Fortran_FLAGS_'+buildtype+'='+tune,
                           '-DCMAKE_C_FLAGS_'+buildtype+'='+tune,
                           '-DCMAKE_CXX_FLAGS_'+buildtype+'='+tune]
            if platform.system() == 'Windows':
                cmake_args += ['-DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=TRUE']
                if self.compiler.compiler_type == 'msvc':
                    cmake_args += ['-DCMAKE_GENERATOR_PLATFORM=x64']
                else:
                    cmake_args += ['-G', 'MinGW Makefiles']
            # One build dir per extension so configurations don't collide.
            self.build_temp += '_'+ext.name
            os.makedirs(localdir, exist_ok=True)
            # Need fresh build directory for CMake
            os.makedirs(self.build_temp, exist_ok=True)
            self.spawn(['cmake','-S', ext.sourcedir, '-B', self.build_temp] + cmake_args)
            self.spawn(['cmake', '--build', self.build_temp, '-j', str(ncpus), '--target', 'install', '--config', buildtype])
        else:
            super().build_extension(ext)
# All of the extensions
fastExt = CMakeExtension('openfast','OpenFAST')
roscoExt = CMakeExtension('rosco','ROSCO')
extList = [roscoExt] if platform.system() == "Windows" else [roscoExt, fastExt]
# Setup content
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
CLASSIFIERS = '''
Development Status :: 1 - Planning
Intended Audience :: Science/Research
Intended Audience :: Developers
Programming Language :: Python
Programming Language :: Python :: 3
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: Unix
Operating System :: MacOS
'''
weis_pkgs       = find_packages()
# Install the python sub-packages
# Each vendored dependency is installed by running its own setup.py with
# the current working directory switched into it.
print(sys.argv)
for pkg in ['WISDEM','ROSCO_toolbox','pCrunch','pyHAMS','MoorPy','RAFT','pyoptsparse']:
    os.chdir(pkg)
    if pkg == 'pyoptsparse':
        # Build pyOptSparse specially
        run_setup('setup.py', script_args=['install'])
    else:
        # Forward this invocation's own args (e.g. install/develop).
        run_setup('setup.py', script_args=sys.argv[1:], stop_after='run')
    # subprocess.check_call([sys.executable, "-m", "pip", "install", "-e", "."]) # This option runs `pip install -e .` on each package
    os.chdir('..')
# Now install WEIS and the Fortran packages
metadata = dict(
name = 'WEIS',
version = '0.2',
description = 'Wind Energy with Integrated Servo-control',
long_description = long_description,
long_description_content_type = 'text/markdown',
author = 'NREL',
url = 'https://github.com/WISDEM/WEIS',
install_requires = ['openmdao>=3.4','numpy','scipy','nlopt','dill','smt','control','jsonmerge','fatpack'],
classifiers = [_f for _f in CLASSIFIERS.split('\n') if _f],
packages = weis_pkgs,
package_data = {'':['*.yaml','*.xlsx']},
python_requires = '>=3.6',
license = 'Apache License, Version 2.0',
ext_modules = extList,
cmdclass = {'build_ext': CMakeBuildExt},
zip_safe = False,
)
setup(**metadata)
| 2,975 | 21 | 126 |
b4053b7777cfbdc0dd4b8bc93525c9b79ceb4853 | 8,987 | py | Python | player.py | czatom/raspberry-pi-rfid-jukebox | d89b8438c50c1efa079ed9953259e665ee0e8c6f | [
"MIT"
] | null | null | null | player.py | czatom/raspberry-pi-rfid-jukebox | d89b8438c50c1efa079ed9953259e665ee0e8c6f | [
"MIT"
] | null | null | null | player.py | czatom/raspberry-pi-rfid-jukebox | d89b8438c50c1efa079ed9953259e665ee0e8c6f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Tomasz Czaja'
__version__ = '0.0.1'
import sys
import time
from pathlib import Path
import signal
import RPi.GPIO as GPIO
from PIL import Image, ImageDraw, ImageFont
from ST7789 import ST7789
from audioplayer import AudioPlayer
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Console driver: song keys are typed instead of scanned from RFID.
    jukebox = RfidJukebox()
    while True:
        try:
            value = input("Enter song key:\n")
            if value.isdigit():
                # play_song is defined elsewhere in the class; value stays
                # a string here -- presumably it is a FILES key; confirm.
                jukebox.play_song(value)
            time.sleep(0.3)
        except KeyboardInterrupt:
            # Ctrl-C: stop any active playback before exiting.
            if jukebox.player:
                jukebox.player.stop()
            print("Bye")
            sys.exit()
| 36.237903 | 108 | 0.586291 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Tomasz Czaja'
__version__ = '0.0.1'
import sys
import time
from pathlib import Path
import signal
import RPi.GPIO as GPIO
from PIL import Image, ImageDraw, ImageFont
from ST7789 import ST7789
from audioplayer import AudioPlayer
class RfidJukebox(object):
    """MP3 jukebox for a Raspberry Pi fitted with a Pimoroni Pirate Audio HAT.

    Songs are looked up by key (intended to be RFID tag IDs) in FILES and
    played through AudioPlayer; track and volume feedback is drawn on the
    HAT's ST7789 240x240 display.  The four HAT buttons map to: A = previous
    track, B = volume down, X = next track, Y = volume up.
    """
    # Hardcoded list of files
    # Maps a selection key (e.g. an RFID tag ID typed or scanned) to an MP3
    # filename expected under /home/pi/Music/.
    FILES = {
        '3373707988': "07. Dans Les Jardins de Baya.mp3",
        '1': "01. Awaya Baka.mp3",
        '2': "02. Braighe Locheil (The Brais of Loch Eil).mp3"
    }
    SPI_SPEED_MHZ = 80
    # Display driver instance for the 240x240 ST7789 screen (class-level,
    # created once at import time).
    _st7789 = ST7789(
        rotation=90, # Needed to display the right way up on Pirate Audio
        port=0, # SPI port
        cs=1, # SPI port Chip-select channel
        dc=9, # BCM pin used for data/command
        backlight=13,
        spi_speed_hz=SPI_SPEED_MHZ * 1000 * 1000
    )
    # The buttons on Pirate Audio are connected to pins 5, 6, 16 and 24
    # Boards prior to 23 January 2020 used 5, 6, 16 and 20
    # try changing 24 to 20 if your Y button doesn't work.
    BUTTONS = [5, 6, 16, 24]
    # These correspond to buttons A, B, X and Y respectively
    LABELS = ['A', 'B', 'X', 'Y']
    # Stuff for drawing on screen
    _image = None
    _draw = None
    _font = None
    @property
    def font(self):
        """Font used for all on-screen text."""
        return self._font
    @font.setter
    def font(self, new_font):
        self._font = new_font
    # Player settings
    _last_selected_key = None
    _min_volume = 0
    _max_volume = 100
    _volume = 50
    @property
    def volume(self):
        """Current volume level (0-100); applied to newly created players."""
        return self._volume
    @volume.setter
    def volume(self, new_volume):
        self._volume = new_volume
    _player = None
    @property
    def player(self):
        """The active AudioPlayer instance, or None before first playback."""
        return self._player
    def __init__(self):
        """
        Set up GPIO button handlers, the drawing surface and the splash screen.
        :return: void
        """
        # Set up RPi.GPIO with the "BCM" numbering scheme
        GPIO.setmode(GPIO.BCM)
        # Buttons connect to ground when pressed, so we should set them up
        # with a "PULL UP", which weakly pulls the input signal to 3.3V.
        GPIO.setup(self.BUTTONS, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        # Loop through out buttons and attach the "handle_button" function to each
        # We're watching the "FALLING" edge (transition from 3.3V to Ground) and
        # picking a generous bouncetime of 100ms to smooth out button presses.
        for pin in self.BUTTONS:
            GPIO.add_event_detect(pin, GPIO.FALLING, self._handle_button, bouncetime=100)
        # Get initial value - first in the dictionary
        self._last_selected_key = list(self.FILES.keys())[0]
        # Set image and draw objects
        self._image = Image.new("RGB", (240, 240), (0, 0, 0))
        self._draw = ImageDraw.Draw(self._image)
        # Set font type and size
        self._font = ImageFont.truetype("/home/pi/Fonts/FreeMono.ttf", 42)
        # Draw default background
        self._draw_background()
        # Splash screen: centered "version" label plus the version number.
        label_length = self._font.getsize('version')[0]
        label_x_pos = int(round(240 / 2 - label_length / 2))
        self._draw.text((label_x_pos, 100), 'version', font=self.font, fill=(255, 255, 255, 255))
        label_length = self._font.getsize(str(__version__))[0]
        label_x_pos = int(round(240 / 2 - label_length / 2))
        self._draw.text((label_x_pos, 135), __version__, font=self.font, fill=(255, 255, 255, 255))
        self._st7789.display(self._image)
    def _get_previous_key(self):
        """Return the FILES key before the last-selected one (wraps to last)."""
        temp = list(self.FILES.keys())
        try:
            # Note: index 0 - 1 == -1, which wraps to the last key without
            # raising, so the IndexError branch is rarely reached here.
            key = temp[temp.index(self._last_selected_key) - 1]
        except (ValueError, IndexError):
            # If no more keys - use last one - this is probably obsolete
            key = temp[-1]
        return key
    def _get_next_key(self):
        """Return the FILES key after the last-selected one (wraps to first)."""
        temp = list(self.FILES.keys())
        try:
            key = temp[temp.index(self._last_selected_key) + 1]
        except (ValueError, IndexError):
            # If no more keys - use first one
            key = temp[0]
        return key
    def _draw_background(self):
        """Paint the cover art (if available) and the prev/next arrow glyphs."""
        self._draw.rectangle((0, 0, 240, 240), (0, 0, 0)) # Draw background
        # Draw related _image if exists
        if self._player and self._last_selected_key:
            picture = Path(f'/home/pi/Pictures/{self._last_selected_key}.jpg')
            if picture.is_file():
                with Image.open(str(picture)) as im:
                    im_resized = im.resize((240, 240))
                    self._image.paste(im_resized)
        # Left navigation button
        self._draw.polygon([(25, 20), (10, 30), (25, 40)], fill=(0x60, 0x60, 0x60), outline=(255, 255, 255))
        self._draw.polygon([(40, 20), (25, 30), (40, 40)], fill=(0x60, 0x60, 0x60), outline=(255, 255, 255))
        # Right navigation button
        self._draw.polygon([(240 - 25, 20), (240 - 10, 30), (240 - 25, 40)], fill=(0x60, 0x60, 0x60),
                           outline=(255, 255, 255))
        self._draw.polygon([(240 - 40, 20), (240 - 25, 30), (240 - 40, 40)], fill=(0x60, 0x60, 0x60),
                           outline=(255, 255, 255))
    def play_song(self, key):
        """Play the song registered under *key*, replacing current playback.

        Unknown keys and missing files are silently ignored.
        """
        if key in self.FILES:
            audio_file = Path(f'/home/pi/Music/{self.FILES[key]}')
            if audio_file.is_file():
                # Stop _player if running
                if self._player:
                    self._player.stop()
                # Play audio file
                print(f"Playing {audio_file.name} ({key})")
                self._last_selected_key = key
                self._draw_background()
                self._draw.text((100, 140), str(key), font=self.font, fill=(255, 255, 255, 255))
                self._st7789.display(self._image)
                # NOTE(review): the player gets a relative "Music/..." path while
                # existence was checked under /home/pi/Music/ -- this only works
                # when the process CWD is /home/pi. Confirm or make absolute.
                self._player = AudioPlayer(f"Music/{audio_file.name}")
                self._player.volume = self._volume
                self._player.play()
    def play_next_song(self):
        """Advance to and play the next song in FILES order."""
        next_key = self._get_next_key()
        self.play_song(next_key)
    def play_previous_song(self):
        """Go back to and play the previous song in FILES order."""
        prev_key = self._get_previous_key()
        self.play_song(prev_key)
    def _draw_volume_indicators(self, new_volume):
        """Redraw the screen with a centered volume number and a volume bar."""
        self._draw_background()
        label_length = self._font.getsize(str(new_volume))[0]
        label_x_pos = int(round(240 / 2 - label_length / 2)) # Volume label start pos
        self._draw.text((label_x_pos, 140), str(new_volume), font=self._font,
                        fill=(255, 255, 255, 255)) # Draw _volume label
        # Bar width scales linearly from x=10 (0%) to x=230 (100%).
        volume_bar_x = int(round(10 + (220 * new_volume / self._max_volume)))
        self._draw.rectangle((10, 200, volume_bar_x, 210), (0x30, 0x30, 0x30)) # Draw _volume bar
        self._st7789.display(self._image)
    # "handle_button" will be called every time a button is pressed
    # It receives one argument: the associated input pin.
    def _handle_button(self, pin):
        """GPIO callback: dispatch A/B/X/Y presses to track/volume actions.

        NOTE(review): the B (volume down) and Y (volume up) branches are
        near-duplicates and could share a single _change_volume(delta) helper.
        """
        label = self.LABELS[self.BUTTONS.index(pin)]
        print("Button press detected on pin: {} label: {}".format(pin, label))
        if label == 'B':
            # Decrease volume
            new_volume = self._volume - 10
            if new_volume < self._min_volume:
                new_volume = self._min_volume
            self._volume = new_volume # Store _volume for new instances of _player
            # Draw value and _volume bar
            self._draw_volume_indicators(new_volume)
            # Set new volume for player
            if self._player:
                self._player.volume = new_volume
        elif label == 'Y':
            # Increase volume
            new_volume = self._volume + 10
            if new_volume > self._max_volume:
                new_volume = self._max_volume
            self._volume = new_volume # Store _volume for new instances of _player
            # Draw value and _volume bar
            self._draw_volume_indicators(new_volume)
            # Set new volume for player
            if self._player:
                self._player.volume = new_volume
        elif label == 'A':
            # Play previous song
            self.play_previous_song()
            message = "Prev song"
            self._draw_background()
            self._draw.text((10, 140), message, font=self._font, fill=(255, 255, 255, 255))
            self._st7789.display(self._image)
        elif label == 'X':
            # Play next song
            self.play_next_song()
            message = "Next song"
            self._draw_background()
            self._draw.text((10, 140), message, font=self._font, fill=(255, 255, 255, 255))
            self._st7789.display(self._image)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Interactive fallback: type a numeric song key (matching a FILES key)
    # instead of scanning an RFID tag.
    jukebox = RfidJukebox()
    while True:
        try:
            value = input("Enter song key:\n")
            if value.isdigit():
                jukebox.play_song(value)
            time.sleep(0.3)
        except KeyboardInterrupt:
            # Ctrl-C: stop any active playback cleanly before exiting.
            if jukebox.player:
                jukebox.player.stop()
            print("Bye")
            sys.exit()
| 4,916 | 3,314 | 23 |
80bacaf4db0e0536c9ceb0109bd8d31aa08769f4 | 2,558 | py | Python | StreamPy/StreamPy-UI/src/root/nested/run.py | AnomalyInc/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | 2 | 2017-04-27T11:04:27.000Z | 2019-02-07T21:03:32.000Z | StreamPy/StreamPy-UI/src/root/nested/run.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | StreamPy/StreamPy-UI/src/root/nested/run.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | '''
This module parses the input arguments and extracts the necessary
data structures from it, then calls the appropriate functions to
process it.
'''
import sys
import getpass
from Subgraph import *
from Multiprocessing import *
def dispatch(json_file_name):
    '''
    Looks at input JSON file and determines
    which functions should be called to
    process it.
    Parameters
    ----------
    json_file_name : str
        Path to JSON file to be executed
    Returns
    -------
    None
    '''
    # Normalize the input into the agent-descriptor JSON format.
    descriptor_path = make_json(json_file_name)
    # Load the descriptor dictionary back from disk.
    with open(descriptor_path) as fh:
        descriptor = json.load(fh)
    if 'groups' in descriptor:
        # Groups present: sort components into processes and run them
        # with multiprocessing.
        process_map = parallel_dict(descriptor)
        run_parallel(process_map)
    else:
        # No groups: flatten any nested subgraphs, then animate the result.
        make_js(unwrap_subgraph(descriptor_path))
###################################################
# If you're running from an IDE...
# Simple example with parameter arguments
var1 = 'JSON/multiplyparam.json'
# Example of an input JSON file that is already in the
# special agent descriptor dict format
var2 = 'JSON/agent_descriptor.json'
# Simple nested subgraph example
var3 = 'JSON/simplesubgraph.json'
# Graph with 3 nested subgraphs
var4 = 'JSON/doublenested.json'
# Multiprocessing example. Doesn't work yet!!
var5 = 'JSON/simplegroups.json'
# UNCOMMENT the following 3 lines to be prompted
# for a JSON file name at each run
# var = raw_input("Please enter path of JSON: ")
# var = str(var)
# dispatch(var)
# UNCOMMENT the following line to run the same
# file each run, replacing 'var1' with the
# path to the file you want
# dispatch(var1)
###################################################
# If you're running from terminal:
# Usage: navigate into the directory with this file
# type: python run.py NAME_OF_JSON_FILE
# Build the current user's Downloads directory path for the detected OS;
# the CLI argument is treated as a filename inside that directory.
user_os = sys.platform
user_name = getpass.getuser()
if user_os == 'darwin':
    path = '/Users/' + user_name + '/Downloads/'
elif user_os[:3] == 'win':
    path = 'C:/Users/' + user_name + '/Downloads/'
elif 'linux' in user_os:
    path = '/home/' + user_name + '/Downloads/'
else:
    # Unknown platform: fall back to treating the argument as a plain path.
    path = ''
# NOTE(review): raises IndexError when run with no CLI argument -- confirm
# whether a usage message is wanted here.
var = sys.argv
fullpath = path + var[1]
dispatch(fullpath)
| 24.361905 | 65 | 0.661454 | '''
This module parses the input arguments and extracts the necessary
data structures from it, then calls the appropriate functions to
process it.
'''
import sys
import getpass
from Subgraph import *
from Multiprocessing import *
def dispatch(json_file_name):
'''
Looks at input JSON file and determines
which functions should be called to
process it.
Parameters
----------
json_file_name : str
Path to JSON file to be executed
Returns
-------
None
'''
# Convert JSON to my format
agent_dict_json = make_json(json_file_name)
# Extract the dictionary from JSON
with open(agent_dict_json) as data_file:
json_data = json.load(data_file)
# Case 1: No groups -> no parallel processing
if 'groups' not in json_data.keys():
# First expose nested subgraphs
agent_dict_json = unwrap_subgraph(agent_dict_json)
# Then animate it
make_js(agent_dict_json)
# Case 2: Has groups -> parallel processing
else:
# Sort components into indicated processes
big_dict = parallel_dict(json_data)
# Then execute using multiprocessing
run_parallel(big_dict)
###################################################
# If you're running from an IDE...
# Simple example with parameter arguments
var1 = 'JSON/multiplyparam.json'
# Example of an input JSON file that is already in the
# special agent descriptor dict format
var2 = 'JSON/agent_descriptor.json'
# Simple nested subgraph example
var3 = 'JSON/simplesubgraph.json'
# Graph with 3 nested subgraphs
var4 = 'JSON/doublenested.json'
# Multiprocessing example. Doesn't work yet!!
var5 = 'JSON/simplegroups.json'
# UNCOMMENT the following 3 lines to be prompted
# for a JSON file name at each run
# var = raw_input("Please enter path of JSON: ")
# var = str(var)
# dispatch(var)
# UNCOMMENT the following line to run the same
# file each run, replacing 'var1' with the
# path to the file you want
# dispatch(var1)
###################################################
# If you're running from terminal:
# Usage: navigate into the directory with this file
# type: python run.py NAME_OF_JSON_FILE
user_os = sys.platform
user_name = getpass.getuser()
if user_os == 'darwin':
path = '/Users/' + user_name + '/Downloads/'
elif user_os[:3] == 'win':
path = 'C:/Users/' + user_name + '/Downloads/'
elif 'linux' in user_os:
path = '/home/' + user_name + '/Downloads/'
else:
path = ''
var = sys.argv
fullpath = path + var[1]
dispatch(fullpath)
| 0 | 0 | 0 |
83e8fd1f77501df7be5bc7b460716f859e343762 | 65,111 | py | Python | dbReports/iondb/rundb/data/views.py | sequencer2014/TS | 465804570349d46b47c1bdf131bdafea5c582dee | [
"Apache-2.0"
] | null | null | null | dbReports/iondb/rundb/data/views.py | sequencer2014/TS | 465804570349d46b47c1bdf131bdafea5c582dee | [
"Apache-2.0"
] | null | null | null | dbReports/iondb/rundb/data/views.py | sequencer2014/TS | 465804570349d46b47c1bdf131bdafea5c582dee | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2012 Ion Torrent Systems, Inc. All Rights Reserved
from django import http, template
from django.core import urlresolvers
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from django.core.cache import cache
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.servers.basehttp import FileWrapper
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.models import model_to_dict
import json
import cStringIO
import csv
import os
import tempfile
import shutil
import subprocess
import glob
import time
import traceback
from django.views.generic import ListView
from iondb.rundb.models import (
Experiment,
Results,
Project,
Location,
ReportStorage,
EventLog, GlobalConfig, ReferenceGenome, dnaBarcode, KitInfo, ContentType, Plugin, ExperimentAnalysisSettings, Sample,
DMFileSet, DMFileStat, FileServer, IonMeshNode, Chip)
from iondb.rundb.api import CompositeExperimentResource, ProjectResource
from iondb.rundb.report.analyze import build_result
from iondb.rundb.report.views import _report_started
from iondb.rundb.report import file_browse
from iondb.rundb import forms
from iondb.anaserve import client
from iondb.rundb.data import dmactions_types
from iondb.rundb.data import tasks as dmtasks
from iondb.rundb.data import dmactions
from iondb.rundb.data.data_management import update_files_in_use
from iondb.rundb.data import exceptions as DMExceptions
from iondb.rundb.data.data_import import find_data_to_import, data_import
from iondb.utils.files import get_disk_attributes_gb, is_mounted
from iondb.rundb.data.dmfilestat_utils import dm_category_stats, get_keepers_diskspace
from django.http import HttpResponse, HttpResponseServerError, HttpResponseNotFound
from datetime import datetime
import logging
from django.core.urlresolvers import reverse
from django.db.models.query_utils import Q
from urllib import unquote_plus
logger = logging.getLogger(__name__)
@login_required
@login_required
def data(request):
"""This is a the main entry point to the Data tab."""
context = cache.get("data_tab_context")
if context is None:
context = data_context(request)
cache.set("data_tab_context", context, 29)
return render(request, "rundb/data/data.html", context)
class ExperimentListView(ListView):
"""This is a class based view using the Django ListView generic view.
It shows Experiment objects and data from their representative report.
"""
queryset = Experiment.objects.select_related(
"repResult", "repResult__qualitymetrics", "repResult__eas"
).exclude(repResult=None).order_by('-repResult__timeStamp')
template_name = "rundb/data/fast.html"
paginate_by = 30
class ResultsListView(ListView):
"""This ListView shows Results objects and is meant to be quick and light weight
"""
queryset = Results.objects.select_related(
"experiment", "qualitymetrics", "eas"
).order_by('-timeStamp')
template_name = "rundb/data/results_list.html"
paginate_by = 30
@login_required
@login_required
def dm_action_selected(request, results_pks, action):
    '''
    Validate and launch a data-management *action* (export/archive/delete...)
    on one or more reports, as a background celery task group.

    file categories to process: data['categories']
    user log entry comment: data['comment']
    results_pks could contain more than 1 result

    Returns JSON; a {'warning': ...} payload asks the client to confirm and
    re-POST with data['confirmed'] set.
    '''
    logger = logging.getLogger('data_management')
    data = json.loads(request.body)
    logger.info("dm_action_selected: request '%s' on report(s): %s" % (action, results_pks))
    '''
    organize the dmfilestat objects by result_id, we make multiple dbase queries
    but it keeps them organized. Most times, this will be a single query anyway.
    '''
    dmfilestat_dict = {}
    try:
        # update any dmfilestats in use by running analyses
        update_files_in_use()
        # 'default' means: let the action pick its configured destination.
        backup_directory = data['backup_dir'] if data['backup_dir'] != 'default' else None
        for resultPK in results_pks.split(','):
            logger.debug("Matching dmfilestats contain %s reportpk" % resultPK)
            dmfilestat_dict[resultPK] = DMFileStat.objects.select_related() \
                .filter(dmfileset__type__in=data['categories'], result__id=int(resultPK))
            for dmfilestat in dmfilestat_dict[resultPK]:
                # validate export/archive destination folders
                if action in ['export', 'archive']:
                    dmactions.destination_validation(dmfilestat, backup_directory, manual_action=True)
                # validate files not in use
                try:
                    dmactions.action_validation(dmfilestat, action, data['confirmed'])
                except DMExceptions.FilesInUse as e:
                    # warn if exporting files currently in use, allow to proceed if confirmed
                    if action == 'export':
                        if not data['confirmed']:
                            return HttpResponse(json.dumps({'warning': str(e) + '<br>Exporting now may produce incomplete data set.'}), mimetype="application/json")
                    else:
                        raise e
                except DMExceptions.BaseInputLinked as e:
                    # warn if deleting basecaller files used in any other re-analysis started from BaseCalling
                    # (when confirmed, this exception is swallowed and the action proceeds)
                    if not data['confirmed']:
                        return HttpResponse(json.dumps({'warning': str(e)}), mimetype="application/json")
                # warn if archiving data marked Keep
                if action == 'archive' and dmfilestat.getpreserved():
                    if not data['confirmed']:
                        return HttpResponse(json.dumps({'warning': '%s currently marked Keep.' % dmfilestat.dmfileset.type}), mimetype="application/json")
                    else:
                        dmfilestat.setpreserved(False)
                # if further processing an archived dataset, error if archive drive is not mounted
                if dmfilestat.isarchived() and not os.path.exists(dmfilestat.archivepath):
                    return HttpResponseServerError("%s archive location %s is not available." % (dmfilestat.dmfileset.type, dmfilestat.archivepath))
        # All validations passed: queue the whole action group asynchronously.
        async_task_result = dmtasks.action_group.delay(request.user.username, data[
            'categories'], action, dmfilestat_dict, data['comment'], backup_directory, data['confirmed'])
        if async_task_result:
            logger.debug(async_task_result)
    except DMExceptions.SrcDirDoesNotExist as e:
        # NOTE(review): 'dmfilestat' here is the inner loop variable; this relies
        # on SrcDirDoesNotExist only being raised from inside the per-file loop
        # (it would be unbound otherwise) -- confirm.
        dmfilestat.setactionstate('DD')
        msg = "Source directory %s no longer exists. Setting action_state to Deleted" % e.message
        logger.info(msg)
        EventLog.objects.add_entry(dmfilestat.result, msg, username=request.user.username)
    except Exception as e:
        logger.error("dm_action_selected: error: %s" % str(e))
        return HttpResponseServerError("%s" % str(e))
    test = {'pks': results_pks, 'action': action, 'data': data}
    return HttpResponse(json.dumps(test), mimetype="application/json")
@login_required
@permission_required('user.is_staff', raise_exception=True)
@login_required
def dm_list_files(request, resultPK, action):
    """Returns the list of files that are selected for the given file categories for the given Report"""
    request_data = json.loads(request.body)
    # Only the first matching DMFileStat is used, even if several categories match.
    stat = DMFileStat.objects.select_related().filter(
        dmfileset__type__in=request_data['categories'],
        result__id=int(resultPK),
    )[0]
    report_dir = stat.result.get_report_dir()
    # Hack - generate serialized json file for the DataXfer plugin
    dmactions.write_serialized_json(stat.result, report_dir)
    to_process, _to_keep = dmactions.get_file_list(stat)
    payload = json.dumps({
        'files_to_transfer': to_process,
        'start_dirs': [report_dir, stat.result.experiment.expDir],
    })
    return HttpResponse(payload, mimetype="application/json")
@login_required
| 42.225032 | 336 | 0.641059 | # Copyright (C) 2012 Ion Torrent Systems, Inc. All Rights Reserved
from django import http, template
from django.core import urlresolvers
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from django.core.cache import cache
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.servers.basehttp import FileWrapper
from django.core.serializers.json import DjangoJSONEncoder
from django.forms.models import model_to_dict
import json
import cStringIO
import csv
import os
import tempfile
import shutil
import subprocess
import glob
import time
import traceback
from django.views.generic import ListView
from iondb.rundb.models import (
Experiment,
Results,
Project,
Location,
ReportStorage,
EventLog, GlobalConfig, ReferenceGenome, dnaBarcode, KitInfo, ContentType, Plugin, ExperimentAnalysisSettings, Sample,
DMFileSet, DMFileStat, FileServer, IonMeshNode, Chip)
from iondb.rundb.api import CompositeExperimentResource, ProjectResource
from iondb.rundb.report.analyze import build_result
from iondb.rundb.report.views import _report_started
from iondb.rundb.report import file_browse
from iondb.rundb import forms
from iondb.anaserve import client
from iondb.rundb.data import dmactions_types
from iondb.rundb.data import tasks as dmtasks
from iondb.rundb.data import dmactions
from iondb.rundb.data.data_management import update_files_in_use
from iondb.rundb.data import exceptions as DMExceptions
from iondb.rundb.data.data_import import find_data_to_import, data_import
from iondb.utils.files import get_disk_attributes_gb, is_mounted
from iondb.rundb.data.dmfilestat_utils import dm_category_stats, get_keepers_diskspace
from django.http import HttpResponse, HttpResponseServerError, HttpResponseNotFound
from datetime import datetime
import logging
from django.core.urlresolvers import reverse
from django.db.models.query_utils import Q
from urllib import unquote_plus
logger = logging.getLogger(__name__)
def get_search_parameters():
    """Collect the distinct values used to populate the Data tab search filters.

    Returns a dict with 'experiment', 'report', 'combined' and 'mesh' keys,
    each mapping filter names to the lists of choices shown in the UI.
    """
    def get_chip_choices():
        """Build sorted {display_name, value} choices for every chipType used."""
        chip_choices = []
        # Get all string chip values from the db
        used_chip_values = [str(value).strip() for value in list(
            Experiment.objects.values_list("chipType", flat=True).distinct("chipType").order_by("chipType")
        )]
        # Try to find a better display name from the db if available
        # otherwise, just show the raw string value
        for chip_value in used_chip_values:
            if len(chip_value) > 0:
                try:
                    display_name = Chip.objects.get(name=chip_value).getChipDisplayedName()
                except Chip.DoesNotExist:
                    if chip_value == "900": # See TS-5276
                        display_name = "PI"
                    else:
                        display_name = chip_value
                chip_choices.append({"display_name": display_name, "value": chip_value})
        return sorted(chip_choices, reverse=True)
    # Keys below are Experiment/Results model fields; empty/whitespace-only
    # values are filtered out of the choice lists.
    experiment_params = {
        'flows': [],
        'pgmName': [],
    }
    report_params = {
        'processedflows': [],
    }
    eas_keys = [('library', 'reference')]
    for key in experiment_params.keys():
        experiment_params[key] = list(Experiment.objects.values_list(key, flat=True).distinct(key).order_by(key))
        experiment_params[key] = [value for value in experiment_params[key] if len(str(value).strip()) > 0]
    # Limit samples drop-down to only 5000 samples sorted by creation date date.
    experiment_params['sample'] = list(
        Sample.objects.filter(status="run").order_by('-date')[:5000].values_list('name', flat=True)
    )
    experiment_params['chipType'] = get_chip_choices()
    for expkey, key in eas_keys:
        experiment_params[expkey] = list(
            ExperimentAnalysisSettings.objects.values_list(key, flat=True).distinct(key).order_by(key)
        )
        experiment_params[expkey] = [value for value in experiment_params[expkey] if len(str(value).strip()) > 0]
    for key in report_params.keys():
        report_params[key] = list(Results.objects.values_list(key, flat=True).distinct(key).order_by(key))
    # 'flows' choices merge experiment flows with report processedflows, so
    # the raw per-model lists are deleted below after combining.
    combined_params = {
        'flows': sorted(set(experiment_params['flows'] + report_params['processedflows'])),
        'projects': Project.objects.values_list('name', flat=True).distinct('name').order_by('name'),
        'plugins': Plugin.objects.values_list('name', flat=True).distinct('name').order_by('name')
    }
    mesh_params = {
        'nodes': IonMeshNode.objects.all()
    }
    del experiment_params['flows']
    del report_params['processedflows']
    return {'experiment': experiment_params,
            'report': report_params,
            'combined': combined_params,
            'mesh': mesh_params
            }
@login_required
def rundb_redirect(request):
    """Redirect the legacy /rundb/ URL to the dashboard, preserving query args."""
    query_string = request.META.get('QUERY_STRING', '')
    target = reverse('dashboard')
    if query_string:
        target = "%s?%s" % (target, query_string)
    return redirect(target, permanent=False)
def get_serialized_exps(request, pageSize):
    """Serialize the first page of completed experiments to a JSON string.

    Uses the tastypie CompositeExperimentResource and its paginator so the
    embedded page matches what the Data tab's AJAX requests would return.
    Planned runs and runs without a data directory are excluded.
    """
    resource = CompositeExperimentResource()
    objects = resource.get_object_list(request).exclude(status="planned").exclude(expDir="").order_by('-resultDate')
    paginator = resource._meta.paginator_class(
        request.GET,
        objects,
        resource_uri=resource.get_resource_uri(),
        limit=pageSize,
        max_limit=resource._meta.max_limit,
        collection_name=resource._meta.collection_name
    )
    to_be_serialized = paginator.page()
    # Dehydrate each ORM object into a tastypie bundle before serializing.
    to_be_serialized['objects'] = [
        resource.full_dehydrate(resource.build_bundle(obj=obj, request=request))
        for obj in to_be_serialized['objects']
    ]
    serialized_exps = resource.serialize(None, to_be_serialized, 'application/json')
    return serialized_exps
def data_context(request):
    """Assemble the template context dict for the Data tab page."""
    page_size = GlobalConfig.get().records_to_display
    return {
        'search': get_search_parameters(),
        'inital_query': get_serialized_exps(request, page_size),
        'pageSize': page_size,
    }
@login_required
def data(request):
    """Main entry point to the Data tab; context is cached for 29 seconds."""
    cached_context = cache.get("data_tab_context")
    if cached_context is None:
        cached_context = data_context(request)
        cache.set("data_tab_context", cached_context, 29)
    return render(request, "rundb/data/data.html", cached_context)
class ExperimentListView(ListView):
    """This is a class based view using the Django ListView generic view.
    It shows Experiment objects and data from their representative report.
    """
    queryset = Experiment.objects.select_related(
        "repResult", "repResult__qualitymetrics", "repResult__eas"
    ).exclude(repResult=None).order_by('-repResult__timeStamp')
    template_name = "rundb/data/fast.html"
    paginate_by = 30
    def get_object(self):
        """Annotate an Experiment with transfer-progress attributes.

        NOTE(review): Django's ListView itself never calls get_object();
        confirm this override is actually reached by some caller.
        """
        exp = super(ExperimentListView, self).get_object()
        if exp.has_status:
            # ftpStatus holds the number of transferred flows while a run is
            # in progress (digits), or a status string once finished.
            exp.in_progress = exp.ftpStatus.isdigit()
            if exp.in_progress:
                exp.progress_percent = 100 * float(exp.ftpStatus) / float(exp.flows)
        return exp
    def get_context_data(self, **kwargs):
        """Add 'show_status': True when any listed run exposes status info."""
        context = super(ExperimentListView, self).get_context_data(**kwargs)
        context['show_status'] = any(e.has_status for e in self.object_list)
        return context
class ResultsListView(ListView):
    """This ListView shows Results objects and is meant to be quick and light weight
    """
    queryset = Results.objects.select_related(
        "experiment", "qualitymetrics", "eas"
    ).order_by('-timeStamp')
    template_name = "rundb/data/results_list.html"
    paginate_by = 30
    def get_object(self):
        """Annotate the Result's experiment with transfer-progress attributes.

        NOTE(review): Django's ListView never calls get_object(); also, this
        calls has_status() while ExperimentListView reads it as an attribute
        -- confirm which form is correct for the Experiment model.
        """
        result = super(ResultsListView, self).get_object()
        if result.experiment.has_status():
            result.experiment.in_progress = result.experiment.ftpStatus.isdigit()
            if result.experiment.in_progress:
                result.experiment.progress_percent = 100 * \
                    float(result.experiment.ftpStatus) / float(result.experiment.flows)
        return result
    def get_context_data(self, **kwargs):
        """Add 'show_status': True when any listed run exposes status info."""
        context = super(ResultsListView, self).get_context_data(**kwargs)
        context['show_status'] = any(r.experiment.has_status for r in self.object_list)
        return context
def data_table(request):
    """Render the completed-runs table page with the search filter choices."""
    context_data = {'search': get_search_parameters()}
    return render_to_response(
        "rundb/data/completed_table.html",
        context_data,
        context_instance=RequestContext(request))
def _makeCSVstr(object_list):
    """Serialize a Results iterable to an in-memory CSV buffer.

    Returns a cStringIO buffer rewound to position 0, ready for streaming
    into an HttpResponse.
    """
    buf = cStringIO.StringIO()
    csv.writer(buf).writerows(Results.to_pretty_table(object_list))
    buf.seek(0)
    return buf
def getCSV(request):
    """Return a CSV download of metrics for all runs matching the request filters."""
    # Use the CompositeExperimentResource to generate a queryset from the request args
    resource_instance = CompositeExperimentResource()
    request_bundle = resource_instance.build_bundle(request=request)
    experiment_queryset = resource_instance.obj_get_list(request_bundle)
    # The CSV is generated from a Results queryset so we need to get a Results queryset from the Experiment queryset
    experiment_ids = experiment_queryset.values_list('id', flat=True)
    results_queryset = Results.objects.filter(experiment_id__in=experiment_ids)
    # Now we can directly return a csv file response
    response = http.HttpResponse(_makeCSVstr(results_queryset), mimetype='text/csv')
    # Timestamped filename so repeated exports don't collide on the client.
    response['Content-Disposition'] = 'attachment; filename=metrics_%s.csv' % str(
        datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    return response
def get_project_CSV(request, project_pk, result_pks):
    """Return a CSV download of metrics for the given results of one project.

    result_pks is a comma-separated string of Results primary keys.
    """
    projectName = Project.objects.get(id=project_pk).name
    result_ids = result_pks.split(",")
    # Prefetch the metric relations _makeCSVstr will read for each row.
    base_object_list = Results.objects.select_related('experiment').prefetch_related(
        'libmetrics_set', 'tfmetrics_set', 'analysismetrics_set', 'pluginresult_set__plugin')
    base_object_list = base_object_list.filter(id__in=result_ids).order_by('-timeStamp')
    CSVstr = _makeCSVstr(base_object_list)
    ret = http.HttpResponse(CSVstr, mimetype='text/csv')
    ret['Content-Disposition'] = 'attachment; filename=%s_metrics_%s.csv' % (
        projectName, str(datetime.now().strftime("%Y_%m_%d_%H_%M_%S")))
    return ret
def projects(request):
    """Render the top-level Projects page."""
    return render_to_response(
        "rundb/data/projects.html",
        context_instance=template.RequestContext(request))
def project_view(request, pk=None):
    """Render the read-only project details modal for project *pk*."""
    pr = ProjectResource()
    base_bundle = pr.build_bundle(request=request)
    project = pr.obj_get(bundle=base_bundle, pk=pk)
    pr_bundle = pr.build_bundle(obj=project, request=request)
    # The modal template gets both the ORM object and its tastypie-serialized
    # JSON, plus the API endpoint it would submit to (readonly here).
    return render_to_response("rundb/data/modal_project_details.html", {
        # Other things here.
        "project_json": pr.serialize(None, pr.full_dehydrate(pr_bundle), 'application/json'), "project": project, "method": "GET", "readonly": True, 'action': reverse('api_dispatch_detail', kwargs={'resource_name': 'project', 'api_name': 'v1', 'pk': int(pk)}, args={'format': 'json'})
    })
def project_add(request, pk=None):
    """Render the 'Add project' modal, posting to the project API endpoint."""
    # Existing names are passed so the client can reject duplicates.
    otherList = [p.name for p in Project.objects.all()]
    ctx = template.RequestContext(request, {
        'id': pk, 'otherList': json.dumps(otherList), 'method': 'POST', 'methodDescription': 'Add', 'readonly': False, 'action': reverse('api_dispatch_list', kwargs={'resource_name': 'project', 'api_name': 'v1'})
    })
    return render_to_response("rundb/data/modal_project_details.html", context_instance=ctx)
def project_delete(request, pk=None):
    """Render the delete-confirmation modal for project *pk*.

    The modal issues a DELETE against the project API endpoint.
    """
    pr = ProjectResource()
    base_bundle = pr.build_bundle(request=request)
    project = pr.obj_get(bundle=base_bundle, pk=pk)
    _type = 'project'
    ctx = template.RequestContext(request, {
        "id": pk, "name": project.name, "method": "DELETE", 'methodDescription': 'Delete', "readonly": False, 'type': _type, 'action': reverse('api_dispatch_detail', kwargs={'resource_name': _type, 'api_name': 'v1', 'pk': int(pk)})
    })
    return render_to_response("rundb/data/modal_confirm_delete.html", context_instance=ctx)
def project_edit(request, pk=None):
    """Render the 'Edit project' modal, PATCHing the project API endpoint."""
    pr = ProjectResource()
    base_bundle = pr.build_bundle(request=request)
    project = pr.obj_get(bundle=base_bundle, pk=pk)
    pr_bundle = pr.build_bundle(obj=project, request=request)
    # Other project names are passed so the client can reject duplicates.
    otherList = [p.name for p in Project.objects.all()]
    return render_to_response("rundb/data/modal_project_details.html", {
        # Other things here.
        "project_json": pr.serialize(None, pr.full_dehydrate(pr_bundle), 'application/json'), "project": project, "id": pk, 'otherList': json.dumps(otherList), "method": "PATCH", 'methodDescription': 'Edit', "readonly": True, 'action': reverse('api_dispatch_detail', kwargs={'resource_name': 'project', 'api_name': 'v1', 'pk': int(pk)})
    })
def project_log(request, pk=None):
    """Show (GET) or record (POST) the EventLog history for a Project.

    GET renders the event-log modal for the project.
    POST adds an audit entry describing the project operation the client
    just performed (POST/PATCH/DELETE), then returns an empty response.
    """
    if request.method == 'GET':
        selected = get_object_or_404(Project, pk=pk)
        ct = ContentType.objects.get_for_model(selected)
        title = "Project History for %s (%s):" % (selected.name, pk)
        ctx = template.RequestContext(request, {"title": title, "pk": pk, "cttype": ct.id})
        return render_to_response("rundb/common/modal_event_log.html", context_instance=ctx)
    if request.method == 'POST':
        try:
            pk = int(pk)
        except (TypeError, ValueError):
            # Fall back to extracting the pk from the posted resource URL,
            # e.g. '/rundb/api/v1/project/42/' -> '42'.
            # (Was a bare except:, which also swallowed SystemExit/KeyboardInterrupt.)
            pk = request.REQUEST['url'].split('/')[-2]
        try:
            project = Project.objects.get(pk=pk)
            message = None
            if request.REQUEST['type'] == 'PATCH':
                message = 'Edit project name= %s.' % project.name
            elif request.REQUEST['type'] == 'DELETE':
                message = 'Delete project requested.'
            elif request.REQUEST['type'] == 'POST':
                message = 'Created project name= %s.' % project.name
            # Skip unknown types explicitly (previously raised NameError on
            # an unset 'message', silently eaten by the broad except below).
            if message:
                EventLog.objects.add_entry(project, message, request.user.username)
        except Exception as e:
            # Best-effort audit logging: a failure here must not break the
            # client's project operation.
            logger.exception(e)
    return HttpResponse()
def project_results(request, pk):
    """Render the list of Results belonging to project *pk*."""
    selected = get_object_or_404(Project, pk=pk)
    # Only offer the thumbnail filter if any thumbnail reports exist at all.
    thumbs_exist = Results.objects.filter(metaData__contains='thumb').exists()
    ctx = template.RequestContext(request, {"project": selected, 'filter_thumbnails': thumbs_exist})
    return render_to_response("rundb/data/project_results.html", context_instance=ctx)
def get_result_metrics(result):
    """Collect the timestamp, chip, quality and alignment metrics of *result* into a flat list."""
    quality = result.qualitymetrics
    library = result.libmetrics
    return [
        result.timeStamp,
        result.experiment.chipType,
        quality.q0_bases,
        quality.q0_reads,
        quality.q0_mean_read_length,
        quality.q20_bases,
        quality.q20_reads,
        quality.q20_mean_read_length,
        result.eas.reference,
        library.total_mapped_target_bases,
        library.total_mapped_reads,
    ]
def project_compare_context(pk):
    """Build the template context shared by all project comparison views (page, PDF, CSV)."""
    project = get_object_or_404(Project, pk=pk)
    related = Results.objects.filter(projects=project).select_related()
    return {
        "project": project,
        "results": related,
        "result_ids": [str(r.id) for r in related],
    }
def project_compare_make_latex(pk):
    """Render the multi-report comparison LaTeX template for project *pk*
    and compile it to PDF in a fresh temporary directory.

    Returns (directory, pdf_path).  Raises if pdflatex exits with a code
    greater than 1.  The caller is responsible for removing the temp dir.
    """
    context = project_compare_context(pk)
    latex_template = render_to_string("rundb/data/print_multi_report.tex", context)
    directory = tempfile.mkdtemp(prefix="project_compare_", dir="/tmp")
    latex_path = os.path.join(directory, "comparison_report.tex")
    pdf_path = os.path.join(directory, "comparison_report.pdf")
    with open(latex_path, 'w') as latex_file:
        latex_file.write(latex_template)
    logger.debug("Creating PDF directory %s" % directory)
    # NOTE(review): the options come *after* the input file, so pdflatex may
    # ignore them for this run; cwd=directory below makes the output land in
    # the right place regardless -- confirm before reordering.
    cmd = [
        "pdflatex", latex_path,
        "-output-directory", directory,
        "-interaction",
        "nonstopmode"
    ]
    logger.debug(' '.join(cmd))
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=directory)
    stdout, stderr = proc.communicate()
    # pdflatex exits 1 for recoverable warnings; treat 0 and 1 as success.
    if 0 <= proc.returncode <= 1:
        return directory, pdf_path
    else:
        logger.error("PDF stdout: %s" % stdout)
        raise Exception("Project Comparison PDF generation failure")
def project_compare_pdf(request, pk):
    """Return the project comparison report as a downloadable PDF attachment.

    If PDF generation failed, the pdflatex log is returned as plain text so
    the user can see what went wrong.
    """
    project = get_object_or_404(Project, pk=pk)
    directory, pdf_path = project_compare_make_latex(pk)
    if not os.path.exists(pdf_path):
        # Compilation failed: surface the pdflatex log instead.
        # Fixed: close the log file handle instead of leaking it.
        with open(os.path.join(directory, "comparison_report.log")) as log_file:
            return HttpResponse(log_file.read(), "text/plain")
    # FileWrapper takes ownership of the open handle and streams the PDF.
    response = HttpResponse(FileWrapper(open(pdf_path)),
                            content_type="application/pdf")
    response['Content-Length'] = os.path.getsize(pdf_path)
    response['Content-Disposition'] = "attachment; filename=project_compare_%s.pdf" % project.name
    return response
def project_compare(request, pk):
    """Render the interactive project comparison page."""
    ctx = project_compare_context(pk)
    return render(request, "rundb/data/project_compare.html", ctx)
def get_value(obj, path, default=None):
    """Resolve a dotted attribute *path* against *obj*.

    Walks each attribute segment in turn; if any intermediate attribute is
    missing, or the final attribute is absent, *default* is returned.
    """
    parts = path.split('.')
    target = obj
    for segment in parts[:-1]:
        if not hasattr(target, segment):
            return default
        target = getattr(target, segment)
    return getattr(target, parts[-1], default)
def project_compare_csv(request, pk):
    """Export the project comparison table as a CSV attachment."""
    comparison = project_compare_context(pk)
    # Header label and dotted value path are kept side by side in one tuple
    # per column for visual clarity and to help catch typos when editing.
    table = [
        ('Result Name', 'resultsName'),
        ('Status', 'status'),
        ('Date', 'timeStamp'),
        ('Chip', 'experiment.chipType'),
        ('Total Bases', 'qualitymetrics.q0_bases'),
        ('Total Reads', 'qualitymetrics.q0_reads'),
        ('Key Signal', 'libmetrics.aveKeyCounts'),
        ('Loading', 'analysismetrics.loading'),
        ('Mean Read Len.', 'qualitymetrics.q0_mean_read_length'),
        ('Median Read Len.', 'qualitymetrics.q0_median_read_length'),
        ('Mode Read Len.', 'qualitymetrics.q0_mode_read_length'),
        ('Q20 Bases', 'qualitymetrics.q20_bases'),
        ('Q20 Reads', 'qualitymetrics.q20_reads'),
        ('Q20 Read Len.', 'qualitymetrics.q20_mean_read_length'),
        ('Reference', 'eas.reference'),
        ('Aligned Bases', 'libmetrics.total_mapped_target_bases'),
        ('Aligned Reads', 'libmetrics.total_mapped_reads')
    ]
    # Split the pairs back apart for use below.
    header = [label for label, _path in table]
    columns = [path for _label, path in table]
    buf = cStringIO.StringIO()
    writer = csv.writer(buf)
    writer.writerow(header)
    for result in comparison['results']:
        writer.writerow([get_value(result, path, '') for path in columns])
    csv_content = buf.getvalue()
    buf.close()
    filename = "project_compare_{0}.csv".format(comparison['project'].name)
    response = HttpResponse(csv_content, content_type="text/csv")
    response['Content-Length'] = len(csv_content)
    response['Content-Disposition'] = "attachment; filename=" + filename
    return response
def results_from_project(request, results_pks, project_pk):
    """Remove the results in *results_pks* (comma-separated pks) from project *project_pk*."""
    action = 'Remove'
    try:
        # Fixed: arguments were passed as (results_pks, project_pk), but the
        # helper signature is (request, project_pk, result_pks, action), so
        # the project lookup received the results pks and vice versa.
        _results_to_project_helper(request, project_pk, results_pks, action)
        return HttpResponse()
    except Exception:
        # Fixed: was a silent bare except; log the cause before responding.
        logger.exception("Failed to remove results (%s) from project %s", results_pks, project_pk)
        return HttpResponseServerError('Errors occurred while processing your request')
def results_to_project(request, results_pks):
    """Add results (comma-separated pks) to one or more projects.

    GET renders the project-selection modal.  POST expects a JSON body with
    a 'projects' list of project pks and adds every selected result to each
    of those projects, returning a 500 response on failure.
    """
    if request.method == 'GET':
        ctx = template.RequestContext(request, {
            "results_pks": results_pks,
            "action": urlresolvers.reverse('results_to_project', args=[results_pks, ]),
            "method": 'POST'})
        return render_to_response("rundb/data/modal_projects_select.html", context_instance=ctx)
    if request.method == 'POST':
        json_data = json.loads(request.body)
        try:
            project_pks = json_data['projects']
        except KeyError:
            return HttpResponseServerError("Missing 'projects' attribute!")
        action = 'Add'
        try:
            for project_pk in project_pks:
                _results_to_project_helper(request, int(project_pk), results_pks, action)
            return HttpResponse()
        except Exception as e:
            # Fixed: the original re-raised here, which made the error
            # response below unreachable dead code; log and return the
            # 500 message as evidently intended.
            logger.exception(e)
        return HttpResponseServerError('Errors occurred while processing your request')
def _results_to_project_helper(request, project_pk, result_pks, action):
    """Add or remove the given results ('Add'/'Remove') to/from one project and log the change."""
    project = Project.objects.get(pk=project_pk)
    for pk_text in result_pks.split(','):
        report = Results.objects.get(pk=int(pk_text))
        if action == 'Add':
            report.projects.add(project)
        elif action == 'Remove':
            report.projects.remove(project)
    # Record a single history entry covering the whole batch.
    message = '%s results (%s).' % (action, result_pks)
    EventLog.objects.add_entry(project, message, request.user.username)
def validate_results_to_combine(selected_results, override_samples=False):
    """Check whether the selected reports can be merged into one CombinedAlignments report.

    Parses per-report package versions out of each result's analysisVersion
    string, then compares flow order, barcode kit and samples across the
    selection.  Returns (warnings, common): a list of human-readable warning
    strings and a dict of the field values the combined report will inherit
    ('floworder', 'barcodeId', 'barcodedSamples', 'sample').
    """
    # validate selected reports
    warnings = []
    # analysisVersion looks like "an:4.0,al:4.0,..."; map long names to tags.
    ver_map = {'analysis': 'an', 'alignment': 'al', 'dbreports': 'db', 'tmap': 'tm'}
    common = {}
    for i, r in enumerate(selected_results):
        version = {}
        for name, shortname in ver_map.iteritems():
            version[name] = next((v.split(':')[1].strip()
                                  for v in r.analysisVersion.split(',') if v.split(':')[0].strip() == shortname), '')
            setattr(r, name + "_version", version[name])
        # starting with TS3.6 we don't have separate alignment or tmap packages
        if not version['tmap']: r.tmap_version = version['analysis']
        if not version['alignment']: r.alignment_version = version['analysis']
        # Seed 'common' from the first non-combined report (or the last report
        # overall if every selection is itself a CombinedAlignments result).
        if not common:
            if r.resultsType != 'CombinedAlignments' or i == (len(selected_results) - 1):
                common = {
                    'floworder': r.experiment.flowsInOrder,
                    'barcodeId': r.eas.barcodeKitName,
                    'barcodedSamples': r.eas.barcodedSamples if r.eas.barcodeKitName else {},
                    'sample': r.experiment.get_sample() if not r.eas.barcodeKitName else ''
                }
    if len(set([getattr(r, 'tmap_version') for r in selected_results])) > 1:
        warnings.append("Selected results have different TMAP versions.")
    if len(set([getattr(r, 'alignment_version') for r in selected_results])) > 1:
        warnings.append("Selected results have different Alignment versions.")
    if len(set([r.experiment.flowsInOrder for r in selected_results if r.resultsType != 'CombinedAlignments'])) > 1:
        warnings.append("Selected results have different FlowOrder Sequences.")
        common['floworder'] = ''
    barcodeSet = set(
        [r.eas.barcodeKitName for r in selected_results if r.resultsType != 'CombinedAlignments'])
    if len(barcodeSet) > 1:
        warnings.append("Selected results have different Barcode Sets.")
        # allow merging for sub-sets of barcodes, e.g. "IonCode" and "IonCode Barcodes 1-32"
        minstr = min(barcodeSet, key=len)
        common['barcodeId'] = minstr if all(s.startswith(minstr) for s in barcodeSet) else ""
    if not override_samples:
        if common['barcodeId']:
            # Compare barcoded sample maps via their JSON serialization.
            if len(set([json.dumps(r.eas.barcodedSamples) for r in selected_results if r.resultsType != 'CombinedAlignments'])) > 1:
                warnings.append("Selected results have different Samples.")
                common['barcodedSamples'] = {}
        else:
            if len(set([r.experiment.get_sample() for r in selected_results if r.resultsType != 'CombinedAlignments'])) > 1:
                warnings.append("Selected results have different Samples.")
                common['sample'] = ''
    return warnings, common
def results_to_combine(request, results_pks, project_pk):
    """GET: render the combine-results modal with merge warnings.
    POST: launch the CombineAlignments re-analysis for the selected results.
    """
    if request.method == 'GET':
        selected_results = Results.objects.filter(id__in=results_pks.split(',')).order_by('-timeStamp')
        warnings, common = validate_results_to_combine(selected_results)
        barcodes = ''
        # Only show the barcode picker when the selection shares a barcode kit.
        if common['barcodeId']:
            barcodes = dnaBarcode.objects.filter(name=common['barcodeId']).order_by('name', 'index')
        ctx = template.RequestContext(request, {
            "results_pks": results_pks, "project_pk": project_pk, "selected_results": selected_results,
            "warnings": warnings, "barcodes": barcodes,
            "action": urlresolvers.reverse('results_to_combine', args=(results_pks, project_pk)),
            "method": 'POST'})
        return render_to_response("rundb/data/modal_combine_results.html", context_instance=ctx)
    if request.method == 'POST':
        try:
            json_data = json.loads(request.body)
            result = _combine_results_sendto_project(project_pk, json_data, request.user.username)
            ctx = _report_started(request, result.pk)
            return render_to_response("rundb/reports/analysis_started.html", context_instance=ctx)
        except Exception as e:
            return HttpResponseServerError("%s" % e)
def _combine_results_sendto_project(project_pk, json_data, username=''):
    """Create a CombinedAlignments result from the selected parents and submit
    the merge job to the job server.

    *json_data* comes from the combine-results modal and carries at least
    'name', 'mark_duplicates' and 'selected_pks' (plus optional sample
    overrides).  Validates that the parents are mergeable, creates the new
    Results/ExperimentAnalysisSettings records, builds the combineReports.py
    job payload and starts it.  Returns the new Results object; raises on
    any validation or job-submission failure.
    """
    project = Project.objects.get(id=project_pk)
    projectName = project.name
    name = json_data['name']
    mark_duplicates = json_data['mark_duplicates']
    ids_to_merge = json_data['selected_pks']
    override_samples = json_data.get('override_samples', False) == 'on'
    parents = Results.objects.filter(id__in=ids_to_merge).order_by('-timeStamp')
    # test if reports can be combined and get common field values
    for parent in parents:
        if parent.dmfilestat_set.get(dmfileset__type=dmactions_types.OUT).action_state == 'DD':
            raise Exception("Output Files for %s are Deleted." % parent.resultsName)
        # All parents must share the alignment reference of the first one.
        if parent.pk == parents[0].pk:
            reference = parent.reference
        else:
            if not reference == parent.reference:
                raise Exception("Selected results do not have the same Alignment Reference.")
    warnings, common = validate_results_to_combine(parents, override_samples)
    floworder = common['floworder']
    barcodeId = common['barcodeId']
    if override_samples:
        sample = json_data.get('sample', '')
        barcodedSamples = json_data.get('barcodedSamples', {})
        if barcodedSamples and common['barcodedSamples']:
            # try to update with original barcodeSampleInfo
            for sample_name, value in barcodedSamples.items():
                for barcode in value['barcodes']:
                    barcodeSampleInfo = [v.get('barcodeSampleInfo', {}).get(barcode) for v in common[
                        'barcodedSamples'].values() if v.get('barcodeSampleInfo', {}).get(barcode)]
                    if barcodeSampleInfo:
                        barcodedSamples[sample_name].setdefault(
                            'barcodeSampleInfo', {})[barcode] = barcodeSampleInfo[0]
        else:
            barcodedSamples = json.dumps(barcodedSamples, cls=DjangoJSONEncoder)
    else:
        sample = common['sample']
        barcodedSamples = common['barcodedSamples']
    # create new entry in DB for combined Result
    delim = ':'
    filePrefix = "CombineAlignments"  # this would normally be Experiment name that's prefixed to all filenames
    result, exp = create_combined_result('CA_%s_%s' % (name, projectName))
    result.resultsType = 'CombinedAlignments'
    # parentIDs is stored as a delimited string, e.g. ":12:34:56:".
    result.parentIDs = delim + delim.join(ids_to_merge) + delim
    result.reference = reference
    result.sffLink = os.path.join(result.reportLink, "%s_%s.sff" % (filePrefix, result.resultsName))
    result.projects.add(project)
    # add ExperimentAnalysisSettings
    eas_kwargs = {
        'date': datetime.now(),
        'experiment': exp,
        'isEditable': False,
        'isOneTimeOverride': True,
        'status': 'run',
        'reference': reference,
        'barcodeKitName': barcodeId,
        'barcodedSamples': barcodedSamples,
        'targetRegionBedFile': '',
        'hotSpotRegionBedFile': '',
        'isDuplicateReads': mark_duplicates
    }
    eas = ExperimentAnalysisSettings(**eas_kwargs)
    eas.save()
    result.eas = eas
    result.save()
    # gather parameters to pass to merging script
    links = []
    bams = []
    names = []
    bamFile = 'rawlib.bam'
    for parent in parents:
        links.append(parent.reportLink)
        names.append(parent.resultsName)
        # BAM files location
        dmfilestat = parent.dmfilestat_set.get(dmfileset__type=dmactions_types.OUT)
        reportDir = dmfilestat.archivepath if dmfilestat.action_state == 'AD' else parent.get_report_dir()
        bams.append(os.path.join(reportDir, bamFile))
    # need Plan section for plugins, use latest report's plan
    latest_w_plan = parents.filter(experiment__plan__isnull=False)
    if not latest_w_plan:
        # No direct parent has a plan; look one generation up via parentIDs.
        grandparents = sum([v.split(delim) for v in parents.values_list('parentIDs', flat=True)], [])
        grandparents = [v for v in set(grandparents) if v]
        latest_w_plan = Results.objects.filter(
            id__in=grandparents, experiment__plan__isnull=False).order_by('-timeStamp')
    plan_json = model_to_dict(latest_w_plan[0].experiment.plan) if latest_w_plan else {}
    try:
        genome = ReferenceGenome.objects.all().filter(
            short_name=reference, index_version=settings.TMAP_VERSION, enabled=True)[0]
        if os.path.exists(genome.info_text()):
            genomeinfo = genome.info_text()
        else:
            genomeinfo = ""
    except:
        # Best-effort: missing genome info is not fatal for the merge job.
        genomeinfo = ""
    eas_json = model_to_dict(eas)
    barcodedSamples_reference_names = eas.barcoded_samples_reference_names
    # use barcodedSamples' selected reference if NO plan default reference is specified
    reference = result.reference
    if not result.reference and barcodedSamples_reference_names:
        reference = barcodedSamples_reference_names[0]
    params = {
        'resultsName': result.resultsName,
        'parentIDs': ids_to_merge,
        'parentNames': names,
        'parentLinks': links,
        'parentBAMs': bams,
        'referenceName': reference,
        'tmap_version': settings.TMAP_VERSION,
        'mark_duplicates': mark_duplicates,
        'plan': plan_json,
        'run_name': filePrefix,
        'genomeinfo': genomeinfo,
        'flowOrder': floworder,
        'project': projectName,
        'barcodeId': barcodeId,
        "barcodeSamples_referenceNames": barcodedSamples_reference_names,
        'sample': sample,
        'override_samples': override_samples,
        'experimentAnalysisSettings': eas_json,
        'warnings': warnings,
        'runid': result.runid
    }
    params = json.dumps(params, cls=DjangoJSONEncoder)
    from distutils.sysconfig import get_python_lib
    scriptpath = os.path.join(get_python_lib(), 'ion', 'reports', 'combineReports.py')
    try:
        with open(scriptpath, "r") as f:
            script = f.read()
    except Exception as error:
        result.status = "Error reading %s\n%s" % (scriptpath, error.args)
        raise Exception(result.status)
    files = []
    # do we need expMeta?
    lines = ("Project = %s" % ','.join(p.name for p in result.projects.all()),
             "Library = %s" % result.reference,
             "Analysis Name = %s" % result.resultsName,
             "Flow Order = %s" % floworder,
             "Run Name = %s" % filePrefix
             )
    files.append(('expMeta.dat', '\n'.join(lines)))
    files.append(("primary.key", "ResultsPK = %s" % result.pk))
    webRootPath = result.web_root_path(Location.getdefault())
    try:
        host = "127.0.0.1"
        conn = client.connect(host, settings.JOBSERVER_PORT)
        conn.startanalysis(result.resultsName, script, params,
                           files, webRootPath, result.pk, '', {}, 'combineAlignments')
    except:
        result.status = "Failed to contact job server."
        raise Exception(result.status)
    # log project history
    message = 'Combine results %s into name= %s (%s), auto-assign to project name= %s (%s).' % (
        ids_to_merge, result.resultsName, result.pk, projectName, project_pk)
    EventLog.objects.add_entry(project, message, username)
    return result
def create_combined_result(resultsName):
    """Create a new Results entry (attached to a blank Experiment) for a combined report.

    The requested name is sanitized to a safe character set, then suffixed
    with a zero-padded counter one greater than the highest existing sibling
    so names stay unique.  Returns (result, experiment).
    """
    # create Results entry in DB without any Experiment (creates blank Exp)
    exp = _blank_Exp('NONE_ReportOnly_NONE')
    last = 0
    # check resultsName for invalid chars to be safe
    validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.-"
    resultsName = ''.join([c for c in resultsName if c in validChars])
    otherResults = Results.objects.filter(resultsName__contains=resultsName).order_by('pk')
    if otherResults:
        lastName = otherResults[len(otherResults) - 1].resultsName
        try:
            last = int(lastName.split('_')[-1])
        except ValueError:
            # Fixed: a sibling whose name lacks a numeric "_NNN" suffix used
            # to crash here; fall back to the default counter instead.
            last = 0
    resultsName = "%s_%03d" % (resultsName, last + 1)
    storage = ReportStorage.objects.filter(default=True)[0]
    result = build_result(exp, resultsName, storage, Location.getdefault())
    return result, exp
def _blank_Exp(blankName):
    """Get or create a placeholder Experiment named *blankName*.

    Used for results (e.g. combined reports) that have no real sequencing
    run behind them: every required model field is filled with a
    type-appropriate empty value.
    """
    # create blank experiment if doesn't already exist
    try:
        ret = Experiment.objects.get(expName=blankName)
    except Experiment.DoesNotExist:
        kwargs = {}
        for field in Experiment._meta.fields:
            if field.name == 'expName':
                kwargs[field.name] = blankName
            elif field.name == 'date':
                kwargs[field.name] = datetime.now()
            elif not field.null:
                if field.get_internal_type() == 'CharField' or field.get_internal_type() == 'TextField':
                    kwargs[field.name] = ""
                elif field.get_internal_type() == 'BooleanField':
                    kwargs[field.name] = False
                elif field.get_internal_type() == 'IntegerField':
                    kwargs[field.name] = 0
                else:  # only id should remain
                    # Fixed: field.name was passed as the log *format string*,
                    # which silently dropped the second argument.
                    logging.debug("%s %s", field.name, field.get_internal_type())
        kwargs['cycles'] = 1
        kwargs['flows'] = 1
        kwargs['ftpStatus'] = 'Complete'
        ret = Experiment(**kwargs)
        ret.save()
    return ret
@login_required
def experiment_edit(request, pk):
    """Edit an Experiment's run settings and its editable analysis settings.

    GET pre-populates the settings forms (including kit names resolved from
    kit part barcodes and the plugin selections with their user input);
    POST validates and saves the Plan, Experiment, ExperimentAnalysisSettings
    and sample associations.
    """
    exp = get_object_or_404(Experiment, pk=pk)
    eas, eas_created = exp.get_or_create_EAS(editable=True)
    plan = exp.plan
    barcodes = {}
    for bc in dnaBarcode.objects.order_by('name', 'index').values('name', 'id_str', 'sequence'):
        barcodes.setdefault(bc['name'], []).append(bc)
    # get list of plugins to run
    plugins = Plugin.objects.filter(selected=True, active=True).exclude(path='')
    selected_names = [pl['name'] for pl in eas.selectedPlugins.values()]
    plugins_list = list(plugins.filter(name__in=selected_names))
    if request.method == 'GET':
        exp_form = forms.ExperimentSettingsForm(instance=exp)
        eas_form = forms.AnalysisSettingsForm(instance=eas)
        # Application, i.e. runType
        if plan:
            exp_form.fields['runtype'].initial = plan.runType
            exp_form.fields['sampleTubeLabel'].initial = plan.sampleTubeLabel
        # Library Kit name - can get directly or from kit barcode
        libraryKitName = ''
        if eas.libraryKitName:
            libraryKitName = eas.libraryKitName
        elif eas.libraryKitBarcode:
            libkitset = KitInfo.objects.filter(kitType='LibraryKit', kitpart__barcode=eas.libraryKitBarcode)
            if len(libkitset) == 1:
                libraryKitName = libkitset[0].name
        exp_form.fields['libraryKitname'].initial = libraryKitName
        # Sequencing Kit name - can get directly or from kit barcode
        if not exp.sequencekitname and exp.sequencekitbarcode:
            seqkitset = KitInfo.objects.filter(
                kitType='SequencingKit', kitpart__barcode=exp.sequencekitbarcode)
            if len(seqkitset) == 1:
                # Fixed: this used to replace the form *Field object* with a
                # plain string; set the field's initial value instead (same
                # pattern as libraryKitname above).
                exp_form.fields['sequencekitname'].initial = seqkitset[0].name
        exp_form.fields['libraryKey'].initial = eas.libraryKey
        if len(exp.samples.all()) > 0:
            exp_form.fields['sample'].initial = exp.samples.all()[0].id
        exp_form.fields['barcodedSamples'].initial = eas.barcodedSamples
        exp_form.fields['mark_duplicates'].initial = eas.isDuplicateReads
        # plugins with optional userInput
        eas_form.fields['plugins'].initial = [plugin.id for plugin in plugins_list]
        pluginsUserInput = {}
        for plugin in plugins_list:
            pluginsUserInput[str(plugin.id)] = eas.selectedPlugins.get(plugin.name, {}).get('userInput', '')
        eas_form.fields['pluginsUserInput'].initial = json.dumps(pluginsUserInput)
    if request.method == 'POST':
        exp_form = forms.ExperimentSettingsForm(request.POST, instance=exp)
        eas_form = forms.AnalysisSettingsForm(request.POST, instance=eas)
        if exp_form.is_valid() and eas_form.is_valid():
            # save Plan
            if plan:
                plan.runType = exp_form.cleaned_data['runtype']
                plan.sampleTubeLabel = exp_form.cleaned_data['sampleTubeLabel']
                plan.save()
            # save Experiment
            exp_form.save()
            # save ExperimentAnalysisSettings
            eas = eas_form.save(commit=False)
            eas.libraryKey = exp_form.cleaned_data['libraryKey']
            eas.libraryKitName = exp_form.cleaned_data['libraryKitname']
            eas.barcodedSamples = exp_form.cleaned_data['barcodedSamples']
            eas.isDuplicateReads = exp_form.cleaned_data['mark_duplicates']
            # plugins
            form_plugins_list = list(eas_form.cleaned_data['plugins'])
            pluginsUserInput = json.loads(eas_form.cleaned_data['pluginsUserInput'])
            selectedPlugins = {}
            for plugin in form_plugins_list:
                selectedPlugins[plugin.name] = {
                    "id": str(plugin.id),
                    "name": plugin.name,
                    "version": plugin.version,
                    "features": plugin.pluginsettings.get('features', []),
                    "userInput": pluginsUserInput.get(str(plugin.id), '')
                }
            eas.selectedPlugins = selectedPlugins
            eas.save()
            # save single non-barcoded sample or barcoded samples
            if not eas.barcodeKitName:
                sampleId = exp_form.cleaned_data['sample']
                if sampleId:
                    sample = Sample.objects.get(pk=sampleId)
                    exp.samples.clear()
                    exp.samples.add(sample)
            elif eas.barcodedSamples:
                exp.samples.clear()
                for value in eas.barcodedSamples.values():
                    sampleId = value['id']
                    sample = Sample.objects.get(pk=sampleId)
                    exp.samples.add(sample)
        else:
            return HttpResponseServerError('%s %s' % (exp_form.errors, eas_form.errors))
    ctxd = {"exp_form": exp_form, "eas_form": eas_form, "pk":
            pk, "name": exp.expName, "barcodes": json.dumps(barcodes)}
    return render_to_response("rundb/data/modal_experiment_edit.html", context_instance=template.RequestContext(request, ctxd))
@login_required
def datamanagement(request):
    """Render the Data Management page: per-fileserver DM tables, fileset
    mount status, disk usage and archive-volume statistics."""
    gc = GlobalConfig.get()
    if os.path.exists("/opt/ion/.ion-internal-server"):
        # split Data Management tables per fileserver
        dm_tables = []
        for path in FileServer.objects.all().order_by('pk').values_list('filesPrefix', flat=True):
            dm_tables.append({
                "filesPrefix": path,
                "url": "/rundb/api/v1/compositedatamanagement/?format=json&expDir__startswith=%s" % path
            })
    else:
        dm_tables = [{
            "filesPrefix": "",
            "url": "/rundb/api/v1/compositedatamanagement/?format=json"
        }]
    dm_filesets = DMFileSet.objects.filter(version=settings.RELVERSION).order_by('pk')
    for dmfileset in dm_filesets:
        # Annotate each fileset with whether its backup target is mounted.
        if dmfileset.backup_directory in ['None', None, '']:
            dmfileset.mounted = False
        else:
            dmfileset.mounted = bool(is_mounted(dmfileset.backup_directory)) and os.path.exists(
                dmfileset.backup_directory)
    # Disk Usage section
    fs_stats = {}
    fs_stats_mounted_paths = []
    for path in FileServer.objects.all().order_by('pk').values_list('filesPrefix', flat=True):
        try:
            if os.path.exists(path):
                fs_stats[path] = get_disk_attributes_gb(path)
                # get space used by data marked Keep
                keeper_used = get_keepers_diskspace(path)
                keeper_used = float(sum(keeper_used.values())) / 1024  # gbytes
                total_gb = fs_stats[path]['disksize']
                fs_stats[path]['percentkeep'] = 100 * (keeper_used / total_gb) if total_gb > 0 else 0
                if os.path.islink(path):
                    mounted = is_mounted(os.path.realpath(path))
                    if mounted:
                        fs_stats_mounted_paths.append(mounted)
        except:
            # Best-effort per path: a failing mount must not break the page.
            logger.error(traceback.format_exc())
    archive_stats = {}
    backup_dirs = get_dir_choices()[1:]
    for bdir, name in backup_dirs:
        try:
            mounted = is_mounted(bdir)  # This will return mountpoint path
            # Skip volumes already counted among the fileserver stats.
            if mounted and bdir not in archive_stats and bdir not in fs_stats and bdir not in fs_stats_mounted_paths:
                archive_stats[bdir] = get_disk_attributes_gb(bdir)
        except:
            logger.error(traceback.format_exc())
    ctxd = {
        "autoArchive": gc.auto_archive_ack,
        "autoArchiveEnable": gc.auto_archive_enable,
        "dm_filesets": dm_filesets,
        "dm_tables": dm_tables,
        "archive_stats": archive_stats,
        "fs_stats": fs_stats,
        "dm_stats": dm_category_stats()
    }
    ctx = template.RequestContext(request, ctxd)
    return render_to_response("rundb/data/data_management.html", context_instance=ctx)
def dm_actions(request, results_pks):
    """Render the DM actions modal for one or more results (comma-separated pks),
    summarizing each file category's state, Keep flag and disk space."""
    results = Results.objects.filter(pk__in=results_pks.split(','))
    # update disk space info if needed
    to_update = DMFileStat.objects.filter(diskspace=None, result__in=results_pks.split(','))
    for dmfilestat in to_update:
        dmtasks.update_dmfilestats_diskspace.delay(dmfilestat)
    if len(to_update) > 0:
        # Give the async celery tasks a moment to fill in the sizes.
        time.sleep(2)
    dm_files_info = []
    for category in dmactions_types.FILESET_TYPES:
        info = ''
        for result in results:
            dmfilestat = result.dmfilestat_set.get(dmfileset__type=category)
            if not info:
                info = {
                    'category': dmfilestat.dmfileset.type,
                    'description': dmfilestat.dmfileset.description,
                    'action_state': dmfilestat.get_action_state_display(),
                    'keep': dmfilestat.getpreserved(),
                    'diskspace': dmfilestat.diskspace,
                    'in_process': dmfilestat.in_process()
                }
            else:
                # multiple results: '*' marks fields that differ across them,
                # disk space is summed (None when any size is unknown).
                if info['action_state'] != dmfilestat.get_action_state_display():
                    info['action_state'] = '*'
                if info['keep'] != dmfilestat.getpreserved():
                    info['keep'] = '*'
                if (info['diskspace'] is not None) and (dmfilestat.diskspace is not None):
                    info['diskspace'] += dmfilestat.diskspace
                else:
                    info['diskspace'] = None
                info['in_process'] &= dmfilestat.in_process()
        dm_files_info.append(info)
    if len(results) == 1:
        # NOTE(review): relies on the loop variable 'result' leaking out of the
        # loop above; raises NameError if 'results' is ever empty -- confirm
        # callers always pass at least one pk.
        name = "Report Name: %s" % result.resultsName
        subtitle = "Run Name: %s" % result.experiment.expName
    else:
        # multiple results (available from Project page)
        name = "Selected %s results." % len(results)
        subtitle = "(%s)" % ', '.join(results_pks.split(','))
    backup_dirs = get_dir_choices()[1:]
    ctxd = {
        "dm_files_info": dm_files_info,
        "name": name,
        "subtitle": subtitle,
        "results_pks": results_pks,
        "backup_dirs": backup_dirs,
        "isDMDebug": os.path.exists("/opt/ion/.ion-dm-debug"),
    }
    ctx = RequestContext(request, ctxd)
    return render_to_response("rundb/data/modal_dm_actions.html", context_instance=ctx)
def dm_action_selected(request, results_pks, action):
    '''
    Validate and launch a Data Management *action* ('export', 'archive',
    'delete', ...) for the selected reports.

    file categories to process: data['categories']
    user log entry comment: data['comment']
    results_pks could contain more than 1 result

    Returns a JSON warning (for the client to confirm) when the action needs
    confirmation, a 500 on validation failure, or an echo of the request on
    successful submission of the async action group.
    '''
    logger = logging.getLogger('data_management')
    data = json.loads(request.body)
    logger.info("dm_action_selected: request '%s' on report(s): %s" % (action, results_pks))
    '''
    organize the dmfilestat objects by result_id, we make multiple dbase queries
    but it keeps them organized. Most times, this will be a single query anyway.
    '''
    dmfilestat_dict = {}
    try:
        # update any dmfilestats in use by running analyses
        update_files_in_use()
        backup_directory = data['backup_dir'] if data['backup_dir'] != 'default' else None
        for resultPK in results_pks.split(','):
            logger.debug("Matching dmfilestats contain %s reportpk" % resultPK)
            dmfilestat_dict[resultPK] = DMFileStat.objects.select_related() \
                .filter(dmfileset__type__in=data['categories'], result__id=int(resultPK))
            for dmfilestat in dmfilestat_dict[resultPK]:
                # validate export/archive destination folders
                if action in ['export', 'archive']:
                    dmactions.destination_validation(dmfilestat, backup_directory, manual_action=True)
                # validate files not in use
                try:
                    dmactions.action_validation(dmfilestat, action, data['confirmed'])
                except DMExceptions.FilesInUse as e:
                    # warn if exporting files currently in use, allow to proceed if confirmed
                    if action == 'export':
                        if not data['confirmed']:
                            return HttpResponse(json.dumps({'warning': str(e) + '<br>Exporting now may produce incomplete data set.'}), mimetype="application/json")
                    else:
                        raise e
                except DMExceptions.BaseInputLinked as e:
                    # warn if deleting basecaller files used in any other re-analysis started from BaseCalling
                    if not data['confirmed']:
                        return HttpResponse(json.dumps({'warning': str(e)}), mimetype="application/json")
                # warn if archiving data marked Keep
                if action == 'archive' and dmfilestat.getpreserved():
                    if not data['confirmed']:
                        return HttpResponse(json.dumps({'warning': '%s currently marked Keep.' % dmfilestat.dmfileset.type}), mimetype="application/json")
                    else:
                        dmfilestat.setpreserved(False)
                # if further processing an archived dataset, error if archive drive is not mounted
                if dmfilestat.isarchived() and not os.path.exists(dmfilestat.archivepath):
                    return HttpResponseServerError("%s archive location %s is not available." % (dmfilestat.dmfileset.type, dmfilestat.archivepath))
        # All validation passed: queue the whole batch as one async task group.
        async_task_result = dmtasks.action_group.delay(request.user.username, data[
            'categories'], action, dmfilestat_dict, data['comment'], backup_directory, data['confirmed'])
        if async_task_result:
            logger.debug(async_task_result)
    except DMExceptions.SrcDirDoesNotExist as e:
        # NOTE(review): uses the loop variable 'dmfilestat' from above -- this
        # assumes the exception was raised inside the per-filestat loop.
        dmfilestat.setactionstate('DD')
        msg = "Source directory %s no longer exists. Setting action_state to Deleted" % e.message
        logger.info(msg)
        EventLog.objects.add_entry(dmfilestat.result, msg, username=request.user.username)
    except Exception as e:
        logger.error("dm_action_selected: error: %s" % str(e))
        return HttpResponseServerError("%s" % str(e))
    test = {'pks': results_pks, 'action': action, 'data': data}
    return HttpResponse(json.dumps(test), mimetype="application/json")
@login_required
@permission_required('user.is_staff', raise_exception=True)
def dm_configuration(request):
    """View (GET) or save (POST) the Data Management configuration.

    POST takes a JSON body with optional keys 'filesets' (per-category
    parameter dicts), 'email' (DM contact address) and 'auto_archive_ack';
    every change is recorded as an HTML-formatted configuration log entry
    with changed values highlighted.
    """
    def isdiff(value1, value2):
        # Compare as strings so e.g. 5 and '5' from the JSON payload match.
        return str(value1) != str(value2)
    config = GlobalConfig.get()
    dm_contact, created = User.objects.get_or_create(username='dm_contact')
    if request.method == 'GET':
        dm_filesets = DMFileSet.objects.filter(version=settings.RELVERSION).order_by('pk')
        backup_dirs = get_dir_choices()
        config.email = dm_contact.email
        ctx = RequestContext(request, {
            "dm_filesets": dm_filesets,
            "config": config,
            "backup_dirs": backup_dirs,
            "categories": dmactions_types.FILESET_TYPES
        })
        return render_to_response("rundb/data/dm_configuration.html", context_instance=ctx)
    elif request.method == 'POST':
        dm_filesets = DMFileSet.objects.all()
        log = 'SAVED Data Management Configuration<br>'
        data = json.loads(request.body)
        changed = False
        # Wrap changed values in a highlight span for the history log.
        html = lambda s: '<span style="color:#3A87AD;">%s</span>' % s
        try:
            for key, value in data.items():
                if key == 'filesets':
                    for category, params in value.items():
                        # log += '<b>%s:</b> %s<br>' % (category,
                        # json.dumps(params).translate(None, "{}\"\'") )
                        dmfileset = dm_filesets.filter(type=category)
                        current_params = dmfileset.values(*params.keys())[0]
                        changed_params = [
                            key for key, value in params.items() if isdiff(value, current_params.get(key))]
                        if len(changed_params) > 0:
                            dmfileset.update(**params)
                            changed = True
                            log += '<b>%s:</b> ' % category
                            for key, value in params.items():
                                log_txt = ' %s: %s,' % (key, value)
                                if key in changed_params:
                                    log_txt = html(log_txt)
                                log += log_txt
                            log = log[:-1] + '<br>'
                elif key == 'email':
                    log_txt = '<b>Email:</b> %s' % value
                    if isdiff(value, dm_contact.email):
                        changed = True
                        dm_contact.email = value
                        dm_contact.save()
                        log_txt = html(log_txt)
                    log += log_txt + '<br>'
                elif key == 'auto_archive_ack':
                    log_txt = '<b>Auto Acknowledge Delete:</b> %s' % value
                    if isdiff(value, config.auto_archive_ack):
                        changed = True
                        config.auto_archive_ack = True if value == 'True' else False
                        config.save()
                        log_txt = html(log_txt)
                    log += log_txt + '<br>'
            if changed:
                _add_dm_configuration_log(request, log)
        except Exception as e:
            logger.exception("dm_configuration: error: %s" % str(e))
            return HttpResponseServerError("Error: %s" % str(e))
    return HttpResponse()
def _add_dm_configuration_log(request, log):
    """Persist *log* as an EventLog entry tied to the DMFileSet model class.

    object_pk is 0 because the entry applies to Data Management configuration
    as a whole rather than to any single DMFileSet instance.
    """
    content_type = ContentType.objects.get_for_model(DMFileSet)
    entry = EventLog(object_pk=0, content_type=content_type,
                     username=request.user.username, text=log)
    entry.save()
def delete_ack(request):
    """Acknowledge (or un-acknowledge) deletion of Signal Processing data.

    Expects 'runpk' (a Results pk) and 'runstate' in request.POST.  Updates
    the owning experiment's user_ack flag plus every sibling report's
    sigproc DMFileStat action_state, logging the change on each report.
    """
    runPK = request.POST.get('runpk', False)
    runState = request.POST.get('runstate', False)
    if not runPK:
        return HttpResponse(json.dumps({"status": "error, no runPK POSTed"}), mimetype="application/json")
    if not runState:
        return HttpResponse(json.dumps({"status": "error, no runState POSTed"}), mimetype="application/json")
    # Mirror the acknowledgement on the experiment itself.
    experiment = Results.objects.get(pk=runPK).experiment
    experiment.user_ack = runState
    experiment.save()
    # An experiment can own several reports; update every sigproc filestat.
    sibling_pks = experiment.results_set.values_list('pk', flat=True)
    updated = DMFileStat.objects.filter(
        result__pk__in=sibling_pks, dmfileset__type=dmactions_types.SIG).update(action_state=runState)
    for report in experiment.results_set.all():
        note = '%s deletion ' % dmactions_types.SIG
        note += 'is Acknowledged' if runState == 'A' else ' Acknowledgement is removed'
        EventLog.objects.add_entry(report, note, username=request.user.username)
    return HttpResponse(json.dumps({"runState": runState, "count": updated, "runPK": runPK}), mimetype="application/json")
@login_required
def preserve_data(request):
    """Toggle the Keep (preserve) flag on DMFileStat objects.

    POST parameters: 'reportpk' (comma-separated Results pks) or 'exppk'
    (an Experiment pk), 'keep' ('true'/'false') and 'type' (one of 'sig',
    'base', 'out', 'intr', 'reanalysis').  For 'sig' and 'reanalysis' the
    flag is applied across all sibling reports of the same experiment(s).

    NOTE(review): for a non-POST request, reportPK/typeStr/filestat are
    never assigned and the final HttpResponse raises NameError; the same
    happens for 'filestat' when the results query is empty -- confirm the
    URL routing only allows POST before relying on this view.
    """
    # Sets flag to preserve data for a single DMFileStat object
    if request.method == 'POST':
        reportPK = request.POST.get('reportpk', False)
        expPK = request.POST.get('exppk', False)
        keep = True if request.POST.get('keep') == 'true' else False
        dmtype = request.POST.get('type', '')
        # Map the short POSTed type code onto the DM fileset category.
        if dmtype == 'sig':
            typeStr = dmactions_types.SIG
        elif dmtype == 'base':
            typeStr = dmactions_types.BASE
        elif dmtype == 'out':
            typeStr = dmactions_types.OUT
        elif dmtype == 'intr':
            typeStr = dmactions_types.INTR
        elif dmtype == 'reanalysis':
            typeStr = dmactions_types.SIG
        else:
            return HttpResponseServerError("error, unknown DMFileStat type")
        try:
            if reportPK:
                if dmtype == 'sig':
                    # Signal data is shared per experiment: expand the flag to
                    # every report of the experiments the given reports belong to.
                    expPKs = Results.objects.filter(
                        pk__in=reportPK.split(',')).values_list('experiment', flat=True)
                    results = Results.objects.filter(experiment__in=expPKs)
                else:
                    results = Results.objects.filter(pk__in=reportPK.split(','))
            elif expPK:
                results = Experiment.objects.get(pk=expPK).results_set.all()
            else:
                return HttpResponseServerError("error, no object pk specified")
            msg = '%s marked exempt from auto-action' if keep else '%s no longer exempt from auto-action'
            msg = msg % typeStr
            for result in results:
                filestat = result.get_filestat(typeStr)
                filestat.setpreserved(keep)
                if dmtype == 'reanalysis':
                    # Keep BASE category data for Proton fullchip for re-analysis from on-instrument files
                    if result.experiment.log.get('oninstranalysis', '') == 'yes' and not result.isThumbnail:
                        result.get_filestat(dmactions_types.BASE).setpreserved(keep)
                EventLog.objects.add_entry(result, msg, username=request.user.username)
        except Exception as err:
            return HttpResponseServerError("error, %s" % err)
    return HttpResponse(json.dumps({"reportPK": reportPK, "type": typeStr, "keep": filestat.getpreserved()}), mimetype="application/json")
def get_dir_choices():
    """Build the (value, label) choices for backup-directory selection.

    Starts from the mounted media devices, then appends any backup
    directories already configured on DMFileSet objects, labelling
    unmounted ones with a '(not mounted)' suffix.
    """
    from iondb.utils import devices
    choices = [(None, 'None')] + devices.to_media(devices.disk_report())
    # add selected directories to choices
    configured = set(
        DMFileSet.objects.exclude(backup_directory__in=['', 'None'])
        .values_list('backup_directory', flat=True)
    )
    for directory in configured:
        if not directory or (directory, directory) in choices:
            continue
        if is_mounted(directory):
            label = directory
        else:
            label = '%s (not mounted)' % directory
        choices.append((directory, label))
    return tuple(choices)
def dm_log(request, pk=None):
    """Render the event-log modal for one report's Data Management actions.

    GET only; other request methods fall through and return None.
    """
    if request.method != 'GET':
        return
    result = get_object_or_404(Results, pk=pk)
    content_type = ContentType.objects.get_for_model(result)
    context = RequestContext(request, {
        "title": "Data Management Actions for %s (%s):" % (result.resultsName, pk),
        "pk": pk,
        "cttype": content_type.id,
    })
    return render_to_response("rundb/common/modal_event_log.html", context_instance=context)
def dm_configuration_log(request):
    """GET: render the DM configuration-history modal.
    POST: append the posted 'log' text to the configuration history.
    """
    if request.method == 'GET':
        content_type = ContentType.objects.get_for_model(DMFileSet)
        context = RequestContext(request, {
            "title": "Data Management Configuration History",
            "pk": 0,
            "cttype": content_type.id,
        })
        return render_to_response("rundb/common/modal_event_log.html", context_instance=context)
    elif request.method == 'POST':
        _add_dm_configuration_log(request, request.POST.get('log'))
        return HttpResponse()
def dm_history(request):
    """Render the Data Management history page, passing the distinct set of
    usernames found in Results event logs (used for filtering)."""
    history = EventLog.objects.for_model(Results)
    context = RequestContext(request, {
        'usernames': set(history.values_list('username', flat=True)),
    })
    return render_to_response("rundb/data/dm_history.html", context_instance=context)
def dm_list_files(request, resultPK, action):
    """Returns the list of files that are selected for the given file categories for the given Report"""
    categories = json.loads(request.body)['categories']
    filestat = DMFileStat.objects.select_related().filter(
        dmfileset__type__in=categories, result__id=int(resultPK))[0]
    report_dir = filestat.result.get_report_dir()
    # Hack - generate serialized json file for the DataXfer plugin
    dmactions.write_serialized_json(filestat.result, report_dir)
    to_process, _to_keep = dmactions.get_file_list(filestat)
    payload = {
        'files_to_transfer': to_process,
        'start_dirs': [report_dir, filestat.result.experiment.expDir],
    }
    return HttpResponse(json.dumps(payload), mimetype="application/json")
@login_required
def browse_backup_dirs(request, path):
    """Browse a backup directory tree and render the directory-picker modal.

    Only paths inside known backup mount points may be listed.  With the
    'exclude_archived' query parameter set to 'true', directories that
    already hold archived/exported data are rendered unselectable.
    """
    from iondb.utils import devices

    def bread_crumbs(path):
        # Walk up to the containing mount point, accumulating (path, label)
        # pairs ordered from the mount point down to the requested path.
        crumbs = []
        while path != '/':
            if os.path.ismount(path):
                crumbs.insert(0, (path, path))
                break
            head, tail = os.path.split(path)
            crumbs.insert(0, (path, tail))
            path = head
        return crumbs

    backup_dirs = get_dir_choices()[1:]  # drop the leading (None, 'None') entry
    breadcrumbs = []
    dir_info = []
    file_info = []
    path_allowed = True

    if path:
        if not os.path.isabs(path):
            path = os.path.join('/', path)
        # only allow directories inside mount points
        path_allowed = any(path.startswith(d) for d, n in backup_dirs)
        if not path_allowed:
            return HttpResponseServerError("Directory not allowed: %s" % path)

    exclude_archived = request.GET.get('exclude_archived', 'false')

    if path and path_allowed:
        breadcrumbs = bread_crumbs(path)
        try:
            dirs, files = file_browse.list_directory(path)
        except Exception as e:
            return HttpResponseServerError(str(e))
        dirs.sort()
        files.sort()
        for name, full_dir_path, stat in dirs:
            dir_path = full_dir_path
            if exclude_archived == 'true':
                # A blank path disables selection of directories that
                # already contain archived/exported data.
                if name in ('archivedReports', 'exportedReports') or glob.glob(os.path.join(full_dir_path, 'serialized_*.json')):
                    dir_path = ''
            date = datetime.fromtimestamp(stat.st_mtime)
            try:
                size = file_browse.dir_size(full_dir_path)
            except Exception:  # was a bare except: keep browsing on size errors
                size = ''
            dir_info.append((name, dir_path, date, size))
        for name, full_file_path, stat in files:
            file_path = os.path.join(path, name)
            date = datetime.fromtimestamp(stat.st_mtime)
            try:
                size = file_browse.format_units(stat.st_size)
            except Exception:  # was a bare except: keep browsing on size errors
                size = ''
            file_info.append((name, file_path, date, size))

    ctxd = {"backup_dirs": backup_dirs, "selected_path": path, "breadcrumbs": breadcrumbs,
            "dirs": dir_info, "files": file_info, "exclude_archived": exclude_archived}
    return render_to_response("rundb/data/modal_browse_dirs.html", ctxd)
def import_data(request):
    """GET: render the import-data modal.
    POST: queue one data_import celery task per posted result entry.
    """
    if request.method == "GET":
        context = RequestContext(request, {"backup_dirs": get_dir_choices()})
        return render_to_response("rundb/data/modal_import_data.html", context_instance=context)
    elif request.method == "POST":
        for result in json.loads(request.body):
            name = result.pop('name')
            copy_data = bool(result.pop('copy_data', False))
            copy_report = bool(result.pop('copy_report', False))
            data_import.delay(name, result, request.user.username, copy_data, copy_report)
        return HttpResponse()
def import_data_find(request, path):
    """Search a directory tree for importable data (exported or archived
    with TS 4.0+) and render the import modal with what was found."""
    if not os.path.isabs(path):
        path = os.path.join('/', path.strip())
    if not (path and os.path.exists(path)):
        return HttpResponseNotFound('Cannot access path: %s.' % path)
    found_results = find_data_to_import(path)
    if len(found_results) == 0:
        return HttpResponseNotFound('Did not find any data exported or archived with TS version 4.0 or later in %s.' % path)
    results_list = [
        {
            'name': found['name'],
            'report': found['categories'].get(dmactions_types.OUT),
            'basecall': found['categories'].get(dmactions_types.BASE),
            'sigproc': found['categories'].get(dmactions_types.SIG),
        }
        for found in found_results
    ]
    ctxd = {
        "dm_types": [dmactions_types.OUT, dmactions_types.BASE, dmactions_types.SIG],
        'results_list': results_list,
    }
    return render_to_response("rundb/data/modal_import_data.html", ctxd)
def import_data_log(request, path):
    # display log file
    # Renders the raw contents of an import log file inside the report-log
    # modal, then rewrites the modal header via a small inline script.
    # NOTE(review): file contents are interpolated into HTML without
    # escaping -- acceptable for trusted local logs, but confirm 'path'
    # cannot be attacker-controlled.
    if not os.path.isabs(path): path = os.path.join('/', path)
    contents = ''
    with open(path, 'rb') as f:
        contents = f.read()
    response = '<div><pre>%s</pre></div>' % contents
    # add some js to overwrite modal header
    response += '<script type="text/javascript">$(function() { $("#modal_report_log .modal-header").find("h2").text("Data Import Log");});</script>'
    return HttpResponse(response, mimetype='text/plain')
def dmactions_jobs(request):
    """Return the active Data Management actions as JSON.

    Collects DMFileStats in an in-progress or pending action state, then
    enriches each entry with date, username and comment parsed from the
    EventLog entries of its report.
    """
    active_dmfilestats = DMFileStat.objects.filter(
        action_state__in=['AG', 'DG', 'EG', 'SA', 'SE', 'SD', 'IG']).select_related('result', 'dmfileset')
    active_logs = EventLog.objects.filter(
        object_pk__in=active_dmfilestats.values_list('result__pk', flat=True))
    # Renamed local: the original shadowed the view function's own name.
    jobs = []
    for dmfilestat in active_dmfilestats:
        d = {
            'pk': dmfilestat.pk,
            'state': dmfilestat.get_action_state_display(),
            'diskspace': "%.1f" % (dmfilestat.diskspace or 0),
            'result_pk': dmfilestat.result.pk,
            'resultsName': dmfilestat.result.resultsName,
            'category': dmfilestat.dmfileset.type,
            'destination': dmfilestat.archivepath if dmfilestat.archivepath else ''
        }
        # parse date and comment from logs
        for log in active_logs.filter(object_pk=d['result_pk']).order_by('-created'):
            if d['category'] in log.text and d['state'] in log.text:
                d['started_on'] = log.created
                d['username'] = log.username
                try:
                    d['comment'] = log.text.split('User Comment:')[1].strip()
                except IndexError:  # was a bare except: no 'User Comment:' marker
                    d['comment'] = log.text
        jobs.append(d)
    ctx = json.dumps({'objects': jobs, 'total': len(jobs)}, cls=DjangoJSONEncoder)
    return HttpResponse(ctx, content_type="application/json")
def cancel_pending_dmaction(request, pk):
    """Cancel a pending (SA/SE) DM action on a DMFileStat and log the
    cancellation against the owning report; no-op for other states."""
    dmfilestat = DMFileStat.objects.get(pk=pk)
    # Build the message before the state change so it names the state
    # that was actually cancelled.
    message = 'Canceled %s for %s' % (dmfilestat.get_action_state_display(), dmfilestat.dmfileset.type)
    if dmfilestat.action_state in ['SA', 'SE']:
        dmfilestat.setactionstate('L')
        EventLog.objects.add_entry(dmfilestat.result, message, username=request.user.username)
    return HttpResponse()
| 55,546 | 0 | 1,183 |
5a668b2ae45d6e68e24efd310be550e8ab5a16b7 | 821 | py | Python | backend/nezbank/migrations/0003_auto_20211231_1638.py | romanselivanov/djangobank | adf08b1e80043d4b8199f89f4570580a73b61504 | [
"MIT"
] | null | null | null | backend/nezbank/migrations/0003_auto_20211231_1638.py | romanselivanov/djangobank | adf08b1e80043d4b8199f89f4570580a73b61504 | [
"MIT"
] | null | null | null | backend/nezbank/migrations/0003_auto_20211231_1638.py | romanselivanov/djangobank | adf08b1e80043d4b8199f89f4570580a73b61504 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-31 13:38
from django.db import migrations, models
| 28.310345 | 104 | 0.600487 | # Generated by Django 3.2.9 on 2021-12-31 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjusts field options (decimal precision,
    max_length, Russian verbose_name labels) on Account.rate,
    AccountType.currency and AccountType.value."""
    dependencies = [
        ('nezbank', '0002_auto_20211228_1743'),
    ]
    operations = [
        # Account.rate -- verbose_name is Russian for "funds in account".
        migrations.AlterField(
            model_name='account',
            name='rate',
            field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Средств на счете'),
        ),
        # AccountType.currency -- verbose_name is Russian for "currency".
        migrations.AlterField(
            model_name='accounttype',
            name='currency',
            field=models.CharField(max_length=50, verbose_name='Валюта'),
        ),
        # AccountType.value -- verbose_name is Russian for "exchange rate".
        migrations.AlterField(
            model_name='accounttype',
            name='value',
            field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Курс'),
        ),
    ]
| 0 | 731 | 23 |
d7be815f9d7c6f5cb324451cc23b3dfad7481e53 | 2,839 | py | Python | api/models.py | LukJA/trent | d79b38a2bfe3a2a510b0b6ac314fe2a0ccfad546 | [
"WTFPL"
] | null | null | null | api/models.py | LukJA/trent | d79b38a2bfe3a2a510b0b6ac314fe2a0ccfad546 | [
"WTFPL"
] | 1 | 2021-01-25T16:07:57.000Z | 2021-01-25T16:07:57.000Z | api/models.py | LukJA/trent | d79b38a2bfe3a2a510b0b6ac314fe2a0ccfad546 | [
"WTFPL"
] | null | null | null | from django.db import models
import random
import numpy as np
| 35.936709 | 98 | 0.48186 | from django.db import models
import random
import numpy as np
class Fund(models.Model):
    """A fund with expected growth statistics used to project the value of a
    yearly investment schedule."""
    # Short ticker-like identifier (e.g. 'VASGX' per the table in the
    # predict_value docstring).
    name = models.CharField(max_length=5)
    # Expected annual growth rate (fractional, e.g. 0.07).
    mean = models.DecimalField(max_digits=4, decimal_places=3)
    # Variance of returns; stored but not used by predict_value.
    variance = models.DecimalField(max_digits=4, decimal_places=3)
    # Per-year widening of the confidence band (see upper/lower below).
    dsdt = models.DecimalField(max_digits=4, decimal_places=3)
    # Constant component of the confidence band width.
    dsdtC = models.DecimalField(max_digits=4, decimal_places=3)
    def __str__(self):
        return self.name
    def predict_value(self, invest):
        """
        Predict the value of "investment" (a list of the investment every year) after time.
        Time needs to be in the same units as growth.
        Returns:
        - Dict of projected values and times
        - Dict of static values and times
        dsdtDict = {"VASGX": 0.016, "VFIFX": 0.017, "VMIGX": 0.04, "VMMSX": 0.035, "VSCGX": 0.009}
        dsdtCDict = {"VASGX": 0.094, "VFIFX": 0.097, "VMIGX": 0.1, "VMMSX": 0.131, "VSCGX": 0.059}
        """
        time = len(invest)
        value = 0
        static_val = 0
        projection = {}
        static = {}
        value_ls = []
        static_ls = []
        upper = []
        lower = []
        time_ls = []
        # Fixed schedule of yearly withdrawals (negative cash flows) applied
        # on top of the investment schedule.  NOTE(review): 60 entries, so
        # invest lists longer than 60 will raise IndexError -- confirm the
        # caller bounds len(invest).
        checkpoint = np.array([ 0., 0., 0., 0., 0., 0., 0.,
                                0., 0., 0., -10000., -10000., -10000., -10000.,
                                -10000., -20000., -20000., -20000., -20000., -20000., -20000.,
                                -20000., -20000., -20000., -20000., -20000., -20000., -20000.,
                                -30000., -30000., -30000., -30000., -10000., -20000., -20000.,
                                -20000., -20000., 0., 0., 0., 0., 0.,
                                0., 0., 0., 0., 0., 0., 0.,
                                0., 0., 0., 0., 0., 0., 0.,
                                0., 0., 0., 0.])
        for i in range(time):
            # Grown balance cannot go below zero; the static (uninvested)
            # tally is allowed to (it tracks raw contributions).
            value += invest[i] + checkpoint[i]
            if value < 0:
                value = 0
            static_val += invest[i] + checkpoint[i]
            time_ls.append(i)
            # Yearly gain above 2000 is taxable.  NOTE(review): 2000 looks
            # like a tax-free allowance and 0.381 a marginal tax rate --
            # confirm against the intended jurisdiction.
            d_value = value * float(self.mean)
            if d_value > 2000:
                taxable = d_value - 2000
            else:
                taxable = 0
            value *= 1 + float(self.mean)
            value -= taxable * 0.381
            # Confidence band grows linearly with time: +/- (dsdt*i + dsdtC).
            up = value * (1 + (float(self.dsdt)) * i + float(self.dsdtC))
            low = value * (1 - (float(self.dsdt)) * i - float(self.dsdtC))
            value_ls.append(value)
            static_ls.append(static_val)
            upper.append(up)
            lower.append(low)
        projection["Time"] = time_ls
        projection["Value"] = value_ls
        projection["Lower"] = lower
        projection["Higher"] = upper
        static["Time"] = time_ls
        static["Value"] = static_ls
        return projection, static
| 22 | 2,731 | 23 |
3fa4a50263395c37bf24777fe66ffb3c12d5884e | 16,918 | py | Python | python/services/recaptchaenterprise/key.py | GoogleCloudPlatform/declarative-resource-client-library | ef28ca390518a8266cfe5a5e0d5abb8f45f5c7d2 | [
"Apache-2.0"
] | 16 | 2021-01-08T19:35:22.000Z | 2022-03-23T16:23:49.000Z | python/services/recaptchaenterprise/key.py | GoogleCloudPlatform/declarative-resource-client-library | ef28ca390518a8266cfe5a5e0d5abb8f45f5c7d2 | [
"Apache-2.0"
] | 1 | 2021-01-13T04:59:56.000Z | 2021-01-13T04:59:56.000Z | python/services/recaptchaenterprise/key.py | GoogleCloudPlatform/declarative-resource-client-library | ef28ca390518a8266cfe5a5e0d5abb8f45f5c7d2 | [
"Apache-2.0"
] | 11 | 2021-03-18T11:27:28.000Z | 2022-03-12T06:49:14.000Z | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.recaptchaenterprise import key_pb2
from google3.cloud.graphite.mmv2.services.google.recaptchaenterprise import key_pb2_grpc
from typing import List
| 37.182418 | 103 | 0.68235 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.recaptchaenterprise import key_pb2
from google3.cloud.graphite.mmv2.services.google.recaptchaenterprise import key_pb2_grpc
from typing import List
class Key(object):
    """Declarative client for a reCAPTCHA Enterprise Key resource.

    Generated DCL wrapper: converts between this Python object and its
    protobuf form, and applies/deletes/lists the resource over gRPC via
    the service stub.  NOTE: create_time is accepted by the constructor
    but not stored -- presumably an output-only field; it is populated on
    self by apply() from the server response.
    """
    def __init__(
        self,
        name: str = None,
        display_name: str = None,
        web_settings: dict = None,
        android_settings: dict = None,
        ios_settings: dict = None,
        labels: dict = None,
        create_time: str = None,
        testing_options: dict = None,
        project: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        self.name = name
        self.display_name = display_name
        self.web_settings = web_settings
        self.android_settings = android_settings
        self.ios_settings = ios_settings
        self.labels = labels
        self.testing_options = testing_options
        self.project = project
        self.service_account_file = service_account_file
    def apply(self):
        """Create or update the Key server-side, then refresh this object's
        fields from the server response.  Unset message fields are cleared
        explicitly so the server sees them as absent."""
        stub = key_pb2_grpc.RecaptchaenterpriseKeyServiceStub(channel.Channel())
        request = key_pb2.ApplyRecaptchaenterpriseKeyRequest()
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)
        if KeyWebSettings.to_proto(self.web_settings):
            request.resource.web_settings.CopyFrom(
                KeyWebSettings.to_proto(self.web_settings)
            )
        else:
            request.resource.ClearField("web_settings")
        if KeyAndroidSettings.to_proto(self.android_settings):
            request.resource.android_settings.CopyFrom(
                KeyAndroidSettings.to_proto(self.android_settings)
            )
        else:
            request.resource.ClearField("android_settings")
        if KeyIosSettings.to_proto(self.ios_settings):
            request.resource.ios_settings.CopyFrom(
                KeyIosSettings.to_proto(self.ios_settings)
            )
        else:
            request.resource.ClearField("ios_settings")
        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)
        if KeyTestingOptions.to_proto(self.testing_options):
            request.resource.testing_options.CopyFrom(
                KeyTestingOptions.to_proto(self.testing_options)
            )
        else:
            request.resource.ClearField("testing_options")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        request.service_account_file = self.service_account_file
        response = stub.ApplyRecaptchaenterpriseKey(request)
        self.name = Primitive.from_proto(response.name)
        self.display_name = Primitive.from_proto(response.display_name)
        self.web_settings = KeyWebSettings.from_proto(response.web_settings)
        self.android_settings = KeyAndroidSettings.from_proto(response.android_settings)
        self.ios_settings = KeyIosSettings.from_proto(response.ios_settings)
        self.labels = Primitive.from_proto(response.labels)
        self.create_time = Primitive.from_proto(response.create_time)
        self.testing_options = KeyTestingOptions.from_proto(response.testing_options)
        self.project = Primitive.from_proto(response.project)
    def delete(self):
        """Delete the Key identified by this object's fields server-side."""
        stub = key_pb2_grpc.RecaptchaenterpriseKeyServiceStub(channel.Channel())
        request = key_pb2.DeleteRecaptchaenterpriseKeyRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)
        if KeyWebSettings.to_proto(self.web_settings):
            request.resource.web_settings.CopyFrom(
                KeyWebSettings.to_proto(self.web_settings)
            )
        else:
            request.resource.ClearField("web_settings")
        if KeyAndroidSettings.to_proto(self.android_settings):
            request.resource.android_settings.CopyFrom(
                KeyAndroidSettings.to_proto(self.android_settings)
            )
        else:
            request.resource.ClearField("android_settings")
        if KeyIosSettings.to_proto(self.ios_settings):
            request.resource.ios_settings.CopyFrom(
                KeyIosSettings.to_proto(self.ios_settings)
            )
        else:
            request.resource.ClearField("ios_settings")
        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)
        if KeyTestingOptions.to_proto(self.testing_options):
            request.resource.testing_options.CopyFrom(
                KeyTestingOptions.to_proto(self.testing_options)
            )
        else:
            request.resource.ClearField("testing_options")
        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)
        response = stub.DeleteRecaptchaenterpriseKey(request)
    @classmethod
    def list(self, project, service_account_file=""):
        """Return the server's list of Keys in the given project."""
        stub = key_pb2_grpc.RecaptchaenterpriseKeyServiceStub(channel.Channel())
        request = key_pb2.ListRecaptchaenterpriseKeyRequest()
        request.service_account_file = service_account_file
        request.Project = project
        return stub.ListRecaptchaenterpriseKey(request).items
    def to_proto(self):
        """Serialize this object to a RecaptchaenterpriseKey proto message."""
        resource = key_pb2.RecaptchaenterpriseKey()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.display_name):
            resource.display_name = Primitive.to_proto(self.display_name)
        if KeyWebSettings.to_proto(self.web_settings):
            resource.web_settings.CopyFrom(KeyWebSettings.to_proto(self.web_settings))
        else:
            resource.ClearField("web_settings")
        if KeyAndroidSettings.to_proto(self.android_settings):
            resource.android_settings.CopyFrom(
                KeyAndroidSettings.to_proto(self.android_settings)
            )
        else:
            resource.ClearField("android_settings")
        if KeyIosSettings.to_proto(self.ios_settings):
            resource.ios_settings.CopyFrom(KeyIosSettings.to_proto(self.ios_settings))
        else:
            resource.ClearField("ios_settings")
        if Primitive.to_proto(self.labels):
            resource.labels = Primitive.to_proto(self.labels)
        if KeyTestingOptions.to_proto(self.testing_options):
            resource.testing_options.CopyFrom(
                KeyTestingOptions.to_proto(self.testing_options)
            )
        else:
            resource.ClearField("testing_options")
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        return resource
class KeyWebSettings(object):
    """Web-platform settings of a Key; converts to/from the
    RecaptchaenterpriseKeyWebSettings proto message."""
    def __init__(
        self,
        allow_all_domains: bool = None,
        allowed_domains: list = None,
        allow_amp_traffic: bool = None,
        integration_type: str = None,
        challenge_security_preference: str = None,
    ):
        self.allow_all_domains = allow_all_domains
        self.allowed_domains = allowed_domains
        self.allow_amp_traffic = allow_amp_traffic
        self.integration_type = integration_type
        self.challenge_security_preference = challenge_security_preference
    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = key_pb2.RecaptchaenterpriseKeyWebSettings()
        if Primitive.to_proto(resource.allow_all_domains):
            res.allow_all_domains = Primitive.to_proto(resource.allow_all_domains)
        if Primitive.to_proto(resource.allowed_domains):
            res.allowed_domains.extend(Primitive.to_proto(resource.allowed_domains))
        if Primitive.to_proto(resource.allow_amp_traffic):
            res.allow_amp_traffic = Primitive.to_proto(resource.allow_amp_traffic)
        if KeyWebSettingsIntegrationTypeEnum.to_proto(resource.integration_type):
            res.integration_type = KeyWebSettingsIntegrationTypeEnum.to_proto(
                resource.integration_type
            )
        if KeyWebSettingsChallengeSecurityPreferenceEnum.to_proto(
            resource.challenge_security_preference
        ):
            res.challenge_security_preference = KeyWebSettingsChallengeSecurityPreferenceEnum.to_proto(
                resource.challenge_security_preference
            )
        return res
    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy message."""
        if not resource:
            return None
        return KeyWebSettings(
            allow_all_domains=Primitive.from_proto(resource.allow_all_domains),
            allowed_domains=Primitive.from_proto(resource.allowed_domains),
            allow_amp_traffic=Primitive.from_proto(resource.allow_amp_traffic),
            integration_type=KeyWebSettingsIntegrationTypeEnum.from_proto(
                resource.integration_type
            ),
            challenge_security_preference=KeyWebSettingsChallengeSecurityPreferenceEnum.from_proto(
                resource.challenge_security_preference
            ),
        )
class KeyWebSettingsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [KeyWebSettings.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [KeyWebSettings.from_proto(i) for i in resources]
class KeyAndroidSettings(object):
    """Android-platform settings of a Key; converts to/from the
    RecaptchaenterpriseKeyAndroidSettings proto message."""
    def __init__(
        self, allow_all_package_names: bool = None, allowed_package_names: list = None
    ):
        self.allow_all_package_names = allow_all_package_names
        self.allowed_package_names = allowed_package_names
    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = key_pb2.RecaptchaenterpriseKeyAndroidSettings()
        if Primitive.to_proto(resource.allow_all_package_names):
            res.allow_all_package_names = Primitive.to_proto(
                resource.allow_all_package_names
            )
        if Primitive.to_proto(resource.allowed_package_names):
            res.allowed_package_names.extend(
                Primitive.to_proto(resource.allowed_package_names)
            )
        return res
    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy message."""
        if not resource:
            return None
        return KeyAndroidSettings(
            allow_all_package_names=Primitive.from_proto(
                resource.allow_all_package_names
            ),
            allowed_package_names=Primitive.from_proto(resource.allowed_package_names),
        )
class KeyAndroidSettingsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [KeyAndroidSettings.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [KeyAndroidSettings.from_proto(i) for i in resources]
class KeyIosSettings(object):
    """iOS-platform settings of a Key; converts to/from the
    RecaptchaenterpriseKeyIosSettings proto message."""
    def __init__(
        self, allow_all_bundle_ids: bool = None, allowed_bundle_ids: list = None
    ):
        self.allow_all_bundle_ids = allow_all_bundle_ids
        self.allowed_bundle_ids = allowed_bundle_ids
    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = key_pb2.RecaptchaenterpriseKeyIosSettings()
        if Primitive.to_proto(resource.allow_all_bundle_ids):
            res.allow_all_bundle_ids = Primitive.to_proto(resource.allow_all_bundle_ids)
        if Primitive.to_proto(resource.allowed_bundle_ids):
            res.allowed_bundle_ids.extend(
                Primitive.to_proto(resource.allowed_bundle_ids)
            )
        return res
    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy message."""
        if not resource:
            return None
        return KeyIosSettings(
            allow_all_bundle_ids=Primitive.from_proto(resource.allow_all_bundle_ids),
            allowed_bundle_ids=Primitive.from_proto(resource.allowed_bundle_ids),
        )
class KeyIosSettingsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [KeyIosSettings.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [KeyIosSettings.from_proto(i) for i in resources]
class KeyTestingOptions(object):
    """Testing options of a Key; converts to/from the
    RecaptchaenterpriseKeyTestingOptions proto message."""
    def __init__(self, testing_score: float = None, testing_challenge: str = None):
        self.testing_score = testing_score
        self.testing_challenge = testing_challenge
    @classmethod
    def to_proto(self, resource):
        """Serialize to proto; returns None for a falsy resource."""
        if not resource:
            return None
        res = key_pb2.RecaptchaenterpriseKeyTestingOptions()
        if Primitive.to_proto(resource.testing_score):
            res.testing_score = Primitive.to_proto(resource.testing_score)
        if KeyTestingOptionsTestingChallengeEnum.to_proto(resource.testing_challenge):
            res.testing_challenge = KeyTestingOptionsTestingChallengeEnum.to_proto(
                resource.testing_challenge
            )
        return res
    @classmethod
    def from_proto(self, resource):
        """Deserialize from proto; returns None for a falsy message."""
        if not resource:
            return None
        return KeyTestingOptions(
            testing_score=Primitive.from_proto(resource.testing_score),
            testing_challenge=KeyTestingOptionsTestingChallengeEnum.from_proto(
                resource.testing_challenge
            ),
        )
class KeyTestingOptionsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [KeyTestingOptions.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [KeyTestingOptions.from_proto(i) for i in resources]
class KeyWebSettingsIntegrationTypeEnum(object):
    """Maps short integration-type names to/from the prefixed proto enum."""
    _PREFIX = "RecaptchaenterpriseKeyWebSettingsIntegrationTypeEnum"
    @classmethod
    def to_proto(self, resource):
        # Falsy values (unset) are passed through unchanged.
        if not resource:
            return resource
        enum_type = key_pb2.RecaptchaenterpriseKeyWebSettingsIntegrationTypeEnum
        return enum_type.Value("%s%s" % (self._PREFIX, resource))
    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        enum_type = key_pb2.RecaptchaenterpriseKeyWebSettingsIntegrationTypeEnum
        return enum_type.Name(resource)[len(self._PREFIX):]
class KeyWebSettingsChallengeSecurityPreferenceEnum(object):
    """Maps short challenge-security-preference names to/from the prefixed
    proto enum."""
    _PREFIX = "RecaptchaenterpriseKeyWebSettingsChallengeSecurityPreferenceEnum"
    @classmethod
    def to_proto(self, resource):
        # Falsy values (unset) are passed through unchanged.
        if not resource:
            return resource
        enum_type = key_pb2.RecaptchaenterpriseKeyWebSettingsChallengeSecurityPreferenceEnum
        return enum_type.Value("%s%s" % (self._PREFIX, resource))
    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        enum_type = key_pb2.RecaptchaenterpriseKeyWebSettingsChallengeSecurityPreferenceEnum
        return enum_type.Name(resource)[len(self._PREFIX):]
class KeyTestingOptionsTestingChallengeEnum(object):
    """Maps short testing-challenge names to/from the prefixed proto enum."""
    _PREFIX = "RecaptchaenterpriseKeyTestingOptionsTestingChallengeEnum"
    @classmethod
    def to_proto(self, resource):
        # Falsy values (unset) are passed through unchanged.
        if not resource:
            return resource
        enum_type = key_pb2.RecaptchaenterpriseKeyTestingOptionsTestingChallengeEnum
        return enum_type.Value("%s%s" % (self._PREFIX, resource))
    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        enum_type = key_pb2.RecaptchaenterpriseKeyTestingOptionsTestingChallengeEnum
        return enum_type.Name(resource)[len(self._PREFIX):]
class Primitive(object):
    """Pass-through converter for primitive proto values; falsy values map
    to the proto scalar default (empty string) on the way in."""
    @classmethod
    def to_proto(self, s):
        # Proto3 scalar fields use "" as the unset default.
        return s if s else ""
    @classmethod
    def from_proto(self, s):
        return s
| 14,281 | 1,498 | 299 |
45ef67b53de68d0622d120dac4f8fe6e7a75efc7 | 597 | py | Python | examples/starting_template_simple.py | jgrigonis/arcade | 9b624da7da52e3909f6e82c552446b90249041f1 | [
"MIT"
] | 1 | 2021-05-23T20:30:46.000Z | 2021-05-23T20:30:46.000Z | examples/starting_template_simple.py | jgrigonis/arcade | 9b624da7da52e3909f6e82c552446b90249041f1 | [
"MIT"
] | null | null | null | examples/starting_template_simple.py | jgrigonis/arcade | 9b624da7da52e3909f6e82c552446b90249041f1 | [
"MIT"
] | null | null | null | import arcade
SCREEN_WIDTH = 500
SCREEN_HEIGHT = 600
class MyApplication(arcade.Window):
    """
    Main application class.
    """

    def __init__(self, width, height):
        """Create the window and set a plain white background color."""
        # Bug fix: without this constructor the background color was never
        # set, so the window rendered with arcade's default background.
        super().__init__(width, height)
        arcade.set_background_color(arcade.color.WHITE)

    def on_draw(self):
        """
        Render the screen.
        """
        arcade.start_render()

    def on_mouse_press(self, x, y, button, key_modifiers):
        """
        Called when the user presses a mouse button.
        """
        pass
window = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)
arcade.run()
| 17.558824 | 58 | 0.61474 | import arcade
SCREEN_WIDTH = 500
SCREEN_HEIGHT = 600
class MyApplication(arcade.Window):
    """
    Main application class.
    """
    def __init__(self, width, height):
        # Create the window and give it a plain white background.
        super().__init__(width, height)
        arcade.set_background_color(arcade.color.WHITE)
    def on_draw(self):
        """
        Render the screen.
        """
        # start_render() clears the screen to the background color before
        # any drawing commands are issued.
        arcade.start_render()
    def on_mouse_press(self, x, y, button, key_modifiers):
        """
        Called when the user presses a mouse button.
        """
        pass
window = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)
arcade.run()
| 110 | 0 | 27 |
b13e2f504aaa222f8449186cd38cc02cfd083d3e | 90 | py | Python | Blog_API/apps.py | pawel-zielinski/Blog_Project | 2cef9ed1a5d2d4128f7368c9843f5e6d1b3ef1a0 | [
"MIT"
] | null | null | null | Blog_API/apps.py | pawel-zielinski/Blog_Project | 2cef9ed1a5d2d4128f7368c9843f5e6d1b3ef1a0 | [
"MIT"
] | null | null | null | Blog_API/apps.py | pawel-zielinski/Blog_Project | 2cef9ed1a5d2d4128f7368c9843f5e6d1b3ef1a0 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 15 | 33 | 0.755556 | from django.apps import AppConfig
class BlogApiConfig(AppConfig):
    """Django application configuration for the Blog_API app."""
    name = 'Blog_API'
| 0 | 32 | 23 |
1b3e0b5f449b4f057a00a91cbde0336dfb73c591 | 4,976 | py | Python | src/cli.py | VFRAMEio/vframe | 0dbc991697a6de47ccf3b65ced5b1201a45156b6 | [
"MIT"
] | 43 | 2018-11-21T10:22:17.000Z | 2022-03-28T22:21:26.000Z | src/cli.py | VFRAMEio/vframe | 0dbc991697a6de47ccf3b65ced5b1201a45156b6 | [
"MIT"
] | null | null | null | src/cli.py | VFRAMEio/vframe | 0dbc991697a6de47ccf3b65ced5b1201a45156b6 | [
"MIT"
] | 7 | 2018-11-22T11:41:01.000Z | 2021-08-05T13:55:09.000Z | #!/usr/bin/env python
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import os
from os.path import join
import sys
from pathlib import Path
import time
import importlib
from glob import iglob
import argparse
import click
from vframe.settings import app_cfg
from vframe.utils import log_utils
# -----------------------------------------------------------------------------
#
# Argparse pre-process
#
# -----------------------------------------------------------------------------
def choices_description(plugins):
"""Generate custom help menu with colored text
"""
clr_h: str = '\033[1m\033[94m'
clr_t: str = '\033[0m'
sp_max: int = 20 + len(clr_h) + len(clr_t)
t = ['Commands and plugins:']
for plugin in plugins:
t_cli = f'{clr_h}{plugin.name}{clr_t}'
sp = sp_max - len(t_cli)
t.append(f'\t{t_cli}{" " * sp}{plugin.description}')
result: str = "\n".join(t)
return result
# intercept first argument using argparse to select command group
argv_tmp = sys.argv
sys.argv = sys.argv[:2]
help_desc = f"\033[1m\033[94mVFRAME CLI ({app_cfg.VERSION})\033[0m"
ap = argparse.ArgumentParser(usage="vf [command]",
description=help_desc,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=choices_description(app_cfg.plugins.plugins))
ap.add_argument('commands', choices=app_cfg.plugins.keys(), metavar='[command]')
# exit and how help if no command group supplied
if len(sys.argv) < 2:
ap.print_help()
sys.exit(1)
args = ap.parse_args()
sys.argv = argv_tmp
sys.argv.pop(1) # remove first argument (command group)
plugin_group = app_cfg.plugins.get(args.commands) # create plugin
# -----------------------------------------------------------------------------
#
# Click CLI
#
# -----------------------------------------------------------------------------
# @click.option('--pipe', 'opt_pipe', type=bool, default=plugin_group.pipe)
@click.group(chain=plugin_group.pipe, no_args_is_help=True, help=help_desc)
@click.pass_context
def cli(ctx, opt_pipe=True):
"""\033[1m\033[94mVFRAME\033[0m
"""
# print("plugin_group.pipe", plugin_group.pipe)
# opt_pipe = plugin_group.pipe
opt_verbosity = int(os.environ.get("VFRAME_VERBOSITY", 4)) # 1 - 5
# store reference to opt_pipe for access in callback
ctx.opts = {'opt_pipe': plugin_group.pipe}
# store user object variables
ctx.ensure_object(dict)
ctx.obj['start_time'] = time.time()
# init global logger
log_utils.Logger.create(verbosity=opt_verbosity)
# def process_commands(processors, opt_pipe):
@cli.resultcallback()
def process_commands(processors):
"""This result callback is invoked with an iterable of all the chained
subcommands. As in this example each subcommand returns a function
we can chain them together to feed one into the other, similar to how
a pipe on UNIX works. Copied from Click's docs.
"""
if not plugin_group.pipe:
return
def sink():
"""This is the end of the pipeline
"""
while True:
yield
sink = sink()
sink.__next__()
# Compose and prime processors
for processor in reversed(processors):
sink = processor(sink)
sink.__next__()
sink.close()
# -----------------------------------------------------------------------------
#
# Setup commands
#
# -----------------------------------------------------------------------------
# append files to click groups
import vframe.utils.im_utils
for plugin_script in plugin_group.scripts:
fp_root = '/'.join(plugin_script.filepath.split('/')[:2]) # eg plugins/vframe_custom_plugin
fp_root = join(app_cfg.DIR_SRC, fp_root)
# print(fp_root)
if not Path(fp_root).is_dir():
print(f'{50 * "*"}\nWARNING: {fp_root} does not exist\n{50 * "*"}')
continue
# append plugin directory to import paths
if fp_root not in sys.path:
sys.path.append(fp_root)
# glob for python files inside command directory
fp_dir_glob = join(app_cfg.DIR_SRC, plugin_script.filepath, '*.py')
for fp_py in iglob(fp_dir_glob):
fn = Path(fp_py).stem
# skip files starting with "_"
if plugin_script.include_hidden is False and fn.startswith('_'):
continue
fp_module = str(Path(fp_py).relative_to(Path(app_cfg.DIR_SRC)))
fp_import = fp_module.replace('/', '.').replace('.py', '')
try:
module = importlib.import_module(fp_import)
cli.add_command(module.cli, name=fn)
except Exception as e:
msg = f'Could not import "{fn}": {e}'
print(f"{app_cfg.TERM_COLORS.FAIL}{msg}{app_cfg.TERM_COLORS.ENDC}")
# -----------------------------------------------------------------------------
#
# Start CLI application
#
# -----------------------------------------------------------------------------
cli() | 29.270588 | 94 | 0.581994 | #!/usr/bin/env python
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import os
from os.path import join
import sys
from pathlib import Path
import time
import importlib
from glob import iglob
import argparse
import click
from vframe.settings import app_cfg
from vframe.utils import log_utils
# -----------------------------------------------------------------------------
#
# Argparse pre-process
#
# -----------------------------------------------------------------------------
def choices_description(plugins):
"""Generate custom help menu with colored text
"""
clr_h: str = '\033[1m\033[94m'
clr_t: str = '\033[0m'
sp_max: int = 20 + len(clr_h) + len(clr_t)
t = ['Commands and plugins:']
for plugin in plugins:
t_cli = f'{clr_h}{plugin.name}{clr_t}'
sp = sp_max - len(t_cli)
t.append(f'\t{t_cli}{" " * sp}{plugin.description}')
result: str = "\n".join(t)
return result
# intercept first argument using argparse to select command group
argv_tmp = sys.argv
sys.argv = sys.argv[:2]
help_desc = f"\033[1m\033[94mVFRAME CLI ({app_cfg.VERSION})\033[0m"
ap = argparse.ArgumentParser(usage="vf [command]",
description=help_desc,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=choices_description(app_cfg.plugins.plugins))
ap.add_argument('commands', choices=app_cfg.plugins.keys(), metavar='[command]')
# exit and how help if no command group supplied
if len(sys.argv) < 2:
ap.print_help()
sys.exit(1)
args = ap.parse_args()
sys.argv = argv_tmp
sys.argv.pop(1) # remove first argument (command group)
plugin_group = app_cfg.plugins.get(args.commands) # create plugin
# -----------------------------------------------------------------------------
#
# Click CLI
#
# -----------------------------------------------------------------------------
# @click.option('--pipe', 'opt_pipe', type=bool, default=plugin_group.pipe)
@click.group(chain=plugin_group.pipe, no_args_is_help=True, help=help_desc)
@click.pass_context
def cli(ctx, opt_pipe=True):
"""\033[1m\033[94mVFRAME\033[0m
"""
# print("plugin_group.pipe", plugin_group.pipe)
# opt_pipe = plugin_group.pipe
opt_verbosity = int(os.environ.get("VFRAME_VERBOSITY", 4)) # 1 - 5
# store reference to opt_pipe for access in callback
ctx.opts = {'opt_pipe': plugin_group.pipe}
# store user object variables
ctx.ensure_object(dict)
ctx.obj['start_time'] = time.time()
# init global logger
log_utils.Logger.create(verbosity=opt_verbosity)
# def process_commands(processors, opt_pipe):
@cli.resultcallback()
def process_commands(processors):
"""This result callback is invoked with an iterable of all the chained
subcommands. As in this example each subcommand returns a function
we can chain them together to feed one into the other, similar to how
a pipe on UNIX works. Copied from Click's docs.
"""
if not plugin_group.pipe:
return
def sink():
"""This is the end of the pipeline
"""
while True:
yield
sink = sink()
sink.__next__()
# Compose and prime processors
for processor in reversed(processors):
sink = processor(sink)
sink.__next__()
sink.close()
# -----------------------------------------------------------------------------
#
# Setup commands
#
# -----------------------------------------------------------------------------
# append files to click groups
import vframe.utils.im_utils
for plugin_script in plugin_group.scripts:
fp_root = '/'.join(plugin_script.filepath.split('/')[:2]) # eg plugins/vframe_custom_plugin
fp_root = join(app_cfg.DIR_SRC, fp_root)
# print(fp_root)
if not Path(fp_root).is_dir():
print(f'{50 * "*"}\nWARNING: {fp_root} does not exist\n{50 * "*"}')
continue
# append plugin directory to import paths
if fp_root not in sys.path:
sys.path.append(fp_root)
# glob for python files inside command directory
fp_dir_glob = join(app_cfg.DIR_SRC, plugin_script.filepath, '*.py')
for fp_py in iglob(fp_dir_glob):
fn = Path(fp_py).stem
# skip files starting with "_"
if plugin_script.include_hidden is False and fn.startswith('_'):
continue
fp_module = str(Path(fp_py).relative_to(Path(app_cfg.DIR_SRC)))
fp_import = fp_module.replace('/', '.').replace('.py', '')
try:
module = importlib.import_module(fp_import)
cli.add_command(module.cli, name=fn)
except Exception as e:
msg = f'Could not import "{fn}": {e}'
print(f"{app_cfg.TERM_COLORS.FAIL}{msg}{app_cfg.TERM_COLORS.ENDC}")
# -----------------------------------------------------------------------------
#
# Start CLI application
#
# -----------------------------------------------------------------------------
cli() | 0 | 0 | 0 |
5b3106918eb0668c2e901ae8fcf1934e63af6763 | 1,842 | py | Python | OCR/ColOCR.py | EdJeeOnGitHub/PR1956 | fc904a00272041c886db72759b6e0e576a29f744 | [
"MIT"
] | null | null | null | OCR/ColOCR.py | EdJeeOnGitHub/PR1956 | fc904a00272041c886db72759b6e0e576a29f744 | [
"MIT"
] | null | null | null | OCR/ColOCR.py | EdJeeOnGitHub/PR1956 | fc904a00272041c886db72759b6e0e576a29f744 | [
"MIT"
] | null | null | null | import os
import io
import json
from google.cloud import vision
from google.protobuf.json_format import MessageToJson
from joblib import Parallel, delayed
import argparse
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ""
client = vision.ImageAnnotatorClient()
if __name__ == '__main__':
# construct the argument parse and parse the arguments
parser = argparse.ArgumentParser(description='Page Detection')
parser.add_argument('--imgdir', type=str)
parser.add_argument('--outputdir', type=str)
args = parser.parse_args()
#create output file
if not os.path.isdir(args.outputdir):
os.mkdir(args.outputdir)
print('creating directory ' + args.outputdir)
clean_names = lambda x: [i for i in x if i[0] != '.']
imgdir = os.listdir(args.imgdir)
imgdir = sorted(clean_names(imgdir))
outputdir = [os.path.join(args.outputdir, dir) for dir in imgdir]
imgdir = [os.path.join(args.imgdir, dir) for dir in imgdir]
Parallel(n_jobs=1)(map(delayed(main), imgdir, outputdir)) | 35.423077 | 88 | 0.68241 | import os
import io
import json
from google.cloud import vision
from google.protobuf.json_format import MessageToJson
from joblib import Parallel, delayed
import argparse
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ""
client = vision.ImageAnnotatorClient()
def main(imgdir, outputdir):
print("processing "+imgdir)
if not os.path.isdir(outputdir):
os.mkdir(outputdir)
print('creating directory ' + outputdir)
clean_names = lambda x: [i for i in x if i[0] != '.']
colImgs = sorted(clean_names(os.listdir(imgdir)))
for colImg in colImgs:
imgpath = os.path.join(imgdir, colImg)
with io.open(imgpath, 'rb') as image_file:
col = image_file.read()
col = vision.types.Image(content=col)
col_context = vision.types.ImageContext(language_hints=["zh","ja","en"])
response = client.document_text_detection(image=col, image_context=col_context)
with open(os.path.join(outputdir,colImg.split('.')[0]+'.json'), 'w') as outfile:
json.dump(MessageToJson(response), outfile)
if __name__ == '__main__':
# construct the argument parse and parse the arguments
parser = argparse.ArgumentParser(description='Page Detection')
parser.add_argument('--imgdir', type=str)
parser.add_argument('--outputdir', type=str)
args = parser.parse_args()
#create output file
if not os.path.isdir(args.outputdir):
os.mkdir(args.outputdir)
print('creating directory ' + args.outputdir)
clean_names = lambda x: [i for i in x if i[0] != '.']
imgdir = os.listdir(args.imgdir)
imgdir = sorted(clean_names(imgdir))
outputdir = [os.path.join(args.outputdir, dir) for dir in imgdir]
imgdir = [os.path.join(args.imgdir, dir) for dir in imgdir]
Parallel(n_jobs=1)(map(delayed(main), imgdir, outputdir)) | 789 | 0 | 23 |
9137427570cdaf5914d768375273c8661a355ab0 | 1,134 | py | Python | upcfcardsearch/c280.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | upcfcardsearch/c280.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | upcfcardsearch/c280.py | ProfessorSean/Kasutamaiza | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord.utils import get | 49.304348 | 299 | 0.69224 | import discord
from discord.ext import commands
from discord.utils import get
class c280(commands.Cog, name="c280"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Hex_of_Claustrophobia', aliases=['c280', 'Hex_4'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Hex of Claustrophobia',
color=0x1D9E74)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2360775.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3 (Hex)', inline=True)
embed.add_field(name='Type', value='Spell/Continuous', inline=False)
embed.add_field(name='Card Effect', value='Select 2 unoccupied Main Monster Zones on each player\'s field. Neither player can use the selected zones, then, if you only control DARK monsters, you can add 1 "Hex" Spell from your GY to your hand, except "Hex of Claustrophobia".', inline=False)
embed.set_footer(text='Set Code: ANCF')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(c280(bot)) | 862 | 149 | 46 |
98902a2578836780e5d099b3a881ff34ebf06812 | 370 | py | Python | project_2/Tool.py | BFreitas16/SSof | c6963b76c1c033324593a40139e5918ad6386380 | [
"MIT"
] | null | null | null | project_2/Tool.py | BFreitas16/SSof | c6963b76c1c033324593a40139e5918ad6386380 | [
"MIT"
] | null | null | null | project_2/Tool.py | BFreitas16/SSof | c6963b76c1c033324593a40139e5918ad6386380 | [
"MIT"
] | null | null | null | import sys
from Util import write_list_to_file
from ProgramParser import ProgramParser
if __name__ == '__main__':
main()
| 19.473684 | 51 | 0.721622 | import sys
from Util import write_list_to_file
from ProgramParser import ProgramParser
def main():
program = sys.argv[1]
pattern = sys.argv[2]
parser = ProgramParser(program, pattern)
parser.build_graph()
vulnerability_list = parser.evaluate_flows()
write_list_to_file(program, vulnerability_list)
if __name__ == '__main__':
main()
| 219 | 0 | 23 |
0d149bbcdae564f2e4a9b375c1955956a290b80f | 221 | py | Python | hint/serializer.py | DisMosGit/Dodja | 0bcafede607df921b53f6ed501842d4cd03f65d6 | [
"Apache-2.0"
] | null | null | null | hint/serializer.py | DisMosGit/Dodja | 0bcafede607df921b53f6ed501842d4cd03f65d6 | [
"Apache-2.0"
] | null | null | null | hint/serializer.py | DisMosGit/Dodja | 0bcafede607df921b53f6ed501842d4cd03f65d6 | [
"Apache-2.0"
] | null | null | null | from rest_framework.serializers import ModelSerializer
from .models import Hint
| 22.1 | 54 | 0.692308 | from rest_framework.serializers import ModelSerializer
from .models import Hint
class HintSerializer(ModelSerializer):
class Meta:
model = Hint
fields = '__all__'
read_only_fields = ('id', )
| 0 | 117 | 23 |
04349bcacdf78132aeef750e0ad5a40b2317d8c5 | 3,355 | py | Python | src/q_play.py | msHujindou/Tetris-DQN | 8d8f83b151262dd2c392f8d247b1cee76c0bf0fd | [
"MIT"
] | null | null | null | src/q_play.py | msHujindou/Tetris-DQN | 8d8f83b151262dd2c392f8d247b1cee76c0bf0fd | [
"MIT"
] | null | null | null | src/q_play.py | msHujindou/Tetris-DQN | 8d8f83b151262dd2c392f8d247b1cee76c0bf0fd | [
"MIT"
] | null | null | null | """
此脚本负责 - 手工运行游戏或者用AI运行游戏
"""
import sys
import cv2
from utils.util import create_image_from_state
from game.confs import Action_Type, Block_Type, Confs
from game.tetris_engine import tetris_engine
import json
import numpy as np
if __name__ == "__main__":
# human_play()
ai_play("outputs/q_3600000.json")
sys.exit(0)
| 37.277778 | 78 | 0.558271 | """
此脚本负责 - 手工运行游戏或者用AI运行游戏
"""
import sys
import cv2
from utils.util import create_image_from_state
from game.confs import Action_Type, Block_Type, Confs
from game.tetris_engine import tetris_engine
import json
import numpy as np
def ai_play(model_file):
with open(model_file, "r") as fr:
model = json.load(fr)
env = tetris_engine([Block_Type.L])
game_state = env.reset()
debug_img = None
is_end = False
while True:
img = create_image_from_state(game_state)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imshow("frame", img)
if debug_img is not None:
cv2.imshow("debug", debug_img)
key = cv2.waitKey(10)
# press Q or ESC
if key == ord("q") or key == 27:
break
if is_end:
continue
game_state_key = game_state.tobytes().hex()
if game_state_key not in model:
print("Game State not Found")
# continue
else:
print(model[game_state_key])
if np.max(model[game_state_key]) > 0:
action = np.argmax(model[game_state_key])
action_name = env.action_type_list[action]
print(action, action_name, model[game_state_key])
else:
pass
# print("Q value not set for this state, or state is dead .")
if key == ord("w"):
# rotate
game_state, reward, is_end, debug = env.step(Action_Type.Rotate)
# print(f"reward [{reward}], is_end [{is_end}]")
if debug is not None:
debug_img = create_image_from_state(debug)
debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2RGB)
elif key == ord("s"):
# down
game_state, reward, is_end, debug = env.step(Action_Type.Down)
# print(f"reward [{reward}], is_end [{is_end}]")
if debug is not None:
debug_img = create_image_from_state(debug)
debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2RGB)
elif key == ord("a"):
# left
game_state, reward, is_end, debug = env.step(Action_Type.Left)
# print(f"reward [{reward}], is_end [{is_end}]")
if debug is not None:
debug_img = create_image_from_state(debug)
debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2RGB)
elif key == ord("d"):
# right
game_state, reward, is_end, debug = env.step(Action_Type.Right)
# print(f"reward [{reward}], is_end [{is_end}]")
if debug is not None:
debug_img = create_image_from_state(debug)
debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2RGB)
elif key == ord(" "):
# bottom
game_state, reward, is_end, debug = env.step(Action_Type.Bottom)
# print(f"reward [{reward}], is_end [{is_end}]")
if debug is not None:
debug_img = create_image_from_state(debug)
debug_img = cv2.cvtColor(debug_img, cv2.COLOR_BGR2RGB)
cv2.destroyAllWindows()
if __name__ == "__main__":
# human_play()
ai_play("outputs/q_3600000.json")
sys.exit(0)
| 2,979 | 0 | 25 |
ade1cbdefc111b4b06b7ab346b2328ce27bad434 | 6,522 | py | Python | examples/reconstruct_simul_data_medium.py | s-matrix/smpr3d | f11c36c37bba749fe8aeb43f6cfbf303ab817064 | [
"Apache-2.0"
] | 2 | 2021-04-26T18:35:57.000Z | 2021-08-05T17:49:35.000Z | examples/reconstruct_simul_data_medium.py | s-matrix/smpr3d | f11c36c37bba749fe8aeb43f6cfbf303ab817064 | [
"Apache-2.0"
] | null | null | null | examples/reconstruct_simul_data_medium.py | s-matrix/smpr3d | f11c36c37bba749fe8aeb43f6cfbf303ab817064 | [
"Apache-2.0"
] | null | null | null | from smpr3d.util import *
from smpr3d.algorithm import *
from smpr3d.setup import *
import torch as th
import os
import numpy as np
# salloc -C gpu -N 2 -t 30 -c 10 --gres=gpu:8 -A m1759 --ntasks-per-node=8
# srun -N 2 python ./admm_smatrix_dist_pytorch.py
# module purge
# module load pytorch/v1.4.0-gpu
# module list
# Currently Loaded Modulefiles:
# 1) esslurm 2) modules/3.2.11.1 3) cuda/10.1.168 4) nccl/2.5.6
args = Param()
args.io = Param()
args.io.path = '/home/philipp/drop/Public/nesap_hackathon/'
# args.io.path = '../Inputs/'
if os.environ.get('SLURM_PROCID') is not None:
args.io.path = '/global/cscratch1/sd/pelzphil/'
args.io.summary_log_dir = args.io.path + 'log/'
args.io.logname = 'atoms_aberrations52_2'
args.io.filename_data = 'atoms_aberrations52_2.h5'
summary = setup_logging(args.io.path, args.io.logname)
args.dist_backend = 'mpi' # 'mpi'
args.dist_init_method = f'file://{args.io.path}sharedfile'
args.node_config = configure_node(args.dist_backend, args.dist_init_method)
args.beam_threshold_percent = 5e-3
args.max_phase_error = np.pi / 64
args.use_full_smatrix = True
args.uniform_initial_intensity = False
dC1 = 30
# %% load data
i = 0
args.io.filename_results = f'random4_dC{dC1}perc_res_{i + 5:03d}.h5'
world_size = args.node_config.world_size
rank = args.node_config.rank
device = args.node_config.device
lam, alpha_rad, C, dx, specimen_thickness_angstrom, vacuum_probe, D, K, K_rank, MY, MX, NY, NX, \
fy, fx, detector_shape, r, I_target, y_max, x_max, y_min, x_min, S_sol, Psi_sol, r_sol = load_smatrix_data_list2(
args.io.path + args.io.filename_data, device, rank, world_size, subset=[0, 1, 2, 3])
# dx = 1/2/dx
lam *= 1e10
# %% define data-dependent variables
# Fourier space grid on detector
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
A_init = initial_probe_amplitude(vacuum_probe, I_target, world_size, rank)
# mask which beams to include in the S-matrix input channels
take_beams = vacuum_probe > args.beam_threshold_percent
B, B_tile, tile_order, beam_numbers, tile_map = prepare_beam_parameters(take_beams, q2, specimen_thickness_angstrom,
alpha_rad * 1.1, lam, args.max_phase_error,
args.use_full_smatrix, device)
# shape of reconstruction variables
S_shape = (B_tile, NY, NX)
Psi_shape = (D, MY, MX)
z_shape = tuple(I_target.shape)
# map of convergence angles
alpha = q.norm(dim=0) * lam
beam_alphas = th.zeros_like(take_beams, dtype=th.float32, device=device) * -1
beam_alphas[take_beams] = alpha[take_beams]
alpha_map = beam_alphas[take_beams]
# %%
print(specimen_thickness_angstrom)
S0, depth_init = initial_smatrix(S_shape, beam_numbers, device, is_unitary=True, include_plane_waves=B == B_tile,
initial_depth=specimen_thickness_angstrom, lam=lam, q2=q2,
is_pinned=False)
tile_numbers = beam_numbers[beam_numbers >= 0]
beam_numbers = th.ones_like(take_beams).cpu().long() * -1
beam_numbers[take_beams] = th.arange(B)
# %% define S-matrix forward and adjoint operators
from smpr3d.operators import A as A1, AH_S as AH_S1
r_min = th.zeros(2, device=device)
AH_Psi = None
AH_r = None
a = th.sqrt(I_target)
report_smatrix_parameters(rank, world_size, a, S0, B, D, K, MY, MX, NY, NX, fy, fx, B_tile, K_rank,
specimen_thickness_angstrom, depth_init, y_max, x_max, y_min, x_min)
if world_size == 1:
plot(take_beams.cpu().float().numpy(), 'take_beams')
plot(np.fft.fftshift(beam_numbers.cpu().float().numpy()), 'aperture_tiling', cmap='gist_ncar')
# else:
# dist.barrier()
# %% define initial probes
C_target = C.to(device)
C_target[1] = 10
print('C_target:', C_target)
C_model = th.zeros(12, D).to(device)
C_model[:] = C_target
# define data-dependent variables
# Fourier space grid on detector
detector_shape = np.array([MY, MX])
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
Ap0 = vacuum_probe
# del I_target
# mask which beams to include in the S-matrix input channels
# take_beams = vacuum_probe > args.beam_threshold_percent / 100
Psi_gen = ZernikeProbe2(q, lam, fft_shifted=True)
Psi_target = Psi_gen(C_target, Ap0).detach()
Psi_model = Psi_gen(C_model, Ap0).detach()
psi_model = th.fft.ifft2(Psi_model, norm='ortho')
cb = fftshift_checkerboard(MY // 2, MX // 2)
fpr1 = Psi_target[0].cpu().numpy()
pr1 = np.fft.ifft2(fpr1, norm='ortho')
fpr2 = Psi_model[0].cpu().numpy()
pr2 = np.fft.ifft2(fpr2, norm='ortho')
from smpr3d.core import SMeta
s_meta = SMeta(take_beams, dx, S_shape, MY, MX, device)
print(s_meta.q_dft)
# report_initial_probes(summary, rank, world_size, Psi_model, psi_model, C_model, specimen_thickness_angstrom, q, lam,
# alpha_rad)
# %% perform reconstruction
# m = [MY, MX]
# plotAbsAngle(complex_numpy(S_sol[0, m[0]:-m[0], m[1]:-m[1]].cpu()), f'S_sol[{0}]')
args.reconstruction_opts = Param()
args.reconstruction_opts.max_iters = 100
args.reconstruction_opts.beta = 1.0
args.reconstruction_opts.tau_S = 2e-3
args.reconstruction_opts.tau_Psi = 1e6
args.reconstruction_opts.tau_r = 8e-3
args.reconstruction_opts.optimize_psi = lambda i: i > 1e3
args.reconstruction_opts.node_config = args.node_config
args.reconstruction_opts.verbose = 2
r0 = r
Psi0 = Psi_sol
(S_n, Psi_n, C_n, r_n), outs, opts = fasta2(s_meta, A, AH_S, AH_Psi, AH_r, prox_D_gaussian, Psi_gen, a, S0, Psi0,
C_model, Ap0, r0, args.reconstruction_opts, S_sol=S_sol, Psi_sol=Psi_sol,
r_sol=r_sol, summary=summary)
# save_results(rank, S_n, Psi_n, C_n, r_n, outs, S_sol, Psi_sol, r_sol, beam_numbers, tile_map, alpha_map, A.coords, A.inds,
# take_beams, lam, alpha_rad, dx, specimen_thickness_angstrom, args.io.path + args.io.filename_results)
# if world_size > 1:
# dist.barrier()
# dist.destroy_process_group()
# %%
# plotcx(S_n[2])
| 36.233333 | 124 | 0.693959 | from smpr3d.util import *
from smpr3d.algorithm import *
from smpr3d.setup import *
import torch as th
import os
import numpy as np
# salloc -C gpu -N 2 -t 30 -c 10 --gres=gpu:8 -A m1759 --ntasks-per-node=8
# srun -N 2 python ./admm_smatrix_dist_pytorch.py
# module purge
# module load pytorch/v1.4.0-gpu
# module list
# Currently Loaded Modulefiles:
# 1) esslurm 2) modules/3.2.11.1 3) cuda/10.1.168 4) nccl/2.5.6
args = Param()
args.io = Param()
args.io.path = '/home/philipp/drop/Public/nesap_hackathon/'
# args.io.path = '../Inputs/'
if os.environ.get('SLURM_PROCID') is not None:
args.io.path = '/global/cscratch1/sd/pelzphil/'
args.io.summary_log_dir = args.io.path + 'log/'
args.io.logname = 'atoms_aberrations52_2'
args.io.filename_data = 'atoms_aberrations52_2.h5'
summary = setup_logging(args.io.path, args.io.logname)
args.dist_backend = 'mpi' # 'mpi'
args.dist_init_method = f'file://{args.io.path}sharedfile'
args.node_config = configure_node(args.dist_backend, args.dist_init_method)
args.beam_threshold_percent = 5e-3
args.max_phase_error = np.pi / 64
args.use_full_smatrix = True
args.uniform_initial_intensity = False
dC1 = 30
# %% load data
i = 0
args.io.filename_results = f'random4_dC{dC1}perc_res_{i + 5:03d}.h5'
world_size = args.node_config.world_size
rank = args.node_config.rank
device = args.node_config.device
lam, alpha_rad, C, dx, specimen_thickness_angstrom, vacuum_probe, D, K, K_rank, MY, MX, NY, NX, \
fy, fx, detector_shape, r, I_target, y_max, x_max, y_min, x_min, S_sol, Psi_sol, r_sol = load_smatrix_data_list2(
args.io.path + args.io.filename_data, device, rank, world_size, subset=[0, 1, 2, 3])
# dx = 1/2/dx
lam *= 1e10
# %% define data-dependent variables
# Fourier space grid on detector
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
A_init = initial_probe_amplitude(vacuum_probe, I_target, world_size, rank)
# mask which beams to include in the S-matrix input channels
take_beams = vacuum_probe > args.beam_threshold_percent
B, B_tile, tile_order, beam_numbers, tile_map = prepare_beam_parameters(take_beams, q2, specimen_thickness_angstrom,
alpha_rad * 1.1, lam, args.max_phase_error,
args.use_full_smatrix, device)
# shape of reconstruction variables
S_shape = (B_tile, NY, NX)
Psi_shape = (D, MY, MX)
z_shape = tuple(I_target.shape)
# map of convergence angles
alpha = q.norm(dim=0) * lam
beam_alphas = th.zeros_like(take_beams, dtype=th.float32, device=device) * -1
beam_alphas[take_beams] = alpha[take_beams]
alpha_map = beam_alphas[take_beams]
# %%
print(specimen_thickness_angstrom)
S0, depth_init = initial_smatrix(S_shape, beam_numbers, device, is_unitary=True, include_plane_waves=B == B_tile,
initial_depth=specimen_thickness_angstrom, lam=lam, q2=q2,
is_pinned=False)
tile_numbers = beam_numbers[beam_numbers >= 0]
beam_numbers = th.ones_like(take_beams).cpu().long() * -1
beam_numbers[take_beams] = th.arange(B)
# %% define S-matrix forward and adjoint operators
from smpr3d.operators import A as A1, AH_S as AH_S1
r_min = th.zeros(2, device=device)
def A(S, Psi, r):
return A1(S, Psi, r, r_min=r_min, out=None, Mx=MX, My=MY)
def AH_S(S, Psi, r):
return AH_S1(S, Psi, r, r_min=r_min, out=None, tau=th.tensor([1.0]).to(device), Ny=NY, Nx=NX)
AH_Psi = None
AH_r = None
a = th.sqrt(I_target)
report_smatrix_parameters(rank, world_size, a, S0, B, D, K, MY, MX, NY, NX, fy, fx, B_tile, K_rank,
specimen_thickness_angstrom, depth_init, y_max, x_max, y_min, x_min)
if world_size == 1:
plot(take_beams.cpu().float().numpy(), 'take_beams')
plot(np.fft.fftshift(beam_numbers.cpu().float().numpy()), 'aperture_tiling', cmap='gist_ncar')
# else:
# dist.barrier()
# %% define initial probes
C_target = C.to(device)
C_target[1] = 10
print('C_target:', C_target)
C_model = th.zeros(12, D).to(device)
C_model[:] = C_target
# define data-dependent variables
# Fourier space grid on detector
detector_shape = np.array([MY, MX])
qnp = fourier_coordinates_2D([MY, MX], dx.numpy(), centered=False)
q = th.as_tensor(qnp, device=device)
q2 = th.as_tensor(np.linalg.norm(qnp, axis=0) ** 2, device=device)
# initial aperture amplitude
Ap0 = vacuum_probe
# del I_target
# mask which beams to include in the S-matrix input channels
# take_beams = vacuum_probe > args.beam_threshold_percent / 100
Psi_gen = ZernikeProbe2(q, lam, fft_shifted=True)
Psi_target = Psi_gen(C_target, Ap0).detach()
Psi_model = Psi_gen(C_model, Ap0).detach()
psi_model = th.fft.ifft2(Psi_model, norm='ortho')
cb = fftshift_checkerboard(MY // 2, MX // 2)
fpr1 = Psi_target[0].cpu().numpy()
pr1 = np.fft.ifft2(fpr1, norm='ortho')
fpr2 = Psi_model[0].cpu().numpy()
pr2 = np.fft.ifft2(fpr2, norm='ortho')
from smpr3d.core import SMeta
s_meta = SMeta(take_beams, dx, S_shape, MY, MX, device)
print(s_meta.q_dft)
# report_initial_probes(summary, rank, world_size, Psi_model, psi_model, C_model, specimen_thickness_angstrom, q, lam,
# alpha_rad)
# %% perform reconstruction
# m = [MY, MX]
# plotAbsAngle(complex_numpy(S_sol[0, m[0]:-m[0], m[1]:-m[1]].cpu()), f'S_sol[{0}]')
args.reconstruction_opts = Param()
args.reconstruction_opts.max_iters = 100
args.reconstruction_opts.beta = 1.0
args.reconstruction_opts.tau_S = 2e-3
args.reconstruction_opts.tau_Psi = 1e6
args.reconstruction_opts.tau_r = 8e-3
args.reconstruction_opts.optimize_psi = lambda i: i > 1e3
args.reconstruction_opts.node_config = args.node_config
args.reconstruction_opts.verbose = 2
r0 = r
Psi0 = Psi_sol
(S_n, Psi_n, C_n, r_n), outs, opts = fasta2(s_meta, A, AH_S, AH_Psi, AH_r, prox_D_gaussian, Psi_gen, a, S0, Psi0,
C_model, Ap0, r0, args.reconstruction_opts, S_sol=S_sol, Psi_sol=Psi_sol,
r_sol=r_sol, summary=summary)
# save_results(rank, S_n, Psi_n, C_n, r_n, outs, S_sol, Psi_sol, r_sol, beam_numbers, tile_map, alpha_map, A.coords, A.inds,
# take_beams, lam, alpha_rad, dx, specimen_thickness_angstrom, args.io.path + args.io.filename_results)
# if world_size > 1:
# dist.barrier()
# dist.destroy_process_group()
# %%
# plotcx(S_n[2])
| 155 | 0 | 46 |
fe0ddc5678863bd05d678723d1d8dc0d490f7165 | 6,689 | py | Python | reports/views.py | LCOGT/globalskypartners | ecb4ffc7c8ed0902e71b648907046093ea82dc77 | [
"MIT"
] | null | null | null | reports/views.py | LCOGT/globalskypartners | ecb4ffc7c8ed0902e71b648907046093ea82dc77 | [
"MIT"
] | 2 | 2021-05-25T13:16:56.000Z | 2021-06-18T08:29:36.000Z | reports/views.py | LCOGT/globalskypartners | ecb4ffc7c8ed0902e71b648907046093ea82dc77 | [
"MIT"
] | null | null | null | import csv
from collections import Counter
from crispy_forms.utils import render_crispy_form
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Sum, Count
from django.forms.models import model_to_dict
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.template.context_processors import csrf
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from django_countries import countries
from .models import *
from .forms import *
from .plots import cohort_countries, get_partner_counts, breakdown_per_partner, \
choropleth_map, get_partner_sum, meta_plot
| 38.889535 | 122 | 0.682763 | import csv
from collections import Counter
from crispy_forms.utils import render_crispy_form
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Sum, Count
from django.forms.models import model_to_dict
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.template.context_processors import csrf
from django.urls import reverse_lazy
from django.views import View
from django.views.generic import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from django_countries import countries
from .models import *
from .forms import *
from .plots import cohort_countries, get_partner_counts, breakdown_per_partner, \
choropleth_map, get_partner_sum, meta_plot
class PassUserMixin:
    """Mixin that injects the requesting user into a form view's kwargs.

    Intended for Django form views whose form classes accept a ``user``
    keyword argument.
    """
    def get_form_kwargs(self):
        """Extend the base form kwargs with the current request's user."""
        form_kwargs = super().get_form_kwargs()
        form_kwargs.update(user=self.request.user)
        return form_kwargs
class ReportList(LoginRequiredMixin, ListView):
    """List the logged-in user's reports; staff additionally see all reports."""
    model = Report
    def get_queryset(self):
        """Limit the main listing to reports authored by the current user."""
        return Report.objects.filter(created_by=self.request.user)
    def get_context_data(self, **kwargs):
        """Add draft impacts, active-report years and (for staff) all reports."""
        context = super(ReportList, self).get_context_data(**kwargs)
        # status=0 presumably marks draft/in-progress reports -- verify
        # against the Report model's status choices.
        context['impacts'] = Imprint.objects.filter(report__created_by=self.request.user, report__status=0)
        if years := Cohort.objects.filter(active_report=True).values_list('year',flat=True):
            context['active_report'] = years
        if self.request.user.is_staff:
            context['all_reports'] = Report.objects.all().order_by('period','partner__name')
        return context
class ImpactCreate(LoginRequiredMixin, PassUserMixin, CreateView):
    """Create an Imprint (impact entry) attached to the current cohort's report."""
    form_class = ImpactForm
    template_name = 'reports/imprint_create.html'
    success_url = reverse_lazy('report-list')
    def form_valid(self, form):
        """Resolve partner and active cohort, then attach the impact to a report."""
        partner = Partner.objects.get(id=form.cleaned_data.get("partner"))
        now = timezone.now()
        # Cohorts whose [start, end] window contains the current time.
        cohorts = [c for c in Cohort.objects.all() if c.start <= now and c.end >= now ]
        # NOTE(review): cohorts[0] raises IndexError when no cohort is active,
        # and `created` is unused -- confirm an active cohort is guaranteed.
        report, created = Report.objects.get_or_create(partner=partner, period=cohorts[0], created_by = self.request.user)
        form.instance.report = report
        return super().form_valid(form)
class ReportCreate(LoginRequiredMixin, PassUserMixin, CreateView):
    """Create a Report for the cohort currently flagged as active."""
    form_class = ReportForm
    model = Report
    template_name = 'reports/report_create.html'
    def form_valid(self, form):
        """Stamp the new report with its author and the active cohort."""
        context = self.get_context_data()
        # impacts = context['impacts']
        form.instance.created_by = self.request.user
        # NOTE(review): .get() raises if zero or several cohorts have
        # active_report=True -- confirm exactly one is maintained.
        form.instance.period = Cohort.objects.get(active_report=True)
        self.object = form.save()
        return super(ReportCreate, self).form_valid(form)
    def get_success_url(self):
        """Send the author on to add impacts to the newly created report."""
        return reverse_lazy('report-add-impact', kwargs={'pk':self.object.id})
class ReportAddImpact(LoginRequiredMixin, PassUserMixin, CreateView):
    """Add Imprint entries to an existing report (report pk taken from the URL)."""
    form_class = ImpactForm
    model = Imprint
    template_name = 'reports/report_impact_create.html'
    def get_object(self):
        """Return the Report being extended, looked up from the URL pk."""
        return Report.objects.get(id=self.kwargs['pk'])
    def get_form_kwargs(self):
        """Pass the parent report into the form alongside the user kwarg."""
        kwargs = super(ReportAddImpact, self).get_form_kwargs()
        kwargs['report'] =self.get_object()
        return kwargs
    def get_context_data(self, *args, **kwargs):
        """Expose the report and its existing impacts to the template."""
        data = super(ReportAddImpact, self).get_context_data(**kwargs)
        data['impacts'] = Imprint.objects.filter(report=self.get_object())
        data['report'] = self.get_object()
        return data
    def form_valid(self, form):
        """Attach the new impact to the parent report before saving."""
        form.instance.report = self.get_object()
        return super(ReportAddImpact, self).form_valid(form)
    def get_success_url(self):
        """Stay on the same page so several impacts can be added in a row."""
        return reverse_lazy('report-add-impact', kwargs={'pk':self.get_object().id})
class DeleteImpact(LoginRequiredMixin, DeleteView):
    """Delete a single Imprint, then return to its parent report's page."""
    model = Imprint

    def get_success_url(self):
        """Redirect back to the report the deleted impact belonged to."""
        parent_report = self.get_object().report
        return reverse_lazy('report-view', kwargs={'pk': parent_report.id})
class ReportDetail(LoginRequiredMixin, DetailView):
    """Show one report together with every impact recorded against it."""
    model = Report

    def get_context_data(self, *args, **kwargs):
        """Extend the default context with the report's impacts."""
        context = super().get_context_data(**kwargs)
        context['impacts'] = Imprint.objects.filter(report=self.get_object())
        return context
class ReportEdit(LoginRequiredMixin, PassUserMixin, UpdateView):
    """Edit an existing report using the same form as creation."""
    model = Report
    form_class = ReportForm

    def get_success_url(self):
        """After saving, continue to the impact-editing page for this report."""
        report = self.get_object()
        return reverse_lazy('report-add-impact', kwargs={'pk': report.id})
class ReportSubmit(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Mark a report as submitted; only the partner's PIs may do so."""
    model = Report
    def test_func(self):
        """Allow only users listed as a PI of the report's partner."""
        return self.request.user in self.get_object().partner.pi.all()
    def get(self, request, *args, **kwargs):
        """Flip the report's status to submitted and return to the list view."""
        obj = self.get_object()
        obj.status = 1 #Submitted
        obj.save()
        messages.success(request, f'Report for "{obj.partner.name}" in {obj.period.year} submitted')
        return redirect(reverse_lazy('report-list'))
class FinalReport(LoginRequiredMixin, UserPassesTestMixin, View):
    """Staff-only aggregate report for one cohort year (totals, demographics, map)."""
    template_name = 'reports/final_report.html'
    def test_func(self):
        """Restrict the view to staff users."""
        return self.request.user.is_staff
    def get(self, request, *args, **kwargs):
        """Build the per-year aggregate context and render the final report."""
        year = self.kwargs['year']
        cohort = Cohort.objects.get(year=year)
        reports = Report.objects.filter(period=cohort).annotate(subtotal=Sum('imprint__size')).order_by('partner__name')
        # Tally impact sizes per human-readable demographic label.
        demos = Counter()
        demographics = Imprint.objects.filter(report__period=cohort)
        for d in demographics:
            demos.update({d.get_demographic_display():d.size})
        # demographic=99 presumably means "other" -- verify against the model.
        other = demographics.filter(demographic=99).exclude(demo_other__isnull=True).values_list('demo_other', flat=True)
        countries_dict, regions_dict = cohort_countries(year)
        return render(request, self.template_name,
            {
            'demographics' : dict(demos),
            'other_demos' : ", ".join(other),
            'total' : Report.objects.filter(period=cohort).aggregate(total=Sum('imprint__size')),
            'reports' : reports,
            'year' : year,
            'country_count' : len(countries_dict),
            'regions' : regions_dict,
            'partner_data' : get_partner_counts(reports),
            'partner_counts': get_partner_sum(year),
            'total_partners': Partner.objects.filter(cohorts=cohort).count(),
            'map' : choropleth_map(year)
            })
| 4,209 | 1,317 | 256 |
ac3dfc4c8ee69f742973412c2b602528b8b42497 | 1,860 | py | Python | neural_algorithm_of_artistic_style/utils.py | pyCERN/neural_style_transfer | 0eb70be4f686cec3bc88855765fd5fc60e695ad9 | [
"MIT"
] | null | null | null | neural_algorithm_of_artistic_style/utils.py | pyCERN/neural_style_transfer | 0eb70be4f686cec3bc88855765fd5fc60e695ad9 | [
"MIT"
] | null | null | null | neural_algorithm_of_artistic_style/utils.py | pyCERN/neural_style_transfer | 0eb70be4f686cec3bc88855765fd5fc60e695ad9 | [
"MIT"
] | null | null | null | import tensorflow as tf
import cv2
from imageio import mimsave
from IPython.display import display as display_fn
from IPython.display import Image
def load_img(path_to_img):
    '''
    Loads an image and convert it to a tensor with longer side to 512
    :param path_to_img (str): directory path to image
    :return: uint8 tensor with a leading batch axis and the longer
        spatial side scaled to at most 512 pixels
    '''
    max_size = 512
    # NOTE(review): cv2.imread returns None for unreadable paths, which
    # would make cvtColor raise -- consider an explicit check.
    img = cv2.imread(path_to_img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # convert_image_dtype also rescales integer input into [0, 1] floats.
    img = tf.image.convert_image_dtype(img, dtype=tf.float32)
    # Spatial dimensions only (drop the channel axis).
    shape = tf.shape(img)[:-1]
    shape = tf.cast(shape, tf.float32)
    longer_size = max(shape)
    scale = max_size / longer_size
    new_shape = tf.cast(shape * scale, tf.int32)
    img = tf.image.resize(img, new_shape) # additional dim for batch
    img = img[tf.newaxis, :]
    # Back to uint8 (rescales [0, 1] floats to [0, 255]).
    img = tf.image.convert_image_dtype(img, tf.uint8)
    return img
def tensor_to_img(tensor):
    '''
    Converts a tensor to an image
    :param tensor: image tensor, optionally with a leading batch axis of size 1
    :return: PIL image built from the (unbatched) tensor
    '''
    tensor_shape = tf.shape(tensor)
    num_dim = tf.shape(tensor_shape)
    # Strip a singleton batch dimension if present.
    if num_dim > 3:
        assert tensor_shape[0] == 1
        tensor = tensor[0]
    return tf.keras.preprocessing.image.array_to_img(tensor)
def display_gif(gif_path):
    '''displays the generated images as an animated gif'''
    with open(gif_path, 'rb') as gif_file:
        gif_bytes = gif_file.read()
    display_fn(Image(data=gif_bytes, format='png'))
def create_gif(gif_path, images):
    '''creates animation of generated images'''
    # One frame per second; imageio infers the GIF format from the filename.
    mimsave(gif_path, images, fps=1)
return gif_path | 26.956522 | 91 | 0.667204 | import tensorflow as tf
import cv2
from imageio import mimsave
from IPython.display import display as display_fn
from IPython.display import Image
def load_img(path_to_img):
    '''
    Loads an image and convert it to a tensor with longer side to 512
    :param path_to_img (str): directory path to image
    :return: uint8 tensor with a leading batch axis and the longer
        spatial side scaled to at most 512 pixels
    '''
    max_size = 512
    # NOTE(review): cv2.imread returns None for unreadable paths, which
    # would make cvtColor raise -- consider an explicit check.
    img = cv2.imread(path_to_img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # convert_image_dtype also rescales integer input into [0, 1] floats.
    img = tf.image.convert_image_dtype(img, dtype=tf.float32)
    # Spatial dimensions only (drop the channel axis).
    shape = tf.shape(img)[:-1]
    shape = tf.cast(shape, tf.float32)
    longer_size = max(shape)
    scale = max_size / longer_size
    new_shape = tf.cast(shape * scale, tf.int32)
    img = tf.image.resize(img, new_shape) # additional dim for batch
    img = img[tf.newaxis, :]
    # Back to uint8 (rescales [0, 1] floats to [0, 255]).
    img = tf.image.convert_image_dtype(img, tf.uint8)
    return img
def preprocess_image(image):
    '''Cast *image* to float32 and run it through Keras's VGG19 preprocess_input.'''
    image = tf.cast(image, dtype=tf.float32)
    image = tf.keras.applications.vgg19.preprocess_input(image)
    return image
def tensor_to_img(tensor):
    '''
    Converts a tensor to an image
    :param tensor: image tensor, optionally with a leading batch axis of size 1
    :return: PIL image built from the (unbatched) tensor
    '''
    tensor_shape = tf.shape(tensor)
    num_dim = tf.shape(tensor_shape)
    # Strip a singleton batch dimension if present.
    if num_dim > 3:
        assert tensor_shape[0] == 1
        tensor = tensor[0]
    return tf.keras.preprocessing.image.array_to_img(tensor)
def clip_image_values(image, min_value=0., max_value=255.):
    '''Clamp the tensor's values into [min_value, max_value] (defaults 0-255).'''
    return tf.clip_by_value(image, clip_value_min=min_value, clip_value_max=max_value)
def display_gif(gif_path):
    '''displays the generated images as an animated gif'''
    # Read raw bytes and hand them to IPython's inline display.
    with open(gif_path,'rb') as f:
        display_fn(Image(data=f.read(), format='png'))
def create_gif(gif_path, images):
    '''creates animation of generated images'''
    # One frame per second; imageio infers the GIF format from the filename.
    mimsave(gif_path, images, fps=1)
return gif_path | 268 | 0 | 50 |
b05b820e91b639366561e7b90fd9769f0b854d8a | 211 | py | Python | lib/conf.py | roopkeed/oscar | 65d4c4bd8816c3b0b19c05eb80d2bbedc73034c9 | [
"MIT"
] | 138 | 2015-01-10T14:23:52.000Z | 2022-03-06T01:23:41.000Z | lib/conf.py | CloCkWeRX/oscar | e0916ebd976d70fbd7bfff7f94f7386019419305 | [
"MIT"
] | 29 | 2015-01-05T21:12:51.000Z | 2020-06-11T04:24:29.000Z | lib/conf.py | CloCkWeRX/oscar | e0916ebd976d70fbd7bfff7f94f7386019419305 | [
"MIT"
] | 33 | 2015-02-17T13:38:57.000Z | 2021-05-18T10:13:08.000Z | import yaml
# NOTE(review): `Conf` is not defined earlier in this fragment; this line
# presumably depends on a definition stripped from the original file.
c = Conf()
| 15.071429 | 67 | 0.601896 | import yaml
class Conf:
    """Wrapper around the system-wide Oscar YAML configuration file."""

    def __init__(self):
        """Read and parse /etc/oscar.yaml once at construction time."""
        # A context manager closes the handle (the original leaked an open
        # file), and safe_load avoids arbitrary object construction from the
        # config file; yaml.load without an explicit Loader is also a
        # TypeError on PyYAML >= 6.
        with open('/etc/oscar.yaml') as conf_file:
            self._conf_dict = yaml.safe_load(conf_file)

    def get(self):
        """Return the parsed configuration mapping."""
        return self._conf_dict
# Module-level singleton: the configuration is parsed once at import time.
c = Conf()
def get():
    """Return the shared configuration dictionary."""
    return c.get()
| 98 | -10 | 98 |
2efc63ddd6190a56449f766f3fc2404e3272e0b1 | 5,271 | py | Python | neuralnets/models.py | opentrack/neuralnet-tracker-traincode | 688ada0f46cb407d1809b50c11a136a239290123 | [
"ISC",
"CC0-1.0",
"Unlicense"
] | null | null | null | neuralnets/models.py | opentrack/neuralnet-tracker-traincode | 688ada0f46cb407d1809b50c11a136a239290123 | [
"ISC",
"CC0-1.0",
"Unlicense"
] | null | null | null | neuralnets/models.py | opentrack/neuralnet-tracker-traincode | 688ada0f46cb407d1809b50c11a136a239290123 | [
"ISC",
"CC0-1.0",
"Unlicense"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
import torchvision.models
from neuralnets.mobilenet_v1 import MobileNet
from neuralnets.modelcomponents import *
| 37.920863 | 123 | 0.593815 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
import torchvision.models
from neuralnets.mobilenet_v1 import MobileNet
from neuralnets.modelcomponents import *
class LocalizerNet(nn.Module):
    """Small CNN that predicts face presence and a bounding box from an attention map."""
    def __init__(self):
        super(LocalizerNet, self).__init__()
        self.input_resolution = (224,288) # H x W
        # Reuse torchvision's MNASNet inverted-residual block (private API --
        # may break across torchvision versions).
        IR = torchvision.models.mnasnet._InvertedResidual
        self.initial_stage = nn.Sequential(
            nn.Conv2d(1, 8, 3, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(8),
            nn.ReLU(),
        )
        self.convnet = nn.Sequential(
            self.initial_stage,
            depthwise_separable_convolution(8, 8, 3, padding=1),
            IR(in_ch=8, out_ch=12, kernel_size=3, stride=2, expansion_factor=2),
            IR(in_ch=12, out_ch=12, kernel_size=3, stride=1, expansion_factor=2),
            IR(in_ch=12, out_ch=20, kernel_size=3, stride=2, expansion_factor=4),
            IR(in_ch=20, out_ch=20, kernel_size=3, stride=1, expansion_factor=4),
            IR(in_ch=20, out_ch=20, kernel_size=3, stride=1, expansion_factor=4),
            IR(in_ch=20, out_ch=32, kernel_size=5, stride=2, expansion_factor=2),
            IR(in_ch=32, out_ch=32, kernel_size=5, stride=1, expansion_factor=2),
            IR(in_ch=32, out_ch=32, kernel_size=3, stride=1, expansion_factor=2),
            IR(in_ch=32, out_ch=32, kernel_size=3, stride=1, expansion_factor=2),
            IR(in_ch=32, out_ch=56, kernel_size=3, stride=2, expansion_factor=2),
            IR(in_ch=56, out_ch=56, kernel_size=3, stride=1, expansion_factor=2),
            IR(in_ch=56, out_ch=56, kernel_size=3, stride=1, expansion_factor=2),
            nn.Conv2d(56, 2, 1, bias=True)
        )
        self.boxstddev = SpatialMeanAndStd((7,9), half_size=1.5)
    def forward(self, x):
        """Return a (batch, 5) tensor: [face logit, box min xy, box max xy]."""
        assert x.shape[2] == self.input_resolution[0] and \
               x.shape[3] == self.input_resolution[1]
        x = self.convnet(x)
        # Channel 0: spatially averaged face-presence logit.
        a = torch.mean(x[:,0,...], dim=[1,2])
        # Channel 1: attention map, softmax-normalized over all pixels.
        x = x[:,1,...]
        x = torch.softmax(x.view(x.shape[0], -1), dim=1).view(*x.shape)
        self.attentionmap = x
        mean, std = self.boxstddev(self.attentionmap)
        # Box corners as mean +/- std of the attention distribution.
        pred = x.new_empty((x.shape[0],5))
        pred[:,0] = a[:]
        pred[:,1:3] = mean - std
        pred[:,3:5] = mean + std
        return pred
    def inference(self, x):
        """Eval-mode convenience wrapper returning a dict with probabilities."""
        assert not self.training
        pred = self.forward(x)
        pred = {
            'hasface' : torch.sigmoid(pred[:,0]),
            'roi' : pred[:,1:]
        }
        return pred
class MobilnetV1WithPointHead(nn.Module):
    """MobileNet-v1 pose network with an optional deformable 68-keypoint head."""
    def __init__(self, enable_point_head=True, point_head_dimensions=3):
        super(MobilnetV1WithPointHead, self).__init__()
        self.input_resolution = 129
        self.enable_point_head = enable_point_head
        self.num_eigvecs = 50
        # Load the keypoint data even if not used. That is to make
        # load_state_dict work with the strict=True option.
        self.keypts, self.keyeigvecs = load_deformable_head_keypoints(40, 10)
        self.point_head_dimensions = point_head_dimensions
        assert point_head_dimensions in (2,3)
        # 3 coords + 4 quaternion + 4 box params + deformation weights.
        num_classes = 7+self.num_eigvecs+4
        self.convnet = MobileNet(num_classes=num_classes, input_channel=1, momentum=0.01, skipconnection=True, dropout=0.5)
        self.out = PoseOutputStage()
    def forward(self, x):
        """Predict pose; also populates roi_pred and (optionally) pt3d_68."""
        assert x.shape[2] == self.input_resolution and \
               x.shape[3] == self.input_resolution
        x = self.convnet(x)
        # Split the flat prediction vector into its semantic chunks.
        coords = x[:,:3]
        quats = x[:,3:3+4]
        boxparams = x[:,3+4:3+4+4]
        kptweights = x[:,3+4+4:]
        # Box given as center plus log-size; convert to min/max corners.
        roi_box = x.new_empty((x.size(0),4))
        boxsize = torch.exp(boxparams[:,2:])
        boxcenter = boxparams[:,:2]
        roi_box[:,:2] = boxcenter - boxsize
        roi_box[:,2:] = boxcenter + boxsize
        self.roi_box = roi_box
        self.roi_pred = roi_box
        x = self.out(torch.cat([coords, quats], dim=1))
        if self.enable_point_head:
            self.deformweights = kptweights
            # Deform the mean keypoints by a weighted sum of eigenvectors.
            local_keypts = self.keyeigvecs[None,...] * kptweights[:,:,None,None]
            local_keypts = torch.sum(local_keypts, dim=1)
            local_keypts += self.keypts[None,...]
            if self.point_head_dimensions == 2:
                self.pt3d_68 = self.out.headcenter_to_screen(local_keypts)
            else:
                self.pt3d_68 = self.out.headcenter_to_screen_3d(local_keypts)
            self.pt3d_68 = self.pt3d_68.transpose(1,2)
            assert self.pt3d_68.shape[1] == self.point_head_dimensions and self.pt3d_68.shape[2] == 68
        # Return in format (batch x dimensions x points)
        return x
    def inference(self, x):
        """Eval-mode convenience wrapper returning predictions as a dict."""
        assert not self.training
        coords, quats = self.forward(x)
        pred = {
            'pose' : quats,
            'coord' : coords,
            'roi' : self.roi_pred
        }
        if self.enable_point_head:
            pred.update({
                'pt3d_68' : self.pt3d_68
            })
        return pred
ee18daf6898aa470f98e2c0341ce11cd0e43fda0 | 1,031 | py | Python | vmb/datasets/loader.py | MohsenFayyaz89/VideoModelBenchmark | 71edad107e948d308fba3f5e3f0edd3b2eb6ec25 | [
"Apache-2.0"
] | null | null | null | vmb/datasets/loader.py | MohsenFayyaz89/VideoModelBenchmark | 71edad107e948d308fba3f5e3f0edd3b2eb6ec25 | [
"Apache-2.0"
] | null | null | null | vmb/datasets/loader.py | MohsenFayyaz89/VideoModelBenchmark | 71edad107e948d308fba3f5e3f0edd3b2eb6ec25 | [
"Apache-2.0"
] | 1 | 2022-03-02T12:25:34.000Z | 2022-03-02T12:25:34.000Z | """Data loader."""
import torch
from torch.utils.data.sampler import RandomSampler
from .kinetics import Kinetics
# Registry mapping the cfg.TRAIN.DATASET name to its dataset class.
_DATASETS = {
    "kinetics": Kinetics,
}
def construct_loader(cfg, split):
    """
    Constructs the data loader for the given dataset.
    Args:
        cfg (CfgNode): configs. Details can be found in
            vml/config/defaults.py
        split (str): the split of the data loader. Only `train` and
            `val` are supported.
    Raises:
        AssertionError: if `split` is not a supported split.
    """
    # The original docstring advertised a `test` split that this assert
    # rejects; the docstring has been corrected to match the code.
    assert split in ["train", "val"], f"unsupported split: {split!r}"
    dataset_name = cfg.TRAIN.DATASET
    batch_size = cfg.TRAIN.BATCH_SIZE
    # Only the training split is shuffled; validation order is deterministic.
    shuffle = split == "train"
    # Construct the dataset for the requested split.
    dataset = _DATASETS[dataset_name](cfg, split)
    # Wrap it in a PyTorch DataLoader.
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=True,
        drop_last=False,
    )
    return loader
| 21.479167 | 75 | 0.625606 | """Data loader."""
import torch
from torch.utils.data.sampler import RandomSampler
from .kinetics import Kinetics
# Registry mapping the cfg.TRAIN.DATASET name to its dataset class.
_DATASETS = {
    "kinetics": Kinetics,
}
def construct_loader(cfg, split):
    """
    Constructs the data loader for the given dataset.
    Args:
        cfg (CfgNode): configs. Details can be found in
            vml/config/defaults.py
        split (str): the split of the data loader. Options include `train`,
            `val`, and `test`.
    """
    # NOTE(review): the docstring mentions `test`, but the assert below only
    # accepts `train` and `val` -- reconcile one with the other.
    assert split in ["train", "val"]
    dataset_name = cfg.TRAIN.DATASET
    batch_size = cfg.TRAIN.BATCH_SIZE
    # Only the training split is shuffled.
    if split in ["train"]:
        shuffle = True
    elif split in ["val"]:
        shuffle = False
    # Construct the dataset
    dataset = _DATASETS[dataset_name](cfg, split)
    # Create a loader
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=cfg.DATA_LOADER.NUM_WORKERS,
        pin_memory=True,
        drop_last=False,
    )
    return loader
| 0 | 0 | 0 |
8b15d42fa790505b5b41cf297a18a9c49ddf5408 | 582 | py | Python | code/01_in_out/014_ingest_tips_ratings.py | LadyMiss88/recommendation-engine-using-social-graph | 4ae9978b6251bffdde6793ba394dfa77a7d4a6f4 | [
"MIT"
] | null | null | null | code/01_in_out/014_ingest_tips_ratings.py | LadyMiss88/recommendation-engine-using-social-graph | 4ae9978b6251bffdde6793ba394dfa77a7d4a6f4 | [
"MIT"
] | null | null | null | code/01_in_out/014_ingest_tips_ratings.py | LadyMiss88/recommendation-engine-using-social-graph | 4ae9978b6251bffdde6793ba394dfa77a7d4a6f4 | [
"MIT"
] | null | null | null | import sqlite3
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Set the file paths
db_path = 'D:/Workspace-Github/saproject/data/foursquare.db'
# Connect and score every tip, persisting the result back to the database.
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('SELECT tid, tip FROM tips;')
results = c.fetchall()
# Build the analyzer once: it is reusable across calls, so constructing it
# inside the loop (as before) was pure per-row overhead.
sid = SentimentIntensityAnalyzer()
for tid, tip in results:
    # VADER's compound score is in [-1, 1]; shift/scale yields 0-10
    # (the previous comment incorrectly claimed a 1-10 range).
    rating = round((sid.polarity_scores(tip)['compound'] + 1) * 5, 2)
    c.execute("""UPDATE tips SET senti_score = ? WHERE tid = ?;""", (rating, tid))
conn.commit()
conn.close()
| 29.1 | 85 | 0.690722 | import sqlite3
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Set the file paths
db_path ='D:/Workspace-Github/saproject/data/foursquare.db'
# connect and write to database
conn = sqlite3.connect(db_path)
c = conn.cursor()
# Fetch every tip so a sentiment score can be computed per row.
c.execute('SELECT tid, tip FROM tips;')
results = c.fetchall()
for tid, tip in results:
    # NOTE(review): the analyzer is reusable; constructing it once before
    # the loop would avoid per-row overhead.
    sid = SentimentIntensityAnalyzer()
    # VADER compound is in [-1, 1]; shifted/scaled this yields 0-10.
    rating = round((sid.polarity_scores(tip)['compound'] + 1) * 5, 2) # Rating: 0-10
    c.execute("""UPDATE tips SET senti_score = ? WHERE tid = ?;""",(rating, tid))
conn.commit()
conn.close()
| 0 | 0 | 0 |
9dcdd58e1b986b842d795c2d8517b2cd2331e9bf | 7,664 | py | Python | speed/client.py | neo4j-field/neo4j-arrow | 94a0468ac57c409fbe9694bd7d939d3192353464 | [
"Apache-2.0"
] | 16 | 2021-09-11T11:16:05.000Z | 2022-03-14T23:09:17.000Z | speed/client.py | neo4j-field/neo4j-arrow | 94a0468ac57c409fbe9694bd7d939d3192353464 | [
"Apache-2.0"
] | 6 | 2021-09-24T23:17:28.000Z | 2022-02-15T21:18:31.000Z | speed/client.py | neo4j-field/neo4j-arrow | 94a0468ac57c409fbe9694bd7d939d3192353464 | [
"Apache-2.0"
] | 1 | 2022-02-22T14:44:21.000Z | 2022-02-22T14:44:21.000Z | import pyarrow as pa
import pyarrow.flight as flight
import base64
import cmd
import json
import struct
import sys
from time import sleep, time
pa.enable_signal_handlers(True)
def wait_for_connection(client):
    """Perform a blocking check that a connection can be made to the server"""
    try:
        client.wait_for_available(5)
        print(f"Connected")
    except Exception as e:
        # Unauthenticated errors still prove the server is reachable.
        if type(e) is not flight.FlightUnauthenticatedError:
            # NOTE(review): `location` is a module global defined only inside
            # the __main__ block; calling this from an importing module could
            # raise NameError here.
            print(f"⁉ Failed to connect to {location}: {e.args}")
            sys.exit(1)
        else:
            print("Server requires auth, but connection possible")
def get_actions(client, options={}):
    """Discover available actions on the server"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    available = list(client.list_actions(options=options))
    if not available:
        print("Found zero actions 😕")
    else:
        print(f"💥 Found {len(available)} actions!")
    return available
def cypher_read(client, cypher, params={}, options={}):
    """Submit a cypherRead action and get a flight ticket

    A plain dict passed as *options* is converted into Flight call
    headers; exits the process on any failure.
    """
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    cypher_bytes = cypher.encode("utf8")
    params_bytes = json.dumps(params).encode("utf8")
    # Our CypherMessage format is simple:
    #  - 16 bit unsigned length of the cypher byte string
    #  - the cypher byte string payload
    #  - 16 bit unsigned length of the param json payload
    #  - the param json byte string payload
    # NOTE(review): the "H" length fields cap each payload at 65,535 bytes;
    # struct.pack raises struct.error beyond that.
    pattern = f"!H{len(cypher_bytes)}sH{len(params_bytes)}s"
    buffer = struct.pack(pattern,
                         len(cypher_bytes), cypher_bytes,
                         len(params_bytes), params_bytes)
    ticket = None
    try:
        results = client.do_action(("cypherRead", buffer), options=options)
        ticket = pa.flight.Ticket.deserialize((next(results).body.to_pybytes()))
    except Exception as e:
        print(f"⚠ submit_cypher_read: {e}")
        sys.exit(1)
    return ticket
def gds_read_node_prop(client, params={}, options={}):
    """Submit a gdsNodeProperties action and get a flight ticket

    *params* is sent to the server as a JSON payload; a plain dict passed
    as *options* is converted into Flight call headers. Exits the process
    on any failure.
    """
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    params_bytes = json.dumps(params).encode("utf8")
    ticket = None
    try:
        results = client.do_action(("gdsNodeProperties", params_bytes), options=options)
        ticket = pa.flight.Ticket.deserialize((next(results).body.to_pybytes()))
    except Exception as e:
        # Fixed label: this used to print "submit_cypher_read", a copy-paste
        # from cypher_read that made failures here hard to attribute.
        print(f"⚠ gds_read_node_prop: {e}")
        sys.exit(1)
    return ticket
def check_flight_status(client, ticket, options):
    """Check on a flight's status given a particular Ticket"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # Accept either a Ticket object or its already-serialized bytes.
    if type(ticket) == pa.flight.Ticket:
        buffer = ticket.serialize()
    else:
        buffer = ticket
    status = None
    try:
        results = client.do_action(("jobStatus", buffer), options=options)
        status = next(results).body.to_pybytes().decode("utf8")
    except Exception as e:
        print(f"⚠ check_flight_status: {e}")
        sys.exit(1)
    return status
def list_flights(client, options={}):
    """List all available flights"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # TODO: unimplemented stub -- options are built but nothing is listed
    # or returned yet.
    pass
def get_flight_info(client, ticket, options):
    """Find a flight based on the given ticket"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # Accept either a Ticket object or its already-serialized bytes.
    if type(ticket) == pa.flight.Ticket:
        buffer = ticket.serialize()
    else:
        buffer = ticket
    descriptor = pa.flight.FlightDescriptor.for_command(buffer)
    info = None
    try:
        info = client.get_flight_info(descriptor, options=options)
    except Exception as e:
        # Unlike the other helpers this one does not exit; callers must
        # handle a None return.
        print(f"⚠ get_flight_info: {e}")
    return info
def stream_flight(client, ticket, options):
    """Stream back a given flight, assuming it's ready to stream"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    result = client.do_get(ticket, options=options)
    start = time()
    cnt = 0
    # Consume the stream chunk by chunk, tracking throughput as we go.
    for chunk, metadata in result:
        cnt = cnt + chunk.num_rows
        print(f"Current Row @ {cnt:,}:\t[fields: {chunk.schema.names}, rows: {chunk.num_rows:,}]")
        #for col in chunk:
        #    print(col)
    finish = time()
    print(f"Done! Time Delta: {round(finish - start, 1):,}s")
    print(f"Count: {cnt:,} rows, Rate: {round(cnt / (finish - start)):,} rows/s")
##############################################################################
if __name__ == "__main__":
    # Demo driver: connect, then exercise the Cypher and GDS read paths.
    # NOTE(review): build_location is not defined in this copy of the file
    # (presumably stripped); running this block as-is raises NameError.
    location = build_location()
    client = flight.FlightClient(location)
    print(f"Trying to connect to location {location}")
    wait_for_connection(client)
    # TODO: username/password args? env?
    options = flight.FlightCallOptions(headers=[
        (b'authorization', b'Basic ' + base64.b64encode(b'neo4j:password'))
    ])
    print(f"Enumerating available actions from location {location}")
    for action in get_actions(client, options):
        print(f"  {action}")
    # TODO: user-supplied cypher/params
    print("Submitting a read cypher action/job using:")
    cypher = """
    UNWIND range(1, $rows) AS row
    RETURN row, [_ IN range(1, $dimension) | rand()] as fauxEmbedding
    """
    params = {"rows": 1_000_000, "dimension": 128}
    print(f"  cypher: {cypher}")
    print(f"  params: {params}")
    ticket = cypher_read(client, cypher, params, options)
    print(f"Got ticket: {ticket}")
    print("Waiting for flight to be available...")
    # Poll the job status a few times until the stream starts producing.
    for i in range(1, 10):
        status = check_flight_status(client, ticket, options)
        print(f"  status: {status}")
        if status == "PRODUCING":
            break
        else:
            sleep(3)
    print("Flight ready! Getting flight info...")
    info = None
    while info is None:
        sleep(3)
        try:
            info = get_flight_info(client, ticket, options)
        except Exception as e:
            print(f"failed to get flight info...retrying in 5s")
            sleep(5)
    print(f"Got info on our flight: {info}")
    print("Boarding flight and getting stream...")
    stream_flight(client, ticket, options)
    ### GDS
    print("----------------------------------------------------------------")
    gds_params = {
        "dbName": "neo4j",
        # NOTE(review): the key "graphName:" carries a stray trailing colon
        # unlike the other keys -- confirm against the server-side schema.
        "graphName:": "mygraph",
        "filters": [],
        "properties": ["n"],
    }
    print(f"Submitting GDS node properties request:\n{gds_params}")
    ticket = gds_read_node_prop(client, gds_params, options)
    print(f"Got ticket: {ticket}")
    print("Waiting for flight to be available...")
    for i in range(1, 10):
        status = check_flight_status(client, ticket, options)
        print(f"  status: {status}")
        if status == "PRODUCING":
            break
        else:
            sleep(3)
    print("Flight ready! Getting flight info...")
    info = None
    while info is None:
        sleep(3)
        try:
            info = get_flight_info(client, ticket, options)
        except Exception as e:
            print(f"failed to get flight info...retrying in 5s")
            sleep(5)
    print(f"Got info on our flight: {info}")
    print("Boarding flight and getting stream...")
    stream_flight(client, ticket, options)
| 33.614035 | 98 | 0.619128 | import pyarrow as pa
import pyarrow.flight as flight
import base64
import cmd
import json
import struct
import sys
from time import sleep, time
pa.enable_signal_handlers(True)
def build_location(inputs=None):
    """Parse ``host port`` from *inputs* (default: sys.argv[1:]).

    Missing arguments fall back to localhost / 9999.

    :param inputs: optional sequence of CLI-style arguments
    :return: (host, port) tuple with port coerced to int
    """
    # Resolve sys.argv at call time: the original default of
    # `inputs=sys.argv[1:]` was evaluated once at import, so any later
    # changes to sys.argv were silently ignored.
    if inputs is None:
        inputs = sys.argv[1:]
    it = iter(inputs)
    host = next(it, "localhost")
    port = int(next(it, 9999))
    return (host, port)
def wait_for_connection(client):
    """Perform a blocking check that a connection can be made to the server"""
    try:
        client.wait_for_available(5)
        print(f"Connected")
    except Exception as e:
        # Unauthenticated errors still prove the server is reachable.
        if type(e) is not flight.FlightUnauthenticatedError:
            # NOTE(review): `location` is a module global defined only inside
            # the __main__ block; calling this from an importing module could
            # raise NameError here.
            print(f"⁉ Failed to connect to {location}: {e.args}")
            sys.exit(1)
        else:
            print("Server requires auth, but connection possible")
def get_actions(client, options={}):
    """Discover available actions on the server"""
    # A plain dict is converted into Flight call headers.
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    actions = list(client.list_actions(options=options))
    if len(actions) == 0:
        print("Found zero actions 😕")
    else:
        print(f"💥 Found {len(actions)} actions!")
    return actions
def cypher_read(client, cypher, params={}, options={}):
    """Submit a cypherRead action and get a flight ticket

    A plain dict passed as *options* is converted into Flight call
    headers; exits the process on any failure.
    """
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    cypher_bytes = cypher.encode("utf8")
    params_bytes = json.dumps(params).encode("utf8")
    # Our CypherMessage format is simple:
    #  - 16 bit unsigned length of the cypher byte string
    #  - the cypher byte string payload
    #  - 16 bit unsigned length of the param json payload
    #  - the param json byte string payload
    # NOTE(review): the "H" length fields cap each payload at 65,535 bytes;
    # struct.pack raises struct.error beyond that.
    pattern = f"!H{len(cypher_bytes)}sH{len(params_bytes)}s"
    buffer = struct.pack(pattern,
                         len(cypher_bytes), cypher_bytes,
                         len(params_bytes), params_bytes)
    ticket = None
    try:
        results = client.do_action(("cypherRead", buffer), options=options)
        ticket = pa.flight.Ticket.deserialize((next(results).body.to_pybytes()))
    except Exception as e:
        print(f"⚠ submit_cypher_read: {e}")
        sys.exit(1)
    return ticket
def gds_read_node_prop(client, params={}, options={}):
    """Submit a gdsNodeProperties action and get a flight ticket

    *params* is sent to the server as a JSON payload; a plain dict passed
    as *options* is converted into Flight call headers. Exits the process
    on any failure.
    """
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    params_bytes = json.dumps(params).encode("utf8")
    ticket = None
    try:
        results = client.do_action(("gdsNodeProperties", params_bytes), options=options)
        ticket = pa.flight.Ticket.deserialize((next(results).body.to_pybytes()))
    except Exception as e:
        # Fixed label: this used to print "submit_cypher_read", a copy-paste
        # from cypher_read that made failures here hard to attribute.
        print(f"⚠ gds_read_node_prop: {e}")
        sys.exit(1)
    return ticket
def check_flight_status(client, ticket, options):
    """Check on a flight's status given a particular Ticket"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # Accept either a Ticket object or its already-serialized bytes.
    if type(ticket) == pa.flight.Ticket:
        buffer = ticket.serialize()
    else:
        buffer = ticket
    status = None
    try:
        results = client.do_action(("jobStatus", buffer), options=options)
        status = next(results).body.to_pybytes().decode("utf8")
    except Exception as e:
        print(f"⚠ check_flight_status: {e}")
        sys.exit(1)
    return status
def list_flights(client, options={}):
    """List all available flights"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # TODO: unimplemented stub -- options are built but nothing is listed
    # or returned yet.
    pass
def get_flight_info(client, ticket, options):
    """Find a flight based on the given ticket"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    # Accept either a Ticket object or its already-serialized bytes.
    if type(ticket) == pa.flight.Ticket:
        buffer = ticket.serialize()
    else:
        buffer = ticket
    descriptor = pa.flight.FlightDescriptor.for_command(buffer)
    info = None
    try:
        info = client.get_flight_info(descriptor, options=options)
    except Exception as e:
        # Unlike the other helpers this one does not exit; callers must
        # handle a None return.
        print(f"⚠ get_flight_info: {e}")
    return info
def stream_flight(client, ticket, options):
    """Stream back a given flight, assuming it's ready to stream"""
    if type(options) == dict:
        options = flight.FlightCallOptions(headers=list(options.items()))
    result = client.do_get(ticket, options=options)
    start = time()
    cnt = 0
    # Consume the stream chunk by chunk, tracking throughput as we go.
    for chunk, metadata in result:
        cnt = cnt + chunk.num_rows
        print(f"Current Row @ {cnt:,}:\t[fields: {chunk.schema.names}, rows: {chunk.num_rows:,}]")
        #for col in chunk:
        #    print(col)
    finish = time()
    print(f"Done! Time Delta: {round(finish - start, 1):,}s")
    print(f"Count: {cnt:,} rows, Rate: {round(cnt / (finish - start)):,} rows/s")
##############################################################################
if __name__ == "__main__":
    # Demo driver: connect, then exercise the Cypher and GDS read paths.
    location = build_location()
    client = flight.FlightClient(location)
    print(f"Trying to connect to location {location}")
    wait_for_connection(client)
    # TODO: username/password args? env?
    options = flight.FlightCallOptions(headers=[
        (b'authorization', b'Basic ' + base64.b64encode(b'neo4j:password'))
    ])
    print(f"Enumerating available actions from location {location}")
    for action in get_actions(client, options):
        print(f"  {action}")
    # TODO: user-supplied cypher/params
    print("Submitting a read cypher action/job using:")
    cypher = """
    UNWIND range(1, $rows) AS row
    RETURN row, [_ IN range(1, $dimension) | rand()] as fauxEmbedding
    """
    params = {"rows": 1_000_000, "dimension": 128}
    print(f"  cypher: {cypher}")
    print(f"  params: {params}")
    ticket = cypher_read(client, cypher, params, options)
    print(f"Got ticket: {ticket}")
    print("Waiting for flight to be available...")
    # Poll the job status a few times until the stream starts producing.
    for i in range(1, 10):
        status = check_flight_status(client, ticket, options)
        print(f"  status: {status}")
        if status == "PRODUCING":
            break
        else:
            sleep(3)
    print("Flight ready! Getting flight info...")
    info = None
    while info is None:
        sleep(3)
        try:
            info = get_flight_info(client, ticket, options)
        except Exception as e:
            print(f"failed to get flight info...retrying in 5s")
            sleep(5)
    print(f"Got info on our flight: {info}")
    print("Boarding flight and getting stream...")
    stream_flight(client, ticket, options)
    ### GDS
    print("----------------------------------------------------------------")
    gds_params = {
        "dbName": "neo4j",
        # NOTE(review): the key "graphName:" carries a stray trailing colon
        # unlike the other keys -- confirm against the server-side schema.
        "graphName:": "mygraph",
        "filters": [],
        "properties": ["n"],
    }
    print(f"Submitting GDS node properties request:\n{gds_params}")
    ticket = gds_read_node_prop(client, gds_params, options)
    print(f"Got ticket: {ticket}")
    print("Waiting for flight to be available...")
    for i in range(1, 10):
        status = check_flight_status(client, ticket, options)
        print(f"  status: {status}")
        if status == "PRODUCING":
            break
        else:
            sleep(3)
    print("Flight ready! Getting flight info...")
    info = None
    while info is None:
        sleep(3)
        try:
            info = get_flight_info(client, ticket, options)
        except Exception as e:
            print(f"failed to get flight info...retrying in 5s")
            sleep(5)
    print(f"Got info on our flight: {info}")
    print("Boarding flight and getting stream...")
    stream_flight(client, ticket, options)
5b1cf232bfe8fd96f8b17b4ccef40c350f02007e | 564 | py | Python | apps/funcionarios/migrations/0004_funcionario_unidade.py | emilioeiji/gestao_gbareru | 2f3d196c2f511910cba2857062725b817547896a | [
"CC0-1.0"
] | null | null | null | apps/funcionarios/migrations/0004_funcionario_unidade.py | emilioeiji/gestao_gbareru | 2f3d196c2f511910cba2857062725b817547896a | [
"CC0-1.0"
] | null | null | null | apps/funcionarios/migrations/0004_funcionario_unidade.py | emilioeiji/gestao_gbareru | 2f3d196c2f511910cba2857062725b817547896a | [
"CC0-1.0"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-11 21:29
from django.db import migrations, models
import django.db.models.deletion
| 25.636364 | 114 | 0.634752 | # Generated by Django 4.0.3 on 2022-03-11 21:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('unidade', '0001_initial'),
('funcionarios', '0003_funcionario_skill'),
]
operations = [
migrations.AddField(
model_name='funcionario',
name='unidade',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='unidade.unidade'),
preserve_default=False,
),
]
| 0 | 417 | 23 |
7f1035cd6cef8a5d5e2571cdf2fc422d3d6c3f98 | 1,024 | py | Python | sympy_version/test.py | mazenbesher/simplex | 5cc3013f20f87891658fe64bf73c7c4cc2843240 | [
"MIT"
] | null | null | null | sympy_version/test.py | mazenbesher/simplex | 5cc3013f20f87891658fe64bf73c7c4cc2843240 | [
"MIT"
] | null | null | null | sympy_version/test.py | mazenbesher/simplex | 5cc3013f20f87891658fe64bf73c7c4cc2843240 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import unittest
import numpy as np
from sympy import Eq, symbols
from main import sympy_simplex, LP
aufgabe1 = LP( # Blatt 2
np.matrix('2 0 6; -2 8 4; 3 6 5'),
np.matrix('10; 12; 20'),
np.matrix('2; 1; 3; 0; 0; 0'),
[4, 5, 6])
kreise_example = LP( # Book Page 31
np.matrix('-0.5 -5.5 -2.5 9; 0.5 -1.5 -0.5 1; 1 0 0 0'), # A
np.matrix('0; 0; 1'), # b
np.matrix('10; -57; -9; -24; 0; 0; 0'), # c
[5, 6, 7]
)
if __name__ == '__main__':
unittest.main()
| 27.675676 | 87 | 0.591797 | #!/usr/bin/env python3
import unittest
import numpy as np
from sympy import Eq, symbols
from main import sympy_simplex, LP
aufgabe1 = LP( # Blatt 2
np.matrix('2 0 6; -2 8 4; 3 6 5'),
np.matrix('10; 12; 20'),
np.matrix('2; 1; 3; 0; 0; 0'),
[4, 5, 6])
kreise_example = LP( # Book Page 31
np.matrix('-0.5 -5.5 -2.5 9; 0.5 -1.5 -0.5 1; 1 0 0 0'), # A
np.matrix('0; 0; 1'), # b
np.matrix('10; -57; -9; -24; 0; 0; 0'), # c
[5, 6, 7]
)
class TestSimplex(unittest.TestCase):
def test_simplexAufgabe1(self):
ziel = sympy_simplex(aufgabe1)[1]
z = symbols('z')
x3, x4, x6 = symbols('x3 x4 x6')
self.assertEqual(ziel, Eq(z, -7*x3/3 - 3*x4/4 - x6/6 + 65/6))
def test_simplexFractionsAndKreisen(self):
# because of the fractions and int in lines 31, 32 in main.py
ziel = sympy_simplex(kreise_example)[1]
self.assertEqual(ziel.rhs.as_coefficients_dict()[1], 1) # Zielfunktionswert = 1
if __name__ == '__main__':
unittest.main()
| 415 | 16 | 76 |
2c3b17e6816541aeaf37b9841330e147dccadd0e | 3,107 | py | Python | src/xia2/Wrappers/Dials/RefineBravaisSettings.py | graeme-winter/xia2 | e00d688137d4ddb4b125be9a3f37ae00265886c2 | [
"BSD-3-Clause"
] | 10 | 2015-10-30T06:36:55.000Z | 2021-12-10T20:06:22.000Z | src/xia2/Wrappers/Dials/RefineBravaisSettings.py | graeme-winter/xia2 | e00d688137d4ddb4b125be9a3f37ae00265886c2 | [
"BSD-3-Clause"
] | 528 | 2015-11-24T08:20:12.000Z | 2022-03-21T21:47:29.000Z | src/xia2/Wrappers/Dials/RefineBravaisSettings.py | graeme-winter/xia2 | e00d688137d4ddb4b125be9a3f37ae00265886c2 | [
"BSD-3-Clause"
] | 14 | 2016-03-15T22:07:03.000Z | 2020-12-14T07:13:35.000Z | import copy
import json
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
logger = logging.getLogger("xia2.Wrappers.Dials.RefineBravaisSettings")
def RefineBravaisSettings(DriverType=None):
"""A factory for RefineBravaisSettingsWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
return RefineBravaisSettingsWrapper()
| 36.988095 | 84 | 0.650467 | import copy
import json
import logging
import os
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
logger = logging.getLogger("xia2.Wrappers.Dials.RefineBravaisSettings")
def RefineBravaisSettings(DriverType=None):
"""A factory for RefineBravaisSettingsWrapper classes."""
DriverInstance = DriverFactory.Driver(DriverType)
class RefineBravaisSettingsWrapper(DriverInstance.__class__):
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable("dials.refine_bravais_settings")
self._experiments_filename = None
self._indexed_filename = None
self._detector_fix = None
self._beam_fix = None
self._close_to_spindle_cutoff = None
def set_experiments_filename(self, experiments_filename):
self._experiments_filename = experiments_filename
def set_indexed_filename(self, indexed_filename):
self._indexed_filename = indexed_filename
def set_detector_fix(self, detector_fix):
self._detector_fix = detector_fix
def set_beam_fix(self, beam_fix):
self._beam_fix = beam_fix
def set_close_to_spindle_cutoff(self, close_to_spindle_cutoff):
self._close_to_spindle_cutoff = close_to_spindle_cutoff
def get_bravais_summary(self):
bravais_summary = {}
for k in self._bravais_summary:
bravais_summary[int(k)] = copy.deepcopy(self._bravais_summary[k])
bravais_summary[int(k)]["experiments_file"] = os.path.join(
self.get_working_directory(), "bravais_setting_%d.expt" % int(k)
)
return bravais_summary
def run(self):
logger.debug("Running dials.refine_bravais_settings")
self.clear_command_line()
self.add_command_line(self._experiments_filename)
self.add_command_line(self._indexed_filename)
nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
self.set_cpu_threads(nproc)
self.add_command_line("nproc=%i" % nproc)
self.add_command_line("best_monoclinic_beta=False")
# self.add_command_line('reflections_per_degree=10')
if self._detector_fix:
self.add_command_line("detector.fix=%s" % self._detector_fix)
if self._beam_fix:
self.add_command_line("beam.fix=%s" % self._beam_fix)
# self.add_command_line('engine=GaussNewton')
if self._close_to_spindle_cutoff is not None:
self.add_command_line(
"close_to_spindle_cutoff=%f" % self._close_to_spindle_cutoff
)
self.start()
self.close_wait()
self.check_for_errors()
with open(
os.path.join(self.get_working_directory(), "bravais_summary.json"),
) as fh:
self._bravais_summary = json.load(fh)
return RefineBravaisSettingsWrapper()
| 2,371 | 40 | 274 |
d90e114a5d65201c0b8a47c1c02ca6db7d1a479e | 62,256 | py | Python | win32comext/mapi/emsabtags.py | zhanqxun/cv_fish | f78f4f5bdafb070c179efee8b9276719dfaef1d7 | [
"Apache-2.0"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | Lib/site-packages/win32comext/mapi/emsabtags.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/win32comext/mapi/emsabtags.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | 4 | 2021-02-11T03:51:39.000Z | 2021-02-12T05:10:43.000Z | # Converted "manually" from EMSABTAG.H
from mapitags import PT_UNSPECIFIED, PT_NULL, PT_I2, PT_LONG, PT_R4, \
PT_DOUBLE, PT_CURRENCY, PT_APPTIME, PT_ERROR, \
PT_BOOLEAN, PT_OBJECT, PT_I8, PT_STRING8, PT_UNICODE, \
PT_SYSTIME, PT_CLSID, PT_BINARY, PT_SHORT, PT_I4, \
PT_FLOAT, PT_DOUBLE, PT_LONGLONG, PT_TSTRING, \
PT_MV_I2, PT_MV_LONG, PT_MV_R4, PT_MV_DOUBLE, \
PT_MV_CURRENCY, PT_MV_APPTIME, PT_MV_SYSTIME, \
PT_MV_STRING8, PT_MV_BINARY, PT_MV_UNICODE, \
PT_MV_CLSID, PT_MV_I8, PT_MV_SHORT, PT_MV_I4, \
PT_MV_FLOAT, PT_MV_R8, PT_MV_LONGLONG, PT_MV_TSTRING, \
PROP_TAG
AB_SHOW_PHANTOMS = 2
AB_SHOW_OTHERS = 4
# Flags for ulFlag on ResolveNames
EMS_AB_ADDRESS_LOOKUP = 1
# Constructed, but externally visible.
PR_EMS_AB_SERVER = PROP_TAG( PT_TSTRING, 65534)
PR_EMS_AB_SERVER_A = PROP_TAG( PT_STRING8, 65534)
PR_EMS_AB_SERVER_W = PROP_TAG( PT_UNICODE, 65534)
PR_EMS_AB_CONTAINERID = PROP_TAG( PT_LONG, 65533)
PR_EMS_AB_DOS_ENTRYID = PR_EMS_AB_CONTAINERID
PR_EMS_AB_PARENT_ENTRYID = PROP_TAG( PT_BINARY, 65532)
PR_EMS_AB_IS_MASTER = PROP_TAG(PT_BOOLEAN, 65531)
PR_EMS_AB_OBJECT_OID = PROP_TAG(PT_BINARY, 65530)
PR_EMS_AB_HIERARCHY_PATH = PROP_TAG(PT_TSTRING, 65529)
PR_EMS_AB_HIERARCHY_PATH_A = PROP_TAG(PT_STRING8, 65529)
PR_EMS_AB_HIERARCHY_PATH_W = PROP_TAG(PT_UNICODE, 65529)
PR_EMS_AB_CHILD_RDNS = PROP_TAG(PT_MV_STRING8, 65528)
MIN_EMS_AB_CONSTRUCTED_PROP_ID = 65528
PR_EMS_AB_OTHER_RECIPS = PROP_TAG(PT_OBJECT, 61440)
# Prop tags defined in the schema.
PR_EMS_AB_DISPLAY_NAME_PRINTABLE = PROP_TAG(PT_TSTRING, 14847)
PR_EMS_AB_DISPLAY_NAME_PRINTABLE_A = PROP_TAG(PT_STRING8, 14847)
PR_EMS_AB_DISPLAY_NAME_PRINTABLE_W = PROP_TAG(PT_UNICODE, 14847)
PR_EMS_AB_ACCESS_CATEGORY = PROP_TAG( PT_LONG, 32836)
PR_EMS_AB_ACTIVATION_SCHEDULE = PROP_TAG( PT_BINARY, 32837)
PR_EMS_AB_ACTIVATION_STYLE = PROP_TAG( PT_LONG, 32838)
PR_EMS_AB_ADDRESS_ENTRY_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 32791)
PR_EMS_AB_ADDRESS_ENTRY_DISPLAY_TABLE_MSDOS = PROP_TAG( PT_BINARY, 32839)
PR_EMS_AB_ADDRESS_SYNTAX = PROP_TAG( PT_BINARY, 32792)
PR_EMS_AB_ADDRESS_TYPE = PROP_TAG( PT_TSTRING, 32840)
PR_EMS_AB_ADDRESS_TYPE_A = PROP_TAG( PT_STRING8, 32840)
PR_EMS_AB_ADDRESS_TYPE_W = PROP_TAG( PT_UNICODE, 32840)
PR_EMS_AB_ADMD = PROP_TAG( PT_TSTRING, 32841)
PR_EMS_AB_ADMD_A = PROP_TAG( PT_STRING8, 32841)
PR_EMS_AB_ADMD_W = PROP_TAG( PT_UNICODE, 32841)
PR_EMS_AB_ADMIN_DESCRIPTION = PROP_TAG( PT_TSTRING, 32842)
PR_EMS_AB_ADMIN_DESCRIPTION_A = PROP_TAG( PT_STRING8, 32842)
PR_EMS_AB_ADMIN_DESCRIPTION_W = PROP_TAG( PT_UNICODE, 32842)
PR_EMS_AB_ADMIN_DISPLAY_NAME = PROP_TAG( PT_TSTRING, 32843)
PR_EMS_AB_ADMIN_DISPLAY_NAME_A = PROP_TAG( PT_STRING8, 32843)
PR_EMS_AB_ADMIN_DISPLAY_NAME_W = PROP_TAG( PT_UNICODE, 32843)
PR_EMS_AB_ADMIN_EXTENSION_DLL = PROP_TAG( PT_TSTRING, 32844)
PR_EMS_AB_ADMIN_EXTENSION_DLL_A = PROP_TAG( PT_STRING8, 32844)
PR_EMS_AB_ADMIN_EXTENSION_DLL_W = PROP_TAG( PT_UNICODE, 32844)
PR_EMS_AB_ALIASED_OBJECT_NAME = PROP_TAG( PT_TSTRING, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_A = PROP_TAG( PT_STRING8, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_W = PROP_TAG( PT_UNICODE, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_O = PROP_TAG( PT_OBJECT, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_T = PROP_TAG( PT_TSTRING, 32845)
PR_EMS_AB_ALT_RECIPIENT = PROP_TAG( PT_TSTRING, 32846)
PR_EMS_AB_ALT_RECIPIENT_A = PROP_TAG( PT_STRING8, 32846)
PR_EMS_AB_ALT_RECIPIENT_W = PROP_TAG( PT_UNICODE, 32846)
PR_EMS_AB_ALT_RECIPIENT_O = PROP_TAG( PT_OBJECT, 32846)
PR_EMS_AB_ALT_RECIPIENT_T = PROP_TAG( PT_TSTRING, 32846)
PR_EMS_AB_ALT_RECIPIENT_BL = PROP_TAG( PT_MV_TSTRING, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_A = PROP_TAG( PT_MV_STRING8, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_W = PROP_TAG( PT_MV_UNICODE, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_O = PROP_TAG( PT_OBJECT, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_T = PROP_TAG( PT_MV_TSTRING, 32847)
PR_EMS_AB_ANCESTOR_ID = PROP_TAG( PT_BINARY, 32848)
PR_EMS_AB_ASSOC_NT_ACCOUNT = PROP_TAG( PT_BINARY, 32807)
PR_EMS_AB_ASSOC_REMOTE_DXA = PROP_TAG( PT_MV_TSTRING, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_A = PROP_TAG( PT_MV_STRING8, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_W = PROP_TAG( PT_MV_UNICODE, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_O = PROP_TAG( PT_OBJECT, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_T = PROP_TAG( PT_MV_TSTRING, 32849)
PR_EMS_AB_ASSOCIATION_LIFETIME = PROP_TAG( PT_LONG, 32850)
PR_EMS_AB_AUTH_ORIG_BL = PROP_TAG( PT_MV_TSTRING, 32851)
PR_EMS_AB_AUTH_ORIG_BL_A = PROP_TAG( PT_MV_STRING8, 32851)
PR_EMS_AB_AUTH_ORIG_BL_W = PROP_TAG( PT_MV_UNICODE, 32851)
PR_EMS_AB_AUTH_ORIG_BL_O = PROP_TAG( PT_OBJECT, 32851)
PR_EMS_AB_AUTH_ORIG_BL_T = PROP_TAG( PT_MV_TSTRING, 32851)
PR_EMS_AB_AUTHORITY_REVOCATION_LIST = PROP_TAG( PT_MV_BINARY, 32806)
PR_EMS_AB_AUTHORIZED_DOMAIN = PROP_TAG( PT_TSTRING, 32852)
PR_EMS_AB_AUTHORIZED_DOMAIN_A = PROP_TAG( PT_STRING8, 32852)
PR_EMS_AB_AUTHORIZED_DOMAIN_W = PROP_TAG( PT_UNICODE, 32852)
PR_EMS_AB_AUTHORIZED_PASSWORD = PROP_TAG( PT_BINARY, 32853)
PR_EMS_AB_AUTHORIZED_USER = PROP_TAG( PT_TSTRING, 32854)
PR_EMS_AB_AUTHORIZED_USER_A = PROP_TAG( PT_STRING8, 32854)
PR_EMS_AB_AUTHORIZED_USER_W = PROP_TAG( PT_UNICODE, 32854)
PR_EMS_AB_AUTOREPLY = PROP_TAG( PT_BOOLEAN, 32779)
PR_EMS_AB_AUTOREPLY_MESSAGE = PROP_TAG( PT_TSTRING, 32778)
PR_EMS_AB_AUTOREPLY_MESSAGE_A = PROP_TAG( PT_STRING8, 32778)
PR_EMS_AB_AUTOREPLY_MESSAGE_W = PROP_TAG( PT_UNICODE, 32778)
PR_EMS_AB_AUTOREPLY_SUBJECT = PROP_TAG( PT_TSTRING, 32830)
PR_EMS_AB_AUTOREPLY_SUBJECT_A = PROP_TAG( PT_STRING8, 32830)
PR_EMS_AB_AUTOREPLY_SUBJECT_W = PROP_TAG( PT_UNICODE, 32830)
PR_EMS_AB_BRIDGEHEAD_SERVERS = PROP_TAG( PT_MV_TSTRING, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_A = PROP_TAG( PT_MV_STRING8, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_W = PROP_TAG( PT_MV_UNICODE, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_O = PROP_TAG( PT_OBJECT, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_T = PROP_TAG( PT_MV_TSTRING, 33140)
PR_EMS_AB_BUSINESS_CATEGORY = PROP_TAG( PT_MV_TSTRING, 32855)
PR_EMS_AB_BUSINESS_CATEGORY_A = PROP_TAG( PT_MV_STRING8, 32855)
PR_EMS_AB_BUSINESS_CATEGORY_W = PROP_TAG( PT_MV_UNICODE, 32855)
PR_EMS_AB_BUSINESS_ROLES = PROP_TAG( PT_BINARY, 32803)
PR_EMS_AB_CA_CERTIFICATE = PROP_TAG( PT_MV_BINARY, 32771)
PR_EMS_AB_CAN_CREATE_PF = PROP_TAG( PT_MV_TSTRING, 32856)
PR_EMS_AB_CAN_CREATE_PF_A = PROP_TAG( PT_MV_STRING8, 32856)
PR_EMS_AB_CAN_CREATE_PF_W = PROP_TAG( PT_MV_UNICODE, 32856)
PR_EMS_AB_CAN_CREATE_PF_O = PROP_TAG( PT_OBJECT, 32856)
PR_EMS_AB_CAN_CREATE_PF_T = PROP_TAG( PT_MV_TSTRING, 32856)
PR_EMS_AB_CAN_CREATE_PF_BL = PROP_TAG( PT_MV_TSTRING, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_A = PROP_TAG( PT_MV_STRING8, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_W = PROP_TAG( PT_MV_UNICODE, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_O = PROP_TAG( PT_OBJECT, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_T = PROP_TAG( PT_MV_TSTRING, 32857)
PR_EMS_AB_CAN_CREATE_PF_DL = PROP_TAG( PT_MV_TSTRING, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_A = PROP_TAG( PT_MV_STRING8, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_W = PROP_TAG( PT_MV_UNICODE, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_O = PROP_TAG( PT_OBJECT, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_T = PROP_TAG( PT_MV_TSTRING, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_BL = PROP_TAG( PT_MV_TSTRING, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_A = PROP_TAG( PT_MV_STRING8, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_W = PROP_TAG( PT_MV_UNICODE, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_O = PROP_TAG( PT_OBJECT, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_T = PROP_TAG( PT_MV_TSTRING, 32859)
PR_EMS_AB_CAN_NOT_CREATE_PF = PROP_TAG( PT_MV_TSTRING, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_A = PROP_TAG( PT_MV_STRING8, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_W = PROP_TAG( PT_MV_UNICODE, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_O = PROP_TAG( PT_OBJECT, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_T = PROP_TAG( PT_MV_TSTRING, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL = PROP_TAG( PT_MV_TSTRING, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_A = PROP_TAG( PT_MV_STRING8, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_W = PROP_TAG( PT_MV_UNICODE, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_O = PROP_TAG( PT_OBJECT, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_T = PROP_TAG( PT_MV_TSTRING, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL = PROP_TAG( PT_MV_TSTRING, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_A = PROP_TAG( PT_MV_STRING8, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_W = PROP_TAG( PT_MV_UNICODE, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_O = PROP_TAG( PT_OBJECT, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_T = PROP_TAG( PT_MV_TSTRING, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL = PROP_TAG( PT_MV_TSTRING, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_A = PROP_TAG( PT_MV_STRING8, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_W = PROP_TAG( PT_MV_UNICODE, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_O = PROP_TAG( PT_OBJECT, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_T = PROP_TAG( PT_MV_TSTRING, 32863)
PR_EMS_AB_CAN_PRESERVE_DNS = PROP_TAG( PT_BOOLEAN, 32864)
PR_EMS_AB_CERTIFICATE_REVOCATION_LIST = PROP_TAG( PT_BINARY, 32790)
PR_EMS_AB_CLOCK_ALERT_OFFSET = PROP_TAG( PT_LONG, 32865)
PR_EMS_AB_CLOCK_ALERT_REPAIR = PROP_TAG( PT_BOOLEAN, 32866)
PR_EMS_AB_CLOCK_WARNING_OFFSET = PROP_TAG( PT_LONG, 32867)
PR_EMS_AB_CLOCK_WARNING_REPAIR = PROP_TAG( PT_BOOLEAN, 32868)
PR_EMS_AB_COMPUTER_NAME = PROP_TAG( PT_TSTRING, 32869)
PR_EMS_AB_COMPUTER_NAME_A = PROP_TAG( PT_STRING8, 32869)
PR_EMS_AB_COMPUTER_NAME_W = PROP_TAG( PT_UNICODE, 32869)
PR_EMS_AB_CONNECTED_DOMAINS = PROP_TAG( PT_MV_TSTRING, 32870)
PR_EMS_AB_CONNECTED_DOMAINS_A = PROP_TAG( PT_MV_STRING8, 32870)
PR_EMS_AB_CONNECTED_DOMAINS_W = PROP_TAG( PT_MV_UNICODE, 32870)
PR_EMS_AB_CONTAINER_INFO = PROP_TAG( PT_LONG, 32871)
PR_EMS_AB_COST = PROP_TAG( PT_LONG, 32872)
PR_EMS_AB_COUNTRY_NAME = PROP_TAG( PT_TSTRING, 32873)
PR_EMS_AB_COUNTRY_NAME_A = PROP_TAG( PT_STRING8, 32873)
PR_EMS_AB_COUNTRY_NAME_W = PROP_TAG( PT_UNICODE, 32873)
PR_EMS_AB_CROSS_CERTIFICATE_PAIR = PROP_TAG( PT_MV_BINARY, 32805)
PR_EMS_AB_DELIV_CONT_LENGTH = PROP_TAG( PT_LONG, 32874)
PR_EMS_AB_DELIV_EITS = PROP_TAG( PT_MV_BINARY, 32875)
PR_EMS_AB_DELIV_EXT_CONT_TYPES = PROP_TAG( PT_MV_BINARY, 32876)
PR_EMS_AB_DELIVER_AND_REDIRECT = PROP_TAG( PT_BOOLEAN, 32877)
PR_EMS_AB_DELIVERY_MECHANISM = PROP_TAG( PT_LONG, 32878)
PR_EMS_AB_DESCRIPTION = PROP_TAG( PT_MV_TSTRING, 32879)
PR_EMS_AB_DESCRIPTION_A = PROP_TAG( PT_MV_STRING8, 32879)
PR_EMS_AB_DESCRIPTION_W = PROP_TAG( PT_MV_UNICODE, 32879)
PR_EMS_AB_DESTINATION_INDICATOR = PROP_TAG( PT_MV_TSTRING, 32880)
PR_EMS_AB_DESTINATION_INDICATOR_A = PROP_TAG( PT_MV_STRING8, 32880)
PR_EMS_AB_DESTINATION_INDICATOR_W = PROP_TAG( PT_MV_UNICODE, 32880)
PR_EMS_AB_DIAGNOSTIC_REG_KEY = PROP_TAG( PT_TSTRING, 32881)
PR_EMS_AB_DIAGNOSTIC_REG_KEY_A = PROP_TAG( PT_STRING8, 32881)
PR_EMS_AB_DIAGNOSTIC_REG_KEY_W = PROP_TAG( PT_UNICODE, 32881)
PR_EMS_AB_DISPLAY_NAME_OVERRIDE = PROP_TAG( PT_BOOLEAN, 32769)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL = PROP_TAG( PT_MV_TSTRING, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_A = PROP_TAG( PT_MV_STRING8, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_W = PROP_TAG( PT_MV_UNICODE, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_O = PROP_TAG( PT_OBJECT, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_T = PROP_TAG( PT_MV_TSTRING, 32882)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL = PROP_TAG( PT_MV_TSTRING, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_A = PROP_TAG( PT_MV_STRING8, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_W = PROP_TAG( PT_MV_UNICODE, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_O = PROP_TAG( PT_OBJECT, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_T = PROP_TAG( PT_MV_TSTRING, 32883)
PR_EMS_AB_DL_MEMBER_RULE = PROP_TAG( PT_MV_BINARY, 32884)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP = PROP_TAG( PT_TSTRING, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_A = PROP_TAG( PT_STRING8, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_W = PROP_TAG( PT_UNICODE, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_O = PROP_TAG( PT_OBJECT, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_T = PROP_TAG( PT_TSTRING, 32885)
PR_EMS_AB_DOMAIN_NAME = PROP_TAG( PT_TSTRING, 32886)
PR_EMS_AB_DOMAIN_NAME_A = PROP_TAG( PT_STRING8, 32886)
PR_EMS_AB_DOMAIN_NAME_W = PROP_TAG( PT_UNICODE, 32886)
PR_EMS_AB_DSA_SIGNATURE = PROP_TAG( PT_BINARY, 32887)
PR_EMS_AB_DXA_ADMIN_COPY = PROP_TAG( PT_BOOLEAN, 32888)
PR_EMS_AB_DXA_ADMIN_FORWARD = PROP_TAG( PT_BOOLEAN, 32889)
PR_EMS_AB_DXA_ADMIN_UPDATE = PROP_TAG( PT_LONG, 32890)
PR_EMS_AB_DXA_APPEND_REQCN = PROP_TAG( PT_BOOLEAN, 32891)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST = PROP_TAG( PT_MV_TSTRING, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_A = PROP_TAG( PT_MV_STRING8, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_W = PROP_TAG( PT_MV_UNICODE, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_O = PROP_TAG( PT_OBJECT, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_T = PROP_TAG( PT_MV_TSTRING, 32892)
PR_EMS_AB_DXA_CONF_REQ_TIME = PROP_TAG( PT_SYSTIME, 32893)
PR_EMS_AB_DXA_CONF_SEQ = PROP_TAG( PT_TSTRING, 32894)
PR_EMS_AB_DXA_CONF_SEQ_A = PROP_TAG( PT_STRING8, 32894)
PR_EMS_AB_DXA_CONF_SEQ_W = PROP_TAG( PT_UNICODE, 32894)
PR_EMS_AB_DXA_CONF_SEQ_USN = PROP_TAG( PT_LONG, 32895)
PR_EMS_AB_DXA_EXCHANGE_OPTIONS = PROP_TAG( PT_LONG, 32896)
PR_EMS_AB_DXA_EXPORT_NOW = PROP_TAG( PT_BOOLEAN, 32897)
PR_EMS_AB_DXA_FLAGS = PROP_TAG( PT_LONG, 32898)
PR_EMS_AB_DXA_IMP_SEQ = PROP_TAG( PT_TSTRING, 32899)
PR_EMS_AB_DXA_IMP_SEQ_A = PROP_TAG( PT_STRING8, 32899)
PR_EMS_AB_DXA_IMP_SEQ_W = PROP_TAG( PT_UNICODE, 32899)
PR_EMS_AB_DXA_IMP_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32900)
PR_EMS_AB_DXA_IMP_SEQ_USN = PROP_TAG( PT_LONG, 32901)
PR_EMS_AB_DXA_IMPORT_NOW = PROP_TAG( PT_BOOLEAN, 32902)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP = PROP_TAG( PT_MV_TSTRING, 32903)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP_A = PROP_TAG( PT_MV_STRING8, 32903)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP_W = PROP_TAG( PT_MV_UNICODE, 32903)
PR_EMS_AB_DXA_LOCAL_ADMIN = PROP_TAG( PT_TSTRING, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_A = PROP_TAG( PT_STRING8, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_W = PROP_TAG( PT_UNICODE, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_O = PROP_TAG( PT_OBJECT, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_T = PROP_TAG( PT_TSTRING, 32904)
PR_EMS_AB_DXA_LOGGING_LEVEL = PROP_TAG( PT_LONG, 32905)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE = PROP_TAG( PT_TSTRING, 32906)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE_A = PROP_TAG( PT_STRING8, 32906)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE_W = PROP_TAG( PT_UNICODE, 32906)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP = PROP_TAG( PT_MV_TSTRING, 32907)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP_A = PROP_TAG( PT_MV_STRING8, 32907)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP_W = PROP_TAG( PT_MV_UNICODE, 32907)
PR_EMS_AB_DXA_PASSWORD = PROP_TAG( PT_TSTRING, 32908)
PR_EMS_AB_DXA_PASSWORD_A = PROP_TAG( PT_STRING8, 32908)
PR_EMS_AB_DXA_PASSWORD_W = PROP_TAG( PT_UNICODE, 32908)
PR_EMS_AB_DXA_PREV_EXCHANGE_OPTIONS = PROP_TAG( PT_LONG, 32909)
PR_EMS_AB_DXA_PREV_EXPORT_NATIVE_ONLY = PROP_TAG( PT_BOOLEAN, 32910)
PR_EMS_AB_DXA_PREV_IN_EXCHANGE_SENSITIVITY = PROP_TAG( PT_LONG, 32911)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES = PROP_TAG( PT_TSTRING, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_A = PROP_TAG( PT_STRING8, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_W = PROP_TAG( PT_UNICODE, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_O = PROP_TAG( PT_OBJECT, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_T = PROP_TAG( PT_TSTRING, 32912)
PR_EMS_AB_DXA_PREV_REPLICATION_SENSITIVITY = PROP_TAG( PT_LONG, 32913)
PR_EMS_AB_DXA_PREV_TEMPLATE_OPTIONS = PROP_TAG( PT_LONG, 32914)
PR_EMS_AB_DXA_PREV_TYPES = PROP_TAG( PT_LONG, 32915)
PR_EMS_AB_DXA_RECIPIENT_CP = PROP_TAG( PT_TSTRING, 32916)
PR_EMS_AB_DXA_RECIPIENT_CP_A = PROP_TAG( PT_STRING8, 32916)
PR_EMS_AB_DXA_RECIPIENT_CP_W = PROP_TAG( PT_UNICODE, 32916)
PR_EMS_AB_DXA_REMOTE_CLIENT = PROP_TAG( PT_TSTRING, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_A = PROP_TAG( PT_STRING8, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_W = PROP_TAG( PT_UNICODE, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_O = PROP_TAG( PT_OBJECT, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_T = PROP_TAG( PT_TSTRING, 32917)
PR_EMS_AB_DXA_REQ_SEQ = PROP_TAG( PT_TSTRING, 32918)
PR_EMS_AB_DXA_REQ_SEQ_A = PROP_TAG( PT_STRING8, 32918)
PR_EMS_AB_DXA_REQ_SEQ_W = PROP_TAG( PT_UNICODE, 32918)
PR_EMS_AB_DXA_REQ_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32919)
PR_EMS_AB_DXA_REQ_SEQ_USN = PROP_TAG( PT_LONG, 32920)
PR_EMS_AB_DXA_REQNAME = PROP_TAG( PT_TSTRING, 32921)
PR_EMS_AB_DXA_REQNAME_A = PROP_TAG( PT_STRING8, 32921)
PR_EMS_AB_DXA_REQNAME_W = PROP_TAG( PT_UNICODE, 32921)
PR_EMS_AB_DXA_SVR_SEQ = PROP_TAG( PT_TSTRING, 32922)
PR_EMS_AB_DXA_SVR_SEQ_A = PROP_TAG( PT_STRING8, 32922)
PR_EMS_AB_DXA_SVR_SEQ_W = PROP_TAG( PT_UNICODE, 32922)
PR_EMS_AB_DXA_SVR_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32923)
PR_EMS_AB_DXA_SVR_SEQ_USN = PROP_TAG( PT_LONG, 32924)
PR_EMS_AB_DXA_TASK = PROP_TAG( PT_LONG, 32925)
PR_EMS_AB_DXA_TEMPLATE_OPTIONS = PROP_TAG( PT_LONG, 32926)
PR_EMS_AB_DXA_TEMPLATE_TIMESTAMP = PROP_TAG( PT_SYSTIME, 32927)
PR_EMS_AB_DXA_TYPES = PROP_TAG( PT_LONG, 32928)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST = PROP_TAG( PT_MV_TSTRING, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_A = PROP_TAG( PT_MV_STRING8, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_W = PROP_TAG( PT_MV_UNICODE, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_O = PROP_TAG( PT_OBJECT, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_T = PROP_TAG( PT_MV_TSTRING, 32929)
PR_EMS_AB_ENABLED_PROTOCOLS = PROP_TAG( PT_LONG, 33151)
PR_EMS_AB_ENCAPSULATION_METHOD = PROP_TAG( PT_LONG, 32930)
PR_EMS_AB_ENCRYPT = PROP_TAG( PT_BOOLEAN, 32931)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA = PROP_TAG( PT_MV_TSTRING, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA_A = PROP_TAG( PT_MV_STRING8, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA_W = PROP_TAG( PT_MV_UNICODE, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER = PROP_TAG( PT_MV_TSTRING, 32833)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER_A = PROP_TAG( PT_MV_STRING8, 32833)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER_W = PROP_TAG( PT_MV_UNICODE, 32833)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA = PROP_TAG( PT_TSTRING, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA_A = PROP_TAG( PT_STRING8, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA_W = PROP_TAG( PT_UNICODE, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER = PROP_TAG( PT_TSTRING, 32829)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER_A = PROP_TAG( PT_STRING8, 32829)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER_W = PROP_TAG( PT_UNICODE, 32829)
PR_EMS_AB_EXPAND_DLS_LOCALLY = PROP_TAG( PT_BOOLEAN, 32932)
PR_EMS_AB_EXPIRATION_TIME = PROP_TAG( PT_SYSTIME, 32808)
PR_EMS_AB_EXPORT_CONTAINERS = PROP_TAG( PT_MV_TSTRING, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_A = PROP_TAG( PT_MV_STRING8, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_W = PROP_TAG( PT_MV_UNICODE, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_O = PROP_TAG( PT_OBJECT, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_T = PROP_TAG( PT_MV_TSTRING, 32933)
PR_EMS_AB_EXPORT_CUSTOM_RECIPIENTS = PROP_TAG( PT_BOOLEAN, 32934)
PR_EMS_AB_EXTENDED_CHARS_ALLOWED = PROP_TAG( PT_BOOLEAN, 32935)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1 = PROP_TAG( PT_TSTRING, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1_A = PROP_TAG( PT_STRING8, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1_W = PROP_TAG( PT_UNICODE, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10 = PROP_TAG( PT_TSTRING, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10_A = PROP_TAG( PT_STRING8, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10_W = PROP_TAG( PT_UNICODE, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2 = PROP_TAG( PT_TSTRING, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2_A = PROP_TAG( PT_STRING8, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2_W = PROP_TAG( PT_UNICODE, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3 = PROP_TAG( PT_TSTRING, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3_A = PROP_TAG( PT_STRING8, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3_W = PROP_TAG( PT_UNICODE, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4 = PROP_TAG( PT_TSTRING, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4_A = PROP_TAG( PT_STRING8, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4_W = PROP_TAG( PT_UNICODE, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5 = PROP_TAG( PT_TSTRING, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5_A = PROP_TAG( PT_STRING8, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5_W = PROP_TAG( PT_UNICODE, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6 = PROP_TAG( PT_TSTRING, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6_A = PROP_TAG( PT_STRING8, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6_W = PROP_TAG( PT_UNICODE, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7 = PROP_TAG( PT_TSTRING, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7_A = PROP_TAG( PT_STRING8, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7_W = PROP_TAG( PT_UNICODE, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8 = PROP_TAG( PT_TSTRING, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8_A = PROP_TAG( PT_STRING8, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8_W = PROP_TAG( PT_UNICODE, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9 = PROP_TAG( PT_TSTRING, 32821)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9_A = PROP_TAG( PT_STRING8, 32821)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9_W = PROP_TAG( PT_UNICODE, 32821)
PR_EMS_AB_EXTENSION_DATA = PROP_TAG( PT_MV_BINARY, 32936)
PR_EMS_AB_EXTENSION_NAME = PROP_TAG( PT_MV_TSTRING, 32937)
PR_EMS_AB_EXTENSION_NAME_A = PROP_TAG( PT_MV_STRING8, 32937)
PR_EMS_AB_EXTENSION_NAME_W = PROP_TAG( PT_MV_UNICODE, 32937)
PR_EMS_AB_EXTENSION_NAME_INHERITED = PROP_TAG( PT_MV_TSTRING, 32938)
PR_EMS_AB_EXTENSION_NAME_INHERITED_A = PROP_TAG( PT_MV_STRING8, 32938)
PR_EMS_AB_EXTENSION_NAME_INHERITED_W = PROP_TAG( PT_MV_UNICODE, 32938)
PR_EMS_AB_FACSIMILE_TELEPHONE_NUMBER = PROP_TAG( PT_MV_BINARY, 32939)
PR_EMS_AB_FILE_VERSION = PROP_TAG( PT_BINARY, 32940)
PR_EMS_AB_FILTER_LOCAL_ADDRESSES = PROP_TAG( PT_BOOLEAN, 32941)
PR_EMS_AB_FOLDER_PATHNAME = PROP_TAG( PT_TSTRING, 32772)
PR_EMS_AB_FOLDER_PATHNAME_A = PROP_TAG( PT_STRING8, 32772)
PR_EMS_AB_FOLDER_PATHNAME_W = PROP_TAG( PT_UNICODE, 32772)
PR_EMS_AB_FOLDERS_CONTAINER = PROP_TAG( PT_TSTRING, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_A = PROP_TAG( PT_STRING8, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_W = PROP_TAG( PT_UNICODE, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_O = PROP_TAG( PT_OBJECT, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_T = PROP_TAG( PT_TSTRING, 32942)
PR_EMS_AB_GARBAGE_COLL_PERIOD = PROP_TAG( PT_LONG, 32943)
PR_EMS_AB_GATEWAY_LOCAL_CRED = PROP_TAG( PT_TSTRING, 32944)
PR_EMS_AB_GATEWAY_LOCAL_CRED_A = PROP_TAG( PT_STRING8, 32944)
PR_EMS_AB_GATEWAY_LOCAL_CRED_W = PROP_TAG( PT_UNICODE, 32944)
PR_EMS_AB_GATEWAY_LOCAL_DESIG = PROP_TAG( PT_TSTRING, 32945)
PR_EMS_AB_GATEWAY_LOCAL_DESIG_A = PROP_TAG( PT_STRING8, 32945)
PR_EMS_AB_GATEWAY_LOCAL_DESIG_W = PROP_TAG( PT_UNICODE, 32945)
PR_EMS_AB_GATEWAY_PROXY = PROP_TAG( PT_MV_TSTRING, 32946)
PR_EMS_AB_GATEWAY_PROXY_A = PROP_TAG( PT_MV_STRING8, 32946)
PR_EMS_AB_GATEWAY_PROXY_W = PROP_TAG( PT_MV_UNICODE, 32946)
PR_EMS_AB_GATEWAY_ROUTING_TREE = PROP_TAG( PT_BINARY, 32947)
PR_EMS_AB_GWART_LAST_MODIFIED = PROP_TAG( PT_SYSTIME, 32948)
PR_EMS_AB_HAS_FULL_REPLICA_NCS = PROP_TAG( PT_MV_TSTRING, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_A = PROP_TAG( PT_MV_STRING8, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_W = PROP_TAG( PT_MV_UNICODE, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_O = PROP_TAG( PT_OBJECT, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_T = PROP_TAG( PT_MV_TSTRING, 32949)
PR_EMS_AB_HAS_MASTER_NCS = PROP_TAG( PT_MV_TSTRING, 32950)
PR_EMS_AB_HAS_MASTER_NCS_A = PROP_TAG( PT_MV_STRING8, 32950)
PR_EMS_AB_HAS_MASTER_NCS_W = PROP_TAG( PT_MV_UNICODE, 32950)
PR_EMS_AB_HAS_MASTER_NCS_O = PROP_TAG( PT_OBJECT, 32950)
PR_EMS_AB_HAS_MASTER_NCS_T = PROP_TAG( PT_MV_TSTRING, 32950)
PR_EMS_AB_HELP_DATA16 = PROP_TAG( PT_BINARY, 32826)
PR_EMS_AB_HELP_DATA32 = PROP_TAG( PT_BINARY, 32784)
PR_EMS_AB_HELP_FILE_NAME = PROP_TAG( PT_TSTRING, 32827)
PR_EMS_AB_HELP_FILE_NAME_A = PROP_TAG( PT_STRING8, 32827)
PR_EMS_AB_HELP_FILE_NAME_W = PROP_TAG( PT_UNICODE, 32827)
PR_EMS_AB_HEURISTICS = PROP_TAG( PT_LONG, 32951)
PR_EMS_AB_HIDE_DL_MEMBERSHIP = PROP_TAG( PT_BOOLEAN, 32952)
PR_EMS_AB_HIDE_FROM_ADDRESS_BOOK = PROP_TAG( PT_BOOLEAN, 32953)
PR_EMS_AB_HOME_MDB = PROP_TAG( PT_TSTRING, 32774)
PR_EMS_AB_HOME_MDB_A = PROP_TAG( PT_STRING8, 32774)
PR_EMS_AB_HOME_MDB_W = PROP_TAG( PT_UNICODE, 32774)
PR_EMS_AB_HOME_MDB_O = PROP_TAG( PT_OBJECT, 32774)
PR_EMS_AB_HOME_MDB_T = PROP_TAG( PT_TSTRING, 32774)
# Exchange address-book (EMS AB) property tags, generated from EMSABTAG.H.
# Each logical property appears in several typed variants sharing one
# property id; as the definitions below show, the suffixes map to:
#   (none) -> base type (PT_TSTRING / PT_MV_TSTRING / PT_OBJECT / ...)
#   _A     -> PT_STRING8 / PT_MV_STRING8 variant
#   _W     -> PT_UNICODE / PT_MV_UNICODE variant
#   _O     -> PT_OBJECT variant
#   _T     -> PT_TSTRING / PT_MV_TSTRING variant
PR_EMS_AB_HOME_MDB_BL = PROP_TAG( PT_MV_TSTRING, 32788)
PR_EMS_AB_HOME_MDB_BL_A = PROP_TAG( PT_MV_STRING8, 32788)
PR_EMS_AB_HOME_MDB_BL_W = PROP_TAG( PT_MV_UNICODE, 32788)
PR_EMS_AB_HOME_MDB_BL_O = PROP_TAG( PT_OBJECT, 32788)
PR_EMS_AB_HOME_MDB_BL_T = PROP_TAG( PT_MV_TSTRING, 32788)
PR_EMS_AB_HOME_MTA = PROP_TAG( PT_TSTRING, 32775)
PR_EMS_AB_HOME_MTA_A = PROP_TAG( PT_STRING8, 32775)
PR_EMS_AB_HOME_MTA_W = PROP_TAG( PT_UNICODE, 32775)
PR_EMS_AB_HOME_MTA_O = PROP_TAG( PT_OBJECT, 32775)
PR_EMS_AB_HOME_MTA_T = PROP_TAG( PT_TSTRING, 32775)
PR_EMS_AB_HOME_PUBLIC_SERVER = PROP_TAG( PT_TSTRING, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_A = PROP_TAG( PT_STRING8, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_W = PROP_TAG( PT_UNICODE, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_O = PROP_TAG( PT_OBJECT, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_T = PROP_TAG( PT_TSTRING, 32831)
PR_EMS_AB_IMPORT_CONTAINER = PROP_TAG( PT_TSTRING, 32954)
PR_EMS_AB_IMPORT_CONTAINER_A = PROP_TAG( PT_STRING8, 32954)
PR_EMS_AB_IMPORT_CONTAINER_W = PROP_TAG( PT_UNICODE, 32954)
PR_EMS_AB_IMPORT_CONTAINER_O = PROP_TAG( PT_OBJECT, 32954)
PR_EMS_AB_IMPORT_CONTAINER_T = PROP_TAG( PT_TSTRING, 32954)
PR_EMS_AB_IMPORT_SENSITIVITY = PROP_TAG( PT_LONG, 32955)
PR_EMS_AB_IMPORTED_FROM = PROP_TAG( PT_TSTRING, 32834)
PR_EMS_AB_IMPORTED_FROM_A = PROP_TAG( PT_STRING8, 32834)
PR_EMS_AB_IMPORTED_FROM_W = PROP_TAG( PT_UNICODE, 32834)
PR_EMS_AB_INBOUND_SITES = PROP_TAG( PT_MV_TSTRING, 32956)
PR_EMS_AB_INBOUND_SITES_A = PROP_TAG( PT_MV_STRING8, 32956)
PR_EMS_AB_INBOUND_SITES_W = PROP_TAG( PT_MV_UNICODE, 32956)
PR_EMS_AB_INBOUND_SITES_O = PROP_TAG( PT_OBJECT, 32956)
PR_EMS_AB_INBOUND_SITES_T = PROP_TAG( PT_MV_TSTRING, 32956)
PR_EMS_AB_INSTANCE_TYPE = PROP_TAG( PT_LONG, 32957)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER = PROP_TAG( PT_MV_TSTRING, 32958)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER_A = PROP_TAG( PT_MV_STRING8, 32958)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 32958)
PR_EMS_AB_INVOCATION_ID = PROP_TAG( PT_BINARY, 32959)
PR_EMS_AB_IS_DELETED = PROP_TAG( PT_BOOLEAN, 32960)
PR_EMS_AB_IS_MEMBER_OF_DL = PROP_TAG( PT_OBJECT, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_A = PROP_TAG( PT_MV_STRING8, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_W = PROP_TAG( PT_MV_UNICODE, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_O = PROP_TAG( PT_OBJECT, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_T = PROP_TAG( PT_MV_TSTRING, 32776)
PR_EMS_AB_IS_SINGLE_VALUED = PROP_TAG( PT_BOOLEAN, 32961)
PR_EMS_AB_KCC_STATUS = PROP_TAG( PT_MV_BINARY, 32962)
PR_EMS_AB_KM_SERVER = PROP_TAG( PT_TSTRING, 32781)
PR_EMS_AB_KM_SERVER_A = PROP_TAG( PT_STRING8, 32781)
PR_EMS_AB_KM_SERVER_W = PROP_TAG( PT_UNICODE, 32781)
PR_EMS_AB_KM_SERVER_O = PROP_TAG( PT_OBJECT, 32781)
PR_EMS_AB_KM_SERVER_T = PROP_TAG( PT_TSTRING, 32781)
PR_EMS_AB_KNOWLEDGE_INFORMATION = PROP_TAG( PT_MV_TSTRING, 32963)
PR_EMS_AB_KNOWLEDGE_INFORMATION_A = PROP_TAG( PT_MV_STRING8, 32963)
PR_EMS_AB_KNOWLEDGE_INFORMATION_W = PROP_TAG( PT_MV_UNICODE, 32963)
PR_EMS_AB_LANGUAGE = PROP_TAG( PT_LONG, 33144)
PR_EMS_AB_LDAP_DISPLAY_NAME = PROP_TAG( PT_MV_TSTRING, 33137)
PR_EMS_AB_LDAP_DISPLAY_NAME_A = PROP_TAG( PT_MV_STRING8, 33137)
PR_EMS_AB_LDAP_DISPLAY_NAME_W = PROP_TAG( PT_MV_UNICODE, 33137)
PR_EMS_AB_LINE_WRAP = PROP_TAG( PT_LONG, 32964)
PR_EMS_AB_LINK_ID = PROP_TAG( PT_LONG, 32965)
PR_EMS_AB_LOCAL_BRIDGE_HEAD = PROP_TAG( PT_TSTRING, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_A = PROP_TAG( PT_STRING8, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_W = PROP_TAG( PT_UNICODE, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS = PROP_TAG( PT_TSTRING, 32967)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS_A = PROP_TAG( PT_STRING8, 32967)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS_W = PROP_TAG( PT_UNICODE, 32967)
PR_EMS_AB_LOCAL_INITIAL_TURN = PROP_TAG( PT_BOOLEAN, 32968)
PR_EMS_AB_LOCAL_SCOPE = PROP_TAG( PT_MV_TSTRING, 32969)
PR_EMS_AB_LOCAL_SCOPE_A = PROP_TAG( PT_MV_STRING8, 32969)
PR_EMS_AB_LOCAL_SCOPE_W = PROP_TAG( PT_MV_UNICODE, 32969)
PR_EMS_AB_LOCAL_SCOPE_O = PROP_TAG( PT_OBJECT, 32969)
PR_EMS_AB_LOCAL_SCOPE_T = PROP_TAG( PT_MV_TSTRING, 32969)
PR_EMS_AB_LOG_FILENAME = PROP_TAG( PT_TSTRING, 32970)
PR_EMS_AB_LOG_FILENAME_A = PROP_TAG( PT_STRING8, 32970)
PR_EMS_AB_LOG_FILENAME_W = PROP_TAG( PT_UNICODE, 32970)
PR_EMS_AB_LOG_ROLLOVER_INTERVAL = PROP_TAG( PT_LONG, 32971)
PR_EMS_AB_MAINTAIN_AUTOREPLY_HISTORY = PROP_TAG( PT_BOOLEAN, 32972)
PR_EMS_AB_MANAGER = PROP_TAG( PT_OBJECT, 32773)
PR_EMS_AB_MANAGER_A = PROP_TAG( PT_STRING8, 32773)
PR_EMS_AB_MANAGER_W = PROP_TAG( PT_UNICODE, 32773)
PR_EMS_AB_MANAGER_O = PROP_TAG( PT_OBJECT, 32773)
PR_EMS_AB_MANAGER_T = PROP_TAG( PT_TSTRING, 32773)
PR_EMS_AB_MAPI_DISPLAY_TYPE = PROP_TAG( PT_LONG, 32973)
PR_EMS_AB_MAPI_ID = PROP_TAG( PT_LONG, 32974)
PR_EMS_AB_MAXIMUM_OBJECT_ID = PROP_TAG( PT_BINARY, 33129)
# Mailbox-database (MDB) limits and quota settings.
PR_EMS_AB_MDB_BACKOFF_INTERVAL = PROP_TAG( PT_LONG, 32975)
PR_EMS_AB_MDB_MSG_TIME_OUT_PERIOD = PROP_TAG( PT_LONG, 32976)
PR_EMS_AB_MDB_OVER_QUOTA_LIMIT = PROP_TAG( PT_LONG, 32977)
PR_EMS_AB_MDB_STORAGE_QUOTA = PROP_TAG( PT_LONG, 32978)
PR_EMS_AB_MDB_UNREAD_LIMIT = PROP_TAG( PT_LONG, 32979)
PR_EMS_AB_MDB_USE_DEFAULTS = PROP_TAG( PT_BOOLEAN, 32980)
PR_EMS_AB_MEMBER = PROP_TAG( PT_OBJECT, 32777)
PR_EMS_AB_MEMBER_A = PROP_TAG( PT_MV_STRING8, 32777)
PR_EMS_AB_MEMBER_W = PROP_TAG( PT_MV_UNICODE, 32777)
PR_EMS_AB_MEMBER_O = PROP_TAG( PT_OBJECT, 32777)
PR_EMS_AB_MEMBER_T = PROP_TAG( PT_MV_TSTRING, 32777)
PR_EMS_AB_MESSAGE_TRACKING_ENABLED = PROP_TAG( PT_BOOLEAN, 32981)
# Server/link monitor configuration properties.
PR_EMS_AB_MONITOR_CLOCK = PROP_TAG( PT_BOOLEAN, 32982)
PR_EMS_AB_MONITOR_SERVERS = PROP_TAG( PT_BOOLEAN, 32983)
PR_EMS_AB_MONITOR_SERVICES = PROP_TAG( PT_BOOLEAN, 32984)
PR_EMS_AB_MONITORED_CONFIGURATIONS = PROP_TAG( PT_MV_TSTRING, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_A = PROP_TAG( PT_MV_STRING8, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_W = PROP_TAG( PT_MV_UNICODE, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_O = PROP_TAG( PT_OBJECT, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_T = PROP_TAG( PT_MV_TSTRING, 32985)
PR_EMS_AB_MONITORED_SERVERS = PROP_TAG( PT_MV_TSTRING, 32986)
PR_EMS_AB_MONITORED_SERVERS_A = PROP_TAG( PT_MV_STRING8, 32986)
PR_EMS_AB_MONITORED_SERVERS_W = PROP_TAG( PT_MV_UNICODE, 32986)
PR_EMS_AB_MONITORED_SERVERS_O = PROP_TAG( PT_OBJECT, 32986)
PR_EMS_AB_MONITORED_SERVERS_T = PROP_TAG( PT_MV_TSTRING, 32986)
PR_EMS_AB_MONITORED_SERVICES = PROP_TAG( PT_MV_TSTRING, 32987)
PR_EMS_AB_MONITORED_SERVICES_A = PROP_TAG( PT_MV_STRING8, 32987)
PR_EMS_AB_MONITORED_SERVICES_W = PROP_TAG( PT_MV_UNICODE, 32987)
PR_EMS_AB_MONITORING_ALERT_DELAY = PROP_TAG( PT_LONG, 32988)
PR_EMS_AB_MONITORING_ALERT_UNITS = PROP_TAG( PT_LONG, 32989)
PR_EMS_AB_MONITORING_AVAILABILITY_STYLE = PROP_TAG( PT_LONG, 32990)
PR_EMS_AB_MONITORING_AVAILABILITY_WINDOW = PROP_TAG( PT_BINARY, 32991)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL = PROP_TAG( PT_MV_TSTRING, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_A = PROP_TAG( PT_MV_STRING8, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_W = PROP_TAG( PT_MV_UNICODE, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_O = PROP_TAG( PT_OBJECT, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_T = PROP_TAG( PT_MV_TSTRING, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC = PROP_TAG( PT_MV_TSTRING, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_A = PROP_TAG( PT_MV_STRING8, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_W = PROP_TAG( PT_MV_UNICODE, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_O = PROP_TAG( PT_OBJECT, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_T = PROP_TAG( PT_MV_TSTRING, 32993)
PR_EMS_AB_MONITORING_ESCALATION_PROCEDURE = PROP_TAG( PT_MV_BINARY, 32994)
PR_EMS_AB_MONITORING_HOTSITE_POLL_INTERVAL = PROP_TAG( PT_LONG, 32995)
PR_EMS_AB_MONITORING_HOTSITE_POLL_UNITS = PROP_TAG( PT_LONG, 32996)
PR_EMS_AB_MONITORING_MAIL_UPDATE_INTERVAL = PROP_TAG( PT_LONG, 32997)
PR_EMS_AB_MONITORING_MAIL_UPDATE_UNITS = PROP_TAG( PT_LONG, 32998)
PR_EMS_AB_MONITORING_NORMAL_POLL_INTERVAL = PROP_TAG( PT_LONG, 32999)
PR_EMS_AB_MONITORING_NORMAL_POLL_UNITS = PROP_TAG( PT_LONG, 33000)
PR_EMS_AB_MONITORING_RECIPIENTS = PROP_TAG( PT_MV_TSTRING, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_A = PROP_TAG( PT_MV_STRING8, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_W = PROP_TAG( PT_MV_UNICODE, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_O = PROP_TAG( PT_OBJECT, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_T = PROP_TAG( PT_MV_TSTRING, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR = PROP_TAG( PT_MV_TSTRING, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_A = PROP_TAG( PT_MV_STRING8, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_W = PROP_TAG( PT_MV_UNICODE, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_O = PROP_TAG( PT_OBJECT, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_T = PROP_TAG( PT_MV_TSTRING, 33002)
PR_EMS_AB_MONITORING_RPC_UPDATE_INTERVAL = PROP_TAG( PT_LONG, 33003)
PR_EMS_AB_MONITORING_RPC_UPDATE_UNITS = PROP_TAG( PT_LONG, 33004)
PR_EMS_AB_MONITORING_WARNING_DELAY = PROP_TAG( PT_LONG, 33005)
PR_EMS_AB_MONITORING_WARNING_UNITS = PROP_TAG( PT_LONG, 33006)
# Message transfer agent (MTA) and X.400/X.25 transport settings.
PR_EMS_AB_MTA_LOCAL_CRED = PROP_TAG( PT_TSTRING, 33007)
PR_EMS_AB_MTA_LOCAL_CRED_A = PROP_TAG( PT_STRING8, 33007)
PR_EMS_AB_MTA_LOCAL_CRED_W = PROP_TAG( PT_UNICODE, 33007)
PR_EMS_AB_MTA_LOCAL_DESIG = PROP_TAG( PT_TSTRING, 33008)
PR_EMS_AB_MTA_LOCAL_DESIG_A = PROP_TAG( PT_STRING8, 33008)
PR_EMS_AB_MTA_LOCAL_DESIG_W = PROP_TAG( PT_UNICODE, 33008)
PR_EMS_AB_N_ADDRESS = PROP_TAG( PT_BINARY, 33009)
PR_EMS_AB_N_ADDRESS_TYPE = PROP_TAG( PT_LONG, 33010)
PR_EMS_AB_NETWORK_ADDRESS = PROP_TAG( PT_MV_TSTRING, 33136)
PR_EMS_AB_NETWORK_ADDRESS_A = PROP_TAG( PT_MV_STRING8, 33136)
PR_EMS_AB_NETWORK_ADDRESS_W = PROP_TAG( PT_MV_UNICODE, 33136)
PR_EMS_AB_NNTP_CHARACTER_SET = PROP_TAG( PT_TSTRING, 33149)
PR_EMS_AB_NNTP_CHARACTER_SET_A = PROP_TAG( PT_STRING8, 33149)
PR_EMS_AB_NNTP_CHARACTER_SET_W = PROP_TAG( PT_UNICODE, 33149)
PR_EMS_AB_NNTP_CONTENT_FORMAT = PROP_TAG( PT_TSTRING, 33142)
PR_EMS_AB_NNTP_CONTENT_FORMAT_A = PROP_TAG( PT_STRING8, 33142)
PR_EMS_AB_NNTP_CONTENT_FORMAT_W = PROP_TAG( PT_UNICODE, 33142)
PR_EMS_AB_NT_MACHINE_NAME = PROP_TAG( PT_TSTRING, 33011)
PR_EMS_AB_NT_MACHINE_NAME_A = PROP_TAG( PT_STRING8, 33011)
PR_EMS_AB_NT_MACHINE_NAME_W = PROP_TAG( PT_UNICODE, 33011)
PR_EMS_AB_NT_SECURITY_DESCRIPTOR = PROP_TAG( PT_BINARY, 32787)
PR_EMS_AB_NUM_OF_OPEN_RETRIES = PROP_TAG( PT_LONG, 33012)
PR_EMS_AB_NUM_OF_TRANSFER_RETRIES = PROP_TAG( PT_LONG, 33013)
PR_EMS_AB_OBJ_DIST_NAME = PROP_TAG( PT_TSTRING, 32828)
PR_EMS_AB_OBJ_DIST_NAME_A = PROP_TAG( PT_STRING8, 32828)
PR_EMS_AB_OBJ_DIST_NAME_W = PROP_TAG( PT_UNICODE, 32828)
PR_EMS_AB_OBJ_DIST_NAME_O = PROP_TAG( PT_OBJECT, 32828)
PR_EMS_AB_OBJ_DIST_NAME_T = PROP_TAG( PT_TSTRING, 32828)
PR_EMS_AB_OBJECT_CLASS_CATEGORY = PROP_TAG( PT_LONG, 33014)
PR_EMS_AB_OBJECT_VERSION = PROP_TAG( PT_LONG, 33015)
# Offline address book (OAB) generation settings.
PR_EMS_AB_OFF_LINE_AB_CONTAINERS = PROP_TAG( PT_MV_TSTRING, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_A = PROP_TAG( PT_MV_STRING8, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_W = PROP_TAG( PT_MV_UNICODE, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_O = PROP_TAG( PT_OBJECT, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_T = PROP_TAG( PT_MV_TSTRING, 33016)
PR_EMS_AB_OFF_LINE_AB_SCHEDULE = PROP_TAG( PT_BINARY, 33017)
PR_EMS_AB_OFF_LINE_AB_SERVER = PROP_TAG( PT_TSTRING, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_A = PROP_TAG( PT_STRING8, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_W = PROP_TAG( PT_UNICODE, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_O = PROP_TAG( PT_OBJECT, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_T = PROP_TAG( PT_TSTRING, 33018)
PR_EMS_AB_OFF_LINE_AB_STYLE = PROP_TAG( PT_LONG, 33019)
PR_EMS_AB_OID_TYPE = PROP_TAG( PT_LONG, 33020)
PR_EMS_AB_OM_OBJECT_CLASS = PROP_TAG( PT_BINARY, 33021)
PR_EMS_AB_OM_SYNTAX = PROP_TAG( PT_LONG, 33022)
PR_EMS_AB_OOF_REPLY_TO_ORIGINATOR = PROP_TAG( PT_BOOLEAN, 33023)
PR_EMS_AB_OPEN_RETRY_INTERVAL = PROP_TAG( PT_LONG, 33024)
PR_EMS_AB_ORGANIZATION_NAME = PROP_TAG( PT_MV_TSTRING, 33025)
PR_EMS_AB_ORGANIZATION_NAME_A = PROP_TAG( PT_MV_STRING8, 33025)
PR_EMS_AB_ORGANIZATION_NAME_W = PROP_TAG( PT_MV_UNICODE, 33025)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME = PROP_TAG( PT_MV_TSTRING, 33026)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME_A = PROP_TAG( PT_MV_STRING8, 33026)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME_W = PROP_TAG( PT_MV_UNICODE, 33026)
PR_EMS_AB_ORIGINAL_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33027)
PR_EMS_AB_ORIGINAL_DISPLAY_TABLE_MSDOS = PROP_TAG( PT_BINARY, 33028)
PR_EMS_AB_OUTBOUND_SITES = PROP_TAG( PT_MV_TSTRING, 33029)
PR_EMS_AB_OUTBOUND_SITES_A = PROP_TAG( PT_MV_STRING8, 33029)
PR_EMS_AB_OUTBOUND_SITES_W = PROP_TAG( PT_MV_UNICODE, 33029)
PR_EMS_AB_OUTBOUND_SITES_O = PROP_TAG( PT_OBJECT, 33029)
PR_EMS_AB_OUTBOUND_SITES_T = PROP_TAG( PT_MV_TSTRING, 33029)
PR_EMS_AB_OWNER = PROP_TAG( PT_TSTRING, 32780)
PR_EMS_AB_OWNER_A = PROP_TAG( PT_STRING8, 32780)
PR_EMS_AB_OWNER_W = PROP_TAG( PT_UNICODE, 32780)
PR_EMS_AB_OWNER_O = PROP_TAG( PT_OBJECT, 32780)
PR_EMS_AB_OWNER_T = PROP_TAG( PT_TSTRING, 32780)
PR_EMS_AB_OWNER_BL = PROP_TAG( PT_TSTRING, 32804)
PR_EMS_AB_OWNER_BL_A = PROP_TAG( PT_STRING8, 32804)
PR_EMS_AB_OWNER_BL_W = PROP_TAG( PT_UNICODE, 32804)
PR_EMS_AB_OWNER_BL_O = PROP_TAG( PT_OBJECT, 32804)
PR_EMS_AB_OWNER_BL_T = PROP_TAG( PT_TSTRING, 32804)
PR_EMS_AB_P_SELECTOR = PROP_TAG( PT_BINARY, 33030)
PR_EMS_AB_P_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33031)
PR_EMS_AB_PER_MSG_DIALOG_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33032)
PR_EMS_AB_PER_RECIP_DIALOG_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33033)
PR_EMS_AB_PERIOD_REP_SYNC_TIMES = PROP_TAG( PT_BINARY, 33034)
PR_EMS_AB_PERIOD_REPL_STAGGER = PROP_TAG( PT_LONG, 33035)
PR_EMS_AB_PF_CONTACTS = PROP_TAG( PT_MV_TSTRING, 32824)
PR_EMS_AB_PF_CONTACTS_A = PROP_TAG( PT_MV_STRING8, 32824)
PR_EMS_AB_PF_CONTACTS_W = PROP_TAG( PT_MV_UNICODE, 32824)
PR_EMS_AB_PF_CONTACTS_O = PROP_TAG( PT_OBJECT, 32824)
PR_EMS_AB_PF_CONTACTS_T = PROP_TAG( PT_MV_TSTRING, 32824)
PR_EMS_AB_POP_CHARACTER_SET = PROP_TAG( PT_TSTRING, 33145)
PR_EMS_AB_POP_CHARACTER_SET_A = PROP_TAG( PT_STRING8, 33145)
PR_EMS_AB_POP_CHARACTER_SET_W = PROP_TAG( PT_UNICODE, 33145)
PR_EMS_AB_POP_CONTENT_FORMAT = PROP_TAG( PT_TSTRING, 33143)
PR_EMS_AB_POP_CONTENT_FORMAT_A = PROP_TAG( PT_STRING8, 33143)
PR_EMS_AB_POP_CONTENT_FORMAT_W = PROP_TAG( PT_UNICODE, 33143)
PR_EMS_AB_POSTAL_ADDRESS = PROP_TAG( PT_MV_BINARY, 33036)
PR_EMS_AB_PREFERRED_DELIVERY_METHOD = PROP_TAG( PT_MV_LONG, 33037)
PR_EMS_AB_PRMD = PROP_TAG( PT_TSTRING, 33038)
PR_EMS_AB_PRMD_A = PROP_TAG( PT_STRING8, 33038)
PR_EMS_AB_PRMD_W = PROP_TAG( PT_UNICODE, 33038)
PR_EMS_AB_PROXY_ADDRESSES = PROP_TAG( PT_MV_TSTRING, 32783)
PR_EMS_AB_PROXY_ADDRESSES_A = PROP_TAG( PT_MV_STRING8, 32783)
PR_EMS_AB_PROXY_ADDRESSES_W = PROP_TAG( PT_MV_UNICODE, 32783)
PR_EMS_AB_PROXY_GENERATOR_DLL = PROP_TAG( PT_TSTRING, 33039)
PR_EMS_AB_PROXY_GENERATOR_DLL_A = PROP_TAG( PT_STRING8, 33039)
PR_EMS_AB_PROXY_GENERATOR_DLL_W = PROP_TAG( PT_UNICODE, 33039)
PR_EMS_AB_PUBLIC_DELEGATES = PROP_TAG( PT_OBJECT, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_A = PROP_TAG( PT_MV_STRING8, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_W = PROP_TAG( PT_MV_UNICODE, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_O = PROP_TAG( PT_OBJECT, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_T = PROP_TAG( PT_MV_TSTRING, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_BL = PROP_TAG( PT_MV_TSTRING, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_A = PROP_TAG( PT_MV_STRING8, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_W = PROP_TAG( PT_MV_UNICODE, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_O = PROP_TAG( PT_OBJECT, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_T = PROP_TAG( PT_MV_TSTRING, 33040)
PR_EMS_AB_QUOTA_NOTIFICATION_SCHEDULE = PROP_TAG( PT_BINARY, 33041)
PR_EMS_AB_QUOTA_NOTIFICATION_STYLE = PROP_TAG( PT_LONG, 33042)
PR_EMS_AB_RANGE_LOWER = PROP_TAG( PT_LONG, 33043)
PR_EMS_AB_RANGE_UPPER = PROP_TAG( PT_LONG, 33044)
# RAS (dial-up) connector settings.
PR_EMS_AB_RAS_CALLBACK_NUMBER = PROP_TAG( PT_TSTRING, 33045)
PR_EMS_AB_RAS_CALLBACK_NUMBER_A = PROP_TAG( PT_STRING8, 33045)
PR_EMS_AB_RAS_CALLBACK_NUMBER_W = PROP_TAG( PT_UNICODE, 33045)
PR_EMS_AB_RAS_PHONE_NUMBER = PROP_TAG( PT_TSTRING, 33046)
PR_EMS_AB_RAS_PHONE_NUMBER_A = PROP_TAG( PT_STRING8, 33046)
PR_EMS_AB_RAS_PHONE_NUMBER_W = PROP_TAG( PT_UNICODE, 33046)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME = PROP_TAG( PT_TSTRING, 33047)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME_A = PROP_TAG( PT_STRING8, 33047)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME_W = PROP_TAG( PT_UNICODE, 33047)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME = PROP_TAG( PT_TSTRING, 33048)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME_A = PROP_TAG( PT_STRING8, 33048)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME_W = PROP_TAG( PT_UNICODE, 33048)
PR_EMS_AB_REGISTERED_ADDRESS = PROP_TAG( PT_MV_BINARY, 33049)
PR_EMS_AB_REMOTE_BRIDGE_HEAD = PROP_TAG( PT_TSTRING, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_A = PROP_TAG( PT_STRING8, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_W = PROP_TAG( PT_UNICODE, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS = PROP_TAG( PT_TSTRING, 33051)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS_A = PROP_TAG( PT_STRING8, 33051)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS_W = PROP_TAG( PT_UNICODE, 33051)
PR_EMS_AB_REMOTE_OUT_BH_SERVER = PROP_TAG( PT_TSTRING, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_A = PROP_TAG( PT_STRING8, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_W = PROP_TAG( PT_UNICODE, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_O = PROP_TAG( PT_OBJECT, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_T = PROP_TAG( PT_TSTRING, 33052)
PR_EMS_AB_REMOTE_SITE = PROP_TAG( PT_TSTRING, 33053)
PR_EMS_AB_REMOTE_SITE_A = PROP_TAG( PT_STRING8, 33053)
PR_EMS_AB_REMOTE_SITE_W = PROP_TAG( PT_UNICODE, 33053)
PR_EMS_AB_REMOTE_SITE_O = PROP_TAG( PT_OBJECT, 33053)
PR_EMS_AB_REMOTE_SITE_T = PROP_TAG( PT_TSTRING, 33053)
PR_EMS_AB_REPLICATION_MAIL_MSG_SIZE = PROP_TAG( PT_LONG, 33128)
PR_EMS_AB_REPLICATION_SENSITIVITY = PROP_TAG( PT_LONG, 33054)
PR_EMS_AB_REPLICATION_STAGGER = PROP_TAG( PT_LONG, 33055)
PR_EMS_AB_REPORT_TO_ORIGINATOR = PROP_TAG( PT_BOOLEAN, 33056)
PR_EMS_AB_REPORT_TO_OWNER = PROP_TAG( PT_BOOLEAN, 33057)
PR_EMS_AB_REPORTS = PROP_TAG( PT_OBJECT, 32782)
PR_EMS_AB_REPORTS_A = PROP_TAG( PT_MV_STRING8, 32782)
PR_EMS_AB_REPORTS_W = PROP_TAG( PT_MV_UNICODE, 32782)
PR_EMS_AB_REPORTS_O = PROP_TAG( PT_OBJECT, 32782)
PR_EMS_AB_REPORTS_T = PROP_TAG( PT_MV_TSTRING, 32782)
PR_EMS_AB_REQ_SEQ = PROP_TAG( PT_LONG, 33058)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA = PROP_TAG( PT_TSTRING, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_A = PROP_TAG( PT_STRING8, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_W = PROP_TAG( PT_UNICODE, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_O = PROP_TAG( PT_OBJECT, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_T = PROP_TAG( PT_TSTRING, 33059)
PR_EMS_AB_RID_SERVER = PROP_TAG( PT_TSTRING, 33060)
PR_EMS_AB_RID_SERVER_A = PROP_TAG( PT_STRING8, 33060)
PR_EMS_AB_RID_SERVER_W = PROP_TAG( PT_UNICODE, 33060)
PR_EMS_AB_RID_SERVER_O = PROP_TAG( PT_OBJECT, 33060)
PR_EMS_AB_RID_SERVER_T = PROP_TAG( PT_TSTRING, 33060)
PR_EMS_AB_ROLE_OCCUPANT = PROP_TAG( PT_MV_TSTRING, 33061)
PR_EMS_AB_ROLE_OCCUPANT_A = PROP_TAG( PT_MV_STRING8, 33061)
PR_EMS_AB_ROLE_OCCUPANT_W = PROP_TAG( PT_MV_UNICODE, 33061)
PR_EMS_AB_ROLE_OCCUPANT_O = PROP_TAG( PT_OBJECT, 33061)
PR_EMS_AB_ROLE_OCCUPANT_T = PROP_TAG( PT_MV_TSTRING, 33061)
PR_EMS_AB_ROUTING_LIST = PROP_TAG( PT_MV_TSTRING, 33062)
PR_EMS_AB_ROUTING_LIST_A = PROP_TAG( PT_MV_STRING8, 33062)
PR_EMS_AB_ROUTING_LIST_W = PROP_TAG( PT_MV_UNICODE, 33062)
PR_EMS_AB_RTS_CHECKPOINT_SIZE = PROP_TAG( PT_LONG, 33063)
PR_EMS_AB_RTS_RECOVERY_TIMEOUT = PROP_TAG( PT_LONG, 33064)
PR_EMS_AB_RTS_WINDOW_SIZE = PROP_TAG( PT_LONG, 33065)
PR_EMS_AB_RUNS_ON = PROP_TAG( PT_MV_TSTRING, 33066)
PR_EMS_AB_RUNS_ON_A = PROP_TAG( PT_MV_STRING8, 33066)
PR_EMS_AB_RUNS_ON_W = PROP_TAG( PT_MV_UNICODE, 33066)
PR_EMS_AB_RUNS_ON_O = PROP_TAG( PT_OBJECT, 33066)
PR_EMS_AB_RUNS_ON_T = PROP_TAG( PT_MV_TSTRING, 33066)
PR_EMS_AB_S_SELECTOR = PROP_TAG( PT_BINARY, 33067)
PR_EMS_AB_S_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33068)
PR_EMS_AB_SCHEMA_FLAGS = PROP_TAG( PT_LONG, 33139)
PR_EMS_AB_SCHEMA_VERSION = PROP_TAG( PT_MV_LONG, 33148)
PR_EMS_AB_SEARCH_FLAGS = PROP_TAG( PT_LONG, 33069)
PR_EMS_AB_SEARCH_GUIDE = PROP_TAG( PT_MV_BINARY, 33070)
PR_EMS_AB_SECURITY_PROTOCOL = PROP_TAG( PT_MV_BINARY, 32823)
PR_EMS_AB_SEE_ALSO = PROP_TAG( PT_MV_TSTRING, 33071)
PR_EMS_AB_SEE_ALSO_A = PROP_TAG( PT_MV_STRING8, 33071)
PR_EMS_AB_SEE_ALSO_W = PROP_TAG( PT_MV_UNICODE, 33071)
PR_EMS_AB_SEE_ALSO_O = PROP_TAG( PT_OBJECT, 33071)
PR_EMS_AB_SEE_ALSO_T = PROP_TAG( PT_MV_TSTRING, 33071)
PR_EMS_AB_SERIAL_NUMBER = PROP_TAG( PT_MV_TSTRING, 33072)
PR_EMS_AB_SERIAL_NUMBER_A = PROP_TAG( PT_MV_STRING8, 33072)
PR_EMS_AB_SERIAL_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 33072)
PR_EMS_AB_SERVICE_ACTION_FIRST = PROP_TAG( PT_LONG, 33073)
PR_EMS_AB_SERVICE_ACTION_OTHER = PROP_TAG( PT_LONG, 33074)
PR_EMS_AB_SERVICE_ACTION_SECOND = PROP_TAG( PT_LONG, 33075)
PR_EMS_AB_SERVICE_RESTART_DELAY = PROP_TAG( PT_LONG, 33076)
PR_EMS_AB_SERVICE_RESTART_MESSAGE = PROP_TAG( PT_TSTRING, 33077)
PR_EMS_AB_SERVICE_RESTART_MESSAGE_A = PROP_TAG( PT_STRING8, 33077)
PR_EMS_AB_SERVICE_RESTART_MESSAGE_W = PROP_TAG( PT_UNICODE, 33077)
PR_EMS_AB_SESSION_DISCONNECT_TIMER = PROP_TAG( PT_LONG, 33078)
PR_EMS_AB_SITE_AFFINITY = PROP_TAG( PT_MV_TSTRING, 33079)
PR_EMS_AB_SITE_AFFINITY_A = PROP_TAG( PT_MV_STRING8, 33079)
PR_EMS_AB_SITE_AFFINITY_W = PROP_TAG( PT_MV_UNICODE, 33079)
PR_EMS_AB_SITE_FOLDER_GUID = PROP_TAG( PT_BINARY, 33126)
PR_EMS_AB_SITE_FOLDER_SERVER = PROP_TAG( PT_TSTRING, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_A = PROP_TAG( PT_STRING8, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_W = PROP_TAG( PT_UNICODE, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_O = PROP_TAG( PT_OBJECT, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_T = PROP_TAG( PT_TSTRING, 33127)
PR_EMS_AB_SITE_PROXY_SPACE = PROP_TAG( PT_MV_TSTRING, 33080)
PR_EMS_AB_SITE_PROXY_SPACE_A = PROP_TAG( PT_MV_STRING8, 33080)
PR_EMS_AB_SITE_PROXY_SPACE_W = PROP_TAG( PT_MV_UNICODE, 33080)
PR_EMS_AB_SPACE_LAST_COMPUTED = PROP_TAG( PT_SYSTIME, 33081)
PR_EMS_AB_STREET_ADDRESS = PROP_TAG( PT_TSTRING, 33082)
PR_EMS_AB_STREET_ADDRESS_A = PROP_TAG( PT_STRING8, 33082)
PR_EMS_AB_STREET_ADDRESS_W = PROP_TAG( PT_UNICODE, 33082)
PR_EMS_AB_SUB_REFS = PROP_TAG( PT_MV_TSTRING, 33083)
PR_EMS_AB_SUB_REFS_A = PROP_TAG( PT_MV_STRING8, 33083)
PR_EMS_AB_SUB_REFS_W = PROP_TAG( PT_MV_UNICODE, 33083)
PR_EMS_AB_SUB_REFS_O = PROP_TAG( PT_OBJECT, 33083)
PR_EMS_AB_SUB_REFS_T = PROP_TAG( PT_MV_TSTRING, 33083)
PR_EMS_AB_SUB_SITE = PROP_TAG( PT_TSTRING, 33147)
PR_EMS_AB_SUB_SITE_A = PROP_TAG( PT_STRING8, 33147)
PR_EMS_AB_SUB_SITE_W = PROP_TAG( PT_UNICODE, 33147)
PR_EMS_AB_SUBMISSION_CONT_LENGTH = PROP_TAG( PT_LONG, 33084)
PR_EMS_AB_SUPPORTED_APPLICATION_CONTEXT = PROP_TAG( PT_MV_BINARY, 33085)
PR_EMS_AB_SUPPORTING_STACK = PROP_TAG( PT_MV_TSTRING, 33086)
PR_EMS_AB_SUPPORTING_STACK_A = PROP_TAG( PT_MV_STRING8, 33086)
PR_EMS_AB_SUPPORTING_STACK_W = PROP_TAG( PT_MV_UNICODE, 33086)
PR_EMS_AB_SUPPORTING_STACK_O = PROP_TAG( PT_OBJECT, 33086)
PR_EMS_AB_SUPPORTING_STACK_T = PROP_TAG( PT_MV_TSTRING, 33086)
PR_EMS_AB_SUPPORTING_STACK_BL = PROP_TAG( PT_MV_TSTRING, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_A = PROP_TAG( PT_MV_STRING8, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_W = PROP_TAG( PT_MV_UNICODE, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_O = PROP_TAG( PT_OBJECT, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_T = PROP_TAG( PT_MV_TSTRING, 33087)
PR_EMS_AB_T_SELECTOR = PROP_TAG( PT_BINARY, 33088)
PR_EMS_AB_T_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33089)
PR_EMS_AB_TARGET_ADDRESS = PROP_TAG( PT_TSTRING, 32785)
PR_EMS_AB_TARGET_ADDRESS_A = PROP_TAG( PT_STRING8, 32785)
PR_EMS_AB_TARGET_ADDRESS_W = PROP_TAG( PT_UNICODE, 32785)
PR_EMS_AB_TARGET_MTAS = PROP_TAG( PT_MV_TSTRING, 33090)
PR_EMS_AB_TARGET_MTAS_A = PROP_TAG( PT_MV_STRING8, 33090)
PR_EMS_AB_TARGET_MTAS_W = PROP_TAG( PT_MV_UNICODE, 33090)
PR_EMS_AB_TELEPHONE_NUMBER = PROP_TAG( PT_MV_TSTRING, 32786)
PR_EMS_AB_TELEPHONE_NUMBER_A = PROP_TAG( PT_MV_STRING8, 32786)
PR_EMS_AB_TELEPHONE_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 32786)
PR_EMS_AB_TELETEX_TERMINAL_IDENTIFIER = PROP_TAG( PT_MV_BINARY, 33091)
PR_EMS_AB_TEMP_ASSOC_THRESHOLD = PROP_TAG( PT_LONG, 33092)
PR_EMS_AB_TOMBSTONE_LIFETIME = PROP_TAG( PT_LONG, 33093)
PR_EMS_AB_TRACKING_LOG_PATH_NAME = PROP_TAG( PT_TSTRING, 33094)
PR_EMS_AB_TRACKING_LOG_PATH_NAME_A = PROP_TAG( PT_STRING8, 33094)
PR_EMS_AB_TRACKING_LOG_PATH_NAME_W = PROP_TAG( PT_UNICODE, 33094)
PR_EMS_AB_TRANS_RETRY_MINS = PROP_TAG( PT_LONG, 33095)
PR_EMS_AB_TRANS_TIMEOUT_MINS = PROP_TAG( PT_LONG, 33096)
PR_EMS_AB_TRANSFER_RETRY_INTERVAL = PROP_TAG( PT_LONG, 33097)
PR_EMS_AB_TRANSFER_TIMEOUT_NON_URGENT = PROP_TAG( PT_LONG, 33098)
PR_EMS_AB_TRANSFER_TIMEOUT_NORMAL = PROP_TAG( PT_LONG, 33099)
PR_EMS_AB_TRANSFER_TIMEOUT_URGENT = PROP_TAG( PT_LONG, 33100)
PR_EMS_AB_TRANSLATION_TABLE_USED = PROP_TAG( PT_LONG, 33101)
PR_EMS_AB_TRANSPORT_EXPEDITED_DATA = PROP_TAG( PT_BOOLEAN, 33102)
PR_EMS_AB_TRUST_LEVEL = PROP_TAG( PT_LONG, 33103)
PR_EMS_AB_TURN_REQUEST_THRESHOLD = PROP_TAG( PT_LONG, 33104)
PR_EMS_AB_TWO_WAY_ALTERNATE_FACILITY = PROP_TAG( PT_BOOLEAN, 33105)
PR_EMS_AB_UNAUTH_ORIG_BL = PROP_TAG( PT_MV_TSTRING, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_A = PROP_TAG( PT_MV_STRING8, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_W = PROP_TAG( PT_MV_UNICODE, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_O = PROP_TAG( PT_OBJECT, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_T = PROP_TAG( PT_MV_TSTRING, 33106)
PR_EMS_AB_USE_SERVER_VALUES = PROP_TAG( PT_BOOLEAN, 33150)
PR_EMS_AB_USER_PASSWORD = PROP_TAG( PT_MV_BINARY, 33107)
# Directory update sequence numbers (USNs) for replication tracking.
PR_EMS_AB_USN_CHANGED = PROP_TAG( PT_LONG, 32809)
PR_EMS_AB_USN_CREATED = PROP_TAG( PT_LONG, 33108)
PR_EMS_AB_USN_DSA_LAST_OBJ_REMOVED = PROP_TAG( PT_LONG, 33109)
PR_EMS_AB_USN_INTERSITE = PROP_TAG( PT_LONG, 33146)
PR_EMS_AB_USN_LAST_OBJ_REM = PROP_TAG( PT_LONG, 33110)
PR_EMS_AB_USN_SOURCE = PROP_TAG( PT_LONG, 33111)
PR_EMS_AB_WWW_HOME_PAGE = PROP_TAG( PT_TSTRING, 33141)
PR_EMS_AB_WWW_HOME_PAGE_A = PROP_TAG( PT_STRING8, 33141)
PR_EMS_AB_WWW_HOME_PAGE_W = PROP_TAG( PT_UNICODE, 33141)
PR_EMS_AB_X121_ADDRESS = PROP_TAG( PT_MV_TSTRING, 33112)
PR_EMS_AB_X121_ADDRESS_A = PROP_TAG( PT_MV_STRING8, 33112)
PR_EMS_AB_X121_ADDRESS_W = PROP_TAG( PT_MV_UNICODE, 33112)
PR_EMS_AB_X25_CALL_USER_DATA_INCOMING = PROP_TAG( PT_BINARY, 33113)
PR_EMS_AB_X25_CALL_USER_DATA_OUTGOING = PROP_TAG( PT_BINARY, 33114)
PR_EMS_AB_X25_FACILITIES_DATA_INCOMING = PROP_TAG( PT_BINARY, 33115)
PR_EMS_AB_X25_FACILITIES_DATA_OUTGOING = PROP_TAG( PT_BINARY, 33116)
PR_EMS_AB_X25_LEASED_LINE_PORT = PROP_TAG( PT_BINARY, 33117)
PR_EMS_AB_X25_LEASED_OR_SWITCHED = PROP_TAG( PT_BOOLEAN, 33118)
PR_EMS_AB_X25_REMOTE_MTA_PHONE = PROP_TAG( PT_TSTRING, 33119)
PR_EMS_AB_X25_REMOTE_MTA_PHONE_A = PROP_TAG( PT_STRING8, 33119)
PR_EMS_AB_X25_REMOTE_MTA_PHONE_W = PROP_TAG( PT_UNICODE, 33119)
PR_EMS_AB_X400_ATTACHMENT_TYPE = PROP_TAG( PT_BINARY, 33120)
PR_EMS_AB_X400_SELECTOR_SYNTAX = PROP_TAG( PT_LONG, 33121)
PR_EMS_AB_X500_ACCESS_CONTROL_LIST = PROP_TAG( PT_BINARY, 33122)
PR_EMS_AB_XMIT_TIMEOUT_NON_URGENT = PROP_TAG( PT_LONG, 33123)
PR_EMS_AB_XMIT_TIMEOUT_NORMAL = PROP_TAG( PT_LONG, 33124)
PR_EMS_AB_XMIT_TIMEOUT_URGENT = PROP_TAG( PT_LONG, 33125)
| 73.588652 | 81 | 0.682697 | # Converted "manually" from EMSABTAG.H
from mapitags import PT_UNSPECIFIED, PT_NULL, PT_I2, PT_LONG, PT_R4, \
PT_DOUBLE, PT_CURRENCY, PT_APPTIME, PT_ERROR, \
PT_BOOLEAN, PT_OBJECT, PT_I8, PT_STRING8, PT_UNICODE, \
PT_SYSTIME, PT_CLSID, PT_BINARY, PT_SHORT, PT_I4, \
PT_FLOAT, PT_DOUBLE, PT_LONGLONG, PT_TSTRING, \
PT_MV_I2, PT_MV_LONG, PT_MV_R4, PT_MV_DOUBLE, \
PT_MV_CURRENCY, PT_MV_APPTIME, PT_MV_SYSTIME, \
PT_MV_STRING8, PT_MV_BINARY, PT_MV_UNICODE, \
PT_MV_CLSID, PT_MV_I8, PT_MV_SHORT, PT_MV_I4, \
PT_MV_FLOAT, PT_MV_R8, PT_MV_LONGLONG, PT_MV_TSTRING, \
PROP_TAG
# Address-book container display flags (bit values, can be OR-ed together).
AB_SHOW_PHANTOMS = 2
AB_SHOW_OTHERS = 4
# Flags for ulFlag on ResolveNames
EMS_AB_ADDRESS_LOOKUP = 1
# Constructed, but externally visible.
# These tags are synthesized by the EMS address-book provider rather than
# stored in the directory schema; their ids count down from 65534.
PR_EMS_AB_SERVER = PROP_TAG( PT_TSTRING, 65534)
PR_EMS_AB_SERVER_A = PROP_TAG( PT_STRING8, 65534)
PR_EMS_AB_SERVER_W = PROP_TAG( PT_UNICODE, 65534)
PR_EMS_AB_CONTAINERID = PROP_TAG( PT_LONG, 65533)
# Alias: the DOS entry id shares the container-id tag value.
PR_EMS_AB_DOS_ENTRYID = PR_EMS_AB_CONTAINERID
PR_EMS_AB_PARENT_ENTRYID = PROP_TAG( PT_BINARY, 65532)
PR_EMS_AB_IS_MASTER = PROP_TAG(PT_BOOLEAN, 65531)
PR_EMS_AB_OBJECT_OID = PROP_TAG(PT_BINARY, 65530)
PR_EMS_AB_HIERARCHY_PATH = PROP_TAG(PT_TSTRING, 65529)
PR_EMS_AB_HIERARCHY_PATH_A = PROP_TAG(PT_STRING8, 65529)
PR_EMS_AB_HIERARCHY_PATH_W = PROP_TAG(PT_UNICODE, 65529)
PR_EMS_AB_CHILD_RDNS = PROP_TAG(PT_MV_STRING8, 65528)
# Lowest property id used by the constructed tags above.
MIN_EMS_AB_CONSTRUCTED_PROP_ID = 65528
PR_EMS_AB_OTHER_RECIPS = PROP_TAG(PT_OBJECT, 61440)
# Prop tags defined in the schema.
PR_EMS_AB_DISPLAY_NAME_PRINTABLE = PROP_TAG(PT_TSTRING, 14847)
PR_EMS_AB_DISPLAY_NAME_PRINTABLE_A = PROP_TAG(PT_STRING8, 14847)
PR_EMS_AB_DISPLAY_NAME_PRINTABLE_W = PROP_TAG(PT_UNICODE, 14847)
PR_EMS_AB_ACCESS_CATEGORY = PROP_TAG( PT_LONG, 32836)
PR_EMS_AB_ACTIVATION_SCHEDULE = PROP_TAG( PT_BINARY, 32837)
PR_EMS_AB_ACTIVATION_STYLE = PROP_TAG( PT_LONG, 32838)
PR_EMS_AB_ADDRESS_ENTRY_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 32791)
PR_EMS_AB_ADDRESS_ENTRY_DISPLAY_TABLE_MSDOS = PROP_TAG( PT_BINARY, 32839)
PR_EMS_AB_ADDRESS_SYNTAX = PROP_TAG( PT_BINARY, 32792)
PR_EMS_AB_ADDRESS_TYPE = PROP_TAG( PT_TSTRING, 32840)
PR_EMS_AB_ADDRESS_TYPE_A = PROP_TAG( PT_STRING8, 32840)
PR_EMS_AB_ADDRESS_TYPE_W = PROP_TAG( PT_UNICODE, 32840)
PR_EMS_AB_ADMD = PROP_TAG( PT_TSTRING, 32841)
PR_EMS_AB_ADMD_A = PROP_TAG( PT_STRING8, 32841)
PR_EMS_AB_ADMD_W = PROP_TAG( PT_UNICODE, 32841)
PR_EMS_AB_ADMIN_DESCRIPTION = PROP_TAG( PT_TSTRING, 32842)
PR_EMS_AB_ADMIN_DESCRIPTION_A = PROP_TAG( PT_STRING8, 32842)
PR_EMS_AB_ADMIN_DESCRIPTION_W = PROP_TAG( PT_UNICODE, 32842)
PR_EMS_AB_ADMIN_DISPLAY_NAME = PROP_TAG( PT_TSTRING, 32843)
PR_EMS_AB_ADMIN_DISPLAY_NAME_A = PROP_TAG( PT_STRING8, 32843)
PR_EMS_AB_ADMIN_DISPLAY_NAME_W = PROP_TAG( PT_UNICODE, 32843)
PR_EMS_AB_ADMIN_EXTENSION_DLL = PROP_TAG( PT_TSTRING, 32844)
PR_EMS_AB_ADMIN_EXTENSION_DLL_A = PROP_TAG( PT_STRING8, 32844)
PR_EMS_AB_ADMIN_EXTENSION_DLL_W = PROP_TAG( PT_UNICODE, 32844)
PR_EMS_AB_ALIASED_OBJECT_NAME = PROP_TAG( PT_TSTRING, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_A = PROP_TAG( PT_STRING8, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_W = PROP_TAG( PT_UNICODE, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_O = PROP_TAG( PT_OBJECT, 32845)
PR_EMS_AB_ALIASED_OBJECT_NAME_T = PROP_TAG( PT_TSTRING, 32845)
PR_EMS_AB_ALT_RECIPIENT = PROP_TAG( PT_TSTRING, 32846)
PR_EMS_AB_ALT_RECIPIENT_A = PROP_TAG( PT_STRING8, 32846)
PR_EMS_AB_ALT_RECIPIENT_W = PROP_TAG( PT_UNICODE, 32846)
PR_EMS_AB_ALT_RECIPIENT_O = PROP_TAG( PT_OBJECT, 32846)
PR_EMS_AB_ALT_RECIPIENT_T = PROP_TAG( PT_TSTRING, 32846)
PR_EMS_AB_ALT_RECIPIENT_BL = PROP_TAG( PT_MV_TSTRING, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_A = PROP_TAG( PT_MV_STRING8, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_W = PROP_TAG( PT_MV_UNICODE, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_O = PROP_TAG( PT_OBJECT, 32847)
PR_EMS_AB_ALT_RECIPIENT_BL_T = PROP_TAG( PT_MV_TSTRING, 32847)
PR_EMS_AB_ANCESTOR_ID = PROP_TAG( PT_BINARY, 32848)
PR_EMS_AB_ASSOC_NT_ACCOUNT = PROP_TAG( PT_BINARY, 32807)
PR_EMS_AB_ASSOC_REMOTE_DXA = PROP_TAG( PT_MV_TSTRING, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_A = PROP_TAG( PT_MV_STRING8, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_W = PROP_TAG( PT_MV_UNICODE, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_O = PROP_TAG( PT_OBJECT, 32849)
PR_EMS_AB_ASSOC_REMOTE_DXA_T = PROP_TAG( PT_MV_TSTRING, 32849)
PR_EMS_AB_ASSOCIATION_LIFETIME = PROP_TAG( PT_LONG, 32850)
PR_EMS_AB_AUTH_ORIG_BL = PROP_TAG( PT_MV_TSTRING, 32851)
PR_EMS_AB_AUTH_ORIG_BL_A = PROP_TAG( PT_MV_STRING8, 32851)
PR_EMS_AB_AUTH_ORIG_BL_W = PROP_TAG( PT_MV_UNICODE, 32851)
PR_EMS_AB_AUTH_ORIG_BL_O = PROP_TAG( PT_OBJECT, 32851)
PR_EMS_AB_AUTH_ORIG_BL_T = PROP_TAG( PT_MV_TSTRING, 32851)
PR_EMS_AB_AUTHORITY_REVOCATION_LIST = PROP_TAG( PT_MV_BINARY, 32806)
PR_EMS_AB_AUTHORIZED_DOMAIN = PROP_TAG( PT_TSTRING, 32852)
PR_EMS_AB_AUTHORIZED_DOMAIN_A = PROP_TAG( PT_STRING8, 32852)
PR_EMS_AB_AUTHORIZED_DOMAIN_W = PROP_TAG( PT_UNICODE, 32852)
PR_EMS_AB_AUTHORIZED_PASSWORD = PROP_TAG( PT_BINARY, 32853)
PR_EMS_AB_AUTHORIZED_USER = PROP_TAG( PT_TSTRING, 32854)
PR_EMS_AB_AUTHORIZED_USER_A = PROP_TAG( PT_STRING8, 32854)
PR_EMS_AB_AUTHORIZED_USER_W = PROP_TAG( PT_UNICODE, 32854)
PR_EMS_AB_AUTOREPLY = PROP_TAG( PT_BOOLEAN, 32779)
PR_EMS_AB_AUTOREPLY_MESSAGE = PROP_TAG( PT_TSTRING, 32778)
PR_EMS_AB_AUTOREPLY_MESSAGE_A = PROP_TAG( PT_STRING8, 32778)
PR_EMS_AB_AUTOREPLY_MESSAGE_W = PROP_TAG( PT_UNICODE, 32778)
PR_EMS_AB_AUTOREPLY_SUBJECT = PROP_TAG( PT_TSTRING, 32830)
PR_EMS_AB_AUTOREPLY_SUBJECT_A = PROP_TAG( PT_STRING8, 32830)
PR_EMS_AB_AUTOREPLY_SUBJECT_W = PROP_TAG( PT_UNICODE, 32830)
PR_EMS_AB_BRIDGEHEAD_SERVERS = PROP_TAG( PT_MV_TSTRING, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_A = PROP_TAG( PT_MV_STRING8, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_W = PROP_TAG( PT_MV_UNICODE, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_O = PROP_TAG( PT_OBJECT, 33140)
PR_EMS_AB_BRIDGEHEAD_SERVERS_T = PROP_TAG( PT_MV_TSTRING, 33140)
PR_EMS_AB_BUSINESS_CATEGORY = PROP_TAG( PT_MV_TSTRING, 32855)
PR_EMS_AB_BUSINESS_CATEGORY_A = PROP_TAG( PT_MV_STRING8, 32855)
PR_EMS_AB_BUSINESS_CATEGORY_W = PROP_TAG( PT_MV_UNICODE, 32855)
PR_EMS_AB_BUSINESS_ROLES = PROP_TAG( PT_BINARY, 32803)
PR_EMS_AB_CA_CERTIFICATE = PROP_TAG( PT_MV_BINARY, 32771)
PR_EMS_AB_CAN_CREATE_PF = PROP_TAG( PT_MV_TSTRING, 32856)
PR_EMS_AB_CAN_CREATE_PF_A = PROP_TAG( PT_MV_STRING8, 32856)
PR_EMS_AB_CAN_CREATE_PF_W = PROP_TAG( PT_MV_UNICODE, 32856)
PR_EMS_AB_CAN_CREATE_PF_O = PROP_TAG( PT_OBJECT, 32856)
PR_EMS_AB_CAN_CREATE_PF_T = PROP_TAG( PT_MV_TSTRING, 32856)
PR_EMS_AB_CAN_CREATE_PF_BL = PROP_TAG( PT_MV_TSTRING, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_A = PROP_TAG( PT_MV_STRING8, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_W = PROP_TAG( PT_MV_UNICODE, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_O = PROP_TAG( PT_OBJECT, 32857)
PR_EMS_AB_CAN_CREATE_PF_BL_T = PROP_TAG( PT_MV_TSTRING, 32857)
PR_EMS_AB_CAN_CREATE_PF_DL = PROP_TAG( PT_MV_TSTRING, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_A = PROP_TAG( PT_MV_STRING8, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_W = PROP_TAG( PT_MV_UNICODE, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_O = PROP_TAG( PT_OBJECT, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_T = PROP_TAG( PT_MV_TSTRING, 32858)
PR_EMS_AB_CAN_CREATE_PF_DL_BL = PROP_TAG( PT_MV_TSTRING, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_A = PROP_TAG( PT_MV_STRING8, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_W = PROP_TAG( PT_MV_UNICODE, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_O = PROP_TAG( PT_OBJECT, 32859)
PR_EMS_AB_CAN_CREATE_PF_DL_BL_T = PROP_TAG( PT_MV_TSTRING, 32859)
PR_EMS_AB_CAN_NOT_CREATE_PF = PROP_TAG( PT_MV_TSTRING, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_A = PROP_TAG( PT_MV_STRING8, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_W = PROP_TAG( PT_MV_UNICODE, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_O = PROP_TAG( PT_OBJECT, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_T = PROP_TAG( PT_MV_TSTRING, 32860)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL = PROP_TAG( PT_MV_TSTRING, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_A = PROP_TAG( PT_MV_STRING8, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_W = PROP_TAG( PT_MV_UNICODE, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_O = PROP_TAG( PT_OBJECT, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_BL_T = PROP_TAG( PT_MV_TSTRING, 32861)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL = PROP_TAG( PT_MV_TSTRING, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_A = PROP_TAG( PT_MV_STRING8, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_W = PROP_TAG( PT_MV_UNICODE, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_O = PROP_TAG( PT_OBJECT, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_T = PROP_TAG( PT_MV_TSTRING, 32862)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL = PROP_TAG( PT_MV_TSTRING, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_A = PROP_TAG( PT_MV_STRING8, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_W = PROP_TAG( PT_MV_UNICODE, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_O = PROP_TAG( PT_OBJECT, 32863)
PR_EMS_AB_CAN_NOT_CREATE_PF_DL_BL_T = PROP_TAG( PT_MV_TSTRING, 32863)
PR_EMS_AB_CAN_PRESERVE_DNS = PROP_TAG( PT_BOOLEAN, 32864)
PR_EMS_AB_CERTIFICATE_REVOCATION_LIST = PROP_TAG( PT_BINARY, 32790)
PR_EMS_AB_CLOCK_ALERT_OFFSET = PROP_TAG( PT_LONG, 32865)
PR_EMS_AB_CLOCK_ALERT_REPAIR = PROP_TAG( PT_BOOLEAN, 32866)
PR_EMS_AB_CLOCK_WARNING_OFFSET = PROP_TAG( PT_LONG, 32867)
PR_EMS_AB_CLOCK_WARNING_REPAIR = PROP_TAG( PT_BOOLEAN, 32868)
PR_EMS_AB_COMPUTER_NAME = PROP_TAG( PT_TSTRING, 32869)
PR_EMS_AB_COMPUTER_NAME_A = PROP_TAG( PT_STRING8, 32869)
PR_EMS_AB_COMPUTER_NAME_W = PROP_TAG( PT_UNICODE, 32869)
PR_EMS_AB_CONNECTED_DOMAINS = PROP_TAG( PT_MV_TSTRING, 32870)
PR_EMS_AB_CONNECTED_DOMAINS_A = PROP_TAG( PT_MV_STRING8, 32870)
PR_EMS_AB_CONNECTED_DOMAINS_W = PROP_TAG( PT_MV_UNICODE, 32870)
PR_EMS_AB_CONTAINER_INFO = PROP_TAG( PT_LONG, 32871)
PR_EMS_AB_COST = PROP_TAG( PT_LONG, 32872)
PR_EMS_AB_COUNTRY_NAME = PROP_TAG( PT_TSTRING, 32873)
PR_EMS_AB_COUNTRY_NAME_A = PROP_TAG( PT_STRING8, 32873)
PR_EMS_AB_COUNTRY_NAME_W = PROP_TAG( PT_UNICODE, 32873)
PR_EMS_AB_CROSS_CERTIFICATE_PAIR = PROP_TAG( PT_MV_BINARY, 32805)
PR_EMS_AB_DELIV_CONT_LENGTH = PROP_TAG( PT_LONG, 32874)
PR_EMS_AB_DELIV_EITS = PROP_TAG( PT_MV_BINARY, 32875)
PR_EMS_AB_DELIV_EXT_CONT_TYPES = PROP_TAG( PT_MV_BINARY, 32876)
PR_EMS_AB_DELIVER_AND_REDIRECT = PROP_TAG( PT_BOOLEAN, 32877)
PR_EMS_AB_DELIVERY_MECHANISM = PROP_TAG( PT_LONG, 32878)
PR_EMS_AB_DESCRIPTION = PROP_TAG( PT_MV_TSTRING, 32879)
PR_EMS_AB_DESCRIPTION_A = PROP_TAG( PT_MV_STRING8, 32879)
PR_EMS_AB_DESCRIPTION_W = PROP_TAG( PT_MV_UNICODE, 32879)
PR_EMS_AB_DESTINATION_INDICATOR = PROP_TAG( PT_MV_TSTRING, 32880)
PR_EMS_AB_DESTINATION_INDICATOR_A = PROP_TAG( PT_MV_STRING8, 32880)
PR_EMS_AB_DESTINATION_INDICATOR_W = PROP_TAG( PT_MV_UNICODE, 32880)
PR_EMS_AB_DIAGNOSTIC_REG_KEY = PROP_TAG( PT_TSTRING, 32881)
PR_EMS_AB_DIAGNOSTIC_REG_KEY_A = PROP_TAG( PT_STRING8, 32881)
PR_EMS_AB_DIAGNOSTIC_REG_KEY_W = PROP_TAG( PT_UNICODE, 32881)
PR_EMS_AB_DISPLAY_NAME_OVERRIDE = PROP_TAG( PT_BOOLEAN, 32769)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL = PROP_TAG( PT_MV_TSTRING, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_A = PROP_TAG( PT_MV_STRING8, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_W = PROP_TAG( PT_MV_UNICODE, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_O = PROP_TAG( PT_OBJECT, 32882)
PR_EMS_AB_DL_MEM_REJECT_PERMS_BL_T = PROP_TAG( PT_MV_TSTRING, 32882)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL = PROP_TAG( PT_MV_TSTRING, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_A = PROP_TAG( PT_MV_STRING8, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_W = PROP_TAG( PT_MV_UNICODE, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_O = PROP_TAG( PT_OBJECT, 32883)
PR_EMS_AB_DL_MEM_SUBMIT_PERMS_BL_T = PROP_TAG( PT_MV_TSTRING, 32883)
PR_EMS_AB_DL_MEMBER_RULE = PROP_TAG( PT_MV_BINARY, 32884)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP = PROP_TAG( PT_TSTRING, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_A = PROP_TAG( PT_STRING8, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_W = PROP_TAG( PT_UNICODE, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_O = PROP_TAG( PT_OBJECT, 32885)
PR_EMS_AB_DOMAIN_DEF_ALT_RECIP_T = PROP_TAG( PT_TSTRING, 32885)
PR_EMS_AB_DOMAIN_NAME = PROP_TAG( PT_TSTRING, 32886)
PR_EMS_AB_DOMAIN_NAME_A = PROP_TAG( PT_STRING8, 32886)
PR_EMS_AB_DOMAIN_NAME_W = PROP_TAG( PT_UNICODE, 32886)
PR_EMS_AB_DSA_SIGNATURE = PROP_TAG( PT_BINARY, 32887)
PR_EMS_AB_DXA_ADMIN_COPY = PROP_TAG( PT_BOOLEAN, 32888)
PR_EMS_AB_DXA_ADMIN_FORWARD = PROP_TAG( PT_BOOLEAN, 32889)
PR_EMS_AB_DXA_ADMIN_UPDATE = PROP_TAG( PT_LONG, 32890)
PR_EMS_AB_DXA_APPEND_REQCN = PROP_TAG( PT_BOOLEAN, 32891)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST = PROP_TAG( PT_MV_TSTRING, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_A = PROP_TAG( PT_MV_STRING8, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_W = PROP_TAG( PT_MV_UNICODE, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_O = PROP_TAG( PT_OBJECT, 32892)
PR_EMS_AB_DXA_CONF_CONTAINER_LIST_T = PROP_TAG( PT_MV_TSTRING, 32892)
PR_EMS_AB_DXA_CONF_REQ_TIME = PROP_TAG( PT_SYSTIME, 32893)
PR_EMS_AB_DXA_CONF_SEQ = PROP_TAG( PT_TSTRING, 32894)
PR_EMS_AB_DXA_CONF_SEQ_A = PROP_TAG( PT_STRING8, 32894)
PR_EMS_AB_DXA_CONF_SEQ_W = PROP_TAG( PT_UNICODE, 32894)
PR_EMS_AB_DXA_CONF_SEQ_USN = PROP_TAG( PT_LONG, 32895)
PR_EMS_AB_DXA_EXCHANGE_OPTIONS = PROP_TAG( PT_LONG, 32896)
PR_EMS_AB_DXA_EXPORT_NOW = PROP_TAG( PT_BOOLEAN, 32897)
PR_EMS_AB_DXA_FLAGS = PROP_TAG( PT_LONG, 32898)
PR_EMS_AB_DXA_IMP_SEQ = PROP_TAG( PT_TSTRING, 32899)
PR_EMS_AB_DXA_IMP_SEQ_A = PROP_TAG( PT_STRING8, 32899)
PR_EMS_AB_DXA_IMP_SEQ_W = PROP_TAG( PT_UNICODE, 32899)
PR_EMS_AB_DXA_IMP_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32900)
PR_EMS_AB_DXA_IMP_SEQ_USN = PROP_TAG( PT_LONG, 32901)
PR_EMS_AB_DXA_IMPORT_NOW = PROP_TAG( PT_BOOLEAN, 32902)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP = PROP_TAG( PT_MV_TSTRING, 32903)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP_A = PROP_TAG( PT_MV_STRING8, 32903)
PR_EMS_AB_DXA_IN_TEMPLATE_MAP_W = PROP_TAG( PT_MV_UNICODE, 32903)
PR_EMS_AB_DXA_LOCAL_ADMIN = PROP_TAG( PT_TSTRING, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_A = PROP_TAG( PT_STRING8, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_W = PROP_TAG( PT_UNICODE, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_O = PROP_TAG( PT_OBJECT, 32904)
PR_EMS_AB_DXA_LOCAL_ADMIN_T = PROP_TAG( PT_TSTRING, 32904)
PR_EMS_AB_DXA_LOGGING_LEVEL = PROP_TAG( PT_LONG, 32905)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE = PROP_TAG( PT_TSTRING, 32906)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE_A = PROP_TAG( PT_STRING8, 32906)
PR_EMS_AB_DXA_NATIVE_ADDRESS_TYPE_W = PROP_TAG( PT_UNICODE, 32906)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP = PROP_TAG( PT_MV_TSTRING, 32907)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP_A = PROP_TAG( PT_MV_STRING8, 32907)
PR_EMS_AB_DXA_OUT_TEMPLATE_MAP_W = PROP_TAG( PT_MV_UNICODE, 32907)
PR_EMS_AB_DXA_PASSWORD = PROP_TAG( PT_TSTRING, 32908)
PR_EMS_AB_DXA_PASSWORD_A = PROP_TAG( PT_STRING8, 32908)
PR_EMS_AB_DXA_PASSWORD_W = PROP_TAG( PT_UNICODE, 32908)
PR_EMS_AB_DXA_PREV_EXCHANGE_OPTIONS = PROP_TAG( PT_LONG, 32909)
PR_EMS_AB_DXA_PREV_EXPORT_NATIVE_ONLY = PROP_TAG( PT_BOOLEAN, 32910)
PR_EMS_AB_DXA_PREV_IN_EXCHANGE_SENSITIVITY = PROP_TAG( PT_LONG, 32911)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES = PROP_TAG( PT_TSTRING, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_A = PROP_TAG( PT_STRING8, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_W = PROP_TAG( PT_UNICODE, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_O = PROP_TAG( PT_OBJECT, 32912)
PR_EMS_AB_DXA_PREV_REMOTE_ENTRIES_T = PROP_TAG( PT_TSTRING, 32912)
PR_EMS_AB_DXA_PREV_REPLICATION_SENSITIVITY = PROP_TAG( PT_LONG, 32913)
PR_EMS_AB_DXA_PREV_TEMPLATE_OPTIONS = PROP_TAG( PT_LONG, 32914)
PR_EMS_AB_DXA_PREV_TYPES = PROP_TAG( PT_LONG, 32915)
PR_EMS_AB_DXA_RECIPIENT_CP = PROP_TAG( PT_TSTRING, 32916)
PR_EMS_AB_DXA_RECIPIENT_CP_A = PROP_TAG( PT_STRING8, 32916)
PR_EMS_AB_DXA_RECIPIENT_CP_W = PROP_TAG( PT_UNICODE, 32916)
PR_EMS_AB_DXA_REMOTE_CLIENT = PROP_TAG( PT_TSTRING, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_A = PROP_TAG( PT_STRING8, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_W = PROP_TAG( PT_UNICODE, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_O = PROP_TAG( PT_OBJECT, 32917)
PR_EMS_AB_DXA_REMOTE_CLIENT_T = PROP_TAG( PT_TSTRING, 32917)
PR_EMS_AB_DXA_REQ_SEQ = PROP_TAG( PT_TSTRING, 32918)
PR_EMS_AB_DXA_REQ_SEQ_A = PROP_TAG( PT_STRING8, 32918)
PR_EMS_AB_DXA_REQ_SEQ_W = PROP_TAG( PT_UNICODE, 32918)
PR_EMS_AB_DXA_REQ_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32919)
PR_EMS_AB_DXA_REQ_SEQ_USN = PROP_TAG( PT_LONG, 32920)
PR_EMS_AB_DXA_REQNAME = PROP_TAG( PT_TSTRING, 32921)
PR_EMS_AB_DXA_REQNAME_A = PROP_TAG( PT_STRING8, 32921)
PR_EMS_AB_DXA_REQNAME_W = PROP_TAG( PT_UNICODE, 32921)
PR_EMS_AB_DXA_SVR_SEQ = PROP_TAG( PT_TSTRING, 32922)
PR_EMS_AB_DXA_SVR_SEQ_A = PROP_TAG( PT_STRING8, 32922)
PR_EMS_AB_DXA_SVR_SEQ_W = PROP_TAG( PT_UNICODE, 32922)
PR_EMS_AB_DXA_SVR_SEQ_TIME = PROP_TAG( PT_SYSTIME, 32923)
PR_EMS_AB_DXA_SVR_SEQ_USN = PROP_TAG( PT_LONG, 32924)
PR_EMS_AB_DXA_TASK = PROP_TAG( PT_LONG, 32925)
PR_EMS_AB_DXA_TEMPLATE_OPTIONS = PROP_TAG( PT_LONG, 32926)
PR_EMS_AB_DXA_TEMPLATE_TIMESTAMP = PROP_TAG( PT_SYSTIME, 32927)
PR_EMS_AB_DXA_TYPES = PROP_TAG( PT_LONG, 32928)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST = PROP_TAG( PT_MV_TSTRING, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_A = PROP_TAG( PT_MV_STRING8, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_W = PROP_TAG( PT_MV_UNICODE, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_O = PROP_TAG( PT_OBJECT, 32929)
PR_EMS_AB_DXA_UNCONF_CONTAINER_LIST_T = PROP_TAG( PT_MV_TSTRING, 32929)
PR_EMS_AB_ENABLED_PROTOCOLS = PROP_TAG( PT_LONG, 33151)
PR_EMS_AB_ENCAPSULATION_METHOD = PROP_TAG( PT_LONG, 32930)
PR_EMS_AB_ENCRYPT = PROP_TAG( PT_BOOLEAN, 32931)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA = PROP_TAG( PT_MV_TSTRING, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA_A = PROP_TAG( PT_MV_STRING8, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_NA_W = PROP_TAG( PT_MV_UNICODE, 32832)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER = PROP_TAG( PT_MV_TSTRING, 32833)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER_A = PROP_TAG( PT_MV_STRING8, 32833)
PR_EMS_AB_ENCRYPT_ALG_LIST_OTHER_W = PROP_TAG( PT_MV_UNICODE, 32833)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA = PROP_TAG( PT_TSTRING, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA_A = PROP_TAG( PT_STRING8, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_NA_W = PROP_TAG( PT_UNICODE, 32835)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER = PROP_TAG( PT_TSTRING, 32829)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER_A = PROP_TAG( PT_STRING8, 32829)
PR_EMS_AB_ENCRYPT_ALG_SELECTED_OTHER_W = PROP_TAG( PT_UNICODE, 32829)
PR_EMS_AB_EXPAND_DLS_LOCALLY = PROP_TAG( PT_BOOLEAN, 32932)
PR_EMS_AB_EXPIRATION_TIME = PROP_TAG( PT_SYSTIME, 32808)
PR_EMS_AB_EXPORT_CONTAINERS = PROP_TAG( PT_MV_TSTRING, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_A = PROP_TAG( PT_MV_STRING8, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_W = PROP_TAG( PT_MV_UNICODE, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_O = PROP_TAG( PT_OBJECT, 32933)
PR_EMS_AB_EXPORT_CONTAINERS_T = PROP_TAG( PT_MV_TSTRING, 32933)
PR_EMS_AB_EXPORT_CUSTOM_RECIPIENTS = PROP_TAG( PT_BOOLEAN, 32934)
PR_EMS_AB_EXTENDED_CHARS_ALLOWED = PROP_TAG( PT_BOOLEAN, 32935)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1 = PROP_TAG( PT_TSTRING, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1_A = PROP_TAG( PT_STRING8, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_1_W = PROP_TAG( PT_UNICODE, 32813)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10 = PROP_TAG( PT_TSTRING, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10_A = PROP_TAG( PT_STRING8, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_10_W = PROP_TAG( PT_UNICODE, 32822)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2 = PROP_TAG( PT_TSTRING, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2_A = PROP_TAG( PT_STRING8, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_2_W = PROP_TAG( PT_UNICODE, 32814)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3 = PROP_TAG( PT_TSTRING, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3_A = PROP_TAG( PT_STRING8, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_3_W = PROP_TAG( PT_UNICODE, 32815)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4 = PROP_TAG( PT_TSTRING, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4_A = PROP_TAG( PT_STRING8, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_4_W = PROP_TAG( PT_UNICODE, 32816)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5 = PROP_TAG( PT_TSTRING, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5_A = PROP_TAG( PT_STRING8, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_5_W = PROP_TAG( PT_UNICODE, 32817)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6 = PROP_TAG( PT_TSTRING, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6_A = PROP_TAG( PT_STRING8, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_6_W = PROP_TAG( PT_UNICODE, 32818)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7 = PROP_TAG( PT_TSTRING, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7_A = PROP_TAG( PT_STRING8, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_7_W = PROP_TAG( PT_UNICODE, 32819)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8 = PROP_TAG( PT_TSTRING, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8_A = PROP_TAG( PT_STRING8, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_8_W = PROP_TAG( PT_UNICODE, 32820)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9 = PROP_TAG( PT_TSTRING, 32821)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9_A = PROP_TAG( PT_STRING8, 32821)
PR_EMS_AB_EXTENSION_ATTRIBUTE_9_W = PROP_TAG( PT_UNICODE, 32821)
PR_EMS_AB_EXTENSION_DATA = PROP_TAG( PT_MV_BINARY, 32936)
PR_EMS_AB_EXTENSION_NAME = PROP_TAG( PT_MV_TSTRING, 32937)
PR_EMS_AB_EXTENSION_NAME_A = PROP_TAG( PT_MV_STRING8, 32937)
PR_EMS_AB_EXTENSION_NAME_W = PROP_TAG( PT_MV_UNICODE, 32937)
PR_EMS_AB_EXTENSION_NAME_INHERITED = PROP_TAG( PT_MV_TSTRING, 32938)
PR_EMS_AB_EXTENSION_NAME_INHERITED_A = PROP_TAG( PT_MV_STRING8, 32938)
PR_EMS_AB_EXTENSION_NAME_INHERITED_W = PROP_TAG( PT_MV_UNICODE, 32938)
PR_EMS_AB_FACSIMILE_TELEPHONE_NUMBER = PROP_TAG( PT_MV_BINARY, 32939)
PR_EMS_AB_FILE_VERSION = PROP_TAG( PT_BINARY, 32940)
PR_EMS_AB_FILTER_LOCAL_ADDRESSES = PROP_TAG( PT_BOOLEAN, 32941)
PR_EMS_AB_FOLDER_PATHNAME = PROP_TAG( PT_TSTRING, 32772)
PR_EMS_AB_FOLDER_PATHNAME_A = PROP_TAG( PT_STRING8, 32772)
PR_EMS_AB_FOLDER_PATHNAME_W = PROP_TAG( PT_UNICODE, 32772)
PR_EMS_AB_FOLDERS_CONTAINER = PROP_TAG( PT_TSTRING, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_A = PROP_TAG( PT_STRING8, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_W = PROP_TAG( PT_UNICODE, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_O = PROP_TAG( PT_OBJECT, 32942)
PR_EMS_AB_FOLDERS_CONTAINER_T = PROP_TAG( PT_TSTRING, 32942)
PR_EMS_AB_GARBAGE_COLL_PERIOD = PROP_TAG( PT_LONG, 32943)
PR_EMS_AB_GATEWAY_LOCAL_CRED = PROP_TAG( PT_TSTRING, 32944)
PR_EMS_AB_GATEWAY_LOCAL_CRED_A = PROP_TAG( PT_STRING8, 32944)
PR_EMS_AB_GATEWAY_LOCAL_CRED_W = PROP_TAG( PT_UNICODE, 32944)
PR_EMS_AB_GATEWAY_LOCAL_DESIG = PROP_TAG( PT_TSTRING, 32945)
PR_EMS_AB_GATEWAY_LOCAL_DESIG_A = PROP_TAG( PT_STRING8, 32945)
PR_EMS_AB_GATEWAY_LOCAL_DESIG_W = PROP_TAG( PT_UNICODE, 32945)
PR_EMS_AB_GATEWAY_PROXY = PROP_TAG( PT_MV_TSTRING, 32946)
PR_EMS_AB_GATEWAY_PROXY_A = PROP_TAG( PT_MV_STRING8, 32946)
PR_EMS_AB_GATEWAY_PROXY_W = PROP_TAG( PT_MV_UNICODE, 32946)
PR_EMS_AB_GATEWAY_ROUTING_TREE = PROP_TAG( PT_BINARY, 32947)
PR_EMS_AB_GWART_LAST_MODIFIED = PROP_TAG( PT_SYSTIME, 32948)
PR_EMS_AB_HAS_FULL_REPLICA_NCS = PROP_TAG( PT_MV_TSTRING, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_A = PROP_TAG( PT_MV_STRING8, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_W = PROP_TAG( PT_MV_UNICODE, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_O = PROP_TAG( PT_OBJECT, 32949)
PR_EMS_AB_HAS_FULL_REPLICA_NCS_T = PROP_TAG( PT_MV_TSTRING, 32949)
PR_EMS_AB_HAS_MASTER_NCS = PROP_TAG( PT_MV_TSTRING, 32950)
PR_EMS_AB_HAS_MASTER_NCS_A = PROP_TAG( PT_MV_STRING8, 32950)
PR_EMS_AB_HAS_MASTER_NCS_W = PROP_TAG( PT_MV_UNICODE, 32950)
PR_EMS_AB_HAS_MASTER_NCS_O = PROP_TAG( PT_OBJECT, 32950)
PR_EMS_AB_HAS_MASTER_NCS_T = PROP_TAG( PT_MV_TSTRING, 32950)
PR_EMS_AB_HELP_DATA16 = PROP_TAG( PT_BINARY, 32826)
PR_EMS_AB_HELP_DATA32 = PROP_TAG( PT_BINARY, 32784)
PR_EMS_AB_HELP_FILE_NAME = PROP_TAG( PT_TSTRING, 32827)
PR_EMS_AB_HELP_FILE_NAME_A = PROP_TAG( PT_STRING8, 32827)
PR_EMS_AB_HELP_FILE_NAME_W = PROP_TAG( PT_UNICODE, 32827)
PR_EMS_AB_HEURISTICS = PROP_TAG( PT_LONG, 32951)
PR_EMS_AB_HIDE_DL_MEMBERSHIP = PROP_TAG( PT_BOOLEAN, 32952)
PR_EMS_AB_HIDE_FROM_ADDRESS_BOOK = PROP_TAG( PT_BOOLEAN, 32953)
PR_EMS_AB_HOME_MDB = PROP_TAG( PT_TSTRING, 32774)
PR_EMS_AB_HOME_MDB_A = PROP_TAG( PT_STRING8, 32774)
PR_EMS_AB_HOME_MDB_W = PROP_TAG( PT_UNICODE, 32774)
PR_EMS_AB_HOME_MDB_O = PROP_TAG( PT_OBJECT, 32774)
PR_EMS_AB_HOME_MDB_T = PROP_TAG( PT_TSTRING, 32774)
PR_EMS_AB_HOME_MDB_BL = PROP_TAG( PT_MV_TSTRING, 32788)
PR_EMS_AB_HOME_MDB_BL_A = PROP_TAG( PT_MV_STRING8, 32788)
PR_EMS_AB_HOME_MDB_BL_W = PROP_TAG( PT_MV_UNICODE, 32788)
PR_EMS_AB_HOME_MDB_BL_O = PROP_TAG( PT_OBJECT, 32788)
PR_EMS_AB_HOME_MDB_BL_T = PROP_TAG( PT_MV_TSTRING, 32788)
PR_EMS_AB_HOME_MTA = PROP_TAG( PT_TSTRING, 32775)
PR_EMS_AB_HOME_MTA_A = PROP_TAG( PT_STRING8, 32775)
PR_EMS_AB_HOME_MTA_W = PROP_TAG( PT_UNICODE, 32775)
PR_EMS_AB_HOME_MTA_O = PROP_TAG( PT_OBJECT, 32775)
PR_EMS_AB_HOME_MTA_T = PROP_TAG( PT_TSTRING, 32775)
PR_EMS_AB_HOME_PUBLIC_SERVER = PROP_TAG( PT_TSTRING, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_A = PROP_TAG( PT_STRING8, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_W = PROP_TAG( PT_UNICODE, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_O = PROP_TAG( PT_OBJECT, 32831)
PR_EMS_AB_HOME_PUBLIC_SERVER_T = PROP_TAG( PT_TSTRING, 32831)
PR_EMS_AB_IMPORT_CONTAINER = PROP_TAG( PT_TSTRING, 32954)
PR_EMS_AB_IMPORT_CONTAINER_A = PROP_TAG( PT_STRING8, 32954)
PR_EMS_AB_IMPORT_CONTAINER_W = PROP_TAG( PT_UNICODE, 32954)
PR_EMS_AB_IMPORT_CONTAINER_O = PROP_TAG( PT_OBJECT, 32954)
PR_EMS_AB_IMPORT_CONTAINER_T = PROP_TAG( PT_TSTRING, 32954)
PR_EMS_AB_IMPORT_SENSITIVITY = PROP_TAG( PT_LONG, 32955)
PR_EMS_AB_IMPORTED_FROM = PROP_TAG( PT_TSTRING, 32834)
PR_EMS_AB_IMPORTED_FROM_A = PROP_TAG( PT_STRING8, 32834)
PR_EMS_AB_IMPORTED_FROM_W = PROP_TAG( PT_UNICODE, 32834)
PR_EMS_AB_INBOUND_SITES = PROP_TAG( PT_MV_TSTRING, 32956)
PR_EMS_AB_INBOUND_SITES_A = PROP_TAG( PT_MV_STRING8, 32956)
PR_EMS_AB_INBOUND_SITES_W = PROP_TAG( PT_MV_UNICODE, 32956)
PR_EMS_AB_INBOUND_SITES_O = PROP_TAG( PT_OBJECT, 32956)
PR_EMS_AB_INBOUND_SITES_T = PROP_TAG( PT_MV_TSTRING, 32956)
PR_EMS_AB_INSTANCE_TYPE = PROP_TAG( PT_LONG, 32957)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER = PROP_TAG( PT_MV_TSTRING, 32958)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER_A = PROP_TAG( PT_MV_STRING8, 32958)
PR_EMS_AB_INTERNATIONAL_ISDN_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 32958)
PR_EMS_AB_INVOCATION_ID = PROP_TAG( PT_BINARY, 32959)
PR_EMS_AB_IS_DELETED = PROP_TAG( PT_BOOLEAN, 32960)
PR_EMS_AB_IS_MEMBER_OF_DL = PROP_TAG( PT_OBJECT, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_A = PROP_TAG( PT_MV_STRING8, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_W = PROP_TAG( PT_MV_UNICODE, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_O = PROP_TAG( PT_OBJECT, 32776)
PR_EMS_AB_IS_MEMBER_OF_DL_T = PROP_TAG( PT_MV_TSTRING, 32776)
PR_EMS_AB_IS_SINGLE_VALUED = PROP_TAG( PT_BOOLEAN, 32961)
PR_EMS_AB_KCC_STATUS = PROP_TAG( PT_MV_BINARY, 32962)
PR_EMS_AB_KM_SERVER = PROP_TAG( PT_TSTRING, 32781)
PR_EMS_AB_KM_SERVER_A = PROP_TAG( PT_STRING8, 32781)
PR_EMS_AB_KM_SERVER_W = PROP_TAG( PT_UNICODE, 32781)
PR_EMS_AB_KM_SERVER_O = PROP_TAG( PT_OBJECT, 32781)
PR_EMS_AB_KM_SERVER_T = PROP_TAG( PT_TSTRING, 32781)
PR_EMS_AB_KNOWLEDGE_INFORMATION = PROP_TAG( PT_MV_TSTRING, 32963)
PR_EMS_AB_KNOWLEDGE_INFORMATION_A = PROP_TAG( PT_MV_STRING8, 32963)
PR_EMS_AB_KNOWLEDGE_INFORMATION_W = PROP_TAG( PT_MV_UNICODE, 32963)
PR_EMS_AB_LANGUAGE = PROP_TAG( PT_LONG, 33144)
PR_EMS_AB_LDAP_DISPLAY_NAME = PROP_TAG( PT_MV_TSTRING, 33137)
PR_EMS_AB_LDAP_DISPLAY_NAME_A = PROP_TAG( PT_MV_STRING8, 33137)
PR_EMS_AB_LDAP_DISPLAY_NAME_W = PROP_TAG( PT_MV_UNICODE, 33137)
PR_EMS_AB_LINE_WRAP = PROP_TAG( PT_LONG, 32964)
PR_EMS_AB_LINK_ID = PROP_TAG( PT_LONG, 32965)
PR_EMS_AB_LOCAL_BRIDGE_HEAD = PROP_TAG( PT_TSTRING, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_A = PROP_TAG( PT_STRING8, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_W = PROP_TAG( PT_UNICODE, 32966)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS = PROP_TAG( PT_TSTRING, 32967)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS_A = PROP_TAG( PT_STRING8, 32967)
PR_EMS_AB_LOCAL_BRIDGE_HEAD_ADDRESS_W = PROP_TAG( PT_UNICODE, 32967)
PR_EMS_AB_LOCAL_INITIAL_TURN = PROP_TAG( PT_BOOLEAN, 32968)
PR_EMS_AB_LOCAL_SCOPE = PROP_TAG( PT_MV_TSTRING, 32969)
PR_EMS_AB_LOCAL_SCOPE_A = PROP_TAG( PT_MV_STRING8, 32969)
PR_EMS_AB_LOCAL_SCOPE_W = PROP_TAG( PT_MV_UNICODE, 32969)
PR_EMS_AB_LOCAL_SCOPE_O = PROP_TAG( PT_OBJECT, 32969)
PR_EMS_AB_LOCAL_SCOPE_T = PROP_TAG( PT_MV_TSTRING, 32969)
PR_EMS_AB_LOG_FILENAME = PROP_TAG( PT_TSTRING, 32970)
PR_EMS_AB_LOG_FILENAME_A = PROP_TAG( PT_STRING8, 32970)
PR_EMS_AB_LOG_FILENAME_W = PROP_TAG( PT_UNICODE, 32970)
PR_EMS_AB_LOG_ROLLOVER_INTERVAL = PROP_TAG( PT_LONG, 32971)
PR_EMS_AB_MAINTAIN_AUTOREPLY_HISTORY = PROP_TAG( PT_BOOLEAN, 32972)
PR_EMS_AB_MANAGER = PROP_TAG( PT_OBJECT, 32773)
PR_EMS_AB_MANAGER_A = PROP_TAG( PT_STRING8, 32773)
PR_EMS_AB_MANAGER_W = PROP_TAG( PT_UNICODE, 32773)
PR_EMS_AB_MANAGER_O = PROP_TAG( PT_OBJECT, 32773)
PR_EMS_AB_MANAGER_T = PROP_TAG( PT_TSTRING, 32773)
PR_EMS_AB_MAPI_DISPLAY_TYPE = PROP_TAG( PT_LONG, 32973)
PR_EMS_AB_MAPI_ID = PROP_TAG( PT_LONG, 32974)
PR_EMS_AB_MAXIMUM_OBJECT_ID = PROP_TAG( PT_BINARY, 33129)
PR_EMS_AB_MDB_BACKOFF_INTERVAL = PROP_TAG( PT_LONG, 32975)
PR_EMS_AB_MDB_MSG_TIME_OUT_PERIOD = PROP_TAG( PT_LONG, 32976)
PR_EMS_AB_MDB_OVER_QUOTA_LIMIT = PROP_TAG( PT_LONG, 32977)
PR_EMS_AB_MDB_STORAGE_QUOTA = PROP_TAG( PT_LONG, 32978)
PR_EMS_AB_MDB_UNREAD_LIMIT = PROP_TAG( PT_LONG, 32979)
PR_EMS_AB_MDB_USE_DEFAULTS = PROP_TAG( PT_BOOLEAN, 32980)
PR_EMS_AB_MEMBER = PROP_TAG( PT_OBJECT, 32777)
PR_EMS_AB_MEMBER_A = PROP_TAG( PT_MV_STRING8, 32777)
PR_EMS_AB_MEMBER_W = PROP_TAG( PT_MV_UNICODE, 32777)
PR_EMS_AB_MEMBER_O = PROP_TAG( PT_OBJECT, 32777)
PR_EMS_AB_MEMBER_T = PROP_TAG( PT_MV_TSTRING, 32777)
PR_EMS_AB_MESSAGE_TRACKING_ENABLED = PROP_TAG( PT_BOOLEAN, 32981)
PR_EMS_AB_MONITOR_CLOCK = PROP_TAG( PT_BOOLEAN, 32982)
PR_EMS_AB_MONITOR_SERVERS = PROP_TAG( PT_BOOLEAN, 32983)
PR_EMS_AB_MONITOR_SERVICES = PROP_TAG( PT_BOOLEAN, 32984)
PR_EMS_AB_MONITORED_CONFIGURATIONS = PROP_TAG( PT_MV_TSTRING, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_A = PROP_TAG( PT_MV_STRING8, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_W = PROP_TAG( PT_MV_UNICODE, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_O = PROP_TAG( PT_OBJECT, 32985)
PR_EMS_AB_MONITORED_CONFIGURATIONS_T = PROP_TAG( PT_MV_TSTRING, 32985)
PR_EMS_AB_MONITORED_SERVERS = PROP_TAG( PT_MV_TSTRING, 32986)
PR_EMS_AB_MONITORED_SERVERS_A = PROP_TAG( PT_MV_STRING8, 32986)
PR_EMS_AB_MONITORED_SERVERS_W = PROP_TAG( PT_MV_UNICODE, 32986)
PR_EMS_AB_MONITORED_SERVERS_O = PROP_TAG( PT_OBJECT, 32986)
PR_EMS_AB_MONITORED_SERVERS_T = PROP_TAG( PT_MV_TSTRING, 32986)
PR_EMS_AB_MONITORED_SERVICES = PROP_TAG( PT_MV_TSTRING, 32987)
PR_EMS_AB_MONITORED_SERVICES_A = PROP_TAG( PT_MV_STRING8, 32987)
PR_EMS_AB_MONITORED_SERVICES_W = PROP_TAG( PT_MV_UNICODE, 32987)
PR_EMS_AB_MONITORING_ALERT_DELAY = PROP_TAG( PT_LONG, 32988)
PR_EMS_AB_MONITORING_ALERT_UNITS = PROP_TAG( PT_LONG, 32989)
PR_EMS_AB_MONITORING_AVAILABILITY_STYLE = PROP_TAG( PT_LONG, 32990)
PR_EMS_AB_MONITORING_AVAILABILITY_WINDOW = PROP_TAG( PT_BINARY, 32991)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL = PROP_TAG( PT_MV_TSTRING, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_A = PROP_TAG( PT_MV_STRING8, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_W = PROP_TAG( PT_MV_UNICODE, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_O = PROP_TAG( PT_OBJECT, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_MAIL_T = PROP_TAG( PT_MV_TSTRING, 32992)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC = PROP_TAG( PT_MV_TSTRING, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_A = PROP_TAG( PT_MV_STRING8, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_W = PROP_TAG( PT_MV_UNICODE, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_O = PROP_TAG( PT_OBJECT, 32993)
PR_EMS_AB_MONITORING_CACHED_VIA_RPC_T = PROP_TAG( PT_MV_TSTRING, 32993)
PR_EMS_AB_MONITORING_ESCALATION_PROCEDURE = PROP_TAG( PT_MV_BINARY, 32994)
PR_EMS_AB_MONITORING_HOTSITE_POLL_INTERVAL = PROP_TAG( PT_LONG, 32995)
PR_EMS_AB_MONITORING_HOTSITE_POLL_UNITS = PROP_TAG( PT_LONG, 32996)
PR_EMS_AB_MONITORING_MAIL_UPDATE_INTERVAL = PROP_TAG( PT_LONG, 32997)
PR_EMS_AB_MONITORING_MAIL_UPDATE_UNITS = PROP_TAG( PT_LONG, 32998)
PR_EMS_AB_MONITORING_NORMAL_POLL_INTERVAL = PROP_TAG( PT_LONG, 32999)
PR_EMS_AB_MONITORING_NORMAL_POLL_UNITS = PROP_TAG( PT_LONG, 33000)
PR_EMS_AB_MONITORING_RECIPIENTS = PROP_TAG( PT_MV_TSTRING, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_A = PROP_TAG( PT_MV_STRING8, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_W = PROP_TAG( PT_MV_UNICODE, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_O = PROP_TAG( PT_OBJECT, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_T = PROP_TAG( PT_MV_TSTRING, 33001)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR = PROP_TAG( PT_MV_TSTRING, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_A = PROP_TAG( PT_MV_STRING8, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_W = PROP_TAG( PT_MV_UNICODE, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_O = PROP_TAG( PT_OBJECT, 33002)
PR_EMS_AB_MONITORING_RECIPIENTS_NDR_T = PROP_TAG( PT_MV_TSTRING, 33002)
PR_EMS_AB_MONITORING_RPC_UPDATE_INTERVAL = PROP_TAG( PT_LONG, 33003)
PR_EMS_AB_MONITORING_RPC_UPDATE_UNITS = PROP_TAG( PT_LONG, 33004)
PR_EMS_AB_MONITORING_WARNING_DELAY = PROP_TAG( PT_LONG, 33005)
PR_EMS_AB_MONITORING_WARNING_UNITS = PROP_TAG( PT_LONG, 33006)
PR_EMS_AB_MTA_LOCAL_CRED = PROP_TAG( PT_TSTRING, 33007)
PR_EMS_AB_MTA_LOCAL_CRED_A = PROP_TAG( PT_STRING8, 33007)
PR_EMS_AB_MTA_LOCAL_CRED_W = PROP_TAG( PT_UNICODE, 33007)
PR_EMS_AB_MTA_LOCAL_DESIG = PROP_TAG( PT_TSTRING, 33008)
PR_EMS_AB_MTA_LOCAL_DESIG_A = PROP_TAG( PT_STRING8, 33008)
PR_EMS_AB_MTA_LOCAL_DESIG_W = PROP_TAG( PT_UNICODE, 33008)
PR_EMS_AB_N_ADDRESS = PROP_TAG( PT_BINARY, 33009)
PR_EMS_AB_N_ADDRESS_TYPE = PROP_TAG( PT_LONG, 33010)
PR_EMS_AB_NETWORK_ADDRESS = PROP_TAG( PT_MV_TSTRING, 33136)
PR_EMS_AB_NETWORK_ADDRESS_A = PROP_TAG( PT_MV_STRING8, 33136)
PR_EMS_AB_NETWORK_ADDRESS_W = PROP_TAG( PT_MV_UNICODE, 33136)
PR_EMS_AB_NNTP_CHARACTER_SET = PROP_TAG( PT_TSTRING, 33149)
PR_EMS_AB_NNTP_CHARACTER_SET_A = PROP_TAG( PT_STRING8, 33149)
PR_EMS_AB_NNTP_CHARACTER_SET_W = PROP_TAG( PT_UNICODE, 33149)
PR_EMS_AB_NNTP_CONTENT_FORMAT = PROP_TAG( PT_TSTRING, 33142)
PR_EMS_AB_NNTP_CONTENT_FORMAT_A = PROP_TAG( PT_STRING8, 33142)
PR_EMS_AB_NNTP_CONTENT_FORMAT_W = PROP_TAG( PT_UNICODE, 33142)
PR_EMS_AB_NT_MACHINE_NAME = PROP_TAG( PT_TSTRING, 33011)
PR_EMS_AB_NT_MACHINE_NAME_A = PROP_TAG( PT_STRING8, 33011)
PR_EMS_AB_NT_MACHINE_NAME_W = PROP_TAG( PT_UNICODE, 33011)
PR_EMS_AB_NT_SECURITY_DESCRIPTOR = PROP_TAG( PT_BINARY, 32787)
PR_EMS_AB_NUM_OF_OPEN_RETRIES = PROP_TAG( PT_LONG, 33012)
PR_EMS_AB_NUM_OF_TRANSFER_RETRIES = PROP_TAG( PT_LONG, 33013)
PR_EMS_AB_OBJ_DIST_NAME = PROP_TAG( PT_TSTRING, 32828)
PR_EMS_AB_OBJ_DIST_NAME_A = PROP_TAG( PT_STRING8, 32828)
PR_EMS_AB_OBJ_DIST_NAME_W = PROP_TAG( PT_UNICODE, 32828)
PR_EMS_AB_OBJ_DIST_NAME_O = PROP_TAG( PT_OBJECT, 32828)
PR_EMS_AB_OBJ_DIST_NAME_T = PROP_TAG( PT_TSTRING, 32828)
PR_EMS_AB_OBJECT_CLASS_CATEGORY = PROP_TAG( PT_LONG, 33014)
PR_EMS_AB_OBJECT_VERSION = PROP_TAG( PT_LONG, 33015)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS = PROP_TAG( PT_MV_TSTRING, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_A = PROP_TAG( PT_MV_STRING8, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_W = PROP_TAG( PT_MV_UNICODE, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_O = PROP_TAG( PT_OBJECT, 33016)
PR_EMS_AB_OFF_LINE_AB_CONTAINERS_T = PROP_TAG( PT_MV_TSTRING, 33016)
PR_EMS_AB_OFF_LINE_AB_SCHEDULE = PROP_TAG( PT_BINARY, 33017)
PR_EMS_AB_OFF_LINE_AB_SERVER = PROP_TAG( PT_TSTRING, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_A = PROP_TAG( PT_STRING8, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_W = PROP_TAG( PT_UNICODE, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_O = PROP_TAG( PT_OBJECT, 33018)
PR_EMS_AB_OFF_LINE_AB_SERVER_T = PROP_TAG( PT_TSTRING, 33018)
PR_EMS_AB_OFF_LINE_AB_STYLE = PROP_TAG( PT_LONG, 33019)
PR_EMS_AB_OID_TYPE = PROP_TAG( PT_LONG, 33020)
PR_EMS_AB_OM_OBJECT_CLASS = PROP_TAG( PT_BINARY, 33021)
PR_EMS_AB_OM_SYNTAX = PROP_TAG( PT_LONG, 33022)
PR_EMS_AB_OOF_REPLY_TO_ORIGINATOR = PROP_TAG( PT_BOOLEAN, 33023)
PR_EMS_AB_OPEN_RETRY_INTERVAL = PROP_TAG( PT_LONG, 33024)
PR_EMS_AB_ORGANIZATION_NAME = PROP_TAG( PT_MV_TSTRING, 33025)
PR_EMS_AB_ORGANIZATION_NAME_A = PROP_TAG( PT_MV_STRING8, 33025)
PR_EMS_AB_ORGANIZATION_NAME_W = PROP_TAG( PT_MV_UNICODE, 33025)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME = PROP_TAG( PT_MV_TSTRING, 33026)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME_A = PROP_TAG( PT_MV_STRING8, 33026)
PR_EMS_AB_ORGANIZATIONAL_UNIT_NAME_W = PROP_TAG( PT_MV_UNICODE, 33026)
PR_EMS_AB_ORIGINAL_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33027)
PR_EMS_AB_ORIGINAL_DISPLAY_TABLE_MSDOS = PROP_TAG( PT_BINARY, 33028)
PR_EMS_AB_OUTBOUND_SITES = PROP_TAG( PT_MV_TSTRING, 33029)
PR_EMS_AB_OUTBOUND_SITES_A = PROP_TAG( PT_MV_STRING8, 33029)
PR_EMS_AB_OUTBOUND_SITES_W = PROP_TAG( PT_MV_UNICODE, 33029)
PR_EMS_AB_OUTBOUND_SITES_O = PROP_TAG( PT_OBJECT, 33029)
PR_EMS_AB_OUTBOUND_SITES_T = PROP_TAG( PT_MV_TSTRING, 33029)
PR_EMS_AB_OWNER = PROP_TAG( PT_TSTRING, 32780)
PR_EMS_AB_OWNER_A = PROP_TAG( PT_STRING8, 32780)
PR_EMS_AB_OWNER_W = PROP_TAG( PT_UNICODE, 32780)
PR_EMS_AB_OWNER_O = PROP_TAG( PT_OBJECT, 32780)
PR_EMS_AB_OWNER_T = PROP_TAG( PT_TSTRING, 32780)
PR_EMS_AB_OWNER_BL = PROP_TAG( PT_TSTRING, 32804)
PR_EMS_AB_OWNER_BL_A = PROP_TAG( PT_STRING8, 32804)
PR_EMS_AB_OWNER_BL_W = PROP_TAG( PT_UNICODE, 32804)
PR_EMS_AB_OWNER_BL_O = PROP_TAG( PT_OBJECT, 32804)
PR_EMS_AB_OWNER_BL_T = PROP_TAG( PT_TSTRING, 32804)
PR_EMS_AB_P_SELECTOR = PROP_TAG( PT_BINARY, 33030)
PR_EMS_AB_P_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33031)
PR_EMS_AB_PER_MSG_DIALOG_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33032)
PR_EMS_AB_PER_RECIP_DIALOG_DISPLAY_TABLE = PROP_TAG( PT_BINARY, 33033)
PR_EMS_AB_PERIOD_REP_SYNC_TIMES = PROP_TAG( PT_BINARY, 33034)
PR_EMS_AB_PERIOD_REPL_STAGGER = PROP_TAG( PT_LONG, 33035)
PR_EMS_AB_PF_CONTACTS = PROP_TAG( PT_MV_TSTRING, 32824)
PR_EMS_AB_PF_CONTACTS_A = PROP_TAG( PT_MV_STRING8, 32824)
PR_EMS_AB_PF_CONTACTS_W = PROP_TAG( PT_MV_UNICODE, 32824)
PR_EMS_AB_PF_CONTACTS_O = PROP_TAG( PT_OBJECT, 32824)
PR_EMS_AB_PF_CONTACTS_T = PROP_TAG( PT_MV_TSTRING, 32824)
PR_EMS_AB_POP_CHARACTER_SET = PROP_TAG( PT_TSTRING, 33145)
PR_EMS_AB_POP_CHARACTER_SET_A = PROP_TAG( PT_STRING8, 33145)
PR_EMS_AB_POP_CHARACTER_SET_W = PROP_TAG( PT_UNICODE, 33145)
PR_EMS_AB_POP_CONTENT_FORMAT = PROP_TAG( PT_TSTRING, 33143)
PR_EMS_AB_POP_CONTENT_FORMAT_A = PROP_TAG( PT_STRING8, 33143)
PR_EMS_AB_POP_CONTENT_FORMAT_W = PROP_TAG( PT_UNICODE, 33143)
PR_EMS_AB_POSTAL_ADDRESS = PROP_TAG( PT_MV_BINARY, 33036)
PR_EMS_AB_PREFERRED_DELIVERY_METHOD = PROP_TAG( PT_MV_LONG, 33037)
PR_EMS_AB_PRMD = PROP_TAG( PT_TSTRING, 33038)
PR_EMS_AB_PRMD_A = PROP_TAG( PT_STRING8, 33038)
PR_EMS_AB_PRMD_W = PROP_TAG( PT_UNICODE, 33038)
PR_EMS_AB_PROXY_ADDRESSES = PROP_TAG( PT_MV_TSTRING, 32783)
PR_EMS_AB_PROXY_ADDRESSES_A = PROP_TAG( PT_MV_STRING8, 32783)
PR_EMS_AB_PROXY_ADDRESSES_W = PROP_TAG( PT_MV_UNICODE, 32783)
PR_EMS_AB_PROXY_GENERATOR_DLL = PROP_TAG( PT_TSTRING, 33039)
PR_EMS_AB_PROXY_GENERATOR_DLL_A = PROP_TAG( PT_STRING8, 33039)
PR_EMS_AB_PROXY_GENERATOR_DLL_W = PROP_TAG( PT_UNICODE, 33039)
PR_EMS_AB_PUBLIC_DELEGATES = PROP_TAG( PT_OBJECT, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_A = PROP_TAG( PT_MV_STRING8, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_W = PROP_TAG( PT_MV_UNICODE, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_O = PROP_TAG( PT_OBJECT, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_T = PROP_TAG( PT_MV_TSTRING, 32789)
PR_EMS_AB_PUBLIC_DELEGATES_BL = PROP_TAG( PT_MV_TSTRING, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_A = PROP_TAG( PT_MV_STRING8, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_W = PROP_TAG( PT_MV_UNICODE, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_O = PROP_TAG( PT_OBJECT, 33040)
PR_EMS_AB_PUBLIC_DELEGATES_BL_T = PROP_TAG( PT_MV_TSTRING, 33040)
PR_EMS_AB_QUOTA_NOTIFICATION_SCHEDULE = PROP_TAG( PT_BINARY, 33041)
PR_EMS_AB_QUOTA_NOTIFICATION_STYLE = PROP_TAG( PT_LONG, 33042)
PR_EMS_AB_RANGE_LOWER = PROP_TAG( PT_LONG, 33043)
PR_EMS_AB_RANGE_UPPER = PROP_TAG( PT_LONG, 33044)
PR_EMS_AB_RAS_CALLBACK_NUMBER = PROP_TAG( PT_TSTRING, 33045)
PR_EMS_AB_RAS_CALLBACK_NUMBER_A = PROP_TAG( PT_STRING8, 33045)
PR_EMS_AB_RAS_CALLBACK_NUMBER_W = PROP_TAG( PT_UNICODE, 33045)
PR_EMS_AB_RAS_PHONE_NUMBER = PROP_TAG( PT_TSTRING, 33046)
PR_EMS_AB_RAS_PHONE_NUMBER_A = PROP_TAG( PT_STRING8, 33046)
PR_EMS_AB_RAS_PHONE_NUMBER_W = PROP_TAG( PT_UNICODE, 33046)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME = PROP_TAG( PT_TSTRING, 33047)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME_A = PROP_TAG( PT_STRING8, 33047)
PR_EMS_AB_RAS_PHONEBOOK_ENTRY_NAME_W = PROP_TAG( PT_UNICODE, 33047)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME = PROP_TAG( PT_TSTRING, 33048)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME_A = PROP_TAG( PT_STRING8, 33048)
PR_EMS_AB_RAS_REMOTE_SRVR_NAME_W = PROP_TAG( PT_UNICODE, 33048)
PR_EMS_AB_REGISTERED_ADDRESS = PROP_TAG( PT_MV_BINARY, 33049)
PR_EMS_AB_REMOTE_BRIDGE_HEAD = PROP_TAG( PT_TSTRING, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_A = PROP_TAG( PT_STRING8, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_W = PROP_TAG( PT_UNICODE, 33050)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS = PROP_TAG( PT_TSTRING, 33051)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS_A = PROP_TAG( PT_STRING8, 33051)
PR_EMS_AB_REMOTE_BRIDGE_HEAD_ADDRESS_W = PROP_TAG( PT_UNICODE, 33051)
PR_EMS_AB_REMOTE_OUT_BH_SERVER = PROP_TAG( PT_TSTRING, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_A = PROP_TAG( PT_STRING8, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_W = PROP_TAG( PT_UNICODE, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_O = PROP_TAG( PT_OBJECT, 33052)
PR_EMS_AB_REMOTE_OUT_BH_SERVER_T = PROP_TAG( PT_TSTRING, 33052)
PR_EMS_AB_REMOTE_SITE = PROP_TAG( PT_TSTRING, 33053)
PR_EMS_AB_REMOTE_SITE_A = PROP_TAG( PT_STRING8, 33053)
PR_EMS_AB_REMOTE_SITE_W = PROP_TAG( PT_UNICODE, 33053)
PR_EMS_AB_REMOTE_SITE_O = PROP_TAG( PT_OBJECT, 33053)
PR_EMS_AB_REMOTE_SITE_T = PROP_TAG( PT_TSTRING, 33053)
PR_EMS_AB_REPLICATION_MAIL_MSG_SIZE = PROP_TAG( PT_LONG, 33128)
PR_EMS_AB_REPLICATION_SENSITIVITY = PROP_TAG( PT_LONG, 33054)
PR_EMS_AB_REPLICATION_STAGGER = PROP_TAG( PT_LONG, 33055)
PR_EMS_AB_REPORT_TO_ORIGINATOR = PROP_TAG( PT_BOOLEAN, 33056)
PR_EMS_AB_REPORT_TO_OWNER = PROP_TAG( PT_BOOLEAN, 33057)
PR_EMS_AB_REPORTS = PROP_TAG( PT_OBJECT, 32782)
PR_EMS_AB_REPORTS_A = PROP_TAG( PT_MV_STRING8, 32782)
PR_EMS_AB_REPORTS_W = PROP_TAG( PT_MV_UNICODE, 32782)
PR_EMS_AB_REPORTS_O = PROP_TAG( PT_OBJECT, 32782)
PR_EMS_AB_REPORTS_T = PROP_TAG( PT_MV_TSTRING, 32782)
PR_EMS_AB_REQ_SEQ = PROP_TAG( PT_LONG, 33058)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA = PROP_TAG( PT_TSTRING, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_A = PROP_TAG( PT_STRING8, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_W = PROP_TAG( PT_UNICODE, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_O = PROP_TAG( PT_OBJECT, 33059)
PR_EMS_AB_RESPONSIBLE_LOCAL_DXA_T = PROP_TAG( PT_TSTRING, 33059)
PR_EMS_AB_RID_SERVER = PROP_TAG( PT_TSTRING, 33060)
PR_EMS_AB_RID_SERVER_A = PROP_TAG( PT_STRING8, 33060)
PR_EMS_AB_RID_SERVER_W = PROP_TAG( PT_UNICODE, 33060)
PR_EMS_AB_RID_SERVER_O = PROP_TAG( PT_OBJECT, 33060)
PR_EMS_AB_RID_SERVER_T = PROP_TAG( PT_TSTRING, 33060)
PR_EMS_AB_ROLE_OCCUPANT = PROP_TAG( PT_MV_TSTRING, 33061)
PR_EMS_AB_ROLE_OCCUPANT_A = PROP_TAG( PT_MV_STRING8, 33061)
PR_EMS_AB_ROLE_OCCUPANT_W = PROP_TAG( PT_MV_UNICODE, 33061)
PR_EMS_AB_ROLE_OCCUPANT_O = PROP_TAG( PT_OBJECT, 33061)
PR_EMS_AB_ROLE_OCCUPANT_T = PROP_TAG( PT_MV_TSTRING, 33061)
PR_EMS_AB_ROUTING_LIST = PROP_TAG( PT_MV_TSTRING, 33062)
PR_EMS_AB_ROUTING_LIST_A = PROP_TAG( PT_MV_STRING8, 33062)
PR_EMS_AB_ROUTING_LIST_W = PROP_TAG( PT_MV_UNICODE, 33062)
PR_EMS_AB_RTS_CHECKPOINT_SIZE = PROP_TAG( PT_LONG, 33063)
PR_EMS_AB_RTS_RECOVERY_TIMEOUT = PROP_TAG( PT_LONG, 33064)
PR_EMS_AB_RTS_WINDOW_SIZE = PROP_TAG( PT_LONG, 33065)
PR_EMS_AB_RUNS_ON = PROP_TAG( PT_MV_TSTRING, 33066)
PR_EMS_AB_RUNS_ON_A = PROP_TAG( PT_MV_STRING8, 33066)
PR_EMS_AB_RUNS_ON_W = PROP_TAG( PT_MV_UNICODE, 33066)
PR_EMS_AB_RUNS_ON_O = PROP_TAG( PT_OBJECT, 33066)
PR_EMS_AB_RUNS_ON_T = PROP_TAG( PT_MV_TSTRING, 33066)
PR_EMS_AB_S_SELECTOR = PROP_TAG( PT_BINARY, 33067)
PR_EMS_AB_S_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33068)
PR_EMS_AB_SCHEMA_FLAGS = PROP_TAG( PT_LONG, 33139)
PR_EMS_AB_SCHEMA_VERSION = PROP_TAG( PT_MV_LONG, 33148)
PR_EMS_AB_SEARCH_FLAGS = PROP_TAG( PT_LONG, 33069)
PR_EMS_AB_SEARCH_GUIDE = PROP_TAG( PT_MV_BINARY, 33070)
PR_EMS_AB_SECURITY_PROTOCOL = PROP_TAG( PT_MV_BINARY, 32823)
PR_EMS_AB_SEE_ALSO = PROP_TAG( PT_MV_TSTRING, 33071)
PR_EMS_AB_SEE_ALSO_A = PROP_TAG( PT_MV_STRING8, 33071)
PR_EMS_AB_SEE_ALSO_W = PROP_TAG( PT_MV_UNICODE, 33071)
PR_EMS_AB_SEE_ALSO_O = PROP_TAG( PT_OBJECT, 33071)
PR_EMS_AB_SEE_ALSO_T = PROP_TAG( PT_MV_TSTRING, 33071)
PR_EMS_AB_SERIAL_NUMBER = PROP_TAG( PT_MV_TSTRING, 33072)
PR_EMS_AB_SERIAL_NUMBER_A = PROP_TAG( PT_MV_STRING8, 33072)
PR_EMS_AB_SERIAL_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 33072)
PR_EMS_AB_SERVICE_ACTION_FIRST = PROP_TAG( PT_LONG, 33073)
PR_EMS_AB_SERVICE_ACTION_OTHER = PROP_TAG( PT_LONG, 33074)
PR_EMS_AB_SERVICE_ACTION_SECOND = PROP_TAG( PT_LONG, 33075)
PR_EMS_AB_SERVICE_RESTART_DELAY = PROP_TAG( PT_LONG, 33076)
PR_EMS_AB_SERVICE_RESTART_MESSAGE = PROP_TAG( PT_TSTRING, 33077)
PR_EMS_AB_SERVICE_RESTART_MESSAGE_A = PROP_TAG( PT_STRING8, 33077)
PR_EMS_AB_SERVICE_RESTART_MESSAGE_W = PROP_TAG( PT_UNICODE, 33077)
PR_EMS_AB_SESSION_DISCONNECT_TIMER = PROP_TAG( PT_LONG, 33078)
PR_EMS_AB_SITE_AFFINITY = PROP_TAG( PT_MV_TSTRING, 33079)
PR_EMS_AB_SITE_AFFINITY_A = PROP_TAG( PT_MV_STRING8, 33079)
PR_EMS_AB_SITE_AFFINITY_W = PROP_TAG( PT_MV_UNICODE, 33079)
PR_EMS_AB_SITE_FOLDER_GUID = PROP_TAG( PT_BINARY, 33126)
PR_EMS_AB_SITE_FOLDER_SERVER = PROP_TAG( PT_TSTRING, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_A = PROP_TAG( PT_STRING8, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_W = PROP_TAG( PT_UNICODE, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_O = PROP_TAG( PT_OBJECT, 33127)
PR_EMS_AB_SITE_FOLDER_SERVER_T = PROP_TAG( PT_TSTRING, 33127)
PR_EMS_AB_SITE_PROXY_SPACE = PROP_TAG( PT_MV_TSTRING, 33080)
PR_EMS_AB_SITE_PROXY_SPACE_A = PROP_TAG( PT_MV_STRING8, 33080)
PR_EMS_AB_SITE_PROXY_SPACE_W = PROP_TAG( PT_MV_UNICODE, 33080)
PR_EMS_AB_SPACE_LAST_COMPUTED = PROP_TAG( PT_SYSTIME, 33081)
PR_EMS_AB_STREET_ADDRESS = PROP_TAG( PT_TSTRING, 33082)
PR_EMS_AB_STREET_ADDRESS_A = PROP_TAG( PT_STRING8, 33082)
PR_EMS_AB_STREET_ADDRESS_W = PROP_TAG( PT_UNICODE, 33082)
PR_EMS_AB_SUB_REFS = PROP_TAG( PT_MV_TSTRING, 33083)
PR_EMS_AB_SUB_REFS_A = PROP_TAG( PT_MV_STRING8, 33083)
PR_EMS_AB_SUB_REFS_W = PROP_TAG( PT_MV_UNICODE, 33083)
PR_EMS_AB_SUB_REFS_O = PROP_TAG( PT_OBJECT, 33083)
PR_EMS_AB_SUB_REFS_T = PROP_TAG( PT_MV_TSTRING, 33083)
PR_EMS_AB_SUB_SITE = PROP_TAG( PT_TSTRING, 33147)
PR_EMS_AB_SUB_SITE_A = PROP_TAG( PT_STRING8, 33147)
PR_EMS_AB_SUB_SITE_W = PROP_TAG( PT_UNICODE, 33147)
PR_EMS_AB_SUBMISSION_CONT_LENGTH = PROP_TAG( PT_LONG, 33084)
PR_EMS_AB_SUPPORTED_APPLICATION_CONTEXT = PROP_TAG( PT_MV_BINARY, 33085)
PR_EMS_AB_SUPPORTING_STACK = PROP_TAG( PT_MV_TSTRING, 33086)
PR_EMS_AB_SUPPORTING_STACK_A = PROP_TAG( PT_MV_STRING8, 33086)
PR_EMS_AB_SUPPORTING_STACK_W = PROP_TAG( PT_MV_UNICODE, 33086)
PR_EMS_AB_SUPPORTING_STACK_O = PROP_TAG( PT_OBJECT, 33086)
PR_EMS_AB_SUPPORTING_STACK_T = PROP_TAG( PT_MV_TSTRING, 33086)
PR_EMS_AB_SUPPORTING_STACK_BL = PROP_TAG( PT_MV_TSTRING, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_A = PROP_TAG( PT_MV_STRING8, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_W = PROP_TAG( PT_MV_UNICODE, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_O = PROP_TAG( PT_OBJECT, 33087)
PR_EMS_AB_SUPPORTING_STACK_BL_T = PROP_TAG( PT_MV_TSTRING, 33087)
PR_EMS_AB_T_SELECTOR = PROP_TAG( PT_BINARY, 33088)
PR_EMS_AB_T_SELECTOR_INBOUND = PROP_TAG( PT_BINARY, 33089)
PR_EMS_AB_TARGET_ADDRESS = PROP_TAG( PT_TSTRING, 32785)
PR_EMS_AB_TARGET_ADDRESS_A = PROP_TAG( PT_STRING8, 32785)
PR_EMS_AB_TARGET_ADDRESS_W = PROP_TAG( PT_UNICODE, 32785)
PR_EMS_AB_TARGET_MTAS = PROP_TAG( PT_MV_TSTRING, 33090)
PR_EMS_AB_TARGET_MTAS_A = PROP_TAG( PT_MV_STRING8, 33090)
PR_EMS_AB_TARGET_MTAS_W = PROP_TAG( PT_MV_UNICODE, 33090)
PR_EMS_AB_TELEPHONE_NUMBER = PROP_TAG( PT_MV_TSTRING, 32786)
PR_EMS_AB_TELEPHONE_NUMBER_A = PROP_TAG( PT_MV_STRING8, 32786)
PR_EMS_AB_TELEPHONE_NUMBER_W = PROP_TAG( PT_MV_UNICODE, 32786)
PR_EMS_AB_TELETEX_TERMINAL_IDENTIFIER = PROP_TAG( PT_MV_BINARY, 33091)
PR_EMS_AB_TEMP_ASSOC_THRESHOLD = PROP_TAG( PT_LONG, 33092)
PR_EMS_AB_TOMBSTONE_LIFETIME = PROP_TAG( PT_LONG, 33093)
PR_EMS_AB_TRACKING_LOG_PATH_NAME = PROP_TAG( PT_TSTRING, 33094)
PR_EMS_AB_TRACKING_LOG_PATH_NAME_A = PROP_TAG( PT_STRING8, 33094)
PR_EMS_AB_TRACKING_LOG_PATH_NAME_W = PROP_TAG( PT_UNICODE, 33094)
PR_EMS_AB_TRANS_RETRY_MINS = PROP_TAG( PT_LONG, 33095)
PR_EMS_AB_TRANS_TIMEOUT_MINS = PROP_TAG( PT_LONG, 33096)
PR_EMS_AB_TRANSFER_RETRY_INTERVAL = PROP_TAG( PT_LONG, 33097)
PR_EMS_AB_TRANSFER_TIMEOUT_NON_URGENT = PROP_TAG( PT_LONG, 33098)
PR_EMS_AB_TRANSFER_TIMEOUT_NORMAL = PROP_TAG( PT_LONG, 33099)
PR_EMS_AB_TRANSFER_TIMEOUT_URGENT = PROP_TAG( PT_LONG, 33100)
PR_EMS_AB_TRANSLATION_TABLE_USED = PROP_TAG( PT_LONG, 33101)
PR_EMS_AB_TRANSPORT_EXPEDITED_DATA = PROP_TAG( PT_BOOLEAN, 33102)
PR_EMS_AB_TRUST_LEVEL = PROP_TAG( PT_LONG, 33103)
PR_EMS_AB_TURN_REQUEST_THRESHOLD = PROP_TAG( PT_LONG, 33104)
PR_EMS_AB_TWO_WAY_ALTERNATE_FACILITY = PROP_TAG( PT_BOOLEAN, 33105)
PR_EMS_AB_UNAUTH_ORIG_BL = PROP_TAG( PT_MV_TSTRING, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_A = PROP_TAG( PT_MV_STRING8, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_W = PROP_TAG( PT_MV_UNICODE, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_O = PROP_TAG( PT_OBJECT, 33106)
PR_EMS_AB_UNAUTH_ORIG_BL_T = PROP_TAG( PT_MV_TSTRING, 33106)
PR_EMS_AB_USE_SERVER_VALUES = PROP_TAG( PT_BOOLEAN, 33150)
PR_EMS_AB_USER_PASSWORD = PROP_TAG( PT_MV_BINARY, 33107)
PR_EMS_AB_USN_CHANGED = PROP_TAG( PT_LONG, 32809)
PR_EMS_AB_USN_CREATED = PROP_TAG( PT_LONG, 33108)
PR_EMS_AB_USN_DSA_LAST_OBJ_REMOVED = PROP_TAG( PT_LONG, 33109)
PR_EMS_AB_USN_INTERSITE = PROP_TAG( PT_LONG, 33146)
PR_EMS_AB_USN_LAST_OBJ_REM = PROP_TAG( PT_LONG, 33110)
PR_EMS_AB_USN_SOURCE = PROP_TAG( PT_LONG, 33111)
PR_EMS_AB_WWW_HOME_PAGE = PROP_TAG( PT_TSTRING, 33141)
PR_EMS_AB_WWW_HOME_PAGE_A = PROP_TAG( PT_STRING8, 33141)
PR_EMS_AB_WWW_HOME_PAGE_W = PROP_TAG( PT_UNICODE, 33141)
PR_EMS_AB_X121_ADDRESS = PROP_TAG( PT_MV_TSTRING, 33112)
PR_EMS_AB_X121_ADDRESS_A = PROP_TAG( PT_MV_STRING8, 33112)
PR_EMS_AB_X121_ADDRESS_W = PROP_TAG( PT_MV_UNICODE, 33112)
PR_EMS_AB_X25_CALL_USER_DATA_INCOMING = PROP_TAG( PT_BINARY, 33113)
PR_EMS_AB_X25_CALL_USER_DATA_OUTGOING = PROP_TAG( PT_BINARY, 33114)
PR_EMS_AB_X25_FACILITIES_DATA_INCOMING = PROP_TAG( PT_BINARY, 33115)
PR_EMS_AB_X25_FACILITIES_DATA_OUTGOING = PROP_TAG( PT_BINARY, 33116)
PR_EMS_AB_X25_LEASED_LINE_PORT = PROP_TAG( PT_BINARY, 33117)
PR_EMS_AB_X25_LEASED_OR_SWITCHED = PROP_TAG( PT_BOOLEAN, 33118)
PR_EMS_AB_X25_REMOTE_MTA_PHONE = PROP_TAG( PT_TSTRING, 33119)
PR_EMS_AB_X25_REMOTE_MTA_PHONE_A = PROP_TAG( PT_STRING8, 33119)
PR_EMS_AB_X25_REMOTE_MTA_PHONE_W = PROP_TAG( PT_UNICODE, 33119)
PR_EMS_AB_X400_ATTACHMENT_TYPE = PROP_TAG( PT_BINARY, 33120)
PR_EMS_AB_X400_SELECTOR_SYNTAX = PROP_TAG( PT_LONG, 33121)
PR_EMS_AB_X500_ACCESS_CONTROL_LIST = PROP_TAG( PT_BINARY, 33122)
PR_EMS_AB_XMIT_TIMEOUT_NON_URGENT = PROP_TAG( PT_LONG, 33123)
PR_EMS_AB_XMIT_TIMEOUT_NORMAL = PROP_TAG( PT_LONG, 33124)
PR_EMS_AB_XMIT_TIMEOUT_URGENT = PROP_TAG( PT_LONG, 33125)
| 0 | 0 | 0 |
2ee7510b6582c800b601149caa017d21c99b81dc | 631 | py | Python | ocr_attribute_extraction/document.py | IW276/IW276WS21-P19 | ba836967dd80f74ec8a6a4c3fe3a9efc28f1b0d2 | [
"MIT"
] | 1 | 2021-11-26T16:44:43.000Z | 2021-11-26T16:44:43.000Z | ocr_attribute_extraction/document.py | IW276/IW276WS21-P19 | ba836967dd80f74ec8a6a4c3fe3a9efc28f1b0d2 | [
"MIT"
] | null | null | null | ocr_attribute_extraction/document.py | IW276/IW276WS21-P19 | ba836967dd80f74ec8a6a4c3fe3a9efc28f1b0d2 | [
"MIT"
] | 1 | 2021-12-05T18:28:32.000Z | 2021-12-05T18:28:32.000Z | import textwrap
from .attribute_name import AttributeName
| 28.681818 | 79 | 0.55626 | import textwrap
from .attribute_name import AttributeName
class Document:
    """One OCR'd document: its source path, extracted text, and the
    per-attribute detection results.

    Attribute values use a ternary encoding: 1 -> attribute present,
    0 -> attribute absent, -1 (the initial value) -> not yet determined.
    """

    # Human-readable labels for the ternary attribute encoding; anything
    # not listed here (e.g. the initial -1) renders as "Undefined".
    _STATUS_LABELS = {1: "True", 0: "False"}

    def __init__(self, path):
        """Create an empty document for the file at *path*.

        ``text`` starts as None and every known attribute starts as -1
        until an extraction pass fills them in.
        """
        self.path = path
        self.text = None
        self.attributes = {name.value: -1 for name in AttributeName}

    def __str__(self):
        """Render the document as a small human-readable report."""
        attributes_serialized = "\n".join(
            f"{k}: {self._STATUS_LABELS.get(v, 'Undefined')}"
            for k, v in self.attributes.items()
        )
        # Guard against text=None (the state right after __init__), which
        # previously made textwrap.indent raise a TypeError.
        text = self.text if self.text is not None else ""
        return (
            f"# file: {self.path}\n"
            f"text:\n{textwrap.indent(text, '  ')}\n"
            f"attributes:\n{textwrap.indent(attributes_serialized, '  ')}"
        )
| 502 | -6 | 76 |
e4abc59af409a75acee164061f7554cbffdbca77 | 6,975 | py | Python | skills/alexa-prize-news/src/topic_modeling/utils.py | stefanrer/commonbsecret | bb527f9b3e460124ccc307c0d39baba9a2490fcd | [
"Apache-2.0"
] | null | null | null | skills/alexa-prize-news/src/topic_modeling/utils.py | stefanrer/commonbsecret | bb527f9b3e460124ccc307c0d39baba9a2490fcd | [
"Apache-2.0"
] | null | null | null | skills/alexa-prize-news/src/topic_modeling/utils.py | stefanrer/commonbsecret | bb527f9b3e460124ccc307c0d39baba9a2490fcd | [
"Apache-2.0"
] | null | null | null | import os
import gensim
from gensim.utils import simple_preprocess
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from src.consts import (
SubtopicSummaryType,
LDA_SUBTOPIC_KEY_WORDS_PATH,
LDA_SUBTOPIC_KEY_PHRASES_PATH,
LDA_SUBTOPIC_N_WORDS_PATH,
LDA_SUBTOPIC_SENTENCE_PATH,
SUBTOPIC_SUMMARY_TYPE,
MAX_SUBTOPICS_NUM,
N_BASE_UNIGRAMS,
N_PARAMETER,
)
from src.utils import load
# Module-level English Snowball stemmer, shared so each call to the text
# normalization helpers does not rebuild it.
stemmer = SnowballStemmer("english")
topic2original_topic = {
"Arts & Entertainment": [
"Arts & Entertainment",
"Multimedia",
"Arts and Living",
"Entertainment",
"Arts and Living_Books",
"Arts and Living_Food and Dining",
"BookWorld",
"Arts and Living_Movies",
"Arts and Living_Home and Garden",
"Arts and Living_Music",
"Arts and Living_Travel",
"Style",
],
"Business": [
"Business",
"Business_U.S. Economy",
"Economy",
"Capital Business",
"National-Economy",
"Business_Metro Business",
"Economic Policy",
],
"By The Way - Travel": ["By The Way - Travel", "Travel"],
"Climate & Environment": ["Climate & Environment", "Capital Weather Gang", "Animals", "Climate Solutions"],
"D.C., Md. & Va.": ["D.C., Md. & Va."],
"Discussions": ["Discussions", "Live Discussions"],
"Education": [
"Education",
"Higher Education",
"High Schools",
"Colleges",
"KidsPost",
"The Answer Sheet",
"Parenting",
],
"Health": ["Health", "Health_Wires", "Health & Science", "Health-Environment-Science", "National/health-science"],
"History": ["History", "Made by History", "Retropolis"],
"Immigration": ["Immigration"],
"Lifestyle": [
"Lifestyle",
"LocalLiving",
"Lifestyle/food",
"Food",
"Local",
"Obituaries",
"Local-Enterprise",
"The Extras_Montgomery",
"The Extras_Southern Md.",
"The Extras_Fairfax",
"Morning Mix",
"Going Out Guide",
"Weekend",
"Lifestyle/magazine",
"Internet Culture",
"Pop Culture",
"Inspired Life",
"PostEverything",
"Magazine",
"Lifestyle/style",
"Brand-studio",
],
"Live Chats": ["Live Chats"],
"National": ["National", "Nation", "Nationals & MLB", "National-Enterprise"],
"National Security": ["National Security", "National-Security", "Crime", "Cops-Courts", "True Crime", "Military"],
"Opinions": [
"Opinions",
"Editorial-Opinion",
"Opinions_Columnists",
"Opinions_Feedback",
"Local Opinions",
"Global Opinions",
"Opinions_Columns and Blogs",
"Post Opinión",
"Opinions/global-opinions",
"The Plum Line",
"Fact Checker",
],
"Outlook": ["Outlook"],
"Photography": ["Photography"],
"Podcasts": ["Podcasts"],
"Politics": [
"Politics",
"National-Politics",
"Local-Politics",
"Politics_Federal Page",
"Monkey Cage",
"Politics_Elections",
"World_Middle East_Iraq",
"Powerpost",
"powerpost",
"The Fix",
],
"Public Relations": [
"Public Relations",
"The Extras_Prince William",
"The Extras_Prince George's",
"The Extras_Loudoun",
],
"Real Estate": ["Real Estate", "RealEstate"],
"Religion": ["Religion", "OnFaith"],
"Science": ["Science"],
"Sports": [
"Sports",
"Sports_High Schools",
"High School Sports",
"Sports_Redskins",
"Redskins",
"Sports_MLB",
"Sports_Nationals",
"Sports_Wizards",
"Sports_NFL",
"Sports_NBA",
"Sports_Capitals",
"NFL",
"NBA",
"College Sports",
"MLB",
"Washington Nationals",
"D.C. Sports Bog",
"Golf",
"Soccer",
"NHL",
"Fantasy Sports",
"Esports",
],
"Tablet": ["Tablet"],
"Technology": [
"Technology",
"Technology_Personal Tech",
"Technology_Special Reports_Satellite Radio",
"Tech Policy",
"Innovations",
],
"Topics": ["Topics"],
"Transportation": [
"Transportation",
"Metro_Obituaries",
"Metro_Virginia",
"Metro_The District",
"Gridlock",
"Metro_Crime",
"Metro_Maryland",
"Metro_Maryland_Montgomery",
"Future of Transportation",
"Metro_Maryland_Pr. George's",
"Metro",
"Cars",
"Development-Transportation",
],
"U.S. Policy": ["U.S. Policy"],
"Utils": ["Utils", "Express", "Print_A Section", "Print", "Print_Editorial Pages", "Print_Style Print_Weekend"],
"Video Games": ["Video Games", "Video Game News", "Video Gaming"],
"Washington Post Live": [
"Washington Post Live",
"Washington Post Magazine",
"Washington Post PR Blog",
"The Washington Post Magazine",
"Washington Wizards",
"Washington Capitals",
],
"World": ["World", "Foreign", "World_Asia/Pacific", "Europe", "Asia", "Africa"],
}
# Inverse lookup of topic2original_topic: maps every raw/source section name
# back to its single canonical topic key.
original_topic2topic = {v: k for k, vs in topic2original_topic.items() for v in vs}
| 28.125 | 120 | 0.594982 | import os
import gensim
from gensim.utils import simple_preprocess
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from src.consts import (
SubtopicSummaryType,
LDA_SUBTOPIC_KEY_WORDS_PATH,
LDA_SUBTOPIC_KEY_PHRASES_PATH,
LDA_SUBTOPIC_N_WORDS_PATH,
LDA_SUBTOPIC_SENTENCE_PATH,
SUBTOPIC_SUMMARY_TYPE,
MAX_SUBTOPICS_NUM,
N_BASE_UNIGRAMS,
N_PARAMETER,
)
from src.utils import load
# Module-level English Snowball stemmer, shared so each call to the text
# normalization helpers does not rebuild it.
stemmer = SnowballStemmer("english")
topic2original_topic = {
"Arts & Entertainment": [
"Arts & Entertainment",
"Multimedia",
"Arts and Living",
"Entertainment",
"Arts and Living_Books",
"Arts and Living_Food and Dining",
"BookWorld",
"Arts and Living_Movies",
"Arts and Living_Home and Garden",
"Arts and Living_Music",
"Arts and Living_Travel",
"Style",
],
"Business": [
"Business",
"Business_U.S. Economy",
"Economy",
"Capital Business",
"National-Economy",
"Business_Metro Business",
"Economic Policy",
],
"By The Way - Travel": ["By The Way - Travel", "Travel"],
"Climate & Environment": ["Climate & Environment", "Capital Weather Gang", "Animals", "Climate Solutions"],
"D.C., Md. & Va.": ["D.C., Md. & Va."],
"Discussions": ["Discussions", "Live Discussions"],
"Education": [
"Education",
"Higher Education",
"High Schools",
"Colleges",
"KidsPost",
"The Answer Sheet",
"Parenting",
],
"Health": ["Health", "Health_Wires", "Health & Science", "Health-Environment-Science", "National/health-science"],
"History": ["History", "Made by History", "Retropolis"],
"Immigration": ["Immigration"],
"Lifestyle": [
"Lifestyle",
"LocalLiving",
"Lifestyle/food",
"Food",
"Local",
"Obituaries",
"Local-Enterprise",
"The Extras_Montgomery",
"The Extras_Southern Md.",
"The Extras_Fairfax",
"Morning Mix",
"Going Out Guide",
"Weekend",
"Lifestyle/magazine",
"Internet Culture",
"Pop Culture",
"Inspired Life",
"PostEverything",
"Magazine",
"Lifestyle/style",
"Brand-studio",
],
"Live Chats": ["Live Chats"],
"National": ["National", "Nation", "Nationals & MLB", "National-Enterprise"],
"National Security": ["National Security", "National-Security", "Crime", "Cops-Courts", "True Crime", "Military"],
"Opinions": [
"Opinions",
"Editorial-Opinion",
"Opinions_Columnists",
"Opinions_Feedback",
"Local Opinions",
"Global Opinions",
"Opinions_Columns and Blogs",
"Post Opinión",
"Opinions/global-opinions",
"The Plum Line",
"Fact Checker",
],
"Outlook": ["Outlook"],
"Photography": ["Photography"],
"Podcasts": ["Podcasts"],
"Politics": [
"Politics",
"National-Politics",
"Local-Politics",
"Politics_Federal Page",
"Monkey Cage",
"Politics_Elections",
"World_Middle East_Iraq",
"Powerpost",
"powerpost",
"The Fix",
],
"Public Relations": [
"Public Relations",
"The Extras_Prince William",
"The Extras_Prince George's",
"The Extras_Loudoun",
],
"Real Estate": ["Real Estate", "RealEstate"],
"Religion": ["Religion", "OnFaith"],
"Science": ["Science"],
"Sports": [
"Sports",
"Sports_High Schools",
"High School Sports",
"Sports_Redskins",
"Redskins",
"Sports_MLB",
"Sports_Nationals",
"Sports_Wizards",
"Sports_NFL",
"Sports_NBA",
"Sports_Capitals",
"NFL",
"NBA",
"College Sports",
"MLB",
"Washington Nationals",
"D.C. Sports Bog",
"Golf",
"Soccer",
"NHL",
"Fantasy Sports",
"Esports",
],
"Tablet": ["Tablet"],
"Technology": [
"Technology",
"Technology_Personal Tech",
"Technology_Special Reports_Satellite Radio",
"Tech Policy",
"Innovations",
],
"Topics": ["Topics"],
"Transportation": [
"Transportation",
"Metro_Obituaries",
"Metro_Virginia",
"Metro_The District",
"Gridlock",
"Metro_Crime",
"Metro_Maryland",
"Metro_Maryland_Montgomery",
"Future of Transportation",
"Metro_Maryland_Pr. George's",
"Metro",
"Cars",
"Development-Transportation",
],
"U.S. Policy": ["U.S. Policy"],
"Utils": ["Utils", "Express", "Print_A Section", "Print", "Print_Editorial Pages", "Print_Style Print_Weekend"],
"Video Games": ["Video Games", "Video Game News", "Video Gaming"],
"Washington Post Live": [
"Washington Post Live",
"Washington Post Magazine",
"Washington Post PR Blog",
"The Washington Post Magazine",
"Washington Wizards",
"Washington Capitals",
],
"World": ["World", "Foreign", "World_Asia/Pacific", "Europe", "Asia", "Africa"],
}
# Inverse lookup of topic2original_topic: maps every raw/source section name
# back to its single canonical topic key.
original_topic2topic = {v: k for k, vs in topic2original_topic.items() for v in vs}
def load_subtopic_summaries():
    """Load the persisted subtopic summaries for the configured summary
    type (key words, key phrases, first-N words, or whole sentence)."""
    paths_by_type = {
        SubtopicSummaryType.KEY_WORDS: LDA_SUBTOPIC_KEY_WORDS_PATH,
        SubtopicSummaryType.KEY_PHRASES: LDA_SUBTOPIC_KEY_PHRASES_PATH,
        SubtopicSummaryType.N_WORDS: LDA_SUBTOPIC_N_WORDS_PATH,
        SubtopicSummaryType.SENTENCE: LDA_SUBTOPIC_SENTENCE_PATH,
    }
    summary_path = paths_by_type[SUBTOPIC_SUMMARY_TYPE]
    return load(summary_path)
def remove_file(path):
    """Delete the regular file at *path*.

    A missing path (or one that is not a regular file, e.g. a directory)
    is silently ignored.
    """
    if not os.path.isfile(path):
        return
    os.remove(path)
def remove_subtopic_summaries():
    """Delete every cached subtopic-summary artifact, regardless of which
    summary type is currently configured."""
    summary_paths = (
        LDA_SUBTOPIC_KEY_WORDS_PATH,
        LDA_SUBTOPIC_KEY_PHRASES_PATH,
        LDA_SUBTOPIC_N_WORDS_PATH,
        LDA_SUBTOPIC_SENTENCE_PATH,
    )
    for summary_path in summary_paths:
        remove_file(summary_path)
def check_parameters(parameters):
    """Return *parameters* if it matches the currently configured LDA
    settings; return None for a None input or any mismatch.

    Like the original implementation, a non-None mapping missing one of
    the expected keys raises KeyError.
    """
    if parameters is None:
        return None
    expected = {
        "max_subtopic_num": MAX_SUBTOPICS_NUM,
        "n_base_unigrams": N_BASE_UNIGRAMS,
        "n_parameter": N_PARAMETER,
    }
    if all(parameters[key] == value for key, value in expected.items()):
        return parameters
    return None
def get_parameters():
    """Return a snapshot of the currently configured LDA settings."""
    return {
        "max_subtopic_num": MAX_SUBTOPICS_NUM,
        "n_base_unigrams": N_BASE_UNIGRAMS,
        "n_parameter": N_PARAMETER,
    }
def lemmatize_stemming(text):
    """Lemmatize *text* as a verb, then stem the result with the shared
    English Snowball stemmer."""
    lemma = WordNetLemmatizer().lemmatize(text, pos="v")
    return stemmer.stem(lemma)
def paired(text):
    """Turn a token list into space-joined adjacent bigrams.

    Sequences with fewer than two tokens are returned unchanged (the
    same object, not a copy).
    """
    if len(text) < 2:
        return text
    return [f"{left} {right}" for left, right in zip(text, text[1:])]
def preprocess(word):
    """Tokenize *word* with gensim (tokens of >= 4 chars), drop gensim
    stopwords, lemmatize+stem the survivors, and return the first
    normalized token ("" when nothing survives)."""
    kept = [
        token
        for token in simple_preprocess(word, min_len=4)
        if token not in gensim.parsing.preprocessing.STOPWORDS
    ]
    normalized = [lemmatize_stemming(token) for token in kept]
    return normalized[0] if normalized else ""
| 1,434 | 0 | 184 |
b85c5ac31e81accba9f9068b7d0c87bc1933a1bc | 796 | py | Python | examples/custom_algorithms/RandomSignalSelector.py | LuisCerdenoMota/SHERLOCK | 5fb52795d3ab44e27bc7dbc6f2c2e6c214995ba1 | [
"MIT"
] | 20 | 2020-09-25T13:18:46.000Z | 2022-03-09T14:01:03.000Z | examples/custom_algorithms/RandomSignalSelector.py | anthuil/SHERLOCK | e768a6375ded6c3ba1d07784afc2682e95228d3a | [
"MIT"
] | 74 | 2020-09-22T12:19:28.000Z | 2022-01-12T13:53:35.000Z | examples/custom_algorithms/RandomSignalSelector.py | anthuil/SHERLOCK | e768a6375ded6c3ba1d07784afc2682e95228d3a | [
"MIT"
] | 5 | 2020-10-19T10:01:05.000Z | 2021-12-16T10:23:24.000Z | import random
from sherlockpipe.scoring.SignalSelector import SignalSelector, SignalSelection
| 44.222222 | 99 | 0.719849 | import random
from sherlockpipe.scoring.SignalSelector import SignalSelector, SignalSelection
class RandomSignalSelector(SignalSelector):
    """Example SHERLOCK signal selector that picks one detected transit
    candidate uniformly at random and scores it with a simple SNR cut."""

    def __init__(self):
        super().__init__()

    def select(self, transit_results, snr_min, detrend_method, wl):
        """Pick a random candidate from *transit_results*.

        The chosen candidate scores 1 when its SNR exceeds *snr_min*,
        otherwise 0. *detrend_method* and *wl* belong to the
        SignalSelector interface but are unused by this random strategy.
        """
        # Bug fix: randrange(0, len(x) - 1, 1) excluded the last candidate
        # (stop is exclusive) and raised ValueError for a single-candidate
        # list; randrange(len(x)) samples every valid index.
        best_signal_snr_index = random.randrange(len(transit_results))
        best_signal = transit_results[best_signal_snr_index]
        best_signal_snr = best_signal.snr
        if best_signal_snr > snr_min:  # and SDE[a] > SDE_min and FAP[a] < FAP_max):
            best_signal_score = 1
        else:
            best_signal_score = 0
        # You could also extend SignalSelector class to provide more information about your choice.
        return SignalSelection(best_signal_score, best_signal_snr_index, best_signal)
43d1223f1a74a2aa5e5e638344493b7690b598af | 810 | py | Python | prediction/models/NERMicroservice/config.py | UMass-Rescue/CombinedTechStack | b3447b174d9798f3baf9bf6509b4cc14a5bd225a | [
"MIT"
] | null | null | null | prediction/models/NERMicroservice/config.py | UMass-Rescue/CombinedTechStack | b3447b174d9798f3baf9bf6509b4cc14a5bd225a | [
"MIT"
] | 32 | 2021-03-17T13:17:22.000Z | 2021-05-04T14:25:31.000Z | prediction/models/NERMicroservice/config.py | UMass-Rescue/CombinedTechStack | b3447b174d9798f3baf9bf6509b4cc14a5bd225a | [
"MIT"
] | 1 | 2021-03-24T13:47:44.000Z | 2021-03-24T13:47:44.000Z | # Give a name to describe this model. The name should conform to python variable naming conventions, and should be
# only a single word.
model_name = 'ner_text'
# Tags are used to describe the performance of a model. These simple keywords can help people decide whether your model
# is appropriate to use for their situation. Some examples of tags are 'fast', 'accurate', or 'essential'. You should
# limit the number of tags your model has to only contain a few with relevant information.
model_tags = 'huggingface,ner,text'
# The model type determines what inputs your model will receive. The options are:
# - 'image' : Model receives a file name to an image file, opens it, and creates a prediction
# - 'text' : Model receives a string of text and uses it to create a prediction.
model_type = 'text' | 57.857143 | 119 | 0.759259 | # Give a name to describe this model. The name should conform to python variable naming conventions, and should be
# only a single word.
model_name = 'ner_text'
# Tags are used to describe the performance of a model. These simple keywords can help people decide whether your model
# is appropriate to use for their situation. Some examples of tags are 'fast', 'accurate', or 'essential'. You should
# limit the number of tags your model has to only contain a few with relevant information.
model_tags = 'huggingface,ner,text'
# The model type determines what inputs your model will receive. The options are:
# - 'image' : Model receives a file name to an image file, opens it, and creates a prediction
# - 'text' : Model receives a string of text and uses it to create a prediction.
model_type = 'text' | 0 | 0 | 0 |
657edb8efd3ce356dba16837de7a86adaf1c99a4 | 1,661 | py | Python | seed/seed.py | mattkantor/basic-flask-app | ec893ca44b1c9c4772c24b81394b58644fefd29a | [
"MIT"
] | null | null | null | seed/seed.py | mattkantor/basic-flask-app | ec893ca44b1c9c4772c24b81394b58644fefd29a | [
"MIT"
] | null | null | null | seed/seed.py | mattkantor/basic-flask-app | ec893ca44b1c9c4772c24b81394b58644fefd29a | [
"MIT"
] | null | null | null | import random
import requests
import json
from flask import Flask
from app import create_app, init_app
from app.models import *
from faker import Faker
faker = Faker()
valid_email = "matthewkantor@gmail.com"
valid_password = "password"
valid_user = "mattkantor"
app = create_app()
seed()
| 23.069444 | 123 | 0.562914 | import random
import requests
import json
from flask import Flask
from app import create_app, init_app
from app.models import *
from faker import Faker
faker = Faker()
valid_email = "matthewkantor@gmail.com"
valid_password = "password"
valid_user = "mattkantor"
app = create_app()
def seed():
with app.app_context():
Group.query.delete()
News.query.delete()
users = User.query.all()
for user in users:
db.session.delete(user)
db.session.commit()
matt_user = User(email=valid_email, username=valid_user)
matt_user.set_password(valid_password)
db.session.add(matt_user)
db.session.commit()
for i in range(1,5):
user = User(email=faker.email(), password="password", username=(faker.first_name()+faker.first_name()).lower())
user.set_password(valid_password)
db.session.add(user)
db.session.commit()
uuid = user.uuid
if i % 2 ==0:
matt_user.follow(user)
db.session.commit()
random.seed(10)
with open('seed/news.json') as f:
data = json.load(f)
for j in range(1,3):
news_json = data[random.randint(0, 10)]
news = News(user_id=matt_user.id, title=news_json['title'], url=news_json["url"])
db.session.add(news)
db.session.commit()
for i in range(1,4):
group = Group(name=faker.city(), user_id=user.id)
db.session.add(group)
db.session.commit()
seed()
| 1,334 | 0 | 23 |
890ca65bde6475505e40106fce6464f436c86a5d | 1,124 | py | Python | sitrep.py | wjegbert/SitRep | e718bd2a7502ac8302fcf301b84bc23bcf4b9768 | [
"MIT"
] | 3 | 2019-11-07T04:18:56.000Z | 2019-11-15T17:52:20.000Z | sitrep.py | wjegbert/SitRep | e718bd2a7502ac8302fcf301b84bc23bcf4b9768 | [
"MIT"
] | 1 | 2019-11-09T17:34:12.000Z | 2019-11-09T17:34:12.000Z | sitrep.py | wjegbert/SitRep | e718bd2a7502ac8302fcf301b84bc23bcf4b9768 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 19:56:24 2019
@author: William
"""
import cv2
import winsound
import time
goal = 25
n = 0
cv2.VideoCapture(0).release
HAARPATH = "haarcascade/haarcascade_frontalface_default.xml"
cap=cv2.VideoCapture(0)
face_detect=cv2.CascadeClassifier(HAARPATH)
faces =[]
prevface = []
time.sleep(15) #Gives user 15 seconds to get into position
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if n>=goal:
break
# Our operations on the frame go here
if ret is True:
prevface = faces
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces=face_detect.detectMultiScale(gray, 1.3, 7)
if len(faces) > 0 and len(prevface) == 0:
n = n+1
winsound.MessageBeep(winsound.MB_ICONHAND)
print (n)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print ("\a")
cap.release()
cv2.destroyAllWindows()
| 23.416667 | 61 | 0.590747 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 19:56:24 2019
@author: William
"""
import cv2
import winsound
import time
goal = 25
n = 0
cv2.VideoCapture(0).release
HAARPATH = "haarcascade/haarcascade_frontalface_default.xml"
cap=cv2.VideoCapture(0)
face_detect=cv2.CascadeClassifier(HAARPATH)
faces =[]
prevface = []
time.sleep(15) #Gives user 15 seconds to get into position
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
if n>=goal:
break
# Our operations on the frame go here
if ret is True:
prevface = faces
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces=face_detect.detectMultiScale(gray, 1.3, 7)
if len(faces) > 0 and len(prevface) == 0:
n = n+1
winsound.MessageBeep(winsound.MB_ICONHAND)
print (n)
for (x,y,w,h) in faces:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print ("\a")
cap.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
d7c3b23dce5ccf76f3434a372f1c6f86b868906f | 559 | py | Python | mliot/mliot/status_messages.py | BrMorrison/EVT_Brickhack18 | e31035187f85ad18e9a71ebf62acd3b5ca40d3e4 | [
"MIT"
] | null | null | null | mliot/mliot/status_messages.py | BrMorrison/EVT_Brickhack18 | e31035187f85ad18e9a71ebf62acd3b5ca40d3e4 | [
"MIT"
] | null | null | null | mliot/mliot/status_messages.py | BrMorrison/EVT_Brickhack18 | e31035187f85ad18e9a71ebf62acd3b5ca40d3e4 | [
"MIT"
] | null | null | null | MESSAGE_DICT = {
'17':"Feed me!",
'33':"Things could be going better. :-(",
'65':"Do I want to do work: NOP. :-/",
'18':"This isn't the largest program I have every stored.",
'34':"I am happy!",
'66':"My processor is idling! :p",
'20':"Instructions! OM, NOP, NOP, NOP!",
'36':"I am very happy!",
'68':"I have a lot of energy! Electricity!!!",
'24':"Ooh, are all these instructions for me?",
'40':"I am ecstatic today! Brick hack is so exciting. :-)",
'72':"I feel like my processor speed increased tenfold!",
}
| 37.266667 | 63 | 0.581395 | MESSAGE_DICT = {
'17':"Feed me!",
'33':"Things could be going better. :-(",
'65':"Do I want to do work: NOP. :-/",
'18':"This isn't the largest program I have every stored.",
'34':"I am happy!",
'66':"My processor is idling! :p",
'20':"Instructions! OM, NOP, NOP, NOP!",
'36':"I am very happy!",
'68':"I have a lot of energy! Electricity!!!",
'24':"Ooh, are all these instructions for me?",
'40':"I am ecstatic today! Brick hack is so exciting. :-)",
'72':"I feel like my processor speed increased tenfold!",
}
| 0 | 0 | 0 |
5f53572176d76f6c8d0efe985afbcb0047b52b80 | 2,705 | py | Python | hub/api/views/hub.py | harenlewis/api-hub | f79cd8b82e95c039269765a4542866286803a322 | [
"MIT"
] | null | null | null | hub/api/views/hub.py | harenlewis/api-hub | f79cd8b82e95c039269765a4542866286803a322 | [
"MIT"
] | 2 | 2020-06-05T19:41:09.000Z | 2021-06-10T21:07:30.000Z | hub/api/views/hub.py | harenlewis/api-hub | f79cd8b82e95c039269765a4542866286803a322 | [
"MIT"
] | null | null | null | import json
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes
from django.db.transaction import atomic
from django.shortcuts import get_object_or_404
from django.core.exceptions import ValidationError
from hub.models.types import METHOD_TYPES_DICT, RESP_TYPES_DICT, JSON
from hub.models import Project, Api, APIPermissions
from hub.api.serializers import ProjectSerializer
from hub.api.pagination import StandardResultsPagination
@permission_classes((IsAuthenticated,))
@api_view(['GET', 'POST', 'PUT', 'DELETE'])
| 39.202899 | 131 | 0.701664 | import json
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes
from django.db.transaction import atomic
from django.shortcuts import get_object_or_404
from django.core.exceptions import ValidationError
from hub.models.types import METHOD_TYPES_DICT, RESP_TYPES_DICT, JSON
from hub.models import Project, Api, APIPermissions
from hub.api.serializers import ProjectSerializer
from hub.api.pagination import StandardResultsPagination
@permission_classes((IsAuthenticated,))
@api_view(['GET', 'POST', 'PUT', 'DELETE'])
def api_hub_view(request, *args, **kwargs):
error = {'errorMsg': ''}
path = kwargs.get('path', '').lstrip('/').rstrip('/')
project_uuid = request.subdomain
user = request.user
req_method = request.method
if path == '' or project_uuid == '':
error['errorMsg'] = 'Not a valid url'
return Response(error, status=status.HTTP_400_BAD_REQUEST)
try:
project = Project.objects.get(uuid=project_uuid)
except Project.DoesNotExist:
error['errorMsg'] = 'Project or API does not exists.'
return Response(error, status=status.HTTP_400_BAD_REQUEST)
except ValidationError:
error['errorMsg'] = 'Project or API does not exists.'
return Response(error, status=status.HTTP_400_BAD_REQUEST)
# check if permission given by admin.
has_proj_perm = (APIPermissions
.objects
.filter(user_id=user.id, project_id=project.id)
.exists()
)
# check if user has permission on the project to add api's
if not (user.id == project.created_by.id or has_proj_perm):
error['errorMsg'] = 'No permission on this project'
return Response(error, status=status.HTTP_403_FORBIDDEN)
method_val = METHOD_TYPES_DICT.get(req_method, None)
if method_val is None:
error['errorMsg'] = 'Not a valid method.'
return Response(error, status=status.HTTP_400_BAD_REQUEST)
api_qs = (Api
.objects
.filter(project_id=project.id, path=path, method=method_val)
)
if not api_qs.exists():
error['errorMsg'] = "We were unable to find any matching requests for this method type and the mock path in your projects."
return Response(error, status=status.HTTP_400_BAD_REQUEST)
api = api_qs[0]
content_type = RESP_TYPES_DICT.get(api.get_res_type_display(), None)
return Response(api.res_body, content_type=content_type, status=status.HTTP_200_OK)
| 1,972 | 0 | 22 |
2380a187690c8d48e3cde402f44c488638a00fc8 | 26,135 | py | Python | flightdeck/jetpack/models.py | majacQ/FlightDeck | 4ed2e630024374e1ac9f4a8ced8acd0ed7883d76 | [
"MIT"
] | 1 | 2019-12-29T05:58:53.000Z | 2019-12-29T05:58:53.000Z | flightdeck/jetpack/models.py | majacQ/FlightDeck | 4ed2e630024374e1ac9f4a8ced8acd0ed7883d76 | [
"MIT"
] | null | null | null | flightdeck/jetpack/models.py | majacQ/FlightDeck | 4ed2e630024374e1ac9f4a8ced8acd0ed7883d76 | [
"MIT"
] | 2 | 2016-01-16T13:59:52.000Z | 2021-04-25T15:38:26.000Z | import os
import csv
import shutil
from copy import deepcopy
from exceptions import TypeError
from django.db.models.signals import pre_save, post_save
from django.db import models
from django.utils import simplejson
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from jetpack import settings
from jetpack.managers import PackageManager
from jetpack.errors import SelfDependencyException, FilenameExistException, \
UpdateDeniedException, AddingModuleDenied, AddingAttachmentDenied
from jetpack.xpi_utils import sdk_copy, xpi_build, xpi_remove
PERMISSION_CHOICES = (
(0, 'private'),
(1, 'view'),
(2, 'do not copy'),
(3, 'edit')
)
TYPE_CHOICES = (
('l', 'Library'),
('a', 'Add-on')
)
class Package(models.Model):
"""
Holds the meta data shared across all PackageRevisions
"""
# identification
# it can be the same as database id, but if we want to copy the database
# some day or change to a document-oriented database it would be bad
# to have this relied on any database model
id_number = models.CharField(max_length=255, unique=True, blank=True)
# name of the Package
full_name = models.CharField(max_length=255)
# made from the full_name
# it is used to create Package directory for export
name = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
# type - determining ability to specific options
type = models.CharField(max_length=30, choices=TYPE_CHOICES)
# author is the first person who created the Package
author = models.ForeignKey(User, related_name='packages_originated')
# is the Package visible for public?
public_permission = models.IntegerField(
choices=PERMISSION_CHOICES,
default=1, blank=True)
# url for the Manifest
url = models.URLField(verify_exists=False, blank=True, default='')
# license on which this package is rekeased to the public
license = models.CharField(max_length=255, blank=True, default='')
# where to export modules
lib_dir = models.CharField(max_length=100, blank=True, null=True)
# this is set in the PackageRevision.set_version
version_name = models.CharField(max_length=250, blank=True, null=True,
default=settings.INITIAL_VERSION_NAME)
version = models.ForeignKey('PackageRevision', blank=True, null=True, related_name='package_deprecated')
latest = models.ForeignKey('PackageRevision', blank=True, null=True, related_name='package_deprecated2')
private_key = models.TextField(blank=True, null=True)
public_key = models.TextField(blank=True, null=True)
jid = models.CharField(max_length=255, blank=True, null=True)
program_id = models.CharField(max_length=255, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
last_update = models.DateTimeField(auto_now=True)
objects = PackageManager()
##################
# Methods
def create_id_number(self):
"""
get the highest id number and increment it
"""
all_packages = Package.objects.all().order_by('-id_number')
return str(int(all_packages[0].id_number) + 1) if all_packages else str(settings.MINIMUM_PACKAGE_ID)
def generate_key(self):
"""
create keypair, program_id and jid
"""
from ecdsa import SigningKey, NIST256p
from cuddlefish.preflight import vk_to_jid, jid_to_programid, my_b32encode
sk = SigningKey.generate(curve=NIST256p)
sk_text = "private-jid0-%s" % my_b32encode(sk.to_string())
vk = sk.get_verifying_key()
vk_text = "public-jid0-%s" % my_b32encode(vk.to_string())
self.jid = vk_to_jid(vk)
self.program_id = jid_to_programid(self.jid)
self.private_key = sk_text
self.public_key = vk_text
def make_dir(self, packages_dir):
"""
create package directories inside packages
return package directory name
"""
package_dir = '%s/%s' % (packages_dir, self.get_unique_package_name())
os.mkdir(package_dir)
os.mkdir('%s/%s' % (package_dir, self.get_lib_dir()))
if not os.path.isdir('%s/%s' % (package_dir, self.get_data_dir())):
os.mkdir('%s/%s' % (package_dir, self.get_data_dir()))
return package_dir
def copy(self, author):
"""
create copy of the package
"""
new_p = Package(
full_name=self.get_copied_full_name(),
description=self.description,
type=self.type,
author=author,
public_permission=self.public_permission,
url=self.url,
license=self.license,
lib_dir=self.lib_dir
)
new_p.save()
return new_p
class PackageRevision(models.Model):
"""
contains data which may be changed and rolled back
"""
package = models.ForeignKey(Package, related_name='revisions')
# public version name
# this is a tag used to mark important revisions
version_name = models.CharField(max_length=250, blank=True, null=True,
default=settings.INITIAL_VERSION_NAME)
# this makes the revision unique across the same package/user
revision_number = models.IntegerField(blank=True, default=0)
# commit message
message = models.TextField(blank=True)
# Libraries on which current package depends
dependencies = models.ManyToManyField('self', blank=True, null=True,
symmetrical=False)
# from which revision this mutation was originated
origin = models.ForeignKey('PackageRevision', related_name='mutations',
blank=True, null=True)
# person who owns this revision
author = models.ForeignKey(User, related_name='package_revisions')
created_at = models.DateTimeField(auto_now_add=True)
#contributors for Manifest
contributors = models.CharField(max_length=255, blank=True, default='')
# main for the Manifest
module_main = models.CharField(max_length=100, blank=True, default='main')
######################
# Manifest
def get_full_description(self):
" return joined description "
description = self.package.description
if self.message:
description = "%s\n%s" % (description, self.message)
return description
def get_full_rendered_description(self):
" return description prepared for rendering "
return "<p>%s</p>" % self.get_full_description().replace("\n","<br/>")
def get_main_module(self):
" return executable Module for Add-ons "
if type == 'l': return None
# find main module
main = self.modules.filter(filename=self.module_main)
if not main:
raise Exception('Every Add-on needs to be linked with an executable Module')
return main[0]
######################
# revision save methods
def save(self, **kwargs):
"""
overloading save is needed to prevent from updating the same revision
use super(PackageRevision, self).save(**kwargs) if needed
"""
if self.id:
# create new revision
return self.save_new_revision(**kwargs)
return super(PackageRevision, self).save(**kwargs)
def save_new_revision(self, package=None, **kwargs):
" save self as new revision with link to the origin. "
origin = deepcopy(self)
if package:
self.package = package
self.author = package.author
self.id = None
self.version_name = None
self.origin = origin
self.revision_number = self.get_next_revision_number()
save_return = super(PackageRevision, self).save(**kwargs)
# reassign all dependencies
for dep in origin.dependencies.all():
self.dependencies.add(dep)
for mod in origin.modules.all():
self.modules.add(mod)
for att in origin.attachments.all():
self.attachments.add(att)
self.package.latest = self
self.package.save()
if package:
self.set_version('copy')
return save_return
def get_next_revision_number(self):
"""
find latest revision_number for the self.package and self.user
@return latest revisiion number or 1
"""
revision_numbers = PackageRevision.objects.filter(
author__username=self.author.username,
package__id_number=self.package.id_number
).order_by('-revision_number')
return revision_numbers[0].revision_number + 1 if revision_numbers else 1
def set_version(self, version_name, current=True):
"""
@param String version_name: name of the version
@param Boolean current: should the version become a current one
@returns result of save revision
Set the version_name
update the PackageRevision obeying the overload save
Set current Package:version_name and Package:version if current
"""
# check if there isn't a version with such a name
revisions = PackageRevision.objects.filter(package__pk=self.package.pk)
for revision in revisions:
if revision.version_name == version_name:
version_name = ''
#raise Exception("There is already a revision with that name")
self.version_name = version_name
if current:
self.package.version_name = version_name
self.package.version = self
self.package.save()
return super(PackageRevision, self).save()
def module_create(self, **kwargs):
" create module and add to modules "
# validate if given filename is valid
if not self.validate_module_filename(kwargs['filename']):
raise FilenameExistException(
'Sorry, there is already a module in your add-on with the name "%s". Each module in your add-on needs to have a unique name.' % kwargs['filename']
)
mod = Module.objects.create(**kwargs)
self.module_add(mod)
return mod
def module_add(self, mod):
" copy to new revision, add module "
# save as new version
# validate if given filename is valid
if not self.validate_module_filename(mod.filename):
raise FilenameExistException(
'Sorry, there is already a module in your add-on with the name "%s". Each module in your add-on needs to have a unique name.' % mod.filename
)
"""
I think it's not necessary
TODO: check integration
for rev in mod.revisions.all():
if rev.package.id_number != self.package.id_number:
raise AddingModuleDenied('this module is already assigned to other Library - %s' % rev.package.get_unique_package_name())
"""
self.save()
return self.modules.add(mod)
def module_remove(self, mod):
" copy to new revision, remove module "
# save as new version
self.save()
return self.modules.remove(mod)
def module_update(self, mod):
" to update a module, new package revision has to be created "
self.save()
self.modules.remove(mod)
mod.id = None
mod.save()
self.modules.add(mod)
def modules_update(self, modules):
" update more than one module "
self.save()
for mod in modules:
self.modules.remove(mod)
mod.id = None
mod.save()
self.modules.add(mod)
def attachment_create(self, **kwargs):
" create attachment and add to attachments "
# validate if given filename is valid
if not self.validate_attachment_filename(kwargs['filename'], kwargs['ext']):
raise FilenameExistException(
'Sorry, there is already an attachment in your add-on with the name "%s.%s". Each attachment in your add-on needs to have a unique name.' % (
kwargs['filename'], kwargs['ext']
)
)
att = Attachment.objects.create(**kwargs)
self.attachment_add(att)
return att
def attachment_add(self, att):
" copy to new revision, add attachment "
# save as new version
# validate if given filename is valid
if not self.validate_attachment_filename(att.filename, att.ext):
raise FilenameExistException(
'Attachment with filename %s.%s already exists' % (att.filename, att.ext)
)
"""
for rev in att.revisions.all():
if rev.package.id_number != self.package.id_number:
raise AddingAttachmentDenied('this attachment is already assigned to other Library - %s' % rev.package.get_unique_package_name())
"""
self.save()
return self.attachments.add(att)
def attachment_remove(self, dep):
" copy to new revision, remove attachment "
# save as new version
self.save()
return self.attachments.remove(dep)
def dependency_add(self, dep):
" copy to new revision, add dependency (existing Library - PackageVersion) "
# a PackageRevision has to depend on the LibraryRevision only
if dep.package.type != 'l':
raise TypeError('Dependency has to be a Library')
# a LibraryRevision can't depend on another LibraryRevision linked with the same
# Library
if dep.package.id_number == self.package.id_number:
raise SelfDependencyException('A Library can not depend on itself!')
# dependency have to be unique in the PackageRevision
deps = self.dependencies.all()
for d in deps:
if d.package.pk == dep.package.pk:
raise Exception('Your add-on is already using "%s" by %s.' % (dep.package.full_name, dep.package.author.get_profile()));
# save as new version
self.save()
return self.dependencies.add(dep)
def dependency_remove(self, dep):
" copy to new revision, remove dependency "
# save as new version
self.save()
return self.dependencies.remove(dep)
def dependency_remove_by_id_number(self, id_number):
" find dependency by its id_number call dependency_remove "
for dep in self.dependencies.all():
if dep.package.id_number == id_number:
self.dependency_remove(dep)
return True
raise Exception('There is no such library in this %s' % self.package.get_type_name())
def build_xpi(self):
" prepare and build XPI "
if self.package.type == 'l':
raise Exception('only Add-ons may build a XPI')
sdk_dir = self.get_sdk_dir()
# TODO: consider SDK staying per PackageRevision...
if os.path.isdir(sdk_dir):
xpi_remove(sdk_dir)
sdk_copy(sdk_dir)
self.export_keys(sdk_dir)
self.export_files_with_dependencies('%s/packages' % sdk_dir)
return (xpi_build(sdk_dir,
'%s/packages/%s' % (sdk_dir, self.package.get_unique_package_name()))
)
def build_xpi_test(self, modules):
" prepare and build XPI for test only (unsaved modules) "
if self.package.type == 'l':
raise Exception('only Add-ons may build a XPI')
sdk_dir = self.get_sdk_dir()
# TODO: consider SDK staying per PackageRevision...
if os.path.isdir(sdk_dir):
xpi_remove(sdk_dir)
sdk_copy(sdk_dir)
self.export_keys(sdk_dir)
packages_dir = '%s/packages' % sdk_dir
package_dir = self.package.make_dir(packages_dir)
self.export_manifest(package_dir)
# instead of export modules
lib_dir = '%s/%s' % (package_dir, self.package.get_lib_dir())
for mod in self.modules.all():
mod_edited = False
for e_mod in modules:
if e_mod.pk == mod.pk:
mod_edited = True
e_mod.export_code(lib_dir)
if not mod_edited:
mod.export_code(lib_dir)
self.export_attachments('%s/%s' % (package_dir, self.package.get_data_dir()))
self.export_dependencies(packages_dir)
return (xpi_build(sdk_dir,
'%s/packages/%s' % (sdk_dir, self.package.get_unique_package_name()))
)
def export_keys(self, sdk_dir):
" export private and public keys "
keydir = '%s/%s' % (sdk_dir, settings.KEYDIR)
if not os.path.isdir(keydir):
os.mkdir(keydir)
handle = open('%s/%s' % (keydir, self.package.jid), 'w')
handle.write('private-key:%s\n' % self.package.private_key)
handle.write('public-key:%s' % self.package.public_key)
handle.close()
class Module(models.Model):
" the only way to 'change' the module is to assign it to different PackageRequest "
revisions = models.ManyToManyField(PackageRevision,
related_name='modules', blank=True)
# name of the Module - it will be used as javascript file name
filename = models.CharField(max_length=255)
# Code of the module
code = models.TextField(blank=True)
# user who has written current revision of the module
author = models.ForeignKey(User, related_name='module_revisions')
#################################################################################
## Catching Signals
pre_save.connect(set_package_id_number, sender=Package)
pre_save.connect(make_name, sender=Package)
pre_save.connect(make_keypair_on_create, sender=Package)
def save_first_revision(instance, **kwargs):
"""
Create first PackageRevision
"""
if kwargs.get('raw', False): return
# only for the new Package
if not kwargs.get('created', False): return
revision = PackageRevision(package=instance, author=instance.author)
revision.save()
instance.version = revision
instance.latest = revision
if instance.is_addon():
mod = Module.objects.create(
filename=revision.module_main,
author=instance.author,
code="""// This is an active module of the %s Add-on
exports.main = function() {};""" % instance.full_name
)
revision.modules.add(mod)
instance.save()
post_save.connect(save_first_revision, sender=Package)
| 30.603044 | 150 | 0.723666 | import os
import csv
import shutil
from copy import deepcopy
from exceptions import TypeError
from django.db.models.signals import pre_save, post_save
from django.db import models
from django.utils import simplejson
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from jetpack import settings
from jetpack.managers import PackageManager
from jetpack.errors import SelfDependencyException, FilenameExistException, \
UpdateDeniedException, AddingModuleDenied, AddingAttachmentDenied
from jetpack.xpi_utils import sdk_copy, xpi_build, xpi_remove
PERMISSION_CHOICES = (
(0, 'private'),
(1, 'view'),
(2, 'do not copy'),
(3, 'edit')
)
TYPE_CHOICES = (
('l', 'Library'),
('a', 'Add-on')
)
class Package(models.Model):
"""
Holds the meta data shared across all PackageRevisions
"""
# identification
# it can be the same as database id, but if we want to copy the database
# some day or change to a document-oriented database it would be bad
# to have this relied on any database model
id_number = models.CharField(max_length=255, unique=True, blank=True)
# name of the Package
full_name = models.CharField(max_length=255)
# made from the full_name
# it is used to create Package directory for export
name = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
# type - determining ability to specific options
type = models.CharField(max_length=30, choices=TYPE_CHOICES)
# author is the first person who created the Package
author = models.ForeignKey(User, related_name='packages_originated')
# is the Package visible for public?
public_permission = models.IntegerField(
choices=PERMISSION_CHOICES,
default=1, blank=True)
# url for the Manifest
url = models.URLField(verify_exists=False, blank=True, default='')
# license on which this package is rekeased to the public
license = models.CharField(max_length=255, blank=True, default='')
# where to export modules
lib_dir = models.CharField(max_length=100, blank=True, null=True)
# this is set in the PackageRevision.set_version
version_name = models.CharField(max_length=250, blank=True, null=True,
default=settings.INITIAL_VERSION_NAME)
version = models.ForeignKey('PackageRevision', blank=True, null=True, related_name='package_deprecated')
latest = models.ForeignKey('PackageRevision', blank=True, null=True, related_name='package_deprecated2')
private_key = models.TextField(blank=True, null=True)
public_key = models.TextField(blank=True, null=True)
jid = models.CharField(max_length=255, blank=True, null=True)
program_id = models.CharField(max_length=255, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
last_update = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('-last_update','-created_at')
objects = PackageManager()
##################
# Methods
def __unicode__(self):
return '%s v. %s by %s' % (self.full_name, self.version_name, self.author)
def get_absolute_url(self):
return reverse('jp_%s_details' % self.get_type_name(),
args=[self.id_number])
def get_latest_url(self):
return reverse('jp_%s_latest' % self.get_type_name(),
args=[self.id_number])
def get_edit_latest_url(self):
return reverse('jp_%s_edit_latest' % self.get_type_name(),
args=[self.id_number])
def is_addon(self):
return self.type == 'a'
def is_library(self):
return self.type == 'l'
def get_type_name(self):
return settings.PACKAGE_SINGULAR_NAMES[self.type]
def get_lib_dir(self):
return self.lib_dir or settings.DEFAULT_LIB_DIR
def get_data_dir(self):
return settings.DEFAULT_DATA_DIR
def get_unique_package_name(self):
return "%s-%s" % (self.name, self.id_number)
def set_name(self):
self.name = self.make_name()
def make_name(self):
return slugify(self.full_name)
def create_id_number(self):
"""
get the highest id number and increment it
"""
all_packages = Package.objects.all().order_by('-id_number')
return str(int(all_packages[0].id_number) + 1) if all_packages else str(settings.MINIMUM_PACKAGE_ID)
def generate_key(self):
"""
create keypair, program_id and jid
"""
from ecdsa import SigningKey, NIST256p
from cuddlefish.preflight import vk_to_jid, jid_to_programid, my_b32encode
sk = SigningKey.generate(curve=NIST256p)
sk_text = "private-jid0-%s" % my_b32encode(sk.to_string())
vk = sk.get_verifying_key()
vk_text = "public-jid0-%s" % my_b32encode(vk.to_string())
self.jid = vk_to_jid(vk)
self.program_id = jid_to_programid(self.jid)
self.private_key = sk_text
self.public_key = vk_text
def make_dir(self, packages_dir):
"""
create package directories inside packages
return package directory name
"""
package_dir = '%s/%s' % (packages_dir, self.get_unique_package_name())
os.mkdir(package_dir)
os.mkdir('%s/%s' % (package_dir, self.get_lib_dir()))
if not os.path.isdir('%s/%s' % (package_dir, self.get_data_dir())):
os.mkdir('%s/%s' % (package_dir, self.get_data_dir()))
return package_dir
def get_copied_full_name(self):
full_name = self.full_name
if not full_name.startswith('Copy of'):
full_name = "Copy of %s" % full_name
return full_name
def copy(self, author):
"""
create copy of the package
"""
new_p = Package(
full_name=self.get_copied_full_name(),
description=self.description,
type=self.type,
author=author,
public_permission=self.public_permission,
url=self.url,
license=self.license,
lib_dir=self.lib_dir
)
new_p.save()
return new_p
class PackageRevision(models.Model):
    """
    contains data which may be changed and rolled back

    Every mutation of a Package (module/attachment/dependency change)
    creates a new PackageRevision pointing back at its origin, so the
    full history can be inspected.
    """
    package = models.ForeignKey(Package, related_name='revisions')
    # public version name
    # this is a tag used to mark important revisions
    version_name = models.CharField(max_length=250, blank=True, null=True,
                                    default=settings.INITIAL_VERSION_NAME)
    # this makes the revision unique across the same package/user
    revision_number = models.IntegerField(blank=True, default=0)
    # commit message
    message = models.TextField(blank=True)
    # Libraries on which current package depends
    dependencies = models.ManyToManyField('self', blank=True, null=True,
                                          symmetrical=False)
    # from which revision this mutation was originated
    origin = models.ForeignKey('PackageRevision', related_name='mutations',
                               blank=True, null=True)
    # person who owns this revision
    author = models.ForeignKey(User, related_name='package_revisions')
    created_at = models.DateTimeField(auto_now_add=True)
    # contributors for Manifest
    contributors = models.CharField(max_length=255, blank=True, default='')
    # main for the Manifest
    module_main = models.CharField(max_length=100, blank=True, default='main')

    class Meta:
        ordering = ('-revision_number',)
        unique_together = ('package', 'author', 'revision_number')

    def __unicode__(self):
        version = 'v. %s ' % self.version_name if self.version_name else ''
        return '%s - %s %sr. %d by %s' % (
            settings.PACKAGE_SINGULAR_NAMES[self.package.type],
            self.package.full_name, version,
            self.revision_number, self.author.get_profile()
        )

    def get_absolute_url(self):
        " url to the view page of this revision (package page if current) "
        if self.version_name:
            if self.package.version.revision_number == self.revision_number:
                return self.package.get_absolute_url()
            return reverse(
                'jp_%s_version_details' % settings.PACKAGE_SINGULAR_NAMES[self.package.type],
                args=[self.package.id_number, self.version_name])
        return reverse(
            'jp_%s_revision_details' % settings.PACKAGE_SINGULAR_NAMES[self.package.type],
            args=[self.package.id_number, self.revision_number])

    def get_edit_url(self):
        " url of the edit page for this revision "
        return reverse(
            'jp_%s_revision_edit' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_save_url(self):
        " url used to save changes of this revision "
        return reverse(
            'jp_%s_revision_save' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_add_module_url(self):
        " url to add a module to this revision "
        return reverse(
            'jp_%s_revision_add_module' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_remove_module_url(self):
        " url to remove a module from this revision "
        return reverse(
            'jp_%s_revision_remove_module' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_add_attachment_url(self):
        " url to add an attachment to this revision "
        return reverse(
            'jp_%s_revision_add_attachment' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_remove_attachment_url(self):
        " url to remove an attachment from this revision "
        return reverse(
            'jp_%s_revision_remove_attachment' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_assign_library_url(self):
        " url to assign a library (dependency) to this revision "
        return reverse(
            'jp_%s_revision_assign_library' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_remove_library_url(self):
        " url to remove a library (dependency) from this revision "
        return reverse(
            'jp_%s_revision_remove_library' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    def get_test_xpi_url(self):
        " url to build a test XPI; only meaningful for Add-ons "
        if self.package.type != 'a':
            raise Exception('XPI might be created only from an Add-on')
        return reverse(
            'jp_addon_revision_test', args=[self.package.id_number, self.revision_number])

    def get_download_xpi_url(self):
        " url to download the built XPI; only meaningful for Add-ons "
        if self.package.type != 'a':
            raise Exception('XPI might be created only from an Add-on')
        return reverse(
            'jp_addon_revision_xpi', args=[self.package.id_number, self.revision_number])

    def get_copy_url(self):
        " url to copy the package from this revision "
        return reverse(
            'jp_%s_revision_copy' % self.package.get_type_name(),
            args=[self.package.id_number, self.revision_number])

    ######################
    # Manifest

    def get_contributors_list(self):
        " parse the comma separated ``contributors`` field into a list "
        # a single CSV row; returns None when the field is empty
        # (csv.reader yields no rows for an empty string)
        csv_r = csv.reader([self.contributors], skipinitialspace=True)
        for c in csv_r:
            return c

    def get_dependencies_list(self):
        " unique package names of all dependencies, jetpack-core first "
        deps = ['jetpack-core']
        deps.extend([dep.package.get_unique_package_name()
                     for dep in self.dependencies.all()])
        return deps

    def get_full_description(self):
        " return joined description "
        description = self.package.description
        if self.message:
            description = "%s\n%s" % (description, self.message)
        return description

    def get_full_rendered_description(self):
        " return description prepared for rendering "
        return "<p>%s</p>" % self.get_full_description().replace("\n", "<br/>")

    def get_manifest(self, test_in_browser=False):
        """
        build the package.json manifest dict for this revision

        @param Boolean test_in_browser: mark the version string as a test
        """
        if self.version_name:
            version = self.version_name
        else:
            version = "%s rev. %d" % (self.package.version_name, self.revision_number)
        if test_in_browser:
            version = "%s - test" % version
        name = self.package.name if self.package.is_addon() else self.package.get_unique_package_name()
        manifest = {
            'fullName': self.package.full_name,
            'name': name,
            'description': self.get_full_description(),
            'author': self.package.author.username,
            'id': self.package.jid if self.package.is_addon() else self.package.id_number,
            'version': version,
            'dependencies': self.get_dependencies_list(),
            'license': self.package.license,
            'url': str(self.package.url),
            'contributors': self.get_contributors_list(),
            'lib': self.package.get_lib_dir()
        }
        # only Add-ons have an executable entry point
        if self.package.is_addon():
            manifest['main'] = self.module_main
        return manifest

    def get_manifest_json(self, **kwargs):
        " manifest serialized to a JSON string "
        return simplejson.dumps(self.get_manifest(**kwargs))

    def get_main_module(self):
        " return executable Module for Add-ons, None for Libraries "
        # bugfix: the original compared the *builtin* ``type`` to 'l'
        # (always False); the package type has to be checked instead
        if self.package.type == 'l':
            return None
        # find main module
        main = self.modules.filter(filename=self.module_main)
        if not main:
            raise Exception('Every Add-on needs to be linked with an executable Module')
        return main[0]

    ######################
    # revision save methods

    def save(self, **kwargs):
        """
        overloading save is needed to prevent from updating the same revision

        use super(PackageRevision, self).save(**kwargs) if needed
        """
        if self.id:
            # create new revision
            return self.save_new_revision(**kwargs)
        return super(PackageRevision, self).save(**kwargs)

    def save_new_revision(self, package=None, **kwargs):
        " save self as new revision with link to the origin. "
        origin = deepcopy(self)
        if package:
            # copying to a different package - take over its authorship
            self.package = package
            self.author = package.author
        # clearing the pk forces an INSERT of a fresh row
        self.id = None
        self.version_name = None
        self.origin = origin
        self.revision_number = self.get_next_revision_number()
        save_return = super(PackageRevision, self).save(**kwargs)
        # reassign all dependencies
        for dep in origin.dependencies.all():
            self.dependencies.add(dep)
        for mod in origin.modules.all():
            self.modules.add(mod)
        for att in origin.attachments.all():
            self.attachments.add(att)
        self.package.latest = self
        self.package.save()
        if package:
            self.set_version('copy')
        return save_return

    def get_next_revision_number(self):
        """
        find latest revision_number for the self.package and self.user

        @return latest revisiion number or 1
        """
        revision_numbers = PackageRevision.objects.filter(
            author__username=self.author.username,
            package__id_number=self.package.id_number
        ).order_by('-revision_number')
        return revision_numbers[0].revision_number + 1 if revision_numbers else 1

    def set_version(self, version_name, current=True):
        """
        @param String version_name: name of the version
        @param Boolean current: should the version become a current one
        @returns result of save revision

        Set the version_name
        update the PackageRevision obeying the overload save
        Set current Package:version_name and Package:version if current
        """
        # check if there isn't a version with such a name
        revisions = PackageRevision.objects.filter(package__pk=self.package.pk)
        for revision in revisions:
            if revision.version_name == version_name:
                # fall back to an empty name instead of failing
                version_name = ''
                #raise Exception("There is already a revision with that name")
        self.version_name = version_name
        if current:
            self.package.version_name = version_name
            self.package.version = self
            self.package.save()
        return super(PackageRevision, self).save()

    def validate_module_filename(self, filename):
        " True if no module in this revision uses ``filename`` yet "
        for mod in self.modules.all():
            if mod.filename == filename:
                return False
        return True

    def validate_attachment_filename(self, filename, ext):
        " True if no attachment in this revision uses ``filename.ext`` yet "
        for mod in self.attachments.all():
            if mod.filename == filename and mod.ext == ext:
                return False
        return True

    def module_create(self, **kwargs):
        " create module and add to modules "
        # validate if given filename is valid
        if not self.validate_module_filename(kwargs['filename']):
            raise FilenameExistException(
                'Sorry, there is already a module in your add-on with the name "%s". Each module in your add-on needs to have a unique name.' % kwargs['filename']
            )
        mod = Module.objects.create(**kwargs)
        self.module_add(mod)
        return mod

    def module_add(self, mod):
        " copy to new revision, add module "
        # save as new version
        # validate if given filename is valid
        if not self.validate_module_filename(mod.filename):
            raise FilenameExistException(
                'Sorry, there is already a module in your add-on with the name "%s". Each module in your add-on needs to have a unique name.' % mod.filename
            )
        # NOTE: cross-package assignment check was considered unnecessary;
        # TODO: check integration
        #   for rev in mod.revisions.all():
        #       if rev.package.id_number != self.package.id_number:
        #           raise AddingModuleDenied('this module is already assigned to other Library - %s' % rev.package.get_unique_package_name())
        self.save()
        return self.modules.add(mod)

    def module_remove(self, mod):
        " copy to new revision, remove module "
        # save as new version
        self.save()
        return self.modules.remove(mod)

    def module_update(self, mod):
        " to update a module, new package revision has to be created "
        self.save()
        self.modules.remove(mod)
        # modules are immutable rows; clearing pk creates a new one
        mod.id = None
        mod.save()
        self.modules.add(mod)

    def modules_update(self, modules):
        " update more than one module "
        self.save()
        for mod in modules:
            self.modules.remove(mod)
            mod.id = None
            mod.save()
            self.modules.add(mod)

    def attachment_create(self, **kwargs):
        " create attachment and add to attachments "
        # validate if given filename is valid
        if not self.validate_attachment_filename(kwargs['filename'], kwargs['ext']):
            raise FilenameExistException(
                'Sorry, there is already an attachment in your add-on with the name "%s.%s". Each attachment in your add-on needs to have a unique name.' % (
                    kwargs['filename'], kwargs['ext']
                )
            )
        att = Attachment.objects.create(**kwargs)
        self.attachment_add(att)
        return att

    def attachment_add(self, att):
        " copy to new revision, add attachment "
        # save as new version
        # validate if given filename is valid
        if not self.validate_attachment_filename(att.filename, att.ext):
            raise FilenameExistException(
                'Attachment with filename %s.%s already exists' % (att.filename, att.ext)
            )
        # NOTE: cross-package assignment check was considered unnecessary;
        #   for rev in att.revisions.all():
        #       if rev.package.id_number != self.package.id_number:
        #           raise AddingAttachmentDenied('this attachment is already assigned to other Library - %s' % rev.package.get_unique_package_name())
        self.save()
        return self.attachments.add(att)

    def attachment_remove(self, dep):
        " copy to new revision, remove attachment "
        # save as new version
        self.save()
        return self.attachments.remove(dep)

    def dependency_add(self, dep):
        " copy to new revision, add dependency (existing Library - PackageVersion) "
        # a PackageRevision has to depend on the LibraryRevision only
        if dep.package.type != 'l':
            raise TypeError('Dependency has to be a Library')
        # a LibraryRevision can't depend on another LibraryRevision linked with the same
        # Library
        if dep.package.id_number == self.package.id_number:
            raise SelfDependencyException('A Library can not depend on itself!')
        # dependency have to be unique in the PackageRevision
        deps = self.dependencies.all()
        for d in deps:
            if d.package.pk == dep.package.pk:
                raise Exception('Your add-on is already using "%s" by %s.' % (dep.package.full_name, dep.package.author.get_profile()))
        # save as new version
        self.save()
        return self.dependencies.add(dep)

    def dependency_remove(self, dep):
        " copy to new revision, remove dependency "
        # save as new version
        self.save()
        return self.dependencies.remove(dep)

    def dependency_remove_by_id_number(self, id_number):
        " find dependency by its id_number call dependency_remove "
        for dep in self.dependencies.all():
            if dep.package.id_number == id_number:
                self.dependency_remove(dep)
                return True
        raise Exception('There is no such library in this %s' % self.package.get_type_name())

    def get_dependencies_list_json(self):
        " JSON list of dependencies (for the client side) "
        # an empty related set simply serializes to []; no need to
        # evaluate the queryset twice as the original code did
        l = [{
            'full_name': d.package.full_name,
            'view_url': d.get_absolute_url(),
            'edit_url': d.get_edit_url()
            } for d in self.dependencies.all()]
        return simplejson.dumps(l)

    def get_modules_list_json(self):
        " JSON list of modules (for the client side) "
        l = [{
            'filename': m.filename,
            'author': m.author.username,
            'executable': self.module_main == m.filename
            } for m in self.modules.all()]
        return simplejson.dumps(l)

    def get_sdk_name(self):
        " unique identifier of the SDK working copy for this revision "
        return '%s-%s' % (self.package.id_number, self.revision_number)

    def get_sdk_dir(self):
        " filesystem directory of the SDK working copy for this revision "
        return '%s-%s' % (settings.SDKDIR_PREFIX, self.get_sdk_name())

    def build_xpi(self):
        " prepare and build XPI "
        if self.package.type == 'l':
            raise Exception('only Add-ons may build a XPI')
        sdk_dir = self.get_sdk_dir()
        # TODO: consider SDK staying per PackageRevision...
        if os.path.isdir(sdk_dir):
            xpi_remove(sdk_dir)
        sdk_copy(sdk_dir)
        self.export_keys(sdk_dir)
        self.export_files_with_dependencies('%s/packages' % sdk_dir)
        return (xpi_build(sdk_dir,
                          '%s/packages/%s' % (sdk_dir, self.package.get_unique_package_name()))
                )

    def build_xpi_test(self, modules):
        " prepare and build XPI for test only (unsaved modules) "
        if self.package.type == 'l':
            raise Exception('only Add-ons may build a XPI')
        sdk_dir = self.get_sdk_dir()
        # TODO: consider SDK staying per PackageRevision...
        if os.path.isdir(sdk_dir):
            xpi_remove(sdk_dir)
        sdk_copy(sdk_dir)
        self.export_keys(sdk_dir)
        packages_dir = '%s/packages' % sdk_dir
        package_dir = self.package.make_dir(packages_dir)
        self.export_manifest(package_dir)
        # instead of export modules - prefer the unsaved (edited) code
        lib_dir = '%s/%s' % (package_dir, self.package.get_lib_dir())
        for mod in self.modules.all():
            mod_edited = False
            for e_mod in modules:
                if e_mod.pk == mod.pk:
                    mod_edited = True
                    e_mod.export_code(lib_dir)
            if not mod_edited:
                mod.export_code(lib_dir)
        self.export_attachments('%s/%s' % (package_dir, self.package.get_data_dir()))
        self.export_dependencies(packages_dir)
        return (xpi_build(sdk_dir,
                          '%s/packages/%s' % (sdk_dir, self.package.get_unique_package_name()))
                )

    def export_keys(self, sdk_dir):
        " export private and public keys "
        keydir = '%s/%s' % (sdk_dir, settings.KEYDIR)
        if not os.path.isdir(keydir):
            os.mkdir(keydir)
        handle = open('%s/%s' % (keydir, self.package.jid), 'w')
        # close the handle even if a write fails
        try:
            handle.write('private-key:%s\n' % self.package.private_key)
            handle.write('public-key:%s' % self.package.public_key)
        finally:
            handle.close()

    def export_manifest(self, package_dir):
        " write package.json into ``package_dir`` "
        handle = open('%s/package.json' % package_dir, 'w')
        try:
            handle.write(self.get_manifest_json())
        finally:
            handle.close()

    def export_modules(self, lib_dir):
        " write all module sources into ``lib_dir`` "
        for mod in self.modules.all():
            mod.export_code(lib_dir)

    def export_attachments(self, static_dir):
        " copy all attachment files into ``static_dir`` "
        for att in self.attachments.all():
            att.export_file(static_dir)

    def export_dependencies(self, packages_dir):
        " recursively export all dependent libraries into ``packages_dir`` "
        for lib in self.dependencies.all():
            lib.export_files_with_dependencies(packages_dir)

    def export_files(self, packages_dir):
        " export manifest, modules and attachments of this revision "
        package_dir = self.package.make_dir(packages_dir)
        self.export_manifest(package_dir)
        self.export_modules('%s/%s' % (package_dir, self.package.get_lib_dir()))
        self.export_attachments('%s/%s' % (package_dir, self.package.get_data_dir()))

    def export_files_with_dependencies(self, packages_dir):
        " export own files plus the whole dependency tree "
        self.export_files(packages_dir)
        self.export_dependencies(packages_dir)

    def get_version_name(self):
        " human readable revision label, e.g. 'initial rev. 3' "
        name = '%s ' % self.version_name if self.version_name else ''
        return '%srev. %s' % (name, self.revision_number)
class Module(models.Model):
    " the only way to 'change' the module is to assign it to different PackageRequest "
    revisions = models.ManyToManyField(PackageRevision,
                                       related_name='modules', blank=True)
    # name of the Module - it will be used as javascript file name
    filename = models.CharField(max_length=255)
    # Code of the module
    code = models.TextField(blank=True)
    # user who has written current revision of the module
    author = models.ForeignKey(User, related_name='module_revisions')

    class Meta:
        ordering = ('filename',)

    def __unicode__(self):
        return '%s by %s (%s)' % (self.get_filename(), self.author, self.get_package_fullName())

    def get_package(self):
        " return the Package of the first linked revision, None if unassigned "
        try:
            return self.revisions.all()[0].package
        except IndexError:
            # bugfix: was a bare ``except`` which silently swallowed every
            # error (including database failures); only an empty revisions
            # set legitimately means "no package"
            return None

    def get_package_fullName(self):
        " full name of the owning Package, '' when unassigned "
        package = self.get_package()
        return package.full_name if package else ''

    def get_filename(self):
        " module filename with the javascript extension appended "
        return "%s.js" % self.filename

    def save(self, **kwargs):
        " modules are immutable rows - updating in place is denied "
        if self.id:
            raise UpdateDeniedException('Module can not be updated in the same row')
        return super(Module, self).save(**kwargs)

    def export_code(self, lib_dir):
        " write the module source to ``<lib_dir>/<filename>.js`` "
        handle = open('%s/%s.js' % (lib_dir, self.filename), 'w')
        # close the handle even if the write fails
        try:
            handle.write(self.code)
        finally:
            handle.close()
class Attachment(models.Model):
    " a static file uploaded to a PackageRevision (immutable row) "
    revisions = models.ManyToManyField(PackageRevision,
                                       related_name='attachments', blank=True)
    # filename of the attachment
    filename = models.CharField(max_length=255)
    # extension name
    ext = models.CharField(max_length=10)
    # upload path
    path = models.CharField(max_length=255)
    # user who has uploaded the file
    author = models.ForeignKey(User, related_name='attachments')
    # mime will help with displaying the attachment
    mimetype = models.CharField(max_length=255, blank=True, null=True)

    class Meta:
        ordering = ('filename',)

    def get_filename(self):
        " filename with its extension (when present) appended "
        if self.ext:
            return "%s.%s" % (self.filename, self.ext)
        return self.filename

    def save(self, **kwargs):
        " attachments are immutable rows - updating in place is denied "
        if self.id:
            raise UpdateDeniedException('Attachment can not be updated in the same row')
        return super(Attachment, self).save(**kwargs)

    def export_file(self, static_dir):
        " copy the uploaded file from the upload area into ``static_dir`` "
        source = '%s/%s' % (settings.UPLOAD_DIR, self.path)
        destination = '%s/%s.%s' % (static_dir, self.filename, self.ext)
        shutil.copy(source, destination)

    def get_display_url(self):
        " url under which the attachment is served "
        return reverse('jp_attachment', args=[self.path])
#################################################################################
## Catching Signals
def set_package_id_number(instance, **kwargs):
    " pre_save handler: assign an id_number to a freshly created Package "
    # skip fixture loading (raw) and already persisted rows
    if kwargs.get('raw', False) or instance.id:
        return
    instance.id_number = instance.create_id_number()
# signal registration: give every new Package an id_number before first save
pre_save.connect(set_package_id_number, sender=Package)
def make_name(instance, **kwargs):
    " pre_save handler: derive the short name for a Package lacking one "
    if kwargs.get('raw', False):
        # fixture loading - leave data untouched
        return
    if instance.name:
        return
    instance.set_name()
# signal registration: make sure every Package has a short name before save
pre_save.connect(make_name, sender=Package)
def make_keypair_on_create(instance, **kwargs):
    " pre_save handler: generate a signing keypair for brand-new Add-ons "
    # skip fixture loading (raw) and already persisted rows
    if kwargs.get('raw', False) or instance.id:
        return
    if instance.is_addon():
        instance.generate_key()
# signal registration: equip new Add-on packages with a keypair before save
pre_save.connect(make_keypair_on_create, sender=Package)
def save_first_revision(instance, **kwargs):
    """
    Create first PackageRevision

    post_save handler: a brand-new Package gets its initial revision
    and, when it is an Add-on, a boilerplate main Module.
    """
    if kwargs.get('raw', False):
        return
    # only for the new Package
    if not kwargs.get('created', False):
        return
    first = PackageRevision(package=instance, author=instance.author)
    first.save()
    instance.version = first
    instance.latest = first
    if instance.is_addon():
        boilerplate = """// This is an active module of the %s Add-on
exports.main = function() {};""" % instance.full_name
        main_module = Module.objects.create(
            filename=first.module_main,
            author=instance.author,
            code=boilerplate
        )
        first.modules.add(main_module)
    instance.save()
# signal registration: bootstrap the first revision after a Package is created
post_save.connect(save_first_revision, sender=Package)
| 7,604 | 794 | 1,343 |
6fc8c42ea911cd5193b0a9fdd5f775a823869a3f | 700 | py | Python | setup.py | KedoKudo/finddata | 215ca88285108316f89e0b19ba723e070df7358f | [
"MIT"
] | null | null | null | setup.py | KedoKudo/finddata | 215ca88285108316f89e0b19ba723e070df7358f | [
"MIT"
] | 2 | 2019-03-06T20:57:13.000Z | 2019-12-12T14:34:29.000Z | setup.py | KedoKudo/finddata | 215ca88285108316f89e0b19ba723e070df7358f | [
"MIT"
] | 5 | 2016-11-17T18:54:28.000Z | 2022-01-03T18:54:49.000Z | import versioneer # https://github.com/warner/python-versioneer
from setuptools import setup
setup(name="finddata",
version=versioneer.get_version(), #"0.2.2",
cmdclass=versioneer.get_cmdclass(),
description = "Find data files using ONCat",
author = "Pete Peterson",
author_email = "petersonpf@ornl.gov",
url = "http://github.com/peterfpeterson/finddata/",
long_description = """This package uses ONCat at SNS to find NeXus files.""",
license = "The MIT License (MIT)",
scripts=["scripts/finddata"],
packages=["finddata"],
package_dir={},#'finddata': '.'},
data_files=[('/etc/bash_completion.d/', ['finddata.bashcomplete'])]
)
| 38.888889 | 83 | 0.657143 | import versioneer # https://github.com/warner/python-versioneer
from setuptools import setup
setup(name="finddata",
version=versioneer.get_version(), #"0.2.2",
cmdclass=versioneer.get_cmdclass(),
description = "Find data files using ONCat",
author = "Pete Peterson",
author_email = "petersonpf@ornl.gov",
url = "http://github.com/peterfpeterson/finddata/",
long_description = """This package uses ONCat at SNS to find NeXus files.""",
license = "The MIT License (MIT)",
scripts=["scripts/finddata"],
packages=["finddata"],
package_dir={},#'finddata': '.'},
data_files=[('/etc/bash_completion.d/', ['finddata.bashcomplete'])]
)
| 0 | 0 | 0 |
3bc88d4611a3e607e32422d3e25324d2297935dc | 1,839 | py | Python | zobs/nmbgmr_manual_etl.py | NMWDI/airflow-dags | dbbd90bbd4fe890396b6f27cfbc560c042d3b2b8 | [
"Apache-2.0"
] | null | null | null | zobs/nmbgmr_manual_etl.py | NMWDI/airflow-dags | dbbd90bbd4fe890396b6f27cfbc560c042d3b2b8 | [
"Apache-2.0"
] | null | null | null | zobs/nmbgmr_manual_etl.py | NMWDI/airflow-dags | dbbd90bbd4fe890396b6f27cfbc560c042d3b2b8 | [
"Apache-2.0"
] | null | null | null | # ===============================================================================
# Copyright 2021 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from bq_etl_levels import default_args, get_sql, BigQueryETLLevels
with DAG('NMBGMR_MANUAL_ETL0.7',
# schedule_interval='*/10 * * * *',
schedule_interval='@daily',
max_active_runs=1,
catchup=False,
default_args=default_args) as dag:
gsm = PythonOperator(task_id='get_manual_sql', python_callable=get_sql_manual)
gm = BigQueryETLLevels('Water Well',
('Ground Water Levels', {'agency': 'NMBGMR'}),
('Manual', 'Manual measurement of groundwater depth by field technician'),
('Depth to Water Below Land Surface', 'depth to water below land surface'),
task_id='etl_manual_levels', sql_task_id='get_manual_sql')
gsm >> gm
# ============= EOF =============================================
| 42.767442 | 102 | 0.600326 | # ===============================================================================
# Copyright 2021 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from bq_etl_levels import default_args, get_sql, BigQueryETLLevels
with DAG('NMBGMR_MANUAL_ETL0.7',
# schedule_interval='*/10 * * * *',
schedule_interval='@daily',
max_active_runs=1,
catchup=False,
default_args=default_args) as dag:
def get_sql_manual(**context):
tabblename = 'nmbgmr_manual_level_tbl'
task_id = 'etl_manual_levels'
return get_sql(task_id, tabblename, context)
gsm = PythonOperator(task_id='get_manual_sql', python_callable=get_sql_manual)
gm = BigQueryETLLevels('Water Well',
('Ground Water Levels', {'agency': 'NMBGMR'}),
('Manual', 'Manual measurement of groundwater depth by field technician'),
('Depth to Water Below Land Surface', 'depth to water below land surface'),
task_id='etl_manual_levels', sql_task_id='get_manual_sql')
gsm >> gm
# ============= EOF =============================================
| 147 | 0 | 26 |
a8769746ce0a6de9fca01a73707a04481ae09326 | 32,917 | py | Python | third_party/saltedge/swagger_client/models/attempt.py | ltowarek/budget-supervisor | 862a2d720aecd4ad2fded9c63bc839190ebbc77e | [
"MIT"
] | 1 | 2022-03-01T10:28:31.000Z | 2022-03-01T10:28:31.000Z | third_party/saltedge/swagger_client/models/attempt.py | ltowarek/budget-supervisor | 862a2d720aecd4ad2fded9c63bc839190ebbc77e | [
"MIT"
] | 75 | 2020-11-07T20:14:55.000Z | 2021-10-05T15:08:22.000Z | third_party/saltedge/swagger_client/models/attempt.py | ltowarek/budget-supervisor | 862a2d720aecd4ad2fded9c63bc839190ebbc77e | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Attempt(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_mode': 'str',
'api_version': 'str',
'automatic_fetch': 'bool',
'daily_refresh': 'bool',
'categorization': 'str',
'created_at': 'datetime',
'custom_fields': 'object',
'device_type': 'str',
'remote_ip': 'str',
'exclude_accounts': 'list[str]',
'user_present': 'bool',
'customer_last_logged_at': 'datetime',
'fail_at': 'datetime',
'fail_error_class': 'str',
'fail_message': 'str',
'fetch_scopes': 'list[str]',
'finished': 'bool',
'finished_recent': 'bool',
'from_date': 'date',
'id': 'str',
'interactive': 'bool',
'locale': 'str',
'partial': 'bool',
'store_credentials': 'bool',
'success_at': 'datetime',
'to_date': 'datetime',
'updated_at': 'datetime',
'show_consent_confirmation': 'bool',
'include_natures': 'list[str]',
'stages': 'list[Stage]'
}
attribute_map = {
'api_mode': 'api_mode',
'api_version': 'api_version',
'automatic_fetch': 'automatic_fetch',
'daily_refresh': 'daily_refresh',
'categorization': 'categorization',
'created_at': 'created_at',
'custom_fields': 'custom_fields',
'device_type': 'device_type',
'remote_ip': 'remote_ip',
'exclude_accounts': 'exclude_accounts',
'user_present': 'user_present',
'customer_last_logged_at': 'customer_last_logged_at',
'fail_at': 'fail_at',
'fail_error_class': 'fail_error_class',
'fail_message': 'fail_message',
'fetch_scopes': 'fetch_scopes',
'finished': 'finished',
'finished_recent': 'finished_recent',
'from_date': 'from_date',
'id': 'id',
'interactive': 'interactive',
'locale': 'locale',
'partial': 'partial',
'store_credentials': 'store_credentials',
'success_at': 'success_at',
'to_date': 'to_date',
'updated_at': 'updated_at',
'show_consent_confirmation': 'show_consent_confirmation',
'include_natures': 'include_natures',
'stages': 'stages'
}
def __init__(self, api_mode=None, api_version=None, automatic_fetch=None, daily_refresh=None, categorization='personal', created_at=None, custom_fields=None, device_type=None, remote_ip=None, exclude_accounts=None, user_present=None, customer_last_logged_at=None, fail_at=None, fail_error_class=None, fail_message=None, fetch_scopes=None, finished=None, finished_recent=None, from_date=None, id=None, interactive=None, locale=None, partial=None, store_credentials=None, success_at=None, to_date=None, updated_at=None, show_consent_confirmation=None, include_natures=None, stages=None): # noqa: E501
"""Attempt - a model defined in Swagger""" # noqa: E501
self._api_mode = None
self._api_version = None
self._automatic_fetch = None
self._daily_refresh = None
self._categorization = None
self._created_at = None
self._custom_fields = None
self._device_type = None
self._remote_ip = None
self._exclude_accounts = None
self._user_present = None
self._customer_last_logged_at = None
self._fail_at = None
self._fail_error_class = None
self._fail_message = None
self._fetch_scopes = None
self._finished = None
self._finished_recent = None
self._from_date = None
self._id = None
self._interactive = None
self._locale = None
self._partial = None
self._store_credentials = None
self._success_at = None
self._to_date = None
self._updated_at = None
self._show_consent_confirmation = None
self._include_natures = None
self._stages = None
self.discriminator = None
self.api_mode = api_mode
self.api_version = api_version
self.automatic_fetch = automatic_fetch
self.daily_refresh = daily_refresh
self.categorization = categorization
self.created_at = created_at
self.custom_fields = custom_fields
self.device_type = device_type
self.remote_ip = remote_ip
self.exclude_accounts = exclude_accounts
self.user_present = user_present
self.customer_last_logged_at = customer_last_logged_at
self.fail_at = fail_at
self.fail_error_class = fail_error_class
self.fail_message = fail_message
self.fetch_scopes = fetch_scopes
self.finished = finished
self.finished_recent = finished_recent
self.from_date = from_date
self.id = id
self.interactive = interactive
self.locale = locale
self.partial = partial
self.store_credentials = store_credentials
self.success_at = success_at
self.to_date = to_date
self.updated_at = updated_at
self.show_consent_confirmation = show_consent_confirmation
self.include_natures = include_natures
self.stages = stages
@property
def api_mode(self):
"""Gets the api_mode of this Attempt. # noqa: E501
the API mode of the customer that queried the API. # noqa: E501
:return: The api_mode of this Attempt. # noqa: E501
:rtype: str
"""
return self._api_mode
@api_mode.setter
def api_mode(self, api_mode):
"""Sets the api_mode of this Attempt.
the API mode of the customer that queried the API. # noqa: E501
:param api_mode: The api_mode of this Attempt. # noqa: E501
:type: str
"""
if api_mode is None:
raise ValueError("Invalid value for `api_mode`, must not be `None`") # noqa: E501
allowed_values = ["app", "service"] # noqa: E501
if api_mode not in allowed_values:
raise ValueError(
"Invalid value for `api_mode` ({0}), must be one of {1}" # noqa: E501
.format(api_mode, allowed_values)
)
self._api_mode = api_mode
@property
def api_version(self):
"""Gets the api_version of this Attempt. # noqa: E501
the API version in which the attempt was created # noqa: E501
:return: The api_version of this Attempt. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this Attempt.
the API version in which the attempt was created # noqa: E501
:param api_version: The api_version of this Attempt. # noqa: E501
:type: str
"""
if api_version is None:
raise ValueError("Invalid value for `api_version`, must not be `None`") # noqa: E501
self._api_version = api_version
@property
def automatic_fetch(self):
"""Gets the automatic_fetch of this Attempt. # noqa: E501
whether the connection related to the attempt can be automatically fetched # noqa: E501
:return: The automatic_fetch of this Attempt. # noqa: E501
:rtype: bool
"""
return self._automatic_fetch
@automatic_fetch.setter
def automatic_fetch(self, automatic_fetch):
"""Sets the automatic_fetch of this Attempt.
whether the connection related to the attempt can be automatically fetched # noqa: E501
:param automatic_fetch: The automatic_fetch of this Attempt. # noqa: E501
:type: bool
"""
if automatic_fetch is None:
raise ValueError("Invalid value for `automatic_fetch`, must not be `None`") # noqa: E501
self._automatic_fetch = automatic_fetch
@property
def daily_refresh(self):
"""Gets the daily_refresh of this Attempt. # noqa: E501
latest assigned value for `daily_refresh` in connection # noqa: E501
:return: The daily_refresh of this Attempt. # noqa: E501
:rtype: bool
"""
return self._daily_refresh
@daily_refresh.setter
def daily_refresh(self, daily_refresh):
"""Sets the daily_refresh of this Attempt.
latest assigned value for `daily_refresh` in connection # noqa: E501
:param daily_refresh: The daily_refresh of this Attempt. # noqa: E501
:type: bool
"""
if daily_refresh is None:
raise ValueError("Invalid value for `daily_refresh`, must not be `None`") # noqa: E501
self._daily_refresh = daily_refresh
@property
def categorization(self):
"""Gets the categorization of this Attempt. # noqa: E501
the type of categorization applied. # noqa: E501
:return: The categorization of this Attempt. # noqa: E501
:rtype: str
"""
return self._categorization
@categorization.setter
def categorization(self, categorization):
"""Sets the categorization of this Attempt.
the type of categorization applied. # noqa: E501
:param categorization: The categorization of this Attempt. # noqa: E501
:type: str
"""
if categorization is None:
raise ValueError("Invalid value for `categorization`, must not be `None`") # noqa: E501
allowed_values = ["none", "personal", "business"] # noqa: E501
if categorization not in allowed_values:
raise ValueError(
"Invalid value for `categorization` ({0}), must be one of {1}" # noqa: E501
.format(categorization, allowed_values)
)
self._categorization = categorization
@property
def created_at(self):
"""Gets the created_at of this Attempt. # noqa: E501
when the attempt was made # noqa: E501
:return: The created_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Attempt.
when the attempt was made # noqa: E501
:param created_at: The created_at of this Attempt. # noqa: E501
:type: datetime
"""
if created_at is None:
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def custom_fields(self):
"""Gets the custom_fields of this Attempt. # noqa: E501
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:return: The custom_fields of this Attempt. # noqa: E501
:rtype: object
"""
return self._custom_fields
@custom_fields.setter
def custom_fields(self, custom_fields):
"""Sets the custom_fields of this Attempt.
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:param custom_fields: The custom_fields of this Attempt. # noqa: E501
:type: object
"""
if custom_fields is None:
raise ValueError("Invalid value for `custom_fields`, must not be `None`") # noqa: E501
self._custom_fields = custom_fields
@property
def device_type(self):
"""Gets the device_type of this Attempt. # noqa: E501
the type of the device that created the attempt. # noqa: E501
:return: The device_type of this Attempt. # noqa: E501
:rtype: str
"""
return self._device_type
@device_type.setter
def device_type(self, device_type):
"""Sets the device_type of this Attempt.
the type of the device that created the attempt. # noqa: E501
:param device_type: The device_type of this Attempt. # noqa: E501
:type: str
"""
if device_type is None:
raise ValueError("Invalid value for `device_type`, must not be `None`") # noqa: E501
allowed_values = ["desktop", "tablet", "mobile"] # noqa: E501
if device_type not in allowed_values:
raise ValueError(
"Invalid value for `device_type` ({0}), must be one of {1}" # noqa: E501
.format(device_type, allowed_values)
)
self._device_type = device_type
@property
def remote_ip(self):
"""Gets the remote_ip of this Attempt. # noqa: E501
the IP of the device that created the attempt # noqa: E501
:return: The remote_ip of this Attempt. # noqa: E501
:rtype: str
"""
return self._remote_ip
@remote_ip.setter
def remote_ip(self, remote_ip):
"""Sets the remote_ip of this Attempt.
the IP of the device that created the attempt # noqa: E501
:param remote_ip: The remote_ip of this Attempt. # noqa: E501
:type: str
"""
if remote_ip is None:
raise ValueError("Invalid value for `remote_ip`, must not be `None`") # noqa: E501
self._remote_ip = remote_ip
@property
def exclude_accounts(self):
"""Gets the exclude_accounts of this Attempt. # noqa: E501
the `ids` of accounts that do not need to be refreshed # noqa: E501
:return: The exclude_accounts of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._exclude_accounts
@exclude_accounts.setter
def exclude_accounts(self, exclude_accounts):
"""Sets the exclude_accounts of this Attempt.
the `ids` of accounts that do not need to be refreshed # noqa: E501
:param exclude_accounts: The exclude_accounts of this Attempt. # noqa: E501
:type: list[str]
"""
if exclude_accounts is None:
raise ValueError("Invalid value for `exclude_accounts`, must not be `None`") # noqa: E501
self._exclude_accounts = exclude_accounts
@property
def user_present(self):
"""Gets the user_present of this Attempt. # noqa: E501
whether the request was initiated by the end-user of your application # noqa: E501
:return: The user_present of this Attempt. # noqa: E501
:rtype: bool
"""
return self._user_present
@user_present.setter
def user_present(self, user_present):
"""Sets the user_present of this Attempt.
whether the request was initiated by the end-user of your application # noqa: E501
:param user_present: The user_present of this Attempt. # noqa: E501
:type: bool
"""
if user_present is None:
raise ValueError("Invalid value for `user_present`, must not be `None`") # noqa: E501
self._user_present = user_present
@property
def customer_last_logged_at(self):
"""Gets the customer_last_logged_at of this Attempt. # noqa: E501
the datetime when user was last active in your application # noqa: E501
:return: The customer_last_logged_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._customer_last_logged_at
@customer_last_logged_at.setter
def customer_last_logged_at(self, customer_last_logged_at):
"""Sets the customer_last_logged_at of this Attempt.
the datetime when user was last active in your application # noqa: E501
:param customer_last_logged_at: The customer_last_logged_at of this Attempt. # noqa: E501
:type: datetime
"""
if customer_last_logged_at is None:
raise ValueError("Invalid value for `customer_last_logged_at`, must not be `None`") # noqa: E501
self._customer_last_logged_at = customer_last_logged_at
@property
def fail_at(self):
"""Gets the fail_at of this Attempt. # noqa: E501
when the attempt failed to finish # noqa: E501
:return: The fail_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._fail_at
@fail_at.setter
def fail_at(self, fail_at):
"""Sets the fail_at of this Attempt.
when the attempt failed to finish # noqa: E501
:param fail_at: The fail_at of this Attempt. # noqa: E501
:type: datetime
"""
if fail_at is None:
raise ValueError("Invalid value for `fail_at`, must not be `None`") # noqa: E501
self._fail_at = fail_at
@property
def fail_error_class(self):
"""Gets the fail_error_class of this Attempt. # noqa: E501
class of error that triggered the fail for attempt # noqa: E501
:return: The fail_error_class of this Attempt. # noqa: E501
:rtype: str
"""
return self._fail_error_class
@fail_error_class.setter
def fail_error_class(self, fail_error_class):
"""Sets the fail_error_class of this Attempt.
class of error that triggered the fail for attempt # noqa: E501
:param fail_error_class: The fail_error_class of this Attempt. # noqa: E501
:type: str
"""
if fail_error_class is None:
raise ValueError("Invalid value for `fail_error_class`, must not be `None`") # noqa: E501
self._fail_error_class = fail_error_class
@property
def fail_message(self):
"""Gets the fail_message of this Attempt. # noqa: E501
message that describes the error class # noqa: E501
:return: The fail_message of this Attempt. # noqa: E501
:rtype: str
"""
return self._fail_message
@fail_message.setter
def fail_message(self, fail_message):
"""Sets the fail_message of this Attempt.
message that describes the error class # noqa: E501
:param fail_message: The fail_message of this Attempt. # noqa: E501
:type: str
"""
if fail_message is None:
raise ValueError("Invalid value for `fail_message`, must not be `None`") # noqa: E501
self._fail_message = fail_message
@property
def fetch_scopes(self):
"""Gets the fetch_scopes of this Attempt. # noqa: E501
fetching mode. # noqa: E501
:return: The fetch_scopes of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._fetch_scopes
@fetch_scopes.setter
def fetch_scopes(self, fetch_scopes):
"""Sets the fetch_scopes of this Attempt.
fetching mode. # noqa: E501
:param fetch_scopes: The fetch_scopes of this Attempt. # noqa: E501
:type: list[str]
"""
if fetch_scopes is None:
raise ValueError("Invalid value for `fetch_scopes`, must not be `None`") # noqa: E501
allowed_values = ["accounts", "holder_info", "transactions"] # noqa: E501
if not set(fetch_scopes).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `fetch_scopes` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(fetch_scopes) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._fetch_scopes = fetch_scopes
@property
def finished(self):
"""Gets the finished of this Attempt. # noqa: E501
whether the connection had finished fetching # noqa: E501
:return: The finished of this Attempt. # noqa: E501
:rtype: bool
"""
return self._finished
@finished.setter
def finished(self, finished):
"""Sets the finished of this Attempt.
whether the connection had finished fetching # noqa: E501
:param finished: The finished of this Attempt. # noqa: E501
:type: bool
"""
if finished is None:
raise ValueError("Invalid value for `finished`, must not be `None`") # noqa: E501
self._finished = finished
@property
def finished_recent(self):
"""Gets the finished_recent of this Attempt. # noqa: E501
whether the connection had finished data for recent range # noqa: E501
:return: The finished_recent of this Attempt. # noqa: E501
:rtype: bool
"""
return self._finished_recent
@finished_recent.setter
def finished_recent(self, finished_recent):
"""Sets the finished_recent of this Attempt.
whether the connection had finished data for recent range # noqa: E501
:param finished_recent: The finished_recent of this Attempt. # noqa: E501
:type: bool
"""
if finished_recent is None:
raise ValueError("Invalid value for `finished_recent`, must not be `None`") # noqa: E501
self._finished_recent = finished_recent
@property
def from_date(self):
"""Gets the from_date of this Attempt. # noqa: E501
date from which the data had been fetched # noqa: E501
:return: The from_date of this Attempt. # noqa: E501
:rtype: date
"""
return self._from_date
@from_date.setter
def from_date(self, from_date):
"""Sets the from_date of this Attempt.
date from which the data had been fetched # noqa: E501
:param from_date: The from_date of this Attempt. # noqa: E501
:type: date
"""
if from_date is None:
raise ValueError("Invalid value for `from_date`, must not be `None`") # noqa: E501
self._from_date = from_date
@property
def id(self):
"""Gets the id of this Attempt. # noqa: E501
`id` of the attempt # noqa: E501
:return: The id of this Attempt. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Attempt.
`id` of the attempt # noqa: E501
:param id: The id of this Attempt. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def interactive(self):
"""Gets the interactive of this Attempt. # noqa: E501
whether the connection related to the attempt is interactive # noqa: E501
:return: The interactive of this Attempt. # noqa: E501
:rtype: bool
"""
return self._interactive
@interactive.setter
def interactive(self, interactive):
"""Sets the interactive of this Attempt.
whether the connection related to the attempt is interactive # noqa: E501
:param interactive: The interactive of this Attempt. # noqa: E501
:type: bool
"""
if interactive is None:
raise ValueError("Invalid value for `interactive`, must not be `None`") # noqa: E501
self._interactive = interactive
@property
def locale(self):
"""Gets the locale of this Attempt. # noqa: E501
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:return: The locale of this Attempt. # noqa: E501
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this Attempt.
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:param locale: The locale of this Attempt. # noqa: E501
:type: str
"""
if locale is None:
raise ValueError("Invalid value for `locale`, must not be `None`") # noqa: E501
self._locale = locale
@property
def partial(self):
"""Gets the partial of this Attempt. # noqa: E501
whether the connection was partially fetched # noqa: E501
:return: The partial of this Attempt. # noqa: E501
:rtype: bool
"""
return self._partial
@partial.setter
def partial(self, partial):
"""Sets the partial of this Attempt.
whether the connection was partially fetched # noqa: E501
:param partial: The partial of this Attempt. # noqa: E501
:type: bool
"""
if partial is None:
raise ValueError("Invalid value for `partial`, must not be `None`") # noqa: E501
self._partial = partial
@property
def store_credentials(self):
"""Gets the store_credentials of this Attempt. # noqa: E501
whether the credentials were stored on our side # noqa: E501
:return: The store_credentials of this Attempt. # noqa: E501
:rtype: bool
"""
return self._store_credentials
@store_credentials.setter
def store_credentials(self, store_credentials):
"""Sets the store_credentials of this Attempt.
whether the credentials were stored on our side # noqa: E501
:param store_credentials: The store_credentials of this Attempt. # noqa: E501
:type: bool
"""
if store_credentials is None:
raise ValueError("Invalid value for `store_credentials`, must not be `None`") # noqa: E501
self._store_credentials = store_credentials
@property
def success_at(self):
"""Gets the success_at of this Attempt. # noqa: E501
when the attempt succeeded and finished # noqa: E501
:return: The success_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._success_at
@success_at.setter
def success_at(self, success_at):
"""Sets the success_at of this Attempt.
when the attempt succeeded and finished # noqa: E501
:param success_at: The success_at of this Attempt. # noqa: E501
:type: datetime
"""
if success_at is None:
raise ValueError("Invalid value for `success_at`, must not be `None`") # noqa: E501
self._success_at = success_at
@property
def to_date(self):
"""Gets the to_date of this Attempt. # noqa: E501
date until which the data has been fetched # noqa: E501
:return: The to_date of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._to_date
@to_date.setter
def to_date(self, to_date):
"""Sets the to_date of this Attempt.
date until which the data has been fetched # noqa: E501
:param to_date: The to_date of this Attempt. # noqa: E501
:type: datetime
"""
if to_date is None:
raise ValueError("Invalid value for `to_date`, must not be `None`") # noqa: E501
self._to_date = to_date
@property
def updated_at(self):
"""Gets the updated_at of this Attempt. # noqa: E501
when last attempt update occurred # noqa: E501
:return: The updated_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Attempt.
when last attempt update occurred # noqa: E501
:param updated_at: The updated_at of this Attempt. # noqa: E501
:type: datetime
"""
if updated_at is None:
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
@property
def show_consent_confirmation(self):
"""Gets the show_consent_confirmation of this Attempt. # noqa: E501
whether any consent was given for this connection # noqa: E501
:return: The show_consent_confirmation of this Attempt. # noqa: E501
:rtype: bool
"""
return self._show_consent_confirmation
@show_consent_confirmation.setter
def show_consent_confirmation(self, show_consent_confirmation):
"""Sets the show_consent_confirmation of this Attempt.
whether any consent was given for this connection # noqa: E501
:param show_consent_confirmation: The show_consent_confirmation of this Attempt. # noqa: E501
:type: bool
"""
if show_consent_confirmation is None:
raise ValueError("Invalid value for `show_consent_confirmation`, must not be `None`") # noqa: E501
self._show_consent_confirmation = show_consent_confirmation
@property
def include_natures(self):
"""Gets the include_natures of this Attempt. # noqa: E501
the natures of the accounts that need to be fetched # noqa: E501
:return: The include_natures of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._include_natures
@include_natures.setter
def include_natures(self, include_natures):
"""Sets the include_natures of this Attempt.
the natures of the accounts that need to be fetched # noqa: E501
:param include_natures: The include_natures of this Attempt. # noqa: E501
:type: list[str]
"""
if include_natures is None:
raise ValueError("Invalid value for `include_natures`, must not be `None`") # noqa: E501
self._include_natures = include_natures
@property
def stages(self):
"""Gets the stages of this Attempt. # noqa: E501
information about [stages](#attempts-stages) through which the connection has passed # noqa: E501
:return: The stages of this Attempt. # noqa: E501
:rtype: list[Stage]
"""
return self._stages
@stages.setter
def stages(self, stages):
"""Sets the stages of this Attempt.
information about [stages](#attempts-stages) through which the connection has passed # noqa: E501
:param stages: The stages of this Attempt. # noqa: E501
:type: list[Stage]
"""
if stages is None:
raise ValueError("Invalid value for `stages`, must not be `None`") # noqa: E501
self._stages = stages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Attempt, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Attempt):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.588776 | 603 | 0.614971 | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Attempt(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_mode': 'str',
'api_version': 'str',
'automatic_fetch': 'bool',
'daily_refresh': 'bool',
'categorization': 'str',
'created_at': 'datetime',
'custom_fields': 'object',
'device_type': 'str',
'remote_ip': 'str',
'exclude_accounts': 'list[str]',
'user_present': 'bool',
'customer_last_logged_at': 'datetime',
'fail_at': 'datetime',
'fail_error_class': 'str',
'fail_message': 'str',
'fetch_scopes': 'list[str]',
'finished': 'bool',
'finished_recent': 'bool',
'from_date': 'date',
'id': 'str',
'interactive': 'bool',
'locale': 'str',
'partial': 'bool',
'store_credentials': 'bool',
'success_at': 'datetime',
'to_date': 'datetime',
'updated_at': 'datetime',
'show_consent_confirmation': 'bool',
'include_natures': 'list[str]',
'stages': 'list[Stage]'
}
attribute_map = {
'api_mode': 'api_mode',
'api_version': 'api_version',
'automatic_fetch': 'automatic_fetch',
'daily_refresh': 'daily_refresh',
'categorization': 'categorization',
'created_at': 'created_at',
'custom_fields': 'custom_fields',
'device_type': 'device_type',
'remote_ip': 'remote_ip',
'exclude_accounts': 'exclude_accounts',
'user_present': 'user_present',
'customer_last_logged_at': 'customer_last_logged_at',
'fail_at': 'fail_at',
'fail_error_class': 'fail_error_class',
'fail_message': 'fail_message',
'fetch_scopes': 'fetch_scopes',
'finished': 'finished',
'finished_recent': 'finished_recent',
'from_date': 'from_date',
'id': 'id',
'interactive': 'interactive',
'locale': 'locale',
'partial': 'partial',
'store_credentials': 'store_credentials',
'success_at': 'success_at',
'to_date': 'to_date',
'updated_at': 'updated_at',
'show_consent_confirmation': 'show_consent_confirmation',
'include_natures': 'include_natures',
'stages': 'stages'
}
def __init__(self, api_mode=None, api_version=None, automatic_fetch=None, daily_refresh=None, categorization='personal', created_at=None, custom_fields=None, device_type=None, remote_ip=None, exclude_accounts=None, user_present=None, customer_last_logged_at=None, fail_at=None, fail_error_class=None, fail_message=None, fetch_scopes=None, finished=None, finished_recent=None, from_date=None, id=None, interactive=None, locale=None, partial=None, store_credentials=None, success_at=None, to_date=None, updated_at=None, show_consent_confirmation=None, include_natures=None, stages=None): # noqa: E501
"""Attempt - a model defined in Swagger""" # noqa: E501
self._api_mode = None
self._api_version = None
self._automatic_fetch = None
self._daily_refresh = None
self._categorization = None
self._created_at = None
self._custom_fields = None
self._device_type = None
self._remote_ip = None
self._exclude_accounts = None
self._user_present = None
self._customer_last_logged_at = None
self._fail_at = None
self._fail_error_class = None
self._fail_message = None
self._fetch_scopes = None
self._finished = None
self._finished_recent = None
self._from_date = None
self._id = None
self._interactive = None
self._locale = None
self._partial = None
self._store_credentials = None
self._success_at = None
self._to_date = None
self._updated_at = None
self._show_consent_confirmation = None
self._include_natures = None
self._stages = None
self.discriminator = None
self.api_mode = api_mode
self.api_version = api_version
self.automatic_fetch = automatic_fetch
self.daily_refresh = daily_refresh
self.categorization = categorization
self.created_at = created_at
self.custom_fields = custom_fields
self.device_type = device_type
self.remote_ip = remote_ip
self.exclude_accounts = exclude_accounts
self.user_present = user_present
self.customer_last_logged_at = customer_last_logged_at
self.fail_at = fail_at
self.fail_error_class = fail_error_class
self.fail_message = fail_message
self.fetch_scopes = fetch_scopes
self.finished = finished
self.finished_recent = finished_recent
self.from_date = from_date
self.id = id
self.interactive = interactive
self.locale = locale
self.partial = partial
self.store_credentials = store_credentials
self.success_at = success_at
self.to_date = to_date
self.updated_at = updated_at
self.show_consent_confirmation = show_consent_confirmation
self.include_natures = include_natures
self.stages = stages
@property
def api_mode(self):
"""Gets the api_mode of this Attempt. # noqa: E501
the API mode of the customer that queried the API. # noqa: E501
:return: The api_mode of this Attempt. # noqa: E501
:rtype: str
"""
return self._api_mode
@api_mode.setter
def api_mode(self, api_mode):
"""Sets the api_mode of this Attempt.
the API mode of the customer that queried the API. # noqa: E501
:param api_mode: The api_mode of this Attempt. # noqa: E501
:type: str
"""
if api_mode is None:
raise ValueError("Invalid value for `api_mode`, must not be `None`") # noqa: E501
allowed_values = ["app", "service"] # noqa: E501
if api_mode not in allowed_values:
raise ValueError(
"Invalid value for `api_mode` ({0}), must be one of {1}" # noqa: E501
.format(api_mode, allowed_values)
)
self._api_mode = api_mode
@property
def api_version(self):
"""Gets the api_version of this Attempt. # noqa: E501
the API version in which the attempt was created # noqa: E501
:return: The api_version of this Attempt. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this Attempt.
the API version in which the attempt was created # noqa: E501
:param api_version: The api_version of this Attempt. # noqa: E501
:type: str
"""
if api_version is None:
raise ValueError("Invalid value for `api_version`, must not be `None`") # noqa: E501
self._api_version = api_version
@property
def automatic_fetch(self):
"""Gets the automatic_fetch of this Attempt. # noqa: E501
whether the connection related to the attempt can be automatically fetched # noqa: E501
:return: The automatic_fetch of this Attempt. # noqa: E501
:rtype: bool
"""
return self._automatic_fetch
@automatic_fetch.setter
def automatic_fetch(self, automatic_fetch):
"""Sets the automatic_fetch of this Attempt.
whether the connection related to the attempt can be automatically fetched # noqa: E501
:param automatic_fetch: The automatic_fetch of this Attempt. # noqa: E501
:type: bool
"""
if automatic_fetch is None:
raise ValueError("Invalid value for `automatic_fetch`, must not be `None`") # noqa: E501
self._automatic_fetch = automatic_fetch
@property
def daily_refresh(self):
"""Gets the daily_refresh of this Attempt. # noqa: E501
latest assigned value for `daily_refresh` in connection # noqa: E501
:return: The daily_refresh of this Attempt. # noqa: E501
:rtype: bool
"""
return self._daily_refresh
@daily_refresh.setter
def daily_refresh(self, daily_refresh):
"""Sets the daily_refresh of this Attempt.
latest assigned value for `daily_refresh` in connection # noqa: E501
:param daily_refresh: The daily_refresh of this Attempt. # noqa: E501
:type: bool
"""
if daily_refresh is None:
raise ValueError("Invalid value for `daily_refresh`, must not be `None`") # noqa: E501
self._daily_refresh = daily_refresh
@property
def categorization(self):
"""Gets the categorization of this Attempt. # noqa: E501
the type of categorization applied. # noqa: E501
:return: The categorization of this Attempt. # noqa: E501
:rtype: str
"""
return self._categorization
@categorization.setter
def categorization(self, categorization):
"""Sets the categorization of this Attempt.
the type of categorization applied. # noqa: E501
:param categorization: The categorization of this Attempt. # noqa: E501
:type: str
"""
if categorization is None:
raise ValueError("Invalid value for `categorization`, must not be `None`") # noqa: E501
allowed_values = ["none", "personal", "business"] # noqa: E501
if categorization not in allowed_values:
raise ValueError(
"Invalid value for `categorization` ({0}), must be one of {1}" # noqa: E501
.format(categorization, allowed_values)
)
self._categorization = categorization
@property
def created_at(self):
"""Gets the created_at of this Attempt. # noqa: E501
when the attempt was made # noqa: E501
:return: The created_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Attempt.
when the attempt was made # noqa: E501
:param created_at: The created_at of this Attempt. # noqa: E501
:type: datetime
"""
if created_at is None:
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def custom_fields(self):
"""Gets the custom_fields of this Attempt. # noqa: E501
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:return: The custom_fields of this Attempt. # noqa: E501
:rtype: object
"""
return self._custom_fields
@custom_fields.setter
def custom_fields(self, custom_fields):
"""Sets the custom_fields of this Attempt.
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:param custom_fields: The custom_fields of this Attempt. # noqa: E501
:type: object
"""
if custom_fields is None:
raise ValueError("Invalid value for `custom_fields`, must not be `None`") # noqa: E501
self._custom_fields = custom_fields
@property
def device_type(self):
"""Gets the device_type of this Attempt. # noqa: E501
the type of the device that created the attempt. # noqa: E501
:return: The device_type of this Attempt. # noqa: E501
:rtype: str
"""
return self._device_type
@device_type.setter
def device_type(self, device_type):
"""Sets the device_type of this Attempt.
the type of the device that created the attempt. # noqa: E501
:param device_type: The device_type of this Attempt. # noqa: E501
:type: str
"""
if device_type is None:
raise ValueError("Invalid value for `device_type`, must not be `None`") # noqa: E501
allowed_values = ["desktop", "tablet", "mobile"] # noqa: E501
if device_type not in allowed_values:
raise ValueError(
"Invalid value for `device_type` ({0}), must be one of {1}" # noqa: E501
.format(device_type, allowed_values)
)
self._device_type = device_type
@property
def remote_ip(self):
"""Gets the remote_ip of this Attempt. # noqa: E501
the IP of the device that created the attempt # noqa: E501
:return: The remote_ip of this Attempt. # noqa: E501
:rtype: str
"""
return self._remote_ip
@remote_ip.setter
def remote_ip(self, remote_ip):
"""Sets the remote_ip of this Attempt.
the IP of the device that created the attempt # noqa: E501
:param remote_ip: The remote_ip of this Attempt. # noqa: E501
:type: str
"""
if remote_ip is None:
raise ValueError("Invalid value for `remote_ip`, must not be `None`") # noqa: E501
self._remote_ip = remote_ip
@property
def exclude_accounts(self):
"""Gets the exclude_accounts of this Attempt. # noqa: E501
the `ids` of accounts that do not need to be refreshed # noqa: E501
:return: The exclude_accounts of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._exclude_accounts
@exclude_accounts.setter
def exclude_accounts(self, exclude_accounts):
"""Sets the exclude_accounts of this Attempt.
the `ids` of accounts that do not need to be refreshed # noqa: E501
:param exclude_accounts: The exclude_accounts of this Attempt. # noqa: E501
:type: list[str]
"""
if exclude_accounts is None:
raise ValueError("Invalid value for `exclude_accounts`, must not be `None`") # noqa: E501
self._exclude_accounts = exclude_accounts
@property
def user_present(self):
"""Gets the user_present of this Attempt. # noqa: E501
whether the request was initiated by the end-user of your application # noqa: E501
:return: The user_present of this Attempt. # noqa: E501
:rtype: bool
"""
return self._user_present
@user_present.setter
def user_present(self, user_present):
"""Sets the user_present of this Attempt.
whether the request was initiated by the end-user of your application # noqa: E501
:param user_present: The user_present of this Attempt. # noqa: E501
:type: bool
"""
if user_present is None:
raise ValueError("Invalid value for `user_present`, must not be `None`") # noqa: E501
self._user_present = user_present
@property
def customer_last_logged_at(self):
"""Gets the customer_last_logged_at of this Attempt. # noqa: E501
the datetime when user was last active in your application # noqa: E501
:return: The customer_last_logged_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._customer_last_logged_at
@customer_last_logged_at.setter
def customer_last_logged_at(self, customer_last_logged_at):
"""Sets the customer_last_logged_at of this Attempt.
the datetime when user was last active in your application # noqa: E501
:param customer_last_logged_at: The customer_last_logged_at of this Attempt. # noqa: E501
:type: datetime
"""
if customer_last_logged_at is None:
raise ValueError("Invalid value for `customer_last_logged_at`, must not be `None`") # noqa: E501
self._customer_last_logged_at = customer_last_logged_at
@property
def fail_at(self):
"""Gets the fail_at of this Attempt. # noqa: E501
when the attempt failed to finish # noqa: E501
:return: The fail_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._fail_at
@fail_at.setter
def fail_at(self, fail_at):
"""Sets the fail_at of this Attempt.
when the attempt failed to finish # noqa: E501
:param fail_at: The fail_at of this Attempt. # noqa: E501
:type: datetime
"""
if fail_at is None:
raise ValueError("Invalid value for `fail_at`, must not be `None`") # noqa: E501
self._fail_at = fail_at
@property
def fail_error_class(self):
"""Gets the fail_error_class of this Attempt. # noqa: E501
class of error that triggered the fail for attempt # noqa: E501
:return: The fail_error_class of this Attempt. # noqa: E501
:rtype: str
"""
return self._fail_error_class
@fail_error_class.setter
def fail_error_class(self, fail_error_class):
"""Sets the fail_error_class of this Attempt.
class of error that triggered the fail for attempt # noqa: E501
:param fail_error_class: The fail_error_class of this Attempt. # noqa: E501
:type: str
"""
if fail_error_class is None:
raise ValueError("Invalid value for `fail_error_class`, must not be `None`") # noqa: E501
self._fail_error_class = fail_error_class
@property
def fail_message(self):
"""Gets the fail_message of this Attempt. # noqa: E501
message that describes the error class # noqa: E501
:return: The fail_message of this Attempt. # noqa: E501
:rtype: str
"""
return self._fail_message
@fail_message.setter
def fail_message(self, fail_message):
"""Sets the fail_message of this Attempt.
message that describes the error class # noqa: E501
:param fail_message: The fail_message of this Attempt. # noqa: E501
:type: str
"""
if fail_message is None:
raise ValueError("Invalid value for `fail_message`, must not be `None`") # noqa: E501
self._fail_message = fail_message
@property
def fetch_scopes(self):
"""Gets the fetch_scopes of this Attempt. # noqa: E501
fetching mode. # noqa: E501
:return: The fetch_scopes of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._fetch_scopes
@fetch_scopes.setter
def fetch_scopes(self, fetch_scopes):
"""Sets the fetch_scopes of this Attempt.
fetching mode. # noqa: E501
:param fetch_scopes: The fetch_scopes of this Attempt. # noqa: E501
:type: list[str]
"""
if fetch_scopes is None:
raise ValueError("Invalid value for `fetch_scopes`, must not be `None`") # noqa: E501
allowed_values = ["accounts", "holder_info", "transactions"] # noqa: E501
if not set(fetch_scopes).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `fetch_scopes` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(fetch_scopes) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._fetch_scopes = fetch_scopes
@property
def finished(self):
"""Gets the finished of this Attempt. # noqa: E501
whether the connection had finished fetching # noqa: E501
:return: The finished of this Attempt. # noqa: E501
:rtype: bool
"""
return self._finished
@finished.setter
def finished(self, finished):
"""Sets the finished of this Attempt.
whether the connection had finished fetching # noqa: E501
:param finished: The finished of this Attempt. # noqa: E501
:type: bool
"""
if finished is None:
raise ValueError("Invalid value for `finished`, must not be `None`") # noqa: E501
self._finished = finished
@property
def finished_recent(self):
"""Gets the finished_recent of this Attempt. # noqa: E501
whether the connection had finished data for recent range # noqa: E501
:return: The finished_recent of this Attempt. # noqa: E501
:rtype: bool
"""
return self._finished_recent
@finished_recent.setter
def finished_recent(self, finished_recent):
"""Sets the finished_recent of this Attempt.
whether the connection had finished data for recent range # noqa: E501
:param finished_recent: The finished_recent of this Attempt. # noqa: E501
:type: bool
"""
if finished_recent is None:
raise ValueError("Invalid value for `finished_recent`, must not be `None`") # noqa: E501
self._finished_recent = finished_recent
@property
def from_date(self):
"""Gets the from_date of this Attempt. # noqa: E501
date from which the data had been fetched # noqa: E501
:return: The from_date of this Attempt. # noqa: E501
:rtype: date
"""
return self._from_date
@from_date.setter
def from_date(self, from_date):
"""Sets the from_date of this Attempt.
date from which the data had been fetched # noqa: E501
:param from_date: The from_date of this Attempt. # noqa: E501
:type: date
"""
if from_date is None:
raise ValueError("Invalid value for `from_date`, must not be `None`") # noqa: E501
self._from_date = from_date
@property
def id(self):
"""Gets the id of this Attempt. # noqa: E501
`id` of the attempt # noqa: E501
:return: The id of this Attempt. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Attempt.
`id` of the attempt # noqa: E501
:param id: The id of this Attempt. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def interactive(self):
"""Gets the interactive of this Attempt. # noqa: E501
whether the connection related to the attempt is interactive # noqa: E501
:return: The interactive of this Attempt. # noqa: E501
:rtype: bool
"""
return self._interactive
@interactive.setter
def interactive(self, interactive):
"""Sets the interactive of this Attempt.
whether the connection related to the attempt is interactive # noqa: E501
:param interactive: The interactive of this Attempt. # noqa: E501
:type: bool
"""
if interactive is None:
raise ValueError("Invalid value for `interactive`, must not be `None`") # noqa: E501
self._interactive = interactive
@property
def locale(self):
"""Gets the locale of this Attempt. # noqa: E501
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:return: The locale of this Attempt. # noqa: E501
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this Attempt.
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:param locale: The locale of this Attempt. # noqa: E501
:type: str
"""
if locale is None:
raise ValueError("Invalid value for `locale`, must not be `None`") # noqa: E501
self._locale = locale
@property
def partial(self):
"""Gets the partial of this Attempt. # noqa: E501
whether the connection was partially fetched # noqa: E501
:return: The partial of this Attempt. # noqa: E501
:rtype: bool
"""
return self._partial
@partial.setter
def partial(self, partial):
"""Sets the partial of this Attempt.
whether the connection was partially fetched # noqa: E501
:param partial: The partial of this Attempt. # noqa: E501
:type: bool
"""
if partial is None:
raise ValueError("Invalid value for `partial`, must not be `None`") # noqa: E501
self._partial = partial
@property
def store_credentials(self):
"""Gets the store_credentials of this Attempt. # noqa: E501
whether the credentials were stored on our side # noqa: E501
:return: The store_credentials of this Attempt. # noqa: E501
:rtype: bool
"""
return self._store_credentials
@store_credentials.setter
def store_credentials(self, store_credentials):
"""Sets the store_credentials of this Attempt.
whether the credentials were stored on our side # noqa: E501
:param store_credentials: The store_credentials of this Attempt. # noqa: E501
:type: bool
"""
if store_credentials is None:
raise ValueError("Invalid value for `store_credentials`, must not be `None`") # noqa: E501
self._store_credentials = store_credentials
@property
def success_at(self):
"""Gets the success_at of this Attempt. # noqa: E501
when the attempt succeeded and finished # noqa: E501
:return: The success_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._success_at
@success_at.setter
def success_at(self, success_at):
"""Sets the success_at of this Attempt.
when the attempt succeeded and finished # noqa: E501
:param success_at: The success_at of this Attempt. # noqa: E501
:type: datetime
"""
if success_at is None:
raise ValueError("Invalid value for `success_at`, must not be `None`") # noqa: E501
self._success_at = success_at
@property
def to_date(self):
"""Gets the to_date of this Attempt. # noqa: E501
date until which the data has been fetched # noqa: E501
:return: The to_date of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._to_date
@to_date.setter
def to_date(self, to_date):
"""Sets the to_date of this Attempt.
date until which the data has been fetched # noqa: E501
:param to_date: The to_date of this Attempt. # noqa: E501
:type: datetime
"""
if to_date is None:
raise ValueError("Invalid value for `to_date`, must not be `None`") # noqa: E501
self._to_date = to_date
@property
def updated_at(self):
"""Gets the updated_at of this Attempt. # noqa: E501
when last attempt update occurred # noqa: E501
:return: The updated_at of this Attempt. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Attempt.
when last attempt update occurred # noqa: E501
:param updated_at: The updated_at of this Attempt. # noqa: E501
:type: datetime
"""
if updated_at is None:
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
@property
def show_consent_confirmation(self):
"""Gets the show_consent_confirmation of this Attempt. # noqa: E501
whether any consent was given for this connection # noqa: E501
:return: The show_consent_confirmation of this Attempt. # noqa: E501
:rtype: bool
"""
return self._show_consent_confirmation
@show_consent_confirmation.setter
def show_consent_confirmation(self, show_consent_confirmation):
"""Sets the show_consent_confirmation of this Attempt.
whether any consent was given for this connection # noqa: E501
:param show_consent_confirmation: The show_consent_confirmation of this Attempt. # noqa: E501
:type: bool
"""
if show_consent_confirmation is None:
raise ValueError("Invalid value for `show_consent_confirmation`, must not be `None`") # noqa: E501
self._show_consent_confirmation = show_consent_confirmation
@property
def include_natures(self):
"""Gets the include_natures of this Attempt. # noqa: E501
the natures of the accounts that need to be fetched # noqa: E501
:return: The include_natures of this Attempt. # noqa: E501
:rtype: list[str]
"""
return self._include_natures
@include_natures.setter
def include_natures(self, include_natures):
"""Sets the include_natures of this Attempt.
the natures of the accounts that need to be fetched # noqa: E501
:param include_natures: The include_natures of this Attempt. # noqa: E501
:type: list[str]
"""
if include_natures is None:
raise ValueError("Invalid value for `include_natures`, must not be `None`") # noqa: E501
self._include_natures = include_natures
@property
def stages(self):
"""Gets the stages of this Attempt. # noqa: E501
information about [stages](#attempts-stages) through which the connection has passed # noqa: E501
:return: The stages of this Attempt. # noqa: E501
:rtype: list[Stage]
"""
return self._stages
@stages.setter
def stages(self, stages):
"""Sets the stages of this Attempt.
information about [stages](#attempts-stages) through which the connection has passed # noqa: E501
:param stages: The stages of this Attempt. # noqa: E501
:type: list[Stage]
"""
if stages is None:
raise ValueError("Invalid value for `stages`, must not be `None`") # noqa: E501
self._stages = stages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Attempt, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Attempt):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 0 | 0 | 0 |
432fc3ff6d1726b9f47fc1106e5472077b10a5ec | 137 | py | Python | oteapi/plugins/__init__.py | TorgeirUstad/oteapi-core | 60432a5e8a511cca8a9c52197a247de50e808096 | [
"MIT"
] | 3 | 2022-01-24T15:18:08.000Z | 2022-03-16T14:01:51.000Z | oteapi/plugins/__init__.py | EMMC-ASBL/oteapi-core | 5a034c7610c300b21e585f563debb43383375af0 | [
"MIT"
] | 117 | 2022-01-13T17:26:38.000Z | 2022-03-30T16:12:06.000Z | oteapi/plugins/__init__.py | TorgeirUstad/oteapi-core | 60432a5e8a511cca8a9c52197a247de50e808096 | [
"MIT"
] | 3 | 2022-01-17T20:57:57.000Z | 2022-01-25T08:16:14.000Z | """`oteapi.plugins` module."""
from .factories import create_strategy, load_strategies
__all__ = ("create_strategy", "load_strategies")
| 27.4 | 55 | 0.766423 | """`oteapi.plugins` module."""
from .factories import create_strategy, load_strategies
__all__ = ("create_strategy", "load_strategies")
| 0 | 0 | 0 |