blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
744fba312f2b3ca31eb82c521b058139f9d7e0db | a8be4698c0a43edc3622837fbe2a98e92680f48a | /SSAFY알고리즘정규시간 Problem Solving/9월 Problem Solving/0930/3752가능한시험점수.py | e7305e71e4818c10983b34c211bc0b4567250260 | [] | no_license | blueboy1593/algorithm | fa8064241f7738a12b33544413c299e7c1e1a908 | 9d6fdd82b711ba16ad613edcc041cbecadd85e2d | refs/heads/master | 2021-06-23T22:44:06.120932 | 2021-02-21T10:44:16 | 2021-02-21T10:44:16 | 199,543,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import sys
sys.stdin = open("3752_input.txt", "r")
T = int(input())
# Let's try this: record the achievable scores into a list, indexed by score.
def DFS(i, jumsoo):
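    # Add problem i's score to the running total, record the total, then
    # recurse into every later problem so that all subset sums get enumerated.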
jumsoo += grade[i]
grade_set.add(jumsoo)
for j in range(i + 1, N):
TF[j] = False
DFS(j, jumsoo)
TF[j] = True
for tc in range(1, T + 1):
N = int(input())
grade = list(map(int, input().split()))
grade_set = set()
for i in range(len(grade)):
TF = [ False ] * N
jumsoo = 0
DFS(i, jumsoo)
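    # +1 accounts for selecting no problems at all, i.e. a total score of 0.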
result = len(grade_set) + 1
print("#%d %d" %(tc, result)) | [
"snb0303@naver.com"
] | snb0303@naver.com |
550425985d4b721c0fee84ae9bcc6571903970de | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /KPicBthv6WhHFGapg_21.py | b72401fb304ce28ac7d951cec22bc9d33168f6f1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | """
Create a function that returns the **number of syllables** in a simple string.
The string is made up of _short repeated words_ like `"Lalalalalalala"` (which
would have _7 syllables_ ).
### Examples
count_syllables("Hehehehehehe") ➞ 6
count_syllables("bobobobobobobobo") ➞ 8
count_syllables("NANANA") ➞ 3
### Notes
* For simplicity, please note that each syllable will consist of two letters only.
* Your code should accept strings of any case (upper, lower and mixed case).
"""
def count_syllables(txt):
    txt = txt.lower()
    syllable = txt[0:2]           # each syllable is exactly two letters
    return txt.count(syllable)    # the word is just that syllable repeated
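# e.g. count_syllables("NANANA") -> 3: the two-letter syllable "na" repeats three times.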
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
386610dd251f9e3784e15ef74695779b95f74588 | 9c20b0f0ad729b77e970dedaf4a138c99b4364bc | /Lib/site-packages/phonenumbers/data/region_TN.py | 876916c6ca828f0dbe4ce8ea6dd9787c2e55166d | [] | no_license | GlovesMaker/Sklepinternetowy | 4459f8651d2280e4840cfb293de28f9413df68af | d05372e96f7238c9459caf4f7a890a5a6f2bb2c3 | refs/heads/master | 2022-12-22T02:43:33.628016 | 2018-09-11T18:20:37 | 2018-09-11T18:20:37 | 167,855,928 | 0 | 1 | null | 2022-12-08T05:55:04 | 2019-01-27T20:36:42 | Python | UTF-8 | Python | false | false | 1,103 | py | """Auto-generated file, do not edit by hand. TN metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TN = PhoneMetadata(id='TN', country_code=216, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-57-9]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:3[0-2]\\d{3}|7\\d{4}|81200)\\d{3}', example_number='30010123', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:[259]\\d{3}|3(?:001|1(?:[1-35]\\d|40)|240|6[0-4]\\d|91\\d)|4[0-6]\\d{2})\\d{4}', example_number='20123456', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='8010\\d{4}', example_number='80101234', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='88\\d{6}', example_number='88123456', possible_length=(8,)),
shared_cost=PhoneNumberDesc(national_number_pattern='8[12]10\\d{4}', example_number='81101234', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3')])
| [
"buchar123@gmail.com"
] | buchar123@gmail.com |
8e8bfc220f7a7cb3625dcb6c3cd32dbb38472d3e | d82b4a4e710642dd38c944890671f21b2099232f | /Algorithm-python/_review/section4_ReverseSequence.py | b165ed2587bf79af6edb642fd01521716347485f | [] | no_license | somsomdah/Algorithm | 31a36d01bc0e1873dee3d95789dcff3dd68a9b09 | cd7f6f25fda5aef17495e11c20b54561d83674c5 | refs/heads/master | 2023-05-24T20:04:32.466801 | 2023-05-19T10:53:27 | 2023-05-19T10:53:27 | 236,322,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # 해결 못함
n=int(input())
seq=list(map(int,input().split()))
res=[0]*n
for i in range(0,n):
count=0
for j in range(0,n):
if res[j]==0:
count+=1
if count==seq[i]:
res[j]=i+1
print(res)
| [
"somdah98@gmail.com"
] | somdah98@gmail.com |
0c30d9ccebce25bb64383a0092ff2a31ff517dfa | b9ed8f5edf787f1a7df567a1b01086dc045427ba | /official/projects/mae/configs/mae.py | 00691534b0760e4f37705c52d877976ffcc55079 | [
"Apache-2.0"
] | permissive | stjordanis/models | 787183f973f8cd4152f328de2368dbef17376488 | 84e1f30cdb5015848cb0d9e38e5b3f0551953b7c | refs/heads/master | 2023-03-18T08:46:29.986735 | 2023-03-07T23:26:36 | 2023-03-07T23:27:43 | 143,071,287 | 0 | 0 | Apache-2.0 | 2018-07-31T21:18:06 | 2018-07-31T21:18:05 | null | UTF-8 | Python | false | false | 3,612 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAE configurations."""
import dataclasses
from typing import Tuple
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.configs import image_classification
@dataclasses.dataclass
class MAEConfig(cfg.TaskConfig):
"""The translation task config."""
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
masking_ratio: float = 0.75
patch_h: int = 14
patch_w: int = 14
num_classes: int = 1000
input_size: Tuple[int, int] = (224, 224)
norm_target: bool = False
@exp_factory.register_config_factory('mae_imagenet')
def mae_imagenet() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 4096
eval_batch_size = 4096
imagenet_size = 1281167
steps_per_epoch = imagenet_size // train_batch_size
config = cfg.ExperimentConfig(
task=MAEConfig(
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=10000,
crop_area_range=(0.2, 1.0),
),
validation_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
)
),
trainer=cfg.TrainerConfig(
train_steps=800 * steps_per_epoch,
validation_steps=24,
steps_per_loop=1000,
summary_interval=1000,
checkpoint_interval=1000,
validation_interval=1000,
max_to_keep=5,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'beta_2': 0.95,
'weight_decay_rate': 0.05,
# Avoid AdamW legacy behavior.
                        'gradient_clip_norm': 0.0,
'exclude_from_weight_decay': [
'LayerNorm', 'layer_norm', 'bias']
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate':
1.5 * 1e-4 * train_batch_size / 256,
'decay_steps': 800 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 40 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})
),
restrictions=[
'task.train_data.is_training != None',
])
return config
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
0328881cc74e66f9635e839f9d1e6dcc0f05d091 | 5748b92c451efe67fabc9e588dcd5dcedbe29c36 | /buildout/Naaya/zope210/bootstrap.py | 1fc174920923155818686f3b128bdbba7d09e16c | [] | no_license | Hamzahashmi4444/Salman | 146d30303ff738f9c78525466b039e7a6a7bd1bb | 611ac05be7771a46b26ff243359cfcafce738cb1 | refs/heads/master | 2023-02-16T14:05:35.070709 | 2021-01-18T06:56:23 | 2021-01-18T06:56:23 | 330,587,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,248 | py | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith('java')
# parsing arguments
parser = OptionParser(
'This is a custom version of the zc.buildout %prog script. It is '
'intended to meet a temporary need if you encounter problems with '
'the zc.buildout 1.5 release.')
parser.add_option("-v", "--version", dest="version", default='1.4.4',
help='Use a specific zc.buildout version. *This '
'bootstrap script defaults to '
                       '1.4.4, unlike usual buildout bootstrap scripts.*')
parser.add_option("-d", "--distribute",
action="store_true", dest="distribute", default=False,
help="Use Disribute rather than Setuptools.")
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args += ['-c', options.config_file]
if options.version is not None:
VERSION = '==%s' % options.version
else:
VERSION = ''
USE_DISTRIBUTE = options.distribute
args = args + ['bootstrap']
to_reload = False
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
to_reload = True
raise ImportError
except ImportError:
ez = {}
if USE_DISTRIBUTE:
exec urllib2.urlopen('http://python-distribute.org/distribute_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True)
else:
exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
if to_reload:
reload(pkg_resources)
else:
import pkg_resources
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
def quote (c):
return c
ws = pkg_resources.working_set
if USE_DISTRIBUTE:
requirement = 'distribute'
else:
requirement = 'setuptools'
env = dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse(requirement)).location
)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(tmpeggs)]
if 'bootstrap-testing-find-links' in os.environ:
cmd.extend(['-f', os.environ['bootstrap-testing-find-links']])
cmd.append('zc.buildout' + VERSION)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
assert exitcode == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| [
"hamza@gmail.com"
] | hamza@gmail.com |
6ff49b287f974270f1f0ae428a47faaa8bfd7917 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03018/s719990159.py | 1ca3886d16372d3fff541701f826d87af32d984f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | s=input().replace("BC","X")
ans=0
acc=0
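# acc holds the length of the current run of 'A's; each placeholder 'X'
# (an original "BC" pair) adds that run length to the answer.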
for i in range(len(s)):
if s[i]=="B" or s[i]=="C":
acc=0
elif s[i]=="A":
acc+=1
elif s[i]=="X":
ans+=acc
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a9c5137c80c6300de125d4f72e847585701420f7 | d0e80cae1af4ced7b0fe7e7dc178fcf8ff3b3c86 | /pocean/dsg/timeseriesProfile/r.py | 29111700d91ad7b66887fe596d262e36b6f755d9 | [
"MIT"
] | permissive | TomasTorsvik-tools/pocean-core-TTfork | 5d8d1276e7476662259162691b3e2ac56c4a0e1e | de28a80cd234573e42e25de7e13fcc97e1b819ea | refs/heads/master | 2022-12-21T05:27:15.691034 | 2020-09-22T14:58:02 | 2020-09-22T14:58:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,629 | py | #!python
# coding=utf-8
from copy import copy
from collections import OrderedDict
import six
import numpy as np
import pandas as pd
from pocean.utils import (
create_ncvar_from_series,
dict_update,
downcast_dataframe,
generic_masked,
get_default_axes,
get_dtype,
get_mapped_axes_variables,
get_masked_datetime_array,
get_ncdata_from_series,
nativize_times,
normalize_countable_array,
)
from pocean.cf import CFDataset, cf_safe_name
from pocean.utils import normalize_array
from pocean.utils import logger as L # noqa
class RaggedTimeseriesProfile(CFDataset):
@classmethod
def is_mine(cls, dsg, strict=False):
try:
assert dsg.featureType.lower() == 'timeseriesprofile'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
assert len(dsg.z_axes()) >= 1
o_index_vars = dsg.filter_by_attrs(
sample_dimension=lambda x: x is not None
)
assert len(o_index_vars) == 1
assert o_index_vars[0].sample_dimension in dsg.dimensions # Sample dimension
_ = dsg.filter_by_attrs(
cf_role='profile_id'
)[0]
svar = dsg.filter_by_attrs(
cf_role='timeseries_id'
)[0]
sdata = normalize_array(svar)
if not isinstance(sdata, six.string_types) and len(sdata.shape) > 0:
r_index_vars = dsg.filter_by_attrs(
instance_dimension=lambda x: x is not None
)
assert len(r_index_vars) == 1
assert r_index_vars[0].instance_dimension in dsg.dimensions # Station dimension
except BaseException:
if strict is True:
raise
return False
return True
@classmethod
def from_dataframe(cls, df, output, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
daxes = axes
reduce_dims = kwargs.pop('reduce_dims', False)
unlimited = kwargs.pop('unlimited', False)
unique_dims = kwargs.pop('unique_dims', False)
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
# which is not supported in xarray
changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)
# Downcast anything from int64 to int32
# Convert any timezone aware datetimes to native UTC times
df = downcast_dataframe(nativize_times(df))
with RaggedTimeseriesProfile(output, 'w') as nc:
station_groups = df.groupby(axes.station)
unique_stations = list(station_groups.groups.keys())
num_stations = len(unique_stations)
# Calculate the max number of profiles
profile_groups = df.groupby(axes.profile)
unique_profiles = list(profile_groups.groups.keys())
num_profiles = len(unique_profiles)
nc.createDimension(daxes.profile, num_profiles)
if reduce_dims is True and num_stations == 1:
# If a singlular station, remove the dimension
station_dimensions = ()
s_ind = None
else:
station_dimensions = (daxes.station,)
nc.createDimension(daxes.station, num_stations)
# The station this profile belongs to
s_ind = nc.createVariable('stationIndex', 'i4', (daxes.profile,))
station = nc.createVariable(axes.station, get_dtype(unique_stations), station_dimensions)
profile = nc.createVariable(axes.profile, get_dtype(df[axes.profile]), (daxes.profile,))
latitude = nc.createVariable(axes.y, get_dtype(df[axes.y]), station_dimensions)
longitude = nc.createVariable(axes.x, get_dtype(df[axes.x]), station_dimensions)
# Get unique obs by grouping on traj and profile and getting the max size
if unlimited is True:
nc.createDimension(daxes.sample, None)
else:
nc.createDimension(daxes.sample, len(df))
# Number of observations in each profile
row_size = nc.createVariable('rowSize', 'i4', (daxes.profile,))
# Axes variables are already processed so skip them
data_columns = [ d for d in df.columns if d not in axes ]
data_columns += [axes.t, axes.z] # time isn't really special, its dimensioned by obs
attributes = dict_update(nc.nc_attributes(axes, daxes), kwargs.pop('attributes', {}))
for i, (sname, srg) in enumerate(station_groups):
station[i] = sname
latitude[i] = df[axes.y][df[axes.station] == sname].dropna().iloc[0]
longitude[i] = df[axes.x][df[axes.station] == sname].dropna().iloc[0]
for j, (pname, pfg) in enumerate(profile_groups):
profile[j] = pname
row_size[j] = len(pfg)
if s_ind is not None:
s_ind[j] = np.asscalar(np.argwhere(station[:] == pfg[axes.station].dropna().iloc[0]))
# Add back in the z axes that was removed when calculating data_columns
# and ignore variables that were stored in the profile index
skips = ['stationIndex', 'rowSize']
for c in [ d for d in data_columns if d not in skips ]:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
v = create_ncvar_from_series(
nc,
var_name,
(daxes.sample,),
df[c],
zlib=True,
complevel=1
)
else:
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(df[c], v)
try:
if unlimited is True:
v[:] = vvalues
else:
v[:] = vvalues.reshape(v.shape)
except BaseException:
L.exception('Failed to add {}'.format(c))
continue
# Metadata variables
nc.createVariable('crs', 'i4')
# Set attributes
nc.update_attributes(attributes)
return RaggedTimeseriesProfile(output, **kwargs)
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True):
# if df is None:
# df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows)
raise NotImplementedError
def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
axv = get_mapped_axes_variables(self, axes)
# Profile dimension
p_var = self.filter_by_attrs(cf_role='profile_id')[0]
p_dim = self.dimensions[p_var.dimensions[0]]
# Station dimension
s_var = self.filter_by_attrs(cf_role='timeseries_id')[0]
if s_var.ndim == 1:
s_dim = self.dimensions[s_var.dimensions[0]]
elif s_var.ndim == 0:
s_dim = None
else:
raise ValueError('Number of dimension on the station (timeseries_id) must be 0 or 1')
# Station index
r_index_var = self.filter_by_attrs(instance_dimension=lambda x: x is not None)
if not r_index_var:
# A reduced netCDF file, set station to 0 so it pulls the first value
# of the variable that identifies the stations
r_index_var = [0]
else:
r_index_var = r_index_var[0]
# Sample (obs) dimension
o_index_var = self.filter_by_attrs(sample_dimension=lambda x: x is not None)
if not o_index_var:
raise ValueError(
'Could not find the "sample_dimension" attribute on any variables, '
'is this a valid {}?'.format(self.__class__.__name__)
)
else:
o_index_var = o_index_var[0]
# Sample dimension
# Since this is a flat dataframe, everything is the length of the obs dimension
row_sizes = o_index_var[:]
o_dim = self.dimensions[o_index_var.sample_dimension]
profile_indexes = normalize_countable_array(p_var, count_if_none=p_dim.size)
p = np.repeat(profile_indexes, row_sizes)
stat_indexes = normalize_countable_array(s_var, count_if_none=s_dim.size)
r = np.ma.masked_all(o_dim.size, dtype=stat_indexes.dtype)
# Lat and Lon are on the station dimension
xvar = axv.x
x = np.ma.masked_all(o_dim.size, dtype=xvar.dtype)
yvar = axv.y
y = np.ma.masked_all(o_dim.size, dtype=yvar.dtype)
si = 0
for i in np.arange(stat_indexes.size):
ei = si + o_index_var[i]
r[si:ei] = np.array(stat_indexes[r_index_var[i]])
x[si:ei] = xvar[i]
y[si:ei] = yvar[i]
si = ei
x = generic_masked(x, minv=-180, maxv=180)
y = generic_masked(y, minv=-90, maxv=90)
# Time and Z are on the sample (obs) dimension
tvar = axv.t
t = get_masked_datetime_array(
generic_masked(tvar[:].flatten(), attrs=self.vatts(tvar.name)),
tvar
)
z = generic_masked(axv.z[:].flatten(), attrs=self.vatts(axv.z.name))
df_data = OrderedDict([
(axes.t, t),
(axes.x, x),
(axes.y, y),
(axes.z, z),
(axes.station, r),
(axes.profile, p)
])
building_index_to_drop = np.ones(o_dim.size, dtype=bool)
extract_vars = copy(self.variables)
# Skip the station and row index variables
del extract_vars[o_index_var.name]
del extract_vars[r_index_var.name]
# Axes variables are already processed so skip them
for ncvar in axv._asdict().values():
if ncvar is not None and ncvar.name in extract_vars:
del extract_vars[ncvar.name]
for i, (dnam, dvar) in enumerate(extract_vars.items()):
# Profile dimensions
if dvar.dimensions == (p_dim.name,):
vdata = generic_masked(
np.repeat(
dvar[:].flatten().astype(dvar.dtype),
row_sizes
),
attrs=self.vatts(dnam)
)
# Sample dimensions
elif dvar.dimensions == (o_dim.name,):
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
else:
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
continue
# Mark rows with data so we don't remove them with clear_rows
if vdata.size == building_index_to_drop.size:
building_index_to_drop = (building_index_to_drop == True) & (vdata.mask == True) # noqa
# Handle scalars here at the end
if vdata.size == 1:
vdata = vdata[0]
df_data[dnam] = vdata
df = pd.DataFrame(df_data)
# Drop all data columns with no data
if clean_cols:
df = df.dropna(axis=1, how='all')
# Drop all data rows with no data variable data
if clean_rows:
df = df.iloc[~building_index_to_drop]
return df
def nc_attributes(self, axes, daxes):
atts = super(RaggedTimeseriesProfile, self).nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'timeSeriesProfile',
'cdm_data_type': 'TimeseriesProfile',
'cdm_timeseries_variables': axes.station,
'cdm_profile_variables': axes.profile,
'subsetVariables': '{x},{y},{t},{station}'.format(**axes._asdict())
},
axes.station : {
'cf_role': 'timeseries_id',
'long_name' : 'station identifier',
'ioos_category': 'identifier'
},
axes.profile : {
'cf_role': 'profile_id',
'long_name' : 'profile identifier',
'ioos_category': 'identifier'
},
axes.x: {
'axis': 'X'
},
axes.y: {
'axis': 'Y'
},
axes.z: {
'axis': 'Z'
},
axes.t: {
'units': self.default_time_unit,
'standard_name': 'time',
'axis': 'T'
},
'stationIndex': {
'long_name': 'which station this profile belongs to',
'instance_dimension': daxes.station
},
'rowSize': {
'long_name': 'number of obs in this profile',
'sample_dimension': daxes.sample
}
})
| [
"kyle@axiomdatascience.com"
] | kyle@axiomdatascience.com |
e55bb7d0e2394ed842d96643697053021e38e637 | 4142b8c513d87361da196631f7edd82f11465abb | /python/round550/1144A.py | 58cb00fccbaacef165270928c128d3c682aaeb87 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from sys import stdin
for _ in range(int(stdin.readline())):
s = sorted(list(stdin.readline().strip()))
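    # The string qualifies only if, after sorting, every adjacent pair of letters
    # differs by exactly 1 (distinct, consecutive alphabet positions).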
direction = 0
res = 'Yes'
for i in range(1, len(s)):
cur = ord(s[i]) - ord(s[i-1])
if cur != 1:
res = 'No'
break
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
32a501f2fc6eb9540010a9ef115e67b914460788 | f7630fd6c829cb306e72472296e3a513844d99af | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_replacemsggroup_utm.py | 5c9a7947333fb6d15ac03c289003d0af40a4c779 | [] | no_license | baltah666/automation | 6eccce20c83dbe0d5aa9a82a27937886e3131d32 | 140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52 | refs/heads/master | 2023-03-07T10:53:21.187020 | 2023-02-10T08:39:38 | 2023-02-10T08:39:38 | 272,007,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,263 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_system_replacemsggroup_utm
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "1.0.0"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: |
only set to True when module schema diffs with FortiManager API structure,
module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: |
the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
replacemsg-group:
description: the parameter (replacemsg-group) in requested url
type: str
required: true
system_replacemsggroup_utm:
description: the top level parameters set
required: false
type: dict
suboptions:
buffer:
type: str
description: no description
format:
type: str
description: no description
choices:
- 'none'
- 'text'
- 'html'
- 'wml'
header:
type: str
description: no description
choices:
- 'none'
- 'http'
- '8bit'
msg-type:
type: str
description: no description
'''
EXAMPLES = '''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Replacement message table entries.
fmgr_system_replacemsggroup_utm:
bypass_validation: False
adom: ansible
replacemsg-group: ansible-test # name
state: present
system_replacemsggroup_utm:
buffer: ansible-buffer
format: text #<value in [none, text, html, ...]>
header: none #<value in [none, http, 8bit]>
msg-type: ansible-msgtype # required
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: retrieve all the UTMs of replacement message group
fmgr_fact:
facts:
selector: 'system_replacemsggroup_utm'
params:
adom: 'ansible'
replacemsg-group: 'ansible-test' # name
utm: 'your_value'
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/system/replacemsg-group/{replacemsg-group}/utm',
'/pm/config/global/obj/system/replacemsg-group/{replacemsg-group}/utm'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/system/replacemsg-group/{replacemsg-group}/utm/{utm}',
'/pm/config/global/obj/system/replacemsg-group/{replacemsg-group}/utm/{utm}'
]
url_params = ['adom', 'replacemsg-group']
module_primary_key = 'msg-type'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'replacemsg-group': {
'required': True,
'type': 'str'
},
'system_replacemsggroup_utm': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'options': {
'buffer': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'str'
},
'format': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'choices': [
'none',
'text',
'html',
'wml'
],
'type': 'str'
},
'header': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'choices': [
'none',
'http',
'8bit'
],
'type': 'str'
},
'msg-type': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True,
'7.2.0': True
},
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'system_replacemsggroup_utm'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"baltah666@gmail.com"
] | baltah666@gmail.com |
0edac64487d82dd2cf104d28b0e826a21bc6b79f | 347569ec4be307e9ae78286da0280e95f2689d27 | /updates/api/views.py | 5459b3e9930bbcbebfcb0667eb7554762f17a222 | [] | no_license | rahulsayon/Django-api | f45763330e91ffb9ccb12b686b9c0cb2af7d6fbb | f7d042086ad34f59a6ae92d920f2426b91ddda7b | refs/heads/master | 2022-12-11T01:33:48.193457 | 2020-09-19T19:26:14 | 2020-09-19T19:26:14 | 296,937,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | #from django.shortcuts import render
# Create your views here.
from updates.models import Update as UpdateModel
from django.views.generic import View
from django.http import HttpResponse
from updates.mixin import CSRFExemptMixin
from cfeapi.mixins import HttpResponseMixin
import json
class UpdateModelDetailAPIView(HttpResponseMixin,CSRFExemptMixin,View):
is_json = True
def get(self , request , id ,*args , **kwargs):
obj = UpdateModel.objects.get(id=1)
json_data = obj.serialize()
return self.render_to_response(json_data)
def post(self , request , *args , **kwargs):
json_data = {}
return self.render_to_response(json_data)
def put(self , request , *args, **kwargs):
json_data = {}
return self.render_to_response(json_data)
def delete(self , request , *args , **kwargs):
json_data = {}
return self.render_to_response(json_data , status=403)
class UpdateModelListAPIView(HttpResponseMixin,CSRFExemptMixin,View):
is_json = True
def get(self , request , *args , **kwargs):
qs = UpdateModel.objects.all()
json_data = qs.serialize()
#return HttpResponse(json_data , content_type='application/json')
        return self.render_to_response(json_data)
def post(self , request , *args , **kwargs):
data = json.dumps({"message" : "Unkonw data"})
#return HttpResponse(data , content_type='application/json')
return self.render_to_response(data , status=400)
def delete(self , request , *args , **kwargs):
data = json.dumps({"message" : "you can not delete an entire list"})
status_code = 403
return self.render_to_response(data, status=403)
| [
"rahulsayon95@gmail.com"
] | rahulsayon95@gmail.com |
5748278cb172838464abd74abc7813dda1031e03 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-Cocoa/PyObjCTest/test_nsfilewrapper.py | 8f242fb17d835c1952adda1f7f75d57c09d1e3a9 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 2,001 | py | import AppKit
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSFileWrapper(TestCase):
def test_enum_types(self):
self.assertIsEnumType(Foundation.NSFileWrapperReadingOptions)
self.assertIsEnumType(Foundation.NSFileWrapperWritingOptions)
def testMethods(self):
self.assertResultIsBOOL(
AppKit.NSFileWrapper.writeToFile_atomically_updateFilenames_
)
self.assertArgIsBOOL(
AppKit.NSFileWrapper.writeToFile_atomically_updateFilenames_, 1
)
self.assertArgIsBOOL(
AppKit.NSFileWrapper.writeToFile_atomically_updateFilenames_, 2
)
self.assertResultIsBOOL(AppKit.NSFileWrapper.isRegularFile)
self.assertResultIsBOOL(AppKit.NSFileWrapper.isDirectory)
self.assertResultIsBOOL(AppKit.NSFileWrapper.isSymbolicLink)
self.assertResultIsBOOL(AppKit.NSFileWrapper.needsToBeUpdatedFromPath_)
self.assertResultIsBOOL(AppKit.NSFileWrapper.updateFromPath_)
@min_os_level("10.6")
def testConstants10_6(self):
self.assertEqual(AppKit.NSFileWrapperReadingImmediate, 1 << 0)
self.assertEqual(AppKit.NSFileWrapperReadingWithoutMapping, 1 << 1)
self.assertEqual(AppKit.NSFileWrapperWritingAtomic, 1 << 0)
self.assertEqual(AppKit.NSFileWrapperWritingWithNameUpdating, 1 << 1)
@min_os_level("10.6")
def testMethods10_6(self):
self.assertArgIsOut(AppKit.NSFileWrapper.initWithURL_options_error_, 2)
self.assertResultIsBOOL(AppKit.NSFileWrapper.matchesContentsOfURL_)
self.assertResultIsBOOL(AppKit.NSFileWrapper.readFromURL_options_error_)
self.assertArgIsOut(AppKit.NSFileWrapper.readFromURL_options_error_, 2)
self.assertResultIsBOOL(
AppKit.NSFileWrapper.writeToURL_options_originalContentsURL_error_
)
self.assertArgIsOut(
AppKit.NSFileWrapper.writeToURL_options_originalContentsURL_error_, 3
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
f1d8ef12acecd27c70767e80673ba6b6dba7adba | 52f68b99981d16a6297ecacd4e8b92790daf0ef9 | /23.py | 05b94be1851770ce14ec01cce3b09e51adce1d35 | [] | no_license | ComputahSaysNo/AOC_2019 | a960214257016d2f376b20381a84bc3ba60f9f63 | 35867882647e1923a27216dae85388f13e402a68 | refs/heads/master | 2020-11-24T01:55:00.153911 | 2020-07-18T08:58:09 | 2020-07-18T08:58:09 | 227,914,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,901 | py | from processInputs import get_formatted_input
from intcode import IntcodeComputer
def part_1_and_2(data):
outputs = []
for part in (1, 2):
packetQueue = []
def inf():
return -1
network = [IntcodeComputer(data, inf) for i in range(50)]
for i in range(50):
network[i].give_next_input(i)
packetQueue.append([])
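        # nat remembers the most recent packet addressed to 255; when every queue
        # is empty (network idle) it is re-delivered to computer 0 (used in part 2).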
nat = [0, 0]
history = []
running = True
while running:
idle = True
for i in range(len(network)):
computer = network[i]
queue = packetQueue[i]
if len(queue) == 0:
computer.give_next_input(-1)
else:
idle = False
while (len(queue)) > 0:
packet = queue.pop(0)
computer.give_next_input(packet[0])
computer.give_next_input(packet[1])
while len(computer.outputs) > 0:
dest, x, y = computer.outputs[-3], computer.outputs[-2], computer.outputs[-1]
if dest == 255:
if part == 1:
outputs.append(y)
running = False
nat = [x, y]
else:
packetQueue[dest].append([x, y])
computer.outputs = computer.outputs[:-3]
if idle:
packetQueue[0].append(nat)
history.append(nat)
if len(history) > 2:
history.pop(0)
if history[-1][1] == history[-2][1]:
if history[-1][1] != 0:
outputs.append(history[-1][1])
return outputs
INPUT = get_formatted_input(23)
print(part_1_and_2(INPUT))
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
bc784b221bfe2c3829ae2b9ef8a9c755c1dee450 | f07b0142e37afe0bf8ed4d56399a0a49f5b1801b | /lino_xl/lib/beid/views.py | 2eeb93702cb6494e66469ccf7a09193753635492 | [
"BSD-2-Clause"
] | permissive | khchine5/xl | af70fb21e4caeb05ff62e9618113c278d71a75ed | b1634937a9ce87af1e948eb712b934b11f221d9d | refs/heads/master | 2021-01-20T22:51:01.193260 | 2018-08-22T07:47:43 | 2018-08-22T07:47:43 | 52,145,840 | 1 | 0 | BSD-2-Clause | 2018-08-19T12:29:06 | 2016-02-20T09:21:19 | Python | UTF-8 | Python | false | false | 2,578 | py | # -*- coding: UTF-8 -*-
# Copyright 2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Views for `lino.modlib.bootstrap3`.
"""
from __future__ import division
from os.path import join
import time
import json
# from django import http
# from django.conf import settings
from django.views.generic import View
# from django.core import exceptions
from lino.core.views import json_response
from lino.api import dd, _
def load_card_data(uuid):
# raise Exception("20180412 {}".format(uuid))
fn = dd.plugins.beid.data_cache_dir.child(uuid)
timeout = dd.plugins.beid.eidreader_timeout
count = 0
while True:
try:
fp = open(fn)
rv = json.load(fp)
fp.close()
# dd.logger.info("20180412 json.load({}) returned {}".format(
# fn, rv))
return rv
# raise Warning(
# _("Got invalid card data {} from eidreader.").format(rv))
except IOError as e:
# dd.logger.info("20180412 {} : {}".format(fn, e))
time.sleep(1)
count += 1
if count > timeout:
raise Warning(_("Abandoned after {} seconds").format(
timeout))
# rv = dict(success=False)
# break
# continue
class EidStore(View):
# def get(self, request, uuid, **kw):
# print("20180412 GET {} {}".format(uuid, request.GET))
# return json_response()
def post(self, request, uuid, **kw):
# uuid = request.POST.get('uuid')
card_data = request.POST.get('card_data')
# card_data = json.loads(card_data)
# msg = "20180412 raw data {}".format(request.body)
# dd.logger.info(msg)
# if not card_data:
# raise Exception("No card_data found in {}".format(
# request.POST))
fn = dd.plugins.beid.data_cache_dir.child(uuid)
# pth = dd.plugins.beid.data_cache_dir
# pth = join(pth, uuid)
try:
fp = open(fn, 'w')
fp.write(card_data)
# json.dump(card_data, fp)
fp.close()
except IOError as e:
dd.logger.warning(
"Failed to store data to file %s : %s", fn, e)
# msg = "20180412 wrote {} {}".format(fn, card_data)
# dd.logger.info(msg)
# username = request.POST.get('username')
# return http.HttpResponseRedirect(target)
return json_response(dict(success=True, message="OK"))
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
2e2d8dd31cf7a6ea046fe522c3c9294575c558db | 0369fb3fa86b09ea8472741a4d71c658d9d7eb0a | /tensorflow_model_analysis/metrics/tf_metric_wrapper_test.py | 7bf0412531ee1852be74cc9d0be9479e1e0beb97 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | dhruvesh09/model-analysis | ec85883490529b54bb9d1850baee669db06b56cf | 3fddfbecf1e31b7f5edc84ecd4d935dbf66d8022 | refs/heads/master | 2021-12-04T05:36:02.118840 | 2021-09-20T16:42:28 | 2021-09-20T16:43:52 | 287,142,586 | 0 | 0 | Apache-2.0 | 2020-08-13T00:24:11 | 2020-08-13T00:24:11 | null | UTF-8 | Python | false | false | 37,537 | py | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TF metric wrapper."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import metric_util
from tensorflow_model_analysis.metrics import tf_metric_wrapper
from tensorflow_model_analysis.proto import config_pb2
class _CustomMetric(tf.keras.metrics.Mean):
def __init__(self, name='custom', dtype=None, update_y_pred=True):
super(_CustomMetric, self).__init__(name=name, dtype=dtype)
self.update_y_pred = update_y_pred
def update_state(self, y_true, y_pred, sample_weight):
return super(_CustomMetric, self).update_state(
y_pred if self.update_y_pred else y_true, sample_weight=sample_weight)
def get_config(self):
cfg = super(tf.keras.metrics.Mean, self).get_config()
cfg.update({'update_y_pred': self.update_y_pred})
return cfg
class _CustomConfusionMatrixMetric(tf.keras.metrics.Precision):
def __init__(self, name='custom', dtype=None):
super(_CustomConfusionMatrixMetric, self).__init__(name=name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight):
super(_CustomConfusionMatrixMetric, self).update_state(
y_true, y_pred, sample_weight=sample_weight)
def get_config(self):
# Remove config items we don't accept or they will be passed to __init__.
base_config = super(tf.keras.metrics.Precision, self).get_config()
return {'name': base_config['name'], 'dtype': base_config['dtype']}
class ConfusionMatrixMetricsTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
# This is needed because of pickling errors when using
# parameterized.named_parameters with TF metric types.
def _tf_metric_by_name(self, metric_name):
"""Returns instance of tf.keras.metric with default args given name."""
if metric_name == 'auc':
return tf.keras.metrics.AUC(name='auc')
elif metric_name == 'auc_pr':
return tf.keras.metrics.AUC(name='auc_pr', curve='PR')
elif metric_name == 'precision':
return tf.keras.metrics.Precision(name='precision')
elif metric_name == 'precision@2':
return tf.keras.metrics.Precision(name='precision@2', top_k=2)
elif metric_name == 'precision@3':
return tf.keras.metrics.Precision(name='precision@3', top_k=3)
elif metric_name == 'recall':
return tf.keras.metrics.Recall(name='recall')
elif metric_name == 'recall@2':
return tf.keras.metrics.Recall(name='recall@2', top_k=2)
elif metric_name == 'recall@3':
return tf.keras.metrics.Recall(name='recall@3', top_k=3)
elif metric_name == 'true_positives':
return tf.keras.metrics.TruePositives(name='true_positives')
elif metric_name == 'false_positives':
return tf.keras.metrics.FalsePositives(name='false_positives')
elif metric_name == 'true_negatives':
return tf.keras.metrics.TrueNegatives(name='true_negatives')
elif metric_name == 'false_negatives':
return tf.keras.metrics.FalseNegatives(name='false_negatives')
elif metric_name == 'specificity_at_sensitivity':
return tf.keras.metrics.SpecificityAtSensitivity(
0.5, name='specificity_at_sensitivity')
elif metric_name == 'sensitivity_at_specificity':
return tf.keras.metrics.SensitivityAtSpecificity(
0.5, name='sensitivity_at_specificity')
@parameterized.named_parameters(
('auc', 'auc', 0.75),
('auc_pr', 'auc_pr', 0.79727),
('precision', 'precision', 1.0),
('recall', 'recall', 0.5),
('true_positives', 'true_positives', 1.0),
('false_positives', 'false_positives', 0.0),
('true_negatives', 'true_negatives', 2.0),
('false_negatives', 'false_negatives', 1.0),
('specificity_at_sensitivity', 'specificity_at_sensitivity', 1.0),
('sensitivity_at_specificity', 'sensitivity_at_specificity', 1.0),
)
def testMetricsWithoutWeights(self, metric_name, expected_value):
# TODO (b/151636380): remove when CL/299961405 is propagated through Kokoro.
if metric_name == 'specificity_at_sensitivity':
fix_present = hasattr(tf.keras.metrics.SpecificityAtSensitivity,
'_find_max_under_constraint')
if not fix_present:
expected_value = 0.5
computations = tf_metric_wrapper.tf_metric_computations(
[self._tf_metric_by_name(metric_name)])
histogram = computations[0]
matrix = computations[1]
metric = computations[2]
example1 = {
'labels': np.array([0.0]),
'predictions': np.array([0.0]),
'example_weights': np.array([1.0]),
}
example2 = {
'labels': np.array([0.0]),
'predictions': np.array([0.5]),
'example_weights': np.array([1.0]),
}
example3 = {
'labels': np.array([1.0]),
'predictions': np.array([0.3]),
'example_weights': np.array([1.0]),
}
example4 = {
'labels': np.array([1.0]),
'predictions': np.array([0.9]),
'example_weights': np.array([1.0]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3, example4])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
| 'ComputeConfusionMatrix' >> beam.Map(
lambda x: (x[0], matrix.result(x[1]))) # pyformat: disable
| 'ComputeMetric' >> beam.Map(
lambda x: (x[0], metric.result(x[1])))) # pyformat: disable
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
key = metric_types.MetricKey(name=metric_name)
self.assertDictElementsAlmostEqual(
got_metrics, {key: expected_value}, places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(
('auc', 'auc', 0.64286),
('auc_pr', 'auc_pr', 0.37467),
('precision', 'precision', 0.5833333),
('recall', 'recall', 1.0),
('true_positives', 'true_positives', 0.7),
('false_positives', 'false_positives', 0.5),
('true_negatives', 'true_negatives', 0.9),
('false_negatives', 'false_negatives', 0.0),
('specificity_at_sensitivity', 'specificity_at_sensitivity', 0.642857),
('sensitivity_at_specificity', 'sensitivity_at_specificity', 1.0),
)
def testMetricsWithWeights(self, metric_name, expected_value):
# TODO (b/151636380): remove when CL/299961405 is propagated through Kokoro.
if metric_name == 'specificity_at_sensitivity':
fix_present = hasattr(tf.keras.metrics.SpecificityAtSensitivity,
'_find_max_under_constraint')
if not fix_present:
expected_value = 0.0
computations = tf_metric_wrapper.tf_metric_computations(
[self._tf_metric_by_name(metric_name)])
histogram = computations[0]
matrix = computations[1]
metric = computations[2]
example1 = {
'labels': np.array([0.0]),
'predictions': np.array([1.0]),
'example_weights': np.array([0.5]),
}
example2 = {
'labels': np.array([1.0]),
'predictions': np.array([0.7]),
'example_weights': np.array([0.7]),
}
example3 = {
'labels': np.array([0.0]),
'predictions': np.array([0.5]),
'example_weights': np.array([0.9]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
| 'ComputeConfusionMatrix' >> beam.Map(
lambda x: (x[0], matrix.result(x[1]))) # pyformat: disable
| 'ComputeMetric' >> beam.Map(
lambda x: (x[0], metric.result(x[1])))) # pyformat: disable
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
key = metric_types.MetricKey(name=metric_name)
self.assertDictElementsAlmostEqual(
got_metrics, {key: expected_value}, places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(
('auc', 'auc', 0.8571428),
('auc_pr', 'auc_pr', 0.77369833),
('true_positives', 'true_positives', 1.4),
('false_positives', 'false_positives', 0.6),
('true_negatives', 'true_negatives', 1.0),
('false_negatives', 'false_negatives', 0.0),
)
def testMetricsWithFractionalLabels(self, metric_name, expected_value):
computations = tf_metric_wrapper.tf_metric_computations(
[self._tf_metric_by_name(metric_name)])
histogram = computations[0]
matrix = computations[1]
metric = computations[2]
# The following examples will be expanded to:
#
# prediction | label | weight
# 0.0 | - | 1.0
# 0.7 | - | 0.4
# 0.7 | + | 0.6
# 1.0 | - | 0.2
# 1.0 | + | 0.8
example1 = {
'labels': np.array([0.0]),
'predictions': np.array([0.0]),
'example_weights': np.array([1.0]),
}
example2 = {
'labels': np.array([0.6]),
'predictions': np.array([0.7]),
'example_weights': np.array([1.0]),
}
example3 = {
'labels': np.array([0.8]),
'predictions': np.array([1.0]),
'example_weights': np.array([1.0]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
| 'ComputeConfusionMatrix' >> beam.Map(
lambda x: (x[0], matrix.result(x[1]))) # pyformat: disable
| 'ComputeMetric' >> beam.Map(
lambda x: (x[0], metric.result(x[1])))) # pyformat: disable
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
key = metric_types.MetricKey(name=metric_name)
self.assertDictElementsAlmostEqual(
got_metrics, {key: expected_value}, places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(
('precision@2', 'precision', 2, 1.6 / (1.6 + 3.2)),
('recall@2', 'recall', 2, 1.6 / (1.6 + 0.8)),
('precision@3', 'precision', 3, 1.9 / (1.9 + 5.3)),
('recall@3', 'recall', 3, 1.9 / (1.9 + 0.5)),
)
def testMultiClassMetricsUsingConfusionMatrix(self, metric_name, top_k,
expected_value):
computations = tf_metric_wrapper.tf_metric_computations(
[self._tf_metric_by_name(metric_name)],
sub_key=metric_types.SubKey(top_k=top_k))
histogram = computations[0]
matrix = computations[1]
metric = computations[2]
# top_k = 2
# TP = 0.5*0 + 0.7*1 + 0.9*1 + 0.3*0 = 1.6
# FP = 0.5*2 + 0.7*1 + 0.9*1 + 0.3*2 = 3.2
# FN = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*1 = 0.8
#
# top_k = 3
# TP = 0.5*0 + 0.7*1 + 0.9*1 + 0.3*1 = 1.9
# FP = 0.5*3 + 0.7*2 + 0.9*2 + 0.3*2 = 5.3
# FN = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*0 = 0.5
example1 = {
'labels': np.array([2]),
'predictions': np.array([0.1, 0.2, 0.1, 0.25, 0.35]),
'example_weights': np.array([0.5]),
}
example2 = {
'labels': np.array([1]),
'predictions': np.array([0.2, 0.3, 0.05, 0.15, 0.3]),
'example_weights': np.array([0.7]),
}
example3 = {
'labels': np.array([3]),
'predictions': np.array([0.01, 0.2, 0.09, 0.5, 0.2]),
'example_weights': np.array([0.9]),
}
example4 = {
'labels': np.array([1]),
'predictions': np.array([0.3, 0.2, 0.05, 0.4, 0.05]),
'example_weights': np.array([0.3]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3, example4])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)
| 'ComputeConfusionMatrix' >> beam.Map(
lambda x: (x[0], matrix.result(x[1]))) # pyformat: disable
| 'ComputeMetric' >> beam.Map(
lambda x: (x[0], metric.result(x[1])))) # pyformat: disable
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
key = metric_types.MetricKey(
name=metric_name, sub_key=metric_types.SubKey(top_k=top_k))
self.assertDictElementsAlmostEqual(
got_metrics, {key: expected_value}, places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(
('precision@2', 'precision@2', 1.6 / (1.6 + 3.2)),
('recall@2', 'recall@2', 1.6 / (1.6 + 0.8)),
('precision@3', 'precision@3', 1.9 / (1.9 + 5.3)),
('recall@3', 'recall@3', 1.9 / (1.9 + 0.5)),
)
def testMultiClassMetricsUsingKerasConfig(self, metric_name, expected_value):
metric = tf_metric_wrapper.tf_metric_computations(
[self._tf_metric_by_name(metric_name)])[0]
# top_k = 2
# TP = 0.5*0 + 0.7*1 + 0.9*1 + 0.3*0 = 1.6
# FP = 0.5*2 + 0.7*1 + 0.9*1 + 0.3*2 = 3.2
# FN = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*1 = 0.8
#
# top_k = 3
# TP = 0.5*0 + 0.7*1 + 0.9*1 + 0.3*1 = 1.9
# FP = 0.5*3 + 0.7*2 + 0.9*2 + 0.3*2 = 5.3
# FN = 0.5*1 + 0.7*0 + 0.9*0 + 0.3*0 = 0.5
example1 = {
'labels': np.array([2]),
'predictions': np.array([0.1, 0.2, 0.1, 0.25, 0.35]),
'example_weights': np.array([0.5]),
}
example2 = {
'labels': np.array([1]),
'predictions': np.array([0.2, 0.3, 0.05, 0.15, 0.3]),
'example_weights': np.array([0.7]),
}
example3 = {
'labels': np.array([3]),
'predictions': np.array([0.01, 0.2, 0.09, 0.5, 0.2]),
'example_weights': np.array([0.9]),
}
example4 = {
'labels': np.array([1]),
'predictions': np.array([0.3, 0.2, 0.05, 0.4, 0.05]),
'example_weights': np.array([0.3]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3, example4])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(metric.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
top_k = int(metric_name.split('@')[1])
key = metric_types.MetricKey(
name=metric_name, sub_key=metric_types.SubKey(top_k=top_k))
self.assertDictElementsAlmostEqual(
got_metrics, {key: expected_value}, places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
class NonConfusionMatrixMetricsTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
def testSimpleMetric(self):
computation = tf_metric_wrapper.tf_metric_computations(
[tf.keras.metrics.MeanSquaredError(name='mse')])[0]
example = {
'labels': [0, 0, 1, 1],
'predictions': [0, 0.5, 0.3, 0.9],
'example_weights': [1.0]
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
mse_key = metric_types.MetricKey(name='mse')
self.assertDictElementsAlmostEqual(got_metrics, {mse_key: 0.1875})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testSparseMetric(self):
computation = tf_metric_wrapper.tf_metric_computations([
tf.keras.metrics.SparseCategoricalCrossentropy(
name='sparse_categorical_crossentropy')
])[0]
# Simulate a multi-class problem with 3 labels.
example = {
'labels': [1],
'predictions': [0.3, 0.6, 0.1],
'example_weights': [1.0]
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
key = metric_types.MetricKey(name='sparse_categorical_crossentropy')
# 0*log(.3) -1*log(0.6)-0*log(.1) = 0.51
self.assertDictElementsAlmostEqual(got_metrics, {key: 0.51083})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testRaisesErrorForInvalidNonSparseSettings(self):
with self.assertRaises(ValueError):
tf_metric_wrapper.tf_metric_computations(
[
tf.keras.metrics.SparseCategoricalCrossentropy(
name='sparse_categorical_crossentropy')
],
aggregation_type=metric_types.AggregationType(micro_average=True))
def testMetricWithClassWeights(self):
computation = tf_metric_wrapper.tf_metric_computations(
[tf.keras.metrics.MeanSquaredError(name='mse')],
aggregation_type=metric_types.AggregationType(micro_average=True),
class_weights={
0: 0.1,
1: 0.2,
2: 0.3,
3: 0.4
})[0]
# Simulate a multi-class problem with 4 labels. The use of class weights
# implies micro averaging which only makes sense for multi-class metrics.
example = {
'labels': [0, 0, 1, 0],
'predictions': [0, 0.5, 0.3, 0.9],
'example_weights': [1.0]
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
mse_key = metric_types.MetricKey(name='mse')
# numerator = (0.1*0**2 + 0.2*0.5**2 + 0.3*0.7**2 + 0.4*0.9**2)
# denominator = (.1 + .2 + 0.3 + 0.4)
# numerator / denominator = 0.521
self.assertDictElementsAlmostEqual(got_metrics, {mse_key: 0.521})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testCustomTFMetric(self):
metric = tf_metric_wrapper.tf_metric_computations([_CustomMetric()])[0]
example1 = {'labels': [0.0], 'predictions': [0.2], 'example_weights': [1.0]}
example2 = {'labels': [0.0], 'predictions': [0.8], 'example_weights': [1.0]}
example3 = {'labels': [0.0], 'predictions': [0.5], 'example_weights': [2.0]}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(metric.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
custom_key = metric_types.MetricKey(name='custom')
self.assertDictElementsAlmostEqual(
got_metrics,
{custom_key: (0.2 + 0.8 + 2 * 0.5) / (1.0 + 1.0 + 2.0)})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testCustomConfusionMatrixTFMetric(self):
metric = tf_metric_wrapper.tf_metric_computations(
[_CustomConfusionMatrixMetric()])[0]
# tp = 1
# fp = 1
example1 = {'labels': [0.0], 'predictions': [0.7], 'example_weights': [1.0]}
example2 = {'labels': [1.0], 'predictions': [0.8], 'example_weights': [1.0]}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(metric.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
custom_key = metric_types.MetricKey(name='custom')
self.assertDictElementsAlmostEqual(got_metrics,
{custom_key: 1.0 / (1.0 + 1.0)})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
@parameterized.named_parameters(*[
dict(
testcase_name='within_example',
example_indices=[0],
# label_sum = (1 - 1 - 1 - 1) * 1.0 = -2.0
# pred_sum = (0.1 + 0.2 + 0.3 + 0.0) = 0.6
# weights_total = 1.0 * 4 = 4.0
expected={
metric_types.MetricKey(name='custom_label'): -2.0 / 4.0,
metric_types.MetricKey(name='custom_pred'): 0.6 / 4.0
}),
dict(
testcase_name='across_examples',
# label_sum = (1 - 1 - 1 - 1) * 1.0 +
# (1 + 2 - 1.0 - 1) * 1.0 +
# (1 + 2 + 3 - 1) * 2.0
# = 9.0
#
# pred_sum = (0.1 + 0.2 + 0.3 + 0.0) * 1.0 +
# (0.1 + 0.2 + 0.0 - 1.0) * 1.0 +
# (0.1 + 0.2 + 0.3 - 1.0) * 2.0
# = -0.9
#
# weights_total = (1.0 * 4 + 1.0 * 4 + 2.0 * 4) = 16.0
example_indices=[0, 1, 2],
expected={
metric_types.MetricKey(name='custom_label'): 9.0 / 16.0,
metric_types.MetricKey(name='custom_pred'): -0.9 / 16.0
}),
])
def testCustomTFMetricWithPadding(self, example_indices, expected):
computation = tf_metric_wrapper.tf_metric_computations(
[
_CustomMetric(name='custom_label', update_y_pred=False),
_CustomMetric(name='custom_pred', update_y_pred=True),
],
eval_config=config_pb2.EvalConfig(model_specs=[
config_pb2.ModelSpec(
padding_options=config_pb2.PaddingOptions(
label_int_padding=-1,
prediction_float_padding=-1.0,
))
]))[0]
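    # PaddingOptions pad the shorter label/prediction arrays with -1 / -1.0 so the
    # variable-length examples below can be batched together for the metric update.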
examples = [{
'labels': np.array([1], dtype=np.int64),
'predictions': np.array([0.1, 0.2, 0.3, 0.0]),
'example_weights': np.array([1.0])
}, {
'labels': np.array([1, 2], dtype=np.int64),
'predictions': np.array([0.1, 0.2, 0.0]),
'example_weights': np.array([1.0])
}, {
'labels': np.array([1, 2, 3], dtype=np.int64),
'predictions': np.array([0.1, 0.2, 0.3]),
'example_weights': np.array([2.0])
}]
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([examples[i] for i in example_indices])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
custom_label_key = metric_types.MetricKey(name='custom_label')
custom_pred_key = metric_types.MetricKey(name='custom_pred')
self.assertDictElementsAlmostEqual(got_metrics, expected)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testMultiOutputTFMetric(self):
computation = tf_metric_wrapper.tf_metric_computations({
'output_name': [tf.keras.metrics.MeanSquaredError(name='mse')],
})[0]
extracts = {
'labels': {
'output_name': [0, 0, 1, 1],
},
'predictions': {
'output_name': [0, 0.5, 0.3, 0.9],
},
'example_weights': {
'output_name': [1.0]
}
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([extracts])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
mse_key = metric_types.MetricKey(
name='mse', output_name='output_name')
self.assertDictElementsAlmostEqual(got_metrics, {
mse_key: 0.1875,
})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testTFMetricWithClassID(self):
computation = tf_metric_wrapper.tf_metric_computations(
[tf.keras.metrics.MeanSquaredError(name='mse')],
sub_key=metric_types.SubKey(class_id=1))[0]
example1 = {
'labels': [2],
'predictions': [0.5, 0.0, 0.5],
'example_weights': [1.0]
}
example2 = {
'labels': [0],
'predictions': [0.2, 0.5, 0.3],
'example_weights': [1.0]
}
example3 = {
'labels': [1],
'predictions': [0.2, 0.3, 0.5],
'example_weights': [1.0]
}
example4 = {
'labels': [1],
'predictions': [0.0, 0.9, 0.1],
'example_weights': [1.0]
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3, example4])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
mse_key = metric_types.MetricKey(
name='mse', sub_key=metric_types.SubKey(class_id=1))
self.assertDictElementsAlmostEqual(got_metrics, {
mse_key: 0.1875,
})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testBatching(self):
computation = tf_metric_wrapper.tf_metric_computations(
[_CustomMetric(),
tf.keras.metrics.MeanSquaredError(name='mse')],
desired_batch_size=2)[0]
example1 = {'labels': [0.0], 'predictions': [0.0], 'example_weights': [1.0]}
example2 = {'labels': [0.0], 'predictions': [0.5], 'example_weights': [1.0]}
example3 = {'labels': [1.0], 'predictions': [0.3], 'example_weights': [1.0]}
example4 = {'labels': [1.0], 'predictions': [0.9], 'example_weights': [1.0]}
example5 = {'labels': [1.0], 'predictions': [0.5], 'example_weights': [0.0]}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create(
[example1, example2, example3, example4, example5])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'Combine' >> beam.CombinePerKey(computation.combiner))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1, 'got: %s' % got)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
custom_key = metric_types.MetricKey(name='custom')
mse_key = metric_types.MetricKey(name='mse')
self.assertDictElementsAlmostEqual(
got_metrics, {
custom_key: (0.0 + 0.5 + 0.3 + 0.9 + 0.0) /
(1.0 + 1.0 + 1.0 + 1.0 + 0.0),
mse_key:
0.1875,
})
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testMergeAccumulators(self):
computation = tf_metric_wrapper.tf_metric_computations(
[tf.keras.metrics.MeanSquaredError(name='mse')],
desired_batch_size=2)[0]
example1 = {'labels': [0.0], 'predictions': [0.0], 'example_weights': [1.0]}
example2 = {'labels': [0.0], 'predictions': [0.5], 'example_weights': [1.0]}
example3 = {'labels': [1.0], 'predictions': [0.3], 'example_weights': [1.0]}
example4 = {'labels': [1.0], 'predictions': [0.9], 'example_weights': [1.0]}
example5 = {'labels': [1.0], 'predictions': [0.5], 'example_weights': [0.0]}
computation.combiner.setup()
combiner_inputs = []
for e in (example1, example2, example3, example4, example5):
combiner_inputs.append(metric_util.to_standard_metric_inputs(e))
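    # Split the five examples across two accumulators, then merge them and check
    # that the merged result matches the expected full-dataset MSE.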
acc1 = computation.combiner.create_accumulator()
acc1 = computation.combiner.add_input(acc1, combiner_inputs[0])
acc1 = computation.combiner.add_input(acc1, combiner_inputs[1])
acc1 = computation.combiner.add_input(acc1, combiner_inputs[2])
acc2 = computation.combiner.create_accumulator()
acc2 = computation.combiner.add_input(acc2, combiner_inputs[3])
acc2 = computation.combiner.add_input(acc2, combiner_inputs[4])
acc = computation.combiner.merge_accumulators([acc1, acc2])
got_metrics = computation.combiner.extract_output(acc)
mse_key = metric_types.MetricKey(name='mse')
self.assertDictElementsAlmostEqual(got_metrics, {mse_key: 0.1875})
class MixedMetricsTest(testutil.TensorflowModelAnalysisTest):
def testWithMixedMetrics(self):
computations = tf_metric_wrapper.tf_metric_computations([
tf.keras.metrics.AUC(name='auc'),
tf.keras.losses.BinaryCrossentropy(name='binary_crossentropy'),
tf.keras.metrics.MeanSquaredError(name='mse')
])
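    # AUC is derived from a shared confusion-matrix histogram (computations 0-2),
    # while binary_crossentropy and mse are computed by a single direct combiner.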
confusion_histogram = computations[0]
confusion_matrix = computations[1].result
confusion_metrics = computations[2].result
non_confusion_metrics = computations[3]
example1 = {
'labels': np.array([0.0]),
'predictions': np.array([0.0]),
'example_weights': np.array([1.0]),
}
example2 = {
'labels': np.array([0.0]),
'predictions': np.array([0.5]),
'example_weights': np.array([1.0]),
}
example3 = {
'labels': np.array([1.0]),
'predictions': np.array([0.3]),
'example_weights': np.array([1.0]),
}
example4 = {
'labels': np.array([1.0]),
'predictions': np.array([0.9]),
'example_weights': np.array([1.0]),
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
sliced_examples = (
pipeline
| 'Create' >> beam.Create([example1, example2, example3, example4])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x)))
confusion_result = (
sliced_examples
|
'ComputeHistogram' >> beam.CombinePerKey(confusion_histogram.combiner)
| 'ComputeConfusionMatrix' >> beam.Map(
lambda x: (x[0], confusion_matrix(x[1]))) # pyformat: disable
| 'ComputeMetric' >> beam.Map(
lambda x: (x[0], confusion_metrics(x[1])))) # pyformat: disable
non_confusion_result = (
sliced_examples
| 'Combine' >> beam.CombinePerKey(non_confusion_metrics.combiner))
# pylint: enable=no-value-for-parameter
def check_confusion_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
auc_key = metric_types.MetricKey(name='auc')
self.assertDictElementsAlmostEqual(
got_metrics, {auc_key: 0.75}, places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
def check_non_confusion_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
mse_key = metric_types.MetricKey(name='mse')
binary_crossentropy_key = metric_types.MetricKey(
name='binary_crossentropy')
self.assertDictElementsAlmostEqual(
got_metrics, {
mse_key: 0.1875,
binary_crossentropy_key: 0.50061995
},
places=5)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(
confusion_result, check_confusion_result, label='confusion')
util.assert_that(
non_confusion_result,
check_non_confusion_result,
label='non_confusion')
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
a330663f1e5ce3fc16f9eef56bfaed1bdf134cee | 2ed7f1e1f59832e91fe0402eca82ecf6fea2be40 | /0x05-python-exceptions/2-safe_print_list_integers.py | 70935bd34066cc1d54b184070aff6075e7e38cb5 | [] | no_license | Leidysalda/holbertonschool-higher_level_programming | abf3159db916ec293fc219b591e2c44f74afe3f3 | 46c04cdc7b76afbd79c650ff258f85aef7d2d5fe | refs/heads/master | 2020-09-29T02:40:47.437740 | 2020-09-23T05:37:10 | 2020-09-23T05:37:10 | 259,387,894 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/usr/bin/python3
def safe_print_list_integers(my_list=[], x=0):
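    # Print the first x elements of my_list that are integers (no separator),
    # then a newline; return how many integers were printed.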
acum = 0
for i in range(0, x):
try:
print('{:d}'.format(my_list[i]), end='')
acum += 1
except (ValueError, TypeError):
pass
print('')
return (acum)
| [
"leidysalda1@gmail.com"
] | leidysalda1@gmail.com |
da7c8635ae11fd4b2b872ed62822f2e8814197d9 | 6cffc8421453dd8b21f1e10bbeff58d4ba01dfc7 | /strut.py | 301d55b2989640baaac933deb2146ce84a6ad97f | [
"Apache-2.0"
] | permissive | zignig/cqparts_bucket | 196150e3b32a2c12d34b3027af27539ca704995a | 9707b0948a9dd1ed514e03c291a3b96fddc4a22d | refs/heads/master | 2021-08-08T17:59:18.576133 | 2020-03-29T02:19:06 | 2020-03-29T02:19:06 | 134,233,074 | 12 | 2 | null | 2018-12-09T05:01:46 | 2018-05-21T07:18:51 | Python | UTF-8 | Python | false | false | 113 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 08:33:24 2018
@author: simonk
"""
# a threaded strut for the
| [
"obeygiantrobot@gmail.com"
] | obeygiantrobot@gmail.com |
5efdcea0b197ce67a1ce02ffb34ddfc54e626264 | d88ede90d3434d56b00bdc530711208e1673b245 | /从字符串中提取省-市-区/main.py | 5c40573f246b912751d8027fc17bad7edbbbf992 | [] | no_license | SmallPotY/SmallUtil | 0ec84fab01ce7b46cf44f839ed0f7b2d63bad0cb | 0761283fc1f41ac909a1705aa3f31d925691189f | refs/heads/master | 2020-06-13T22:49:12.060011 | 2019-08-12T06:08:33 | 2019-08-12T06:08:33 | 194,813,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # -*- coding:utf-8 -*-
import cpca
import os
path = os.path.dirname(__file__)
file = path + "/地址字符串.txt"
location_str = []
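# Read the address strings from the text file, one address per line.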
with open(file, 'r', encoding='utf-8') as f:
while True:
line = f.readline().splitlines()
if not line:
break
location_str.append(line[0])
# print(location_str)
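# cpca.transform splits each address string into province/city/district columns;
# pos_sensitive=True is assumed (per the cpca API used here) to also record match positions.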
df = cpca.transform(location_str, cut=False, pos_sensitive=True)
df.to_csv('省-市-区.csv', encoding="utf_8_sig")
| [
"1041132457@qq.com"
] | 1041132457@qq.com |
aa51bdfd1ab45a3eedf68789a160f94e29bc0da1 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/shipbuild.py | 864b47f125621ea058f9681fa8ad565ed9efb8be | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 275 | py | ii = [('CoolWHM2.py', 1), ('RogePAV.py', 1), ('AubePRP2.py', 3), ('WilkJMC3.py', 1), ('ClarGE2.py', 1), ('CoolWHM.py', 2), ('ClarGE.py', 1), ('LeakWTI.py', 1), ('BachARE.py', 2), ('FitzRNS.py', 2), ('MackCNH2.py', 1), ('WilbRLW3.py', 1), ('JacoWHI.py', 1), ('ClarGE3.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
b12910bde72865270b33d213ed7d7729845c413b | c3b739b07214507bf1023b926c19d30784623e98 | /segme/metric/sad.py | 6a214ab8ef8f566f2f85cda2e1f6aa8e227f5ec3 | [
"MIT"
] | permissive | templeblock/segme | 20a96787500c46483cb7af0db917207fcedafb0b | 8192ed066558c1ea1e7283805b40da4baa5b3827 | refs/heads/master | 2023-08-30T12:31:39.327283 | 2021-11-11T17:08:40 | 2021-11-11T17:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | import tensorflow as tf
from keras.metrics import SumOverBatchSize, metrics_utils
from keras.utils import losses_utils
from keras.utils.generic_utils import register_keras_serializable
@register_keras_serializable(package='SegMe')
class SAD(SumOverBatchSize):
def __init__(self, divider=255., name='sad', dtype=None):
"""Creates a `SumAbsoluteDifference` instance for matting task (by default downscales input by 255).
Args:
divider: A float value for input scaling.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super().__init__(name, dtype=dtype)
self.divider = divider
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self._dtype)
[y_true, y_pred], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_true, y_pred], sample_weight)
if sample_weight is None:
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true, sample_weight)
else:
y_pred, y_true, sample_weight = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true, sample_weight)
values = sum_absolute_difference(y_true, y_pred, sample_weight)
return super().update_state(values / self.divider)
def result(self):
return super().result() / 1000.
def get_config(self):
config = super().get_config()
config.update({'divider': self.divider})
return config
def sum_absolute_difference(y_true, y_pred, sample_weight=None):
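    # Optionally weighted absolute error, summed over every non-batch axis so the
    # metric yields one value per example.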
result = tf.abs(y_pred - y_true)
if sample_weight is not None:
result *= sample_weight
axis_hwc = list(range(1, result.shape.ndims))
result = tf.reduce_sum(result, axis=axis_hwc)
return result
| [
"shkarupa.alex@gmail.com"
] | shkarupa.alex@gmail.com |
7ef99ad9f507c2e36be022a636cfdda823f8a2ae | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/compute/v20171201/availability_set.py | aecfc2e33bb449a16a533577137ae609e1aa7964 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 9,030 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['AvailabilitySet']
class AvailabilitySet(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
availability_set_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
platform_update_domain_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_machines: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] availability_set_name: The name of the availability set.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[int] platform_fault_domain_count: Fault Domain count.
:param pulumi.Input[int] platform_update_domain_count: Update Domain count.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: Sku of the availability set
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] virtual_machines: A list of references to all virtual machines in the availability set.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if availability_set_name is None:
raise TypeError("Missing required property 'availability_set_name'")
__props__['availability_set_name'] = availability_set_name
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
__props__['platform_fault_domain_count'] = platform_fault_domain_count
__props__['platform_update_domain_count'] = platform_update_domain_count
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sku'] = sku
__props__['tags'] = tags
__props__['virtual_machines'] = virtual_machines
__props__['name'] = None
__props__['statuses'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/latest:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20150615:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20160330:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20191201:AvailabilitySet"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:AvailabilitySet")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(AvailabilitySet, __self__).__init__(
'azure-nextgen:compute/v20171201:AvailabilitySet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AvailabilitySet':
"""
Get an existing AvailabilitySet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return AvailabilitySet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="platformFaultDomainCount")
def platform_fault_domain_count(self) -> pulumi.Output[Optional[int]]:
"""
Fault Domain count.
"""
return pulumi.get(self, "platform_fault_domain_count")
@property
@pulumi.getter(name="platformUpdateDomainCount")
def platform_update_domain_count(self) -> pulumi.Output[Optional[int]]:
"""
Update Domain count.
"""
return pulumi.get(self, "platform_update_domain_count")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
Sku of the availability set
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def statuses(self) -> pulumi.Output[Sequence['outputs.InstanceViewStatusResponse']]:
"""
The resource status information.
"""
return pulumi.get(self, "statuses")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualMachines")
def virtual_machines(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
"""
A list of references to all virtual machines in the availability set.
"""
return pulumi.get(self, "virtual_machines")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
1d3ab882c43c70cb14004d2256ccf46c2ffbd651 | 050ca74a2b304d49709050585424114f0a6bc1a7 | /tools/generate_taint_models/get_REST_api_sources.py | 01752c36104efc14cf8da804112946938b1fcb26 | [
"MIT"
] | permissive | tholiao/pyre-check | fcc1019c63ad27dcec920ecee1464c0507a68672 | f5705fb5dae6a78623a058e5972461e89e283634 | refs/heads/master | 2020-06-23T15:25:36.468656 | 2019-07-24T15:33:38 | 2019-07-24T15:33:38 | 198,662,804 | 0 | 0 | MIT | 2019-07-24T15:30:31 | 2019-07-24T15:30:30 | null | UTF-8 | Python | false | false | 1,357 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import inspect
import types
from typing import Callable, Iterable
from .inspect_parser import extract_annotation, extract_name, extract_view_name
from .model import CallableModel
from .model_generator import Configuration, Registry
from .view_generator import ViewGenerator
class RESTApiSourceGenerator(ViewGenerator):
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[str]:
entry_points = set()
for view_function in functions_to_model:
view_name = extract_view_name(view_function)
if view_name in Configuration.whitelisted_views:
continue
model = CallableModel(
callable=view_function,
arg="TaintSource[UserControlled]",
vararg="TaintSource[UserControlled]",
kwarg="TaintSource[UserControlled]",
whitelisted_parameters=Configuration.whitelisted_classes,
).generate()
if model is not None:
entry_points.add(model)
return sorted(entry_points)
Registry.register("get_REST_api_sources", RESTApiSourceGenerator)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
d8a2c27e6abe1ff0606d58ea519950a06e4de596 | 70f5264401822933a25a94101f648d385362b87b | /demo1.py | 6ba70b66f563d654ef5538db55f502ec3cb763c0 | [] | no_license | aspiringguru/COVID_data_analysis | aa859ecc5bc76b4c68a526efd52ae2fc0a8f67db | 86004246a7199807d3b7751f42d934ffe3e6bf41 | refs/heads/master | 2021-05-22T22:06:33.358741 | 2020-04-05T07:25:38 | 2020-04-05T07:25:38 | 253,117,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,860 | py | import pandas as pd
import os
mypath = "../data.bls.gov/unemployment_rate/"
file_list = []
for file in os.listdir(mypath):
if file.endswith(".xlsx"):
print(os.path.join(mypath, file))
file_list.append(os.path.join(mypath, file))
file_list
col_names = []
combined_results = pd.DataFrame(columns = col_names)
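# combined_results accumulates one column per BLS series id alongside a shared 'date' column.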
for filename in file_list:
# Read File
print("opening filename:", filename)
df = pd.read_excel(filename, sheet_name="BLS Data Series")
#df = pd.read_excel(file_list[0], sheet_name="BLS Data Series")
#df.shape
series_id = df.iloc[2,1]
print("series_id:", series_id)
series_id_text = df.iloc[4,1]
print("series_id_text:", series_id_text)
#now detect rownumber where data starts and ends.
#find row with first value of 'Year' in column 0
#and first non null value in column 0 after that.
first_col = df.iloc[:, 0]
#first_col
start_row_index = first_col[first_col == 'Year'].index[0]
#start_row_index
last_row_index = len(first_col)
#last_row_index
df_data = df.iloc[start_row_index+1:last_row_index,0:13]
#df_data
#df_info = df.iloc[4:9,0:2]
#df_data = df.iloc[11:32,0:13]
new_col_names = list(df.iloc[start_row_index, :])
#new_col_names
print("df_data.shape:", df_data.shape)
#print(df_data.head())
df_data.columns = new_col_names
#print(df_data.head())
#df_data
    # Reshape from wide (one column per month) to long format: one row per Year/month pair.
    df_data_cleaned = df_data.melt('Year')
df_data_cleaned['date'] = df_data_cleaned['Year'].astype('str') + '-' + df_data_cleaned['variable']
#df_data_cleaned
df_data_cleaned.drop(['Year', 'variable'], axis=1, inplace=True)
#df_data_cleaned
df_data_cleaned.dropna(inplace=True)
df_data_cleaned.rename(columns={"value": series_id}, inplace=True)
#
print("df_data_cleaned\n", df_data_cleaned)
#series_id
#series_id_text
output_filename = filename+"_"+series_id+"_"+series_id_text+".csv"
print("saving as csv file:", output_filename)
df_data_cleaned[['date', series_id]].to_csv(output_filename, index=False)
#df_data_cleaned
#append x to combined_results
    if len(combined_results.columns) == 0:
print("combined_results is empty. combined_results.shape=", combined_results.shape)
combined_results = df_data_cleaned[['date', series_id]]
else:
print("combined_results not empty, joining column from df_data_cleaned")
#add the series_id column to combined_results (years should be the same)
combined_results[series_id] = df_data_cleaned[series_id]
print("after adding new data column, combined_results.shape:", combined_results.shape)
combined_results.to_csv(mypath+"combined_results.csv", index=False)
| [
"bmatthewtaylor@gmail.com"
] | bmatthewtaylor@gmail.com |
03eb72d66aa8bc393d0357dc165bae29b0a9d6eb | 609d5408f302c9188b723998762c2c1f7b883af9 | /.closet/jython.configurator.efr32/1.0.0.201606231656-435/host_py_rm_studio_internal/host_py_rm_studio_internal_efr32xg1xfull/revA3/MODEM_register.py | 3ccf95ee71730128fd2ba0cbc59db14a96c32a36 | [] | no_license | acvilla/Sundial-Beta | 6ea4fd44cbf7c2df8100128aff5c39b6faf24a82 | 9f84e3b5a1397998dfea5287949fa5b1f4c209a6 | refs/heads/master | 2021-01-15T15:36:19.394640 | 2016-08-31T20:15:16 | 2016-08-31T20:15:16 | 63,294,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169,977 | py |
from static import Base_RM_Register
from MODEM_field import *
class RM_Register_MODEM_STATUS(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_STATUS, self).__init__(rmio, label,
0x40086000, 0x000,
'STATUS', 'MODEM.STATUS', 'read-only',
"",
0x00000000, 0xFFFF00F7)
self.DEMODSTATE = RM_Field_MODEM_STATUS_DEMODSTATE(self)
self.zz_fdict['DEMODSTATE'] = self.DEMODSTATE
self.FRAMEDETID = RM_Field_MODEM_STATUS_FRAMEDETID(self)
self.zz_fdict['FRAMEDETID'] = self.FRAMEDETID
self.ANTSEL = RM_Field_MODEM_STATUS_ANTSEL(self)
self.zz_fdict['ANTSEL'] = self.ANTSEL
self.TIMSEQINV = RM_Field_MODEM_STATUS_TIMSEQINV(self)
self.zz_fdict['TIMSEQINV'] = self.TIMSEQINV
self.TIMLOSTCAUSE = RM_Field_MODEM_STATUS_TIMLOSTCAUSE(self)
self.zz_fdict['TIMLOSTCAUSE'] = self.TIMLOSTCAUSE
self.CORR = RM_Field_MODEM_STATUS_CORR(self)
self.zz_fdict['CORR'] = self.CORR
self.WEAKSYMBOLS = RM_Field_MODEM_STATUS_WEAKSYMBOLS(self)
self.zz_fdict['WEAKSYMBOLS'] = self.WEAKSYMBOLS
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_TIMDETSTATUS(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_TIMDETSTATUS, self).__init__(rmio, label,
0x40086000, 0x004,
'TIMDETSTATUS', 'MODEM.TIMDETSTATUS', 'read-only',
"",
0x00000000, 0x1F0FFFFF)
self.TIMDETCORR = RM_Field_MODEM_TIMDETSTATUS_TIMDETCORR(self)
self.zz_fdict['TIMDETCORR'] = self.TIMDETCORR
self.TIMDETFREQOFFEST = RM_Field_MODEM_TIMDETSTATUS_TIMDETFREQOFFEST(self)
self.zz_fdict['TIMDETFREQOFFEST'] = self.TIMDETFREQOFFEST
self.TIMDETPREERRORS = RM_Field_MODEM_TIMDETSTATUS_TIMDETPREERRORS(self)
self.zz_fdict['TIMDETPREERRORS'] = self.TIMDETPREERRORS
self.TIMDETPASS = RM_Field_MODEM_TIMDETSTATUS_TIMDETPASS(self)
self.zz_fdict['TIMDETPASS'] = self.TIMDETPASS
self.TIMDETINDEX = RM_Field_MODEM_TIMDETSTATUS_TIMDETINDEX(self)
self.zz_fdict['TIMDETINDEX'] = self.TIMDETINDEX
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_FREQOFFEST(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_FREQOFFEST, self).__init__(rmio, label,
0x40086000, 0x008,
'FREQOFFEST', 'MODEM.FREQOFFEST', 'read-only',
"",
0x00000000, 0xFFFFFFFF)
self.FREQOFFEST = RM_Field_MODEM_FREQOFFEST_FREQOFFEST(self)
self.zz_fdict['FREQOFFEST'] = self.FREQOFFEST
self.POE = RM_Field_MODEM_FREQOFFEST_POE(self)
self.zz_fdict['POE'] = self.POE
self.CORRVAL = RM_Field_MODEM_FREQOFFEST_CORRVAL(self)
self.zz_fdict['CORRVAL'] = self.CORRVAL
self.SOFTVAL = RM_Field_MODEM_FREQOFFEST_SOFTVAL(self)
self.zz_fdict['SOFTVAL'] = self.SOFTVAL
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_AFCADJRX(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_AFCADJRX, self).__init__(rmio, label,
0x40086000, 0x00C,
'AFCADJRX', 'MODEM.AFCADJRX', 'read-only',
"",
0x00000000, 0x0007FFFF)
self.AFCADJRX = RM_Field_MODEM_AFCADJRX_AFCADJRX(self)
self.zz_fdict['AFCADJRX'] = self.AFCADJRX
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_AFCADJTX(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_AFCADJTX, self).__init__(rmio, label,
0x40086000, 0x010,
'AFCADJTX', 'MODEM.AFCADJTX', 'read-only',
"",
0x00000000, 0x0007FFFF)
self.AFCADJTX = RM_Field_MODEM_AFCADJTX_AFCADJTX(self)
self.zz_fdict['AFCADJTX'] = self.AFCADJTX
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_MIXCTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_MIXCTRL, self).__init__(rmio, label,
0x40086000, 0x014,
'MIXCTRL', 'MODEM.MIXCTRL', 'read-write',
"",
0x00000000, 0x0000001F)
self.MODE = RM_Field_MODEM_MIXCTRL_MODE(self)
self.zz_fdict['MODE'] = self.MODE
self.DIGIQSWAPEN = RM_Field_MODEM_MIXCTRL_DIGIQSWAPEN(self)
self.zz_fdict['DIGIQSWAPEN'] = self.DIGIQSWAPEN
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CTRL0(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CTRL0, self).__init__(rmio, label,
0x40086000, 0x018,
'CTRL0', 'MODEM.CTRL0', 'read-write',
"",
0x00000000, 0xFFFFFBFF)
self.FDM0DIFFDIS = RM_Field_MODEM_CTRL0_FDM0DIFFDIS(self)
self.zz_fdict['FDM0DIFFDIS'] = self.FDM0DIFFDIS
self.MAPFSK = RM_Field_MODEM_CTRL0_MAPFSK(self)
self.zz_fdict['MAPFSK'] = self.MAPFSK
self.CODING = RM_Field_MODEM_CTRL0_CODING(self)
self.zz_fdict['CODING'] = self.CODING
self.MODFORMAT = RM_Field_MODEM_CTRL0_MODFORMAT(self)
self.zz_fdict['MODFORMAT'] = self.MODFORMAT
self.DUALCORROPTDIS = RM_Field_MODEM_CTRL0_DUALCORROPTDIS(self)
self.zz_fdict['DUALCORROPTDIS'] = self.DUALCORROPTDIS
self.DSSSLEN = RM_Field_MODEM_CTRL0_DSSSLEN(self)
self.zz_fdict['DSSSLEN'] = self.DSSSLEN
self.DSSSSHIFTS = RM_Field_MODEM_CTRL0_DSSSSHIFTS(self)
self.zz_fdict['DSSSSHIFTS'] = self.DSSSSHIFTS
self.DSSSDOUBLE = RM_Field_MODEM_CTRL0_DSSSDOUBLE(self)
self.zz_fdict['DSSSDOUBLE'] = self.DSSSDOUBLE
self.DETDIS = RM_Field_MODEM_CTRL0_DETDIS(self)
self.zz_fdict['DETDIS'] = self.DETDIS
self.DIFFENCMODE = RM_Field_MODEM_CTRL0_DIFFENCMODE(self)
self.zz_fdict['DIFFENCMODE'] = self.DIFFENCMODE
self.SHAPING = RM_Field_MODEM_CTRL0_SHAPING(self)
self.zz_fdict['SHAPING'] = self.SHAPING
self.DEMODRAWDATASEL = RM_Field_MODEM_CTRL0_DEMODRAWDATASEL(self)
self.zz_fdict['DEMODRAWDATASEL'] = self.DEMODRAWDATASEL
self.FRAMEDETDEL = RM_Field_MODEM_CTRL0_FRAMEDETDEL(self)
self.zz_fdict['FRAMEDETDEL'] = self.FRAMEDETDEL
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CTRL1(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CTRL1, self).__init__(rmio, label,
0x40086000, 0x01C,
'CTRL1', 'MODEM.CTRL1', 'read-write',
"",
0x00000000, 0xFFFFDFFF)
self.SYNCBITS = RM_Field_MODEM_CTRL1_SYNCBITS(self)
self.zz_fdict['SYNCBITS'] = self.SYNCBITS
self.SYNCERRORS = RM_Field_MODEM_CTRL1_SYNCERRORS(self)
self.zz_fdict['SYNCERRORS'] = self.SYNCERRORS
self.DUALSYNC = RM_Field_MODEM_CTRL1_DUALSYNC(self)
self.zz_fdict['DUALSYNC'] = self.DUALSYNC
self.TXSYNC = RM_Field_MODEM_CTRL1_TXSYNC(self)
self.zz_fdict['TXSYNC'] = self.TXSYNC
self.SYNCDATA = RM_Field_MODEM_CTRL1_SYNCDATA(self)
self.zz_fdict['SYNCDATA'] = self.SYNCDATA
self.SYNC1INV = RM_Field_MODEM_CTRL1_SYNC1INV(self)
self.zz_fdict['SYNC1INV'] = self.SYNC1INV
self.COMPMODE = RM_Field_MODEM_CTRL1_COMPMODE(self)
self.zz_fdict['COMPMODE'] = self.COMPMODE
self.RESYNCPER = RM_Field_MODEM_CTRL1_RESYNCPER(self)
self.zz_fdict['RESYNCPER'] = self.RESYNCPER
self.PHASEDEMOD = RM_Field_MODEM_CTRL1_PHASEDEMOD(self)
self.zz_fdict['PHASEDEMOD'] = self.PHASEDEMOD
self.FREQOFFESTPER = RM_Field_MODEM_CTRL1_FREQOFFESTPER(self)
self.zz_fdict['FREQOFFESTPER'] = self.FREQOFFESTPER
self.FREQOFFESTLIM = RM_Field_MODEM_CTRL1_FREQOFFESTLIM(self)
self.zz_fdict['FREQOFFESTLIM'] = self.FREQOFFESTLIM
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CTRL2(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CTRL2, self).__init__(rmio, label,
0x40086000, 0x020,
'CTRL2', 'MODEM.CTRL2', 'read-write',
"",
0x00001000, 0xFFFFFFFF)
self.SQITHRESH = RM_Field_MODEM_CTRL2_SQITHRESH(self)
self.zz_fdict['SQITHRESH'] = self.SQITHRESH
self.RXFRCDIS = RM_Field_MODEM_CTRL2_RXFRCDIS(self)
self.zz_fdict['RXFRCDIS'] = self.RXFRCDIS
self.RXPINMODE = RM_Field_MODEM_CTRL2_RXPINMODE(self)
self.zz_fdict['RXPINMODE'] = self.RXPINMODE
self.TXPINMODE = RM_Field_MODEM_CTRL2_TXPINMODE(self)
self.zz_fdict['TXPINMODE'] = self.TXPINMODE
self.DATAFILTER = RM_Field_MODEM_CTRL2_DATAFILTER(self)
self.zz_fdict['DATAFILTER'] = self.DATAFILTER
self.BRDIVA = RM_Field_MODEM_CTRL2_BRDIVA(self)
self.zz_fdict['BRDIVA'] = self.BRDIVA
self.BRDIVB = RM_Field_MODEM_CTRL2_BRDIVB(self)
self.zz_fdict['BRDIVB'] = self.BRDIVB
self.DEVMULA = RM_Field_MODEM_CTRL2_DEVMULA(self)
self.zz_fdict['DEVMULA'] = self.DEVMULA
self.DEVMULB = RM_Field_MODEM_CTRL2_DEVMULB(self)
self.zz_fdict['DEVMULB'] = self.DEVMULB
self.RATESELMODE = RM_Field_MODEM_CTRL2_RATESELMODE(self)
self.zz_fdict['RATESELMODE'] = self.RATESELMODE
self.PRSDEBUG = RM_Field_MODEM_CTRL2_PRSDEBUG(self)
self.zz_fdict['PRSDEBUG'] = self.PRSDEBUG
self.DEVWEIGHTDIS = RM_Field_MODEM_CTRL2_DEVWEIGHTDIS(self)
self.zz_fdict['DEVWEIGHTDIS'] = self.DEVWEIGHTDIS
self.DMASEL = RM_Field_MODEM_CTRL2_DMASEL(self)
self.zz_fdict['DMASEL'] = self.DMASEL
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CTRL3(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CTRL3, self).__init__(rmio, label,
0x40086000, 0x024,
'CTRL3', 'MODEM.CTRL3', 'read-write',
"",
0x00008000, 0xFFFFFF9F)
self.PRSDINEN = RM_Field_MODEM_CTRL3_PRSDINEN(self)
self.zz_fdict['PRSDINEN'] = self.PRSDINEN
self.PRSDINSEL = RM_Field_MODEM_CTRL3_PRSDINSEL(self)
self.zz_fdict['PRSDINSEL'] = self.PRSDINSEL
self.RAMTESTEN = RM_Field_MODEM_CTRL3_RAMTESTEN(self)
self.zz_fdict['RAMTESTEN'] = self.RAMTESTEN
self.ANTDIVMODE = RM_Field_MODEM_CTRL3_ANTDIVMODE(self)
self.zz_fdict['ANTDIVMODE'] = self.ANTDIVMODE
self.ANTDIVREPEATDIS = RM_Field_MODEM_CTRL3_ANTDIVREPEATDIS(self)
self.zz_fdict['ANTDIVREPEATDIS'] = self.ANTDIVREPEATDIS
self.TSAMPMODE = RM_Field_MODEM_CTRL3_TSAMPMODE(self)
self.zz_fdict['TSAMPMODE'] = self.TSAMPMODE
self.TSAMPDEL = RM_Field_MODEM_CTRL3_TSAMPDEL(self)
self.zz_fdict['TSAMPDEL'] = self.TSAMPDEL
self.TSAMPLIM = RM_Field_MODEM_CTRL3_TSAMPLIM(self)
self.zz_fdict['TSAMPLIM'] = self.TSAMPLIM
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CTRL4(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CTRL4, self).__init__(rmio, label,
0x40086000, 0x028,
'CTRL4', 'MODEM.CTRL4', 'read-write',
"",
0x00800000, 0xBFFFFFFF)
self.ISICOMP = RM_Field_MODEM_CTRL4_ISICOMP(self)
self.zz_fdict['ISICOMP'] = self.ISICOMP
self.DEVOFFCOMP = RM_Field_MODEM_CTRL4_DEVOFFCOMP(self)
self.zz_fdict['DEVOFFCOMP'] = self.DEVOFFCOMP
self.PREDISTGAIN = RM_Field_MODEM_CTRL4_PREDISTGAIN(self)
self.zz_fdict['PREDISTGAIN'] = self.PREDISTGAIN
self.PREDISTDEB = RM_Field_MODEM_CTRL4_PREDISTDEB(self)
self.zz_fdict['PREDISTDEB'] = self.PREDISTDEB
self.PREDISTAVG = RM_Field_MODEM_CTRL4_PREDISTAVG(self)
self.zz_fdict['PREDISTAVG'] = self.PREDISTAVG
self.PREDISTRST = RM_Field_MODEM_CTRL4_PREDISTRST(self)
self.zz_fdict['PREDISTRST'] = self.PREDISTRST
self.PHASECLICKFILT = RM_Field_MODEM_CTRL4_PHASECLICKFILT(self)
self.zz_fdict['PHASECLICKFILT'] = self.PHASECLICKFILT
self.SOFTDSSSMODE = RM_Field_MODEM_CTRL4_SOFTDSSSMODE(self)
self.zz_fdict['SOFTDSSSMODE'] = self.SOFTDSSSMODE
self.ADCSATLEVEL = RM_Field_MODEM_CTRL4_ADCSATLEVEL(self)
self.zz_fdict['ADCSATLEVEL'] = self.ADCSATLEVEL
self.ADCSATDENS = RM_Field_MODEM_CTRL4_ADCSATDENS(self)
self.zz_fdict['ADCSATDENS'] = self.ADCSATDENS
self.OFFSETPHASEMASKING = RM_Field_MODEM_CTRL4_OFFSETPHASEMASKING(self)
self.zz_fdict['OFFSETPHASEMASKING'] = self.OFFSETPHASEMASKING
self.OFFSETPHASESCALING = RM_Field_MODEM_CTRL4_OFFSETPHASESCALING(self)
self.zz_fdict['OFFSETPHASESCALING'] = self.OFFSETPHASESCALING
self.CLKUNDIVREQ = RM_Field_MODEM_CTRL4_CLKUNDIVREQ(self)
self.zz_fdict['CLKUNDIVREQ'] = self.CLKUNDIVREQ
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CTRL5(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CTRL5, self).__init__(rmio, label,
0x40086000, 0x02C,
'CTRL5', 'MODEM.CTRL5', 'read-write',
"",
0x00000000, 0x000007FE)
self.BRCALEN = RM_Field_MODEM_CTRL5_BRCALEN(self)
self.zz_fdict['BRCALEN'] = self.BRCALEN
self.BRCALMODE = RM_Field_MODEM_CTRL5_BRCALMODE(self)
self.zz_fdict['BRCALMODE'] = self.BRCALMODE
self.BRCALAVG = RM_Field_MODEM_CTRL5_BRCALAVG(self)
self.zz_fdict['BRCALAVG'] = self.BRCALAVG
self.DETDEL = RM_Field_MODEM_CTRL5_DETDEL(self)
self.zz_fdict['DETDEL'] = self.DETDEL
self.TDEDGE = RM_Field_MODEM_CTRL5_TDEDGE(self)
self.zz_fdict['TDEDGE'] = self.TDEDGE
self.TREDGE = RM_Field_MODEM_CTRL5_TREDGE(self)
self.zz_fdict['TREDGE'] = self.TREDGE
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_TXBR(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_TXBR, self).__init__(rmio, label,
0x40086000, 0x030,
'TXBR', 'MODEM.TXBR', 'read-write',
"",
0x00000000, 0x00FFFFFF)
self.TXBRNUM = RM_Field_MODEM_TXBR_TXBRNUM(self)
self.zz_fdict['TXBRNUM'] = self.TXBRNUM
self.TXBRDEN = RM_Field_MODEM_TXBR_TXBRDEN(self)
self.zz_fdict['TXBRDEN'] = self.TXBRDEN
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RXBR(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RXBR, self).__init__(rmio, label,
0x40086000, 0x034,
'RXBR', 'MODEM.RXBR', 'read-write',
"",
0x00000000, 0x00001FFF)
self.RXBRNUM = RM_Field_MODEM_RXBR_RXBRNUM(self)
self.zz_fdict['RXBRNUM'] = self.RXBRNUM
self.RXBRDEN = RM_Field_MODEM_RXBR_RXBRDEN(self)
self.zz_fdict['RXBRDEN'] = self.RXBRDEN
self.RXBRINT = RM_Field_MODEM_RXBR_RXBRINT(self)
self.zz_fdict['RXBRINT'] = self.RXBRINT
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CF(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CF, self).__init__(rmio, label,
0x40086000, 0x038,
'CF', 'MODEM.CF', 'read-write',
"",
0x00000000, 0x3FFFFFFF)
self.DEC0 = RM_Field_MODEM_CF_DEC0(self)
self.zz_fdict['DEC0'] = self.DEC0
self.DEC1 = RM_Field_MODEM_CF_DEC1(self)
self.zz_fdict['DEC1'] = self.DEC1
self.DEC2 = RM_Field_MODEM_CF_DEC2(self)
self.zz_fdict['DEC2'] = self.DEC2
self.CFOSR = RM_Field_MODEM_CF_CFOSR(self)
self.zz_fdict['CFOSR'] = self.CFOSR
self.DEC1GAIN = RM_Field_MODEM_CF_DEC1GAIN(self)
self.zz_fdict['DEC1GAIN'] = self.DEC1GAIN
self.RESYNCRESETTIMING = RM_Field_MODEM_CF_RESYNCRESETTIMING(self)
self.zz_fdict['RESYNCRESETTIMING'] = self.RESYNCRESETTIMING
self.RESYNCBYPASS = RM_Field_MODEM_CF_RESYNCBYPASS(self)
self.zz_fdict['RESYNCBYPASS'] = self.RESYNCBYPASS
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_PRE(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_PRE, self).__init__(rmio, label,
0x40086000, 0x03C,
'PRE', 'MODEM.PRE', 'read-write',
"",
0x00000000, 0xFFFF1FFF)
self.BASE = RM_Field_MODEM_PRE_BASE(self)
self.zz_fdict['BASE'] = self.BASE
self.BASEBITS = RM_Field_MODEM_PRE_BASEBITS(self)
self.zz_fdict['BASEBITS'] = self.BASEBITS
self.PRESYMB4FSK = RM_Field_MODEM_PRE_PRESYMB4FSK(self)
self.zz_fdict['PRESYMB4FSK'] = self.PRESYMB4FSK
self.PREERRORS = RM_Field_MODEM_PRE_PREERRORS(self)
self.zz_fdict['PREERRORS'] = self.PREERRORS
self.DSSSPRE = RM_Field_MODEM_PRE_DSSSPRE(self)
self.zz_fdict['DSSSPRE'] = self.DSSSPRE
self.SYNCSYMB4FSK = RM_Field_MODEM_PRE_SYNCSYMB4FSK(self)
self.zz_fdict['SYNCSYMB4FSK'] = self.SYNCSYMB4FSK
self.TXBASES = RM_Field_MODEM_PRE_TXBASES(self)
self.zz_fdict['TXBASES'] = self.TXBASES
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_SYNC0(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_SYNC0, self).__init__(rmio, label,
0x40086000, 0x040,
'SYNC0', 'MODEM.SYNC0', 'read-write',
"",
0x00000000, 0xFFFFFFFF)
self.SYNC0 = RM_Field_MODEM_SYNC0_SYNC0(self)
self.zz_fdict['SYNC0'] = self.SYNC0
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_SYNC1(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_SYNC1, self).__init__(rmio, label,
0x40086000, 0x044,
'SYNC1', 'MODEM.SYNC1', 'read-write',
"",
0x00000000, 0xFFFFFFFF)
self.SYNC1 = RM_Field_MODEM_SYNC1_SYNC1(self)
self.zz_fdict['SYNC1'] = self.SYNC1
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_TIMING(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_TIMING, self).__init__(rmio, label,
0x40086000, 0x048,
'TIMING', 'MODEM.TIMING', 'read-write',
"",
0x00000000, 0xFFFFFFFF)
self.TIMTHRESH = RM_Field_MODEM_TIMING_TIMTHRESH(self)
self.zz_fdict['TIMTHRESH'] = self.TIMTHRESH
self.TIMINGBASES = RM_Field_MODEM_TIMING_TIMINGBASES(self)
self.zz_fdict['TIMINGBASES'] = self.TIMINGBASES
self.ADDTIMSEQ = RM_Field_MODEM_TIMING_ADDTIMSEQ(self)
self.zz_fdict['ADDTIMSEQ'] = self.ADDTIMSEQ
self.TIMSEQINVEN = RM_Field_MODEM_TIMING_TIMSEQINVEN(self)
self.zz_fdict['TIMSEQINVEN'] = self.TIMSEQINVEN
self.TIMSEQSYNC = RM_Field_MODEM_TIMING_TIMSEQSYNC(self)
self.zz_fdict['TIMSEQSYNC'] = self.TIMSEQSYNC
self.FDM0THRESH = RM_Field_MODEM_TIMING_FDM0THRESH(self)
self.zz_fdict['FDM0THRESH'] = self.FDM0THRESH
self.OFFSUBNUM = RM_Field_MODEM_TIMING_OFFSUBNUM(self)
self.zz_fdict['OFFSUBNUM'] = self.OFFSUBNUM
self.OFFSUBDEN = RM_Field_MODEM_TIMING_OFFSUBDEN(self)
self.zz_fdict['OFFSUBDEN'] = self.OFFSUBDEN
self.TSAGCDEL = RM_Field_MODEM_TIMING_TSAGCDEL(self)
self.zz_fdict['TSAGCDEL'] = self.TSAGCDEL
self.FASTRESYNC = RM_Field_MODEM_TIMING_FASTRESYNC(self)
self.zz_fdict['FASTRESYNC'] = self.FASTRESYNC
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_DSSS0(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_DSSS0, self).__init__(rmio, label,
0x40086000, 0x04C,
'DSSS0', 'MODEM.DSSS0', 'read-write',
"",
0x00000000, 0xFFFFFFFF)
self.DSSS0 = RM_Field_MODEM_DSSS0_DSSS0(self)
self.zz_fdict['DSSS0'] = self.DSSS0
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_MODINDEX(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_MODINDEX, self).__init__(rmio, label,
0x40086000, 0x050,
'MODINDEX', 'MODEM.MODINDEX', 'read-write',
"",
0x00000000, 0x003F03FF)
self.MODINDEXM = RM_Field_MODEM_MODINDEX_MODINDEXM(self)
self.zz_fdict['MODINDEXM'] = self.MODINDEXM
self.MODINDEXE = RM_Field_MODEM_MODINDEX_MODINDEXE(self)
self.zz_fdict['MODINDEXE'] = self.MODINDEXE
self.FREQGAINE = RM_Field_MODEM_MODINDEX_FREQGAINE(self)
self.zz_fdict['FREQGAINE'] = self.FREQGAINE
self.FREQGAINM = RM_Field_MODEM_MODINDEX_FREQGAINM(self)
self.zz_fdict['FREQGAINM'] = self.FREQGAINM
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_AFC(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_AFC, self).__init__(rmio, label,
0x40086000, 0x054,
'AFC', 'MODEM.AFC', 'read-write',
"",
0x00000000, 0x00FFFCFF)
self.AFCSCALEM = RM_Field_MODEM_AFC_AFCSCALEM(self)
self.zz_fdict['AFCSCALEM'] = self.AFCSCALEM
self.AFCSCALEE = RM_Field_MODEM_AFC_AFCSCALEE(self)
self.zz_fdict['AFCSCALEE'] = self.AFCSCALEE
self.AFCRXMODE = RM_Field_MODEM_AFC_AFCRXMODE(self)
self.zz_fdict['AFCRXMODE'] = self.AFCRXMODE
self.AFCTXMODE = RM_Field_MODEM_AFC_AFCTXMODE(self)
self.zz_fdict['AFCTXMODE'] = self.AFCTXMODE
self.AFCRXCLR = RM_Field_MODEM_AFC_AFCRXCLR(self)
self.zz_fdict['AFCRXCLR'] = self.AFCRXCLR
self.AFCDEL = RM_Field_MODEM_AFC_AFCDEL(self)
self.zz_fdict['AFCDEL'] = self.AFCDEL
self.AFCAVGPER = RM_Field_MODEM_AFC_AFCAVGPER(self)
self.zz_fdict['AFCAVGPER'] = self.AFCAVGPER
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_AFCADJLIM(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_AFCADJLIM, self).__init__(rmio, label,
0x40086000, 0x058,
'AFCADJLIM', 'MODEM.AFCADJLIM', 'read-write',
"",
0x00000000, 0x0003FFFF)
self.AFCADJLIM = RM_Field_MODEM_AFCADJLIM_AFCADJLIM(self)
self.zz_fdict['AFCADJLIM'] = self.AFCADJLIM
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_SHAPING0(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_SHAPING0, self).__init__(rmio, label,
0x40086000, 0x05C,
'SHAPING0', 'MODEM.SHAPING0', 'read-write',
"",
0x22130A04, 0xFFFFFFFF)
self.COEFF0 = RM_Field_MODEM_SHAPING0_COEFF0(self)
self.zz_fdict['COEFF0'] = self.COEFF0
self.COEFF1 = RM_Field_MODEM_SHAPING0_COEFF1(self)
self.zz_fdict['COEFF1'] = self.COEFF1
self.COEFF2 = RM_Field_MODEM_SHAPING0_COEFF2(self)
self.zz_fdict['COEFF2'] = self.COEFF2
self.COEFF3 = RM_Field_MODEM_SHAPING0_COEFF3(self)
self.zz_fdict['COEFF3'] = self.COEFF3
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_SHAPING1(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_SHAPING1, self).__init__(rmio, label,
0x40086000, 0x060,
'SHAPING1', 'MODEM.SHAPING1', 'read-write',
"",
0x4F4A4132, 0xFFFFFFFF)
self.COEFF4 = RM_Field_MODEM_SHAPING1_COEFF4(self)
self.zz_fdict['COEFF4'] = self.COEFF4
self.COEFF5 = RM_Field_MODEM_SHAPING1_COEFF5(self)
self.zz_fdict['COEFF5'] = self.COEFF5
self.COEFF6 = RM_Field_MODEM_SHAPING1_COEFF6(self)
self.zz_fdict['COEFF6'] = self.COEFF6
self.COEFF7 = RM_Field_MODEM_SHAPING1_COEFF7(self)
self.zz_fdict['COEFF7'] = self.COEFF7
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_SHAPING2(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_SHAPING2, self).__init__(rmio, label,
0x40086000, 0x064,
'SHAPING2', 'MODEM.SHAPING2', 'read-write',
"",
0x00000000, 0x000000FF)
self.COEFF8 = RM_Field_MODEM_SHAPING2_COEFF8(self)
self.zz_fdict['COEFF8'] = self.COEFF8
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAMPCTRL(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAMPCTRL, self).__init__(rmio, label,
0x40086000, 0x068,
'RAMPCTRL', 'MODEM.RAMPCTRL', 'read-write',
"",
0x00000555, 0xFF800FFF)
self.RAMPRATE0 = RM_Field_MODEM_RAMPCTRL_RAMPRATE0(self)
self.zz_fdict['RAMPRATE0'] = self.RAMPRATE0
self.RAMPRATE1 = RM_Field_MODEM_RAMPCTRL_RAMPRATE1(self)
self.zz_fdict['RAMPRATE1'] = self.RAMPRATE1
self.RAMPRATE2 = RM_Field_MODEM_RAMPCTRL_RAMPRATE2(self)
self.zz_fdict['RAMPRATE2'] = self.RAMPRATE2
self.RAMPDIS = RM_Field_MODEM_RAMPCTRL_RAMPDIS(self)
self.zz_fdict['RAMPDIS'] = self.RAMPDIS
self.RAMPVAL = RM_Field_MODEM_RAMPCTRL_RAMPVAL(self)
self.zz_fdict['RAMPVAL'] = self.RAMPVAL
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAMPLEV(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAMPLEV, self).__init__(rmio, label,
0x40086000, 0x06C,
'RAMPLEV', 'MODEM.RAMPLEV', 'read-write',
"",
0x00FFFFFF, 0x00FFFFFF)
self.RAMPLEV0 = RM_Field_MODEM_RAMPLEV_RAMPLEV0(self)
self.zz_fdict['RAMPLEV0'] = self.RAMPLEV0
self.RAMPLEV1 = RM_Field_MODEM_RAMPLEV_RAMPLEV1(self)
self.zz_fdict['RAMPLEV1'] = self.RAMPLEV1
self.RAMPLEV2 = RM_Field_MODEM_RAMPLEV_RAMPLEV2(self)
self.zz_fdict['RAMPLEV2'] = self.RAMPLEV2
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_ROUTEPEN(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_ROUTEPEN, self).__init__(rmio, label,
0x40086000, 0x070,
'ROUTEPEN', 'MODEM.ROUTEPEN', 'read-write',
"",
0x00000000, 0x0000001F)
self.DINPEN = RM_Field_MODEM_ROUTEPEN_DINPEN(self)
self.zz_fdict['DINPEN'] = self.DINPEN
self.DOUTPEN = RM_Field_MODEM_ROUTEPEN_DOUTPEN(self)
self.zz_fdict['DOUTPEN'] = self.DOUTPEN
self.DCLKPEN = RM_Field_MODEM_ROUTEPEN_DCLKPEN(self)
self.zz_fdict['DCLKPEN'] = self.DCLKPEN
self.ANT0PEN = RM_Field_MODEM_ROUTEPEN_ANT0PEN(self)
self.zz_fdict['ANT0PEN'] = self.ANT0PEN
self.ANT1PEN = RM_Field_MODEM_ROUTEPEN_ANT1PEN(self)
self.zz_fdict['ANT1PEN'] = self.ANT1PEN
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_ROUTELOC0(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_ROUTELOC0, self).__init__(rmio, label,
0x40086000, 0x074,
'ROUTELOC0', 'MODEM.ROUTELOC0', 'read-write',
"",
0x00000000, 0x003F3F3F)
self.DINLOC = RM_Field_MODEM_ROUTELOC0_DINLOC(self)
self.zz_fdict['DINLOC'] = self.DINLOC
self.DOUTLOC = RM_Field_MODEM_ROUTELOC0_DOUTLOC(self)
self.zz_fdict['DOUTLOC'] = self.DOUTLOC
self.DCLKLOC = RM_Field_MODEM_ROUTELOC0_DCLKLOC(self)
self.zz_fdict['DCLKLOC'] = self.DCLKLOC
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_ROUTELOC1(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_ROUTELOC1, self).__init__(rmio, label,
0x40086000, 0x078,
'ROUTELOC1', 'MODEM.ROUTELOC1', 'read-write',
"",
0x00000000, 0x00003F3F)
self.ANT0LOC = RM_Field_MODEM_ROUTELOC1_ANT0LOC(self)
self.zz_fdict['ANT0LOC'] = self.ANT0LOC
self.ANT1LOC = RM_Field_MODEM_ROUTELOC1_ANT1LOC(self)
self.zz_fdict['ANT1LOC'] = self.ANT1LOC
self.__dict__['zz_frozen'] = True
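# The next four registers implement the usual flag / set / clear / enable
# interrupt scheme for the MODEM block: IF (read-only flags), IFS
# (write-only set), IFC (write-only clear) and IEN (read-write enable).
# All four expose the same field layout (mask 0x0000FF07), from
# TXFRAMESENT down to RXTIMNF.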
class RM_Register_MODEM_IF(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_IF, self).__init__(rmio, label,
0x40086000, 0x080,
'IF', 'MODEM.IF', 'read-only',
"",
0x00000000, 0x0000FF07)
self.TXFRAMESENT = RM_Field_MODEM_IF_TXFRAMESENT(self)
self.zz_fdict['TXFRAMESENT'] = self.TXFRAMESENT
self.TXSYNCSENT = RM_Field_MODEM_IF_TXSYNCSENT(self)
self.zz_fdict['TXSYNCSENT'] = self.TXSYNCSENT
self.TXPRESENT = RM_Field_MODEM_IF_TXPRESENT(self)
self.zz_fdict['TXPRESENT'] = self.TXPRESENT
self.RXTIMDET = RM_Field_MODEM_IF_RXTIMDET(self)
self.zz_fdict['RXTIMDET'] = self.RXTIMDET
self.RXPREDET = RM_Field_MODEM_IF_RXPREDET(self)
self.zz_fdict['RXPREDET'] = self.RXPREDET
self.RXFRAMEDET0 = RM_Field_MODEM_IF_RXFRAMEDET0(self)
self.zz_fdict['RXFRAMEDET0'] = self.RXFRAMEDET0
self.RXFRAMEDET1 = RM_Field_MODEM_IF_RXFRAMEDET1(self)
self.zz_fdict['RXFRAMEDET1'] = self.RXFRAMEDET1
self.RXTIMLOST = RM_Field_MODEM_IF_RXTIMLOST(self)
self.zz_fdict['RXTIMLOST'] = self.RXTIMLOST
self.RXPRELOST = RM_Field_MODEM_IF_RXPRELOST(self)
self.zz_fdict['RXPRELOST'] = self.RXPRELOST
self.RXFRAMEDETOF = RM_Field_MODEM_IF_RXFRAMEDETOF(self)
self.zz_fdict['RXFRAMEDETOF'] = self.RXFRAMEDETOF
self.RXTIMNF = RM_Field_MODEM_IF_RXTIMNF(self)
self.zz_fdict['RXTIMNF'] = self.RXTIMNF
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_IFS(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_IFS, self).__init__(rmio, label,
0x40086000, 0x084,
'IFS', 'MODEM.IFS', 'write-only',
"",
0x00000000, 0x0000FF07)
self.TXFRAMESENT = RM_Field_MODEM_IFS_TXFRAMESENT(self)
self.zz_fdict['TXFRAMESENT'] = self.TXFRAMESENT
self.TXSYNCSENT = RM_Field_MODEM_IFS_TXSYNCSENT(self)
self.zz_fdict['TXSYNCSENT'] = self.TXSYNCSENT
self.TXPRESENT = RM_Field_MODEM_IFS_TXPRESENT(self)
self.zz_fdict['TXPRESENT'] = self.TXPRESENT
self.RXTIMDET = RM_Field_MODEM_IFS_RXTIMDET(self)
self.zz_fdict['RXTIMDET'] = self.RXTIMDET
self.RXPREDET = RM_Field_MODEM_IFS_RXPREDET(self)
self.zz_fdict['RXPREDET'] = self.RXPREDET
self.RXFRAMEDET0 = RM_Field_MODEM_IFS_RXFRAMEDET0(self)
self.zz_fdict['RXFRAMEDET0'] = self.RXFRAMEDET0
self.RXFRAMEDET1 = RM_Field_MODEM_IFS_RXFRAMEDET1(self)
self.zz_fdict['RXFRAMEDET1'] = self.RXFRAMEDET1
self.RXTIMLOST = RM_Field_MODEM_IFS_RXTIMLOST(self)
self.zz_fdict['RXTIMLOST'] = self.RXTIMLOST
self.RXPRELOST = RM_Field_MODEM_IFS_RXPRELOST(self)
self.zz_fdict['RXPRELOST'] = self.RXPRELOST
self.RXFRAMEDETOF = RM_Field_MODEM_IFS_RXFRAMEDETOF(self)
self.zz_fdict['RXFRAMEDETOF'] = self.RXFRAMEDETOF
self.RXTIMNF = RM_Field_MODEM_IFS_RXTIMNF(self)
self.zz_fdict['RXTIMNF'] = self.RXTIMNF
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_IFC(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_IFC, self).__init__(rmio, label,
0x40086000, 0x088,
'IFC', 'MODEM.IFC', 'write-only',
"",
0x00000000, 0x0000FF07)
self.TXFRAMESENT = RM_Field_MODEM_IFC_TXFRAMESENT(self)
self.zz_fdict['TXFRAMESENT'] = self.TXFRAMESENT
self.TXSYNCSENT = RM_Field_MODEM_IFC_TXSYNCSENT(self)
self.zz_fdict['TXSYNCSENT'] = self.TXSYNCSENT
self.TXPRESENT = RM_Field_MODEM_IFC_TXPRESENT(self)
self.zz_fdict['TXPRESENT'] = self.TXPRESENT
self.RXTIMDET = RM_Field_MODEM_IFC_RXTIMDET(self)
self.zz_fdict['RXTIMDET'] = self.RXTIMDET
self.RXPREDET = RM_Field_MODEM_IFC_RXPREDET(self)
self.zz_fdict['RXPREDET'] = self.RXPREDET
self.RXFRAMEDET0 = RM_Field_MODEM_IFC_RXFRAMEDET0(self)
self.zz_fdict['RXFRAMEDET0'] = self.RXFRAMEDET0
self.RXFRAMEDET1 = RM_Field_MODEM_IFC_RXFRAMEDET1(self)
self.zz_fdict['RXFRAMEDET1'] = self.RXFRAMEDET1
self.RXTIMLOST = RM_Field_MODEM_IFC_RXTIMLOST(self)
self.zz_fdict['RXTIMLOST'] = self.RXTIMLOST
self.RXPRELOST = RM_Field_MODEM_IFC_RXPRELOST(self)
self.zz_fdict['RXPRELOST'] = self.RXPRELOST
self.RXFRAMEDETOF = RM_Field_MODEM_IFC_RXFRAMEDETOF(self)
self.zz_fdict['RXFRAMEDETOF'] = self.RXFRAMEDETOF
self.RXTIMNF = RM_Field_MODEM_IFC_RXTIMNF(self)
self.zz_fdict['RXTIMNF'] = self.RXTIMNF
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_IEN(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_IEN, self).__init__(rmio, label,
0x40086000, 0x08C,
'IEN', 'MODEM.IEN', 'read-write',
"",
0x00000000, 0x0000FF07)
self.TXFRAMESENT = RM_Field_MODEM_IEN_TXFRAMESENT(self)
self.zz_fdict['TXFRAMESENT'] = self.TXFRAMESENT
self.TXSYNCSENT = RM_Field_MODEM_IEN_TXSYNCSENT(self)
self.zz_fdict['TXSYNCSENT'] = self.TXSYNCSENT
self.TXPRESENT = RM_Field_MODEM_IEN_TXPRESENT(self)
self.zz_fdict['TXPRESENT'] = self.TXPRESENT
self.RXTIMDET = RM_Field_MODEM_IEN_RXTIMDET(self)
self.zz_fdict['RXTIMDET'] = self.RXTIMDET
self.RXPREDET = RM_Field_MODEM_IEN_RXPREDET(self)
self.zz_fdict['RXPREDET'] = self.RXPREDET
self.RXFRAMEDET0 = RM_Field_MODEM_IEN_RXFRAMEDET0(self)
self.zz_fdict['RXFRAMEDET0'] = self.RXFRAMEDET0
self.RXFRAMEDET1 = RM_Field_MODEM_IEN_RXFRAMEDET1(self)
self.zz_fdict['RXFRAMEDET1'] = self.RXFRAMEDET1
self.RXTIMLOST = RM_Field_MODEM_IEN_RXTIMLOST(self)
self.zz_fdict['RXTIMLOST'] = self.RXTIMLOST
self.RXPRELOST = RM_Field_MODEM_IEN_RXPRELOST(self)
self.zz_fdict['RXPRELOST'] = self.RXPRELOST
self.RXFRAMEDETOF = RM_Field_MODEM_IEN_RXFRAMEDETOF(self)
self.zz_fdict['RXFRAMEDETOF'] = self.RXFRAMEDETOF
self.RXTIMNF = RM_Field_MODEM_IEN_RXTIMNF(self)
self.zz_fdict['RXTIMNF'] = self.RXTIMNF
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_CMD(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_CMD, self).__init__(rmio, label,
0x40086000, 0x090,
'CMD', 'MODEM.CMD', 'write-only',
"",
0x00000000, 0x00000039)
self.PRESTOP = RM_Field_MODEM_CMD_PRESTOP(self)
self.zz_fdict['PRESTOP'] = self.PRESTOP
self.AFCTXLOCK = RM_Field_MODEM_CMD_AFCTXLOCK(self)
self.zz_fdict['AFCTXLOCK'] = self.AFCTXLOCK
self.AFCTXCLEAR = RM_Field_MODEM_CMD_AFCTXCLEAR(self)
self.zz_fdict['AFCTXCLEAR'] = self.AFCTXCLEAR
self.AFCRXCLEAR = RM_Field_MODEM_CMD_AFCRXCLEAR(self)
self.zz_fdict['AFCRXCLEAR'] = self.AFCRXCLEAR
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_DCCOMP(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_DCCOMP, self).__init__(rmio, label,
0x40086000, 0x098,
'DCCOMP', 'MODEM.DCCOMP', 'read-write',
"",
0x00000030, 0x000001FF)
self.DCESTIEN = RM_Field_MODEM_DCCOMP_DCESTIEN(self)
self.zz_fdict['DCESTIEN'] = self.DCESTIEN
self.DCCOMPEN = RM_Field_MODEM_DCCOMP_DCCOMPEN(self)
self.zz_fdict['DCCOMPEN'] = self.DCCOMPEN
self.DCRSTEN = RM_Field_MODEM_DCCOMP_DCRSTEN(self)
self.zz_fdict['DCRSTEN'] = self.DCRSTEN
self.DCCOMPFREEZE = RM_Field_MODEM_DCCOMP_DCCOMPFREEZE(self)
self.zz_fdict['DCCOMPFREEZE'] = self.DCCOMPFREEZE
self.DCCOMPGEAR = RM_Field_MODEM_DCCOMP_DCCOMPGEAR(self)
self.zz_fdict['DCCOMPGEAR'] = self.DCCOMPGEAR
self.DCLIMIT = RM_Field_MODEM_DCCOMP_DCLIMIT(self)
self.zz_fdict['DCLIMIT'] = self.DCLIMIT
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_DCCOMPFILTINIT(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_DCCOMPFILTINIT, self).__init__(rmio, label,
0x40086000, 0x09C,
'DCCOMPFILTINIT', 'MODEM.DCCOMPFILTINIT', 'read-write',
"",
0x00000000, 0x7FFFFFFF)
self.DCCOMPINITVALI = RM_Field_MODEM_DCCOMPFILTINIT_DCCOMPINITVALI(self)
self.zz_fdict['DCCOMPINITVALI'] = self.DCCOMPINITVALI
self.DCCOMPINITVALQ = RM_Field_MODEM_DCCOMPFILTINIT_DCCOMPINITVALQ(self)
self.zz_fdict['DCCOMPINITVALQ'] = self.DCCOMPINITVALQ
self.DCCOMPINIT = RM_Field_MODEM_DCCOMPFILTINIT_DCCOMPINIT(self)
self.zz_fdict['DCCOMPINIT'] = self.DCCOMPINIT
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_DCESTI(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_DCESTI, self).__init__(rmio, label,
0x40086000, 0x100,
'DCESTI', 'MODEM.DCESTI', 'read-only',
"",
0x00000000, 0x3FFFFFFF)
self.DCCOMPESTIVALI = RM_Field_MODEM_DCESTI_DCCOMPESTIVALI(self)
self.zz_fdict['DCCOMPESTIVALI'] = self.DCCOMPESTIVALI
self.DCCOMPESTIVALQ = RM_Field_MODEM_DCESTI_DCCOMPESTIVALQ(self)
self.zz_fdict['DCCOMPESTIVALQ'] = self.DCCOMPESTIVALQ
self.__dict__['zz_frozen'] = True
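# The RAM<n>_RAMDATA registers that follow share a single shape: each is a
# byte-wide (mask 0x000000FF), read-write register with one DATA field,
# mapped at consecutive word offsets 0x400 + 4*n from the MODEM base
# (RAM0 at 0x400, RAM1 at 0x404, and so on).
#
# Minimal illustrative sketch only: the rmio object and the label string
# below are assumptions about what the enclosing device model normally
# supplies to these constructors.
#
#     ram0 = RM_Register_MODEM_RAM0_RAMDATA(rmio, 'MODEM.RAM0_RAMDATA')
#     data_field = ram0.zz_fdict['DATA']   # same object as ram0.DATA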
class RM_Register_MODEM_RAM0_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM0_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x400,
'RAM0_RAMDATA', 'MODEM.RAM0_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM0_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM1_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM1_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x404,
'RAM1_RAMDATA', 'MODEM.RAM1_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM1_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM2_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM2_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x408,
'RAM2_RAMDATA', 'MODEM.RAM2_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM2_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM3_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM3_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x40C,
'RAM3_RAMDATA', 'MODEM.RAM3_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM3_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM4_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM4_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x410,
'RAM4_RAMDATA', 'MODEM.RAM4_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM4_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM5_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM5_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x414,
'RAM5_RAMDATA', 'MODEM.RAM5_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM5_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM6_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM6_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x418,
'RAM6_RAMDATA', 'MODEM.RAM6_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM6_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM7_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM7_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x41C,
'RAM7_RAMDATA', 'MODEM.RAM7_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM7_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM8_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM8_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x420,
'RAM8_RAMDATA', 'MODEM.RAM8_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM8_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM9_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM9_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x424,
'RAM9_RAMDATA', 'MODEM.RAM9_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM9_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM10_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM10_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x428,
'RAM10_RAMDATA', 'MODEM.RAM10_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM10_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM11_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM11_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x42C,
'RAM11_RAMDATA', 'MODEM.RAM11_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM11_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM12_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM12_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x430,
'RAM12_RAMDATA', 'MODEM.RAM12_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM12_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM13_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM13_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x434,
'RAM13_RAMDATA', 'MODEM.RAM13_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM13_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM14_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM14_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x438,
'RAM14_RAMDATA', 'MODEM.RAM14_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM14_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM15_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM15_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x43C,
'RAM15_RAMDATA', 'MODEM.RAM15_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM15_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM16_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM16_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x440,
'RAM16_RAMDATA', 'MODEM.RAM16_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM16_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM17_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM17_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x444,
'RAM17_RAMDATA', 'MODEM.RAM17_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM17_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM18_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM18_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x448,
'RAM18_RAMDATA', 'MODEM.RAM18_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM18_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM19_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM19_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x44C,
'RAM19_RAMDATA', 'MODEM.RAM19_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM19_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM20_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM20_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x450,
'RAM20_RAMDATA', 'MODEM.RAM20_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM20_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM21_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM21_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x454,
'RAM21_RAMDATA', 'MODEM.RAM21_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM21_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM22_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM22_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x458,
'RAM22_RAMDATA', 'MODEM.RAM22_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM22_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM23_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM23_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x45C,
'RAM23_RAMDATA', 'MODEM.RAM23_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM23_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM24_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM24_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x460,
'RAM24_RAMDATA', 'MODEM.RAM24_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM24_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM25_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM25_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x464,
'RAM25_RAMDATA', 'MODEM.RAM25_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM25_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM26_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM26_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x468,
'RAM26_RAMDATA', 'MODEM.RAM26_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM26_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM27_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM27_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x46C,
'RAM27_RAMDATA', 'MODEM.RAM27_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM27_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM28_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM28_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x470,
'RAM28_RAMDATA', 'MODEM.RAM28_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM28_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM29_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM29_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x474,
'RAM29_RAMDATA', 'MODEM.RAM29_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM29_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM30_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM30_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x478,
'RAM30_RAMDATA', 'MODEM.RAM30_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM30_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM31_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM31_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x47C,
'RAM31_RAMDATA', 'MODEM.RAM31_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM31_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM32_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM32_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x480,
'RAM32_RAMDATA', 'MODEM.RAM32_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM32_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM33_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM33_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x484,
'RAM33_RAMDATA', 'MODEM.RAM33_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM33_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM34_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM34_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x488,
'RAM34_RAMDATA', 'MODEM.RAM34_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM34_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM35_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM35_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x48C,
'RAM35_RAMDATA', 'MODEM.RAM35_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM35_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM36_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM36_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x490,
'RAM36_RAMDATA', 'MODEM.RAM36_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM36_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM37_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM37_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x494,
'RAM37_RAMDATA', 'MODEM.RAM37_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM37_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM38_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM38_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x498,
'RAM38_RAMDATA', 'MODEM.RAM38_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM38_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM39_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM39_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x49C,
'RAM39_RAMDATA', 'MODEM.RAM39_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM39_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM40_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM40_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4A0,
'RAM40_RAMDATA', 'MODEM.RAM40_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM40_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM41_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM41_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4A4,
'RAM41_RAMDATA', 'MODEM.RAM41_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM41_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM42_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM42_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4A8,
'RAM42_RAMDATA', 'MODEM.RAM42_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM42_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM43_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM43_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4AC,
'RAM43_RAMDATA', 'MODEM.RAM43_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM43_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM44_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM44_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4B0,
'RAM44_RAMDATA', 'MODEM.RAM44_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM44_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM45_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM45_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4B4,
'RAM45_RAMDATA', 'MODEM.RAM45_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM45_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM46_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM46_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4B8,
'RAM46_RAMDATA', 'MODEM.RAM46_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM46_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM47_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM47_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4BC,
'RAM47_RAMDATA', 'MODEM.RAM47_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM47_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM48_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM48_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4C0,
'RAM48_RAMDATA', 'MODEM.RAM48_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM48_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM49_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM49_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4C4,
'RAM49_RAMDATA', 'MODEM.RAM49_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM49_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM50_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM50_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4C8,
'RAM50_RAMDATA', 'MODEM.RAM50_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM50_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM51_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM51_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4CC,
'RAM51_RAMDATA', 'MODEM.RAM51_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM51_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM52_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM52_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4D0,
'RAM52_RAMDATA', 'MODEM.RAM52_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM52_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM53_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM53_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4D4,
'RAM53_RAMDATA', 'MODEM.RAM53_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM53_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM54_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM54_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4D8,
'RAM54_RAMDATA', 'MODEM.RAM54_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM54_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM55_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM55_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4DC,
'RAM55_RAMDATA', 'MODEM.RAM55_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM55_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM56_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM56_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4E0,
'RAM56_RAMDATA', 'MODEM.RAM56_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM56_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM57_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM57_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4E4,
'RAM57_RAMDATA', 'MODEM.RAM57_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM57_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM58_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM58_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4E8,
'RAM58_RAMDATA', 'MODEM.RAM58_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM58_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM59_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM59_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4EC,
'RAM59_RAMDATA', 'MODEM.RAM59_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM59_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM60_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM60_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4F0,
'RAM60_RAMDATA', 'MODEM.RAM60_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM60_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM61_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM61_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4F4,
'RAM61_RAMDATA', 'MODEM.RAM61_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM61_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM62_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM62_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4F8,
'RAM62_RAMDATA', 'MODEM.RAM62_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM62_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM63_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM63_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x4FC,
'RAM63_RAMDATA', 'MODEM.RAM63_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM63_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM64_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM64_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x500,
'RAM64_RAMDATA', 'MODEM.RAM64_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM64_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM65_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM65_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x504,
'RAM65_RAMDATA', 'MODEM.RAM65_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM65_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM66_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM66_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x508,
'RAM66_RAMDATA', 'MODEM.RAM66_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM66_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM67_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM67_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x50C,
'RAM67_RAMDATA', 'MODEM.RAM67_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM67_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM68_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM68_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x510,
'RAM68_RAMDATA', 'MODEM.RAM68_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM68_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM69_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM69_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x514,
'RAM69_RAMDATA', 'MODEM.RAM69_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM69_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM70_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM70_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x518,
'RAM70_RAMDATA', 'MODEM.RAM70_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM70_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM71_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM71_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x51C,
'RAM71_RAMDATA', 'MODEM.RAM71_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM71_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM72_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM72_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x520,
'RAM72_RAMDATA', 'MODEM.RAM72_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM72_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM73_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM73_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x524,
'RAM73_RAMDATA', 'MODEM.RAM73_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM73_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM74_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM74_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x528,
'RAM74_RAMDATA', 'MODEM.RAM74_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM74_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM75_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM75_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x52C,
'RAM75_RAMDATA', 'MODEM.RAM75_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM75_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM76_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM76_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x530,
'RAM76_RAMDATA', 'MODEM.RAM76_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM76_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM77_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM77_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x534,
'RAM77_RAMDATA', 'MODEM.RAM77_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM77_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM78_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM78_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x538,
'RAM78_RAMDATA', 'MODEM.RAM78_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM78_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM79_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM79_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x53C,
'RAM79_RAMDATA', 'MODEM.RAM79_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM79_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM80_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM80_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x540,
'RAM80_RAMDATA', 'MODEM.RAM80_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM80_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM81_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM81_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x544,
'RAM81_RAMDATA', 'MODEM.RAM81_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM81_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM82_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM82_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x548,
'RAM82_RAMDATA', 'MODEM.RAM82_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM82_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM83_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM83_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x54C,
'RAM83_RAMDATA', 'MODEM.RAM83_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM83_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM84_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM84_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x550,
'RAM84_RAMDATA', 'MODEM.RAM84_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM84_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM85_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM85_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x554,
'RAM85_RAMDATA', 'MODEM.RAM85_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM85_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM86_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM86_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x558,
'RAM86_RAMDATA', 'MODEM.RAM86_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM86_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM87_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM87_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x55C,
'RAM87_RAMDATA', 'MODEM.RAM87_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM87_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM88_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM88_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x560,
'RAM88_RAMDATA', 'MODEM.RAM88_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM88_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM89_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM89_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x564,
'RAM89_RAMDATA', 'MODEM.RAM89_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM89_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM90_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM90_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x568,
'RAM90_RAMDATA', 'MODEM.RAM90_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM90_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM91_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM91_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x56C,
'RAM91_RAMDATA', 'MODEM.RAM91_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM91_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM92_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM92_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x570,
'RAM92_RAMDATA', 'MODEM.RAM92_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM92_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM93_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM93_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x574,
'RAM93_RAMDATA', 'MODEM.RAM93_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM93_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM94_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM94_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x578,
'RAM94_RAMDATA', 'MODEM.RAM94_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM94_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM95_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM95_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x57C,
'RAM95_RAMDATA', 'MODEM.RAM95_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM95_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM96_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM96_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x580,
'RAM96_RAMDATA', 'MODEM.RAM96_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM96_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM97_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM97_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x584,
'RAM97_RAMDATA', 'MODEM.RAM97_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM97_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM98_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM98_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x588,
'RAM98_RAMDATA', 'MODEM.RAM98_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM98_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM99_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM99_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x58C,
'RAM99_RAMDATA', 'MODEM.RAM99_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM99_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM100_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM100_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x590,
'RAM100_RAMDATA', 'MODEM.RAM100_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM100_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM101_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM101_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x594,
'RAM101_RAMDATA', 'MODEM.RAM101_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM101_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM102_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM102_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x598,
'RAM102_RAMDATA', 'MODEM.RAM102_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM102_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM103_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM103_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x59C,
'RAM103_RAMDATA', 'MODEM.RAM103_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM103_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM104_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM104_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5A0,
'RAM104_RAMDATA', 'MODEM.RAM104_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM104_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM105_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM105_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5A4,
'RAM105_RAMDATA', 'MODEM.RAM105_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM105_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM106_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM106_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5A8,
'RAM106_RAMDATA', 'MODEM.RAM106_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM106_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM107_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM107_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5AC,
'RAM107_RAMDATA', 'MODEM.RAM107_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM107_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM108_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM108_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5B0,
'RAM108_RAMDATA', 'MODEM.RAM108_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM108_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM109_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM109_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5B4,
'RAM109_RAMDATA', 'MODEM.RAM109_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM109_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM110_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM110_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5B8,
'RAM110_RAMDATA', 'MODEM.RAM110_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM110_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM111_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM111_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5BC,
'RAM111_RAMDATA', 'MODEM.RAM111_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM111_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM112_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM112_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5C0,
'RAM112_RAMDATA', 'MODEM.RAM112_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM112_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM113_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM113_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5C4,
'RAM113_RAMDATA', 'MODEM.RAM113_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM113_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM114_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM114_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5C8,
'RAM114_RAMDATA', 'MODEM.RAM114_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM114_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM115_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM115_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5CC,
'RAM115_RAMDATA', 'MODEM.RAM115_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM115_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM116_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM116_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5D0,
'RAM116_RAMDATA', 'MODEM.RAM116_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM116_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM117_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM117_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5D4,
'RAM117_RAMDATA', 'MODEM.RAM117_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM117_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM118_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM118_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5D8,
'RAM118_RAMDATA', 'MODEM.RAM118_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM118_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM119_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM119_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5DC,
'RAM119_RAMDATA', 'MODEM.RAM119_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM119_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM120_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM120_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5E0,
'RAM120_RAMDATA', 'MODEM.RAM120_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM120_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM121_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM121_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5E4,
'RAM121_RAMDATA', 'MODEM.RAM121_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM121_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM122_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM122_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5E8,
'RAM122_RAMDATA', 'MODEM.RAM122_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM122_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM123_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM123_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5EC,
'RAM123_RAMDATA', 'MODEM.RAM123_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM123_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM124_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM124_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5F0,
'RAM124_RAMDATA', 'MODEM.RAM124_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM124_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM125_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM125_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5F4,
'RAM125_RAMDATA', 'MODEM.RAM125_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM125_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM126_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM126_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5F8,
'RAM126_RAMDATA', 'MODEM.RAM126_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM126_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM127_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM127_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x5FC,
'RAM127_RAMDATA', 'MODEM.RAM127_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM127_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM128_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM128_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x600,
'RAM128_RAMDATA', 'MODEM.RAM128_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM128_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM129_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM129_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x604,
'RAM129_RAMDATA', 'MODEM.RAM129_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM129_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM130_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM130_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x608,
'RAM130_RAMDATA', 'MODEM.RAM130_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM130_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM131_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM131_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x60C,
'RAM131_RAMDATA', 'MODEM.RAM131_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM131_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM132_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM132_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x610,
'RAM132_RAMDATA', 'MODEM.RAM132_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM132_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM133_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM133_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x614,
'RAM133_RAMDATA', 'MODEM.RAM133_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM133_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM134_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM134_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x618,
'RAM134_RAMDATA', 'MODEM.RAM134_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM134_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM135_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM135_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x61C,
'RAM135_RAMDATA', 'MODEM.RAM135_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM135_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM136_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM136_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x620,
'RAM136_RAMDATA', 'MODEM.RAM136_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM136_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM137_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM137_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x624,
'RAM137_RAMDATA', 'MODEM.RAM137_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM137_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM138_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM138_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x628,
'RAM138_RAMDATA', 'MODEM.RAM138_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM138_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM139_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM139_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x62C,
'RAM139_RAMDATA', 'MODEM.RAM139_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM139_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM140_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM140_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x630,
'RAM140_RAMDATA', 'MODEM.RAM140_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM140_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM141_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM141_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x634,
'RAM141_RAMDATA', 'MODEM.RAM141_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM141_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM142_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM142_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x638,
'RAM142_RAMDATA', 'MODEM.RAM142_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM142_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM143_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM143_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x63C,
'RAM143_RAMDATA', 'MODEM.RAM143_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM143_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM144_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM144_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x640,
'RAM144_RAMDATA', 'MODEM.RAM144_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM144_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM145_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM145_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x644,
'RAM145_RAMDATA', 'MODEM.RAM145_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM145_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM146_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM146_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x648,
'RAM146_RAMDATA', 'MODEM.RAM146_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM146_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM147_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM147_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x64C,
'RAM147_RAMDATA', 'MODEM.RAM147_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM147_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM148_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM148_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x650,
'RAM148_RAMDATA', 'MODEM.RAM148_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM148_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM149_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM149_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x654,
'RAM149_RAMDATA', 'MODEM.RAM149_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM149_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM150_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM150_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x658,
'RAM150_RAMDATA', 'MODEM.RAM150_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM150_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM151_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM151_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x65C,
'RAM151_RAMDATA', 'MODEM.RAM151_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM151_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM152_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM152_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x660,
'RAM152_RAMDATA', 'MODEM.RAM152_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM152_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM153_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM153_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x664,
'RAM153_RAMDATA', 'MODEM.RAM153_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM153_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM154_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM154_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x668,
'RAM154_RAMDATA', 'MODEM.RAM154_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM154_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM155_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM155_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x66C,
'RAM155_RAMDATA', 'MODEM.RAM155_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM155_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM156_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM156_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x670,
'RAM156_RAMDATA', 'MODEM.RAM156_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM156_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM157_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM157_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x674,
'RAM157_RAMDATA', 'MODEM.RAM157_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM157_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM158_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM158_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x678,
'RAM158_RAMDATA', 'MODEM.RAM158_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM158_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM159_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM159_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x67C,
'RAM159_RAMDATA', 'MODEM.RAM159_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM159_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM160_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM160_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x680,
'RAM160_RAMDATA', 'MODEM.RAM160_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM160_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM161_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM161_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x684,
'RAM161_RAMDATA', 'MODEM.RAM161_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM161_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM162_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM162_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x688,
'RAM162_RAMDATA', 'MODEM.RAM162_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM162_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM163_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM163_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x68C,
'RAM163_RAMDATA', 'MODEM.RAM163_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM163_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM164_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM164_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x690,
'RAM164_RAMDATA', 'MODEM.RAM164_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM164_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM165_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM165_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x694,
'RAM165_RAMDATA', 'MODEM.RAM165_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM165_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM166_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM166_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x698,
'RAM166_RAMDATA', 'MODEM.RAM166_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM166_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM167_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM167_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x69C,
'RAM167_RAMDATA', 'MODEM.RAM167_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM167_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM168_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM168_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6A0,
'RAM168_RAMDATA', 'MODEM.RAM168_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM168_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM169_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM169_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6A4,
'RAM169_RAMDATA', 'MODEM.RAM169_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM169_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM170_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM170_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6A8,
'RAM170_RAMDATA', 'MODEM.RAM170_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM170_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM171_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM171_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6AC,
'RAM171_RAMDATA', 'MODEM.RAM171_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM171_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM172_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM172_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6B0,
'RAM172_RAMDATA', 'MODEM.RAM172_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM172_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM173_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM173_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6B4,
'RAM173_RAMDATA', 'MODEM.RAM173_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM173_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM174_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM174_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6B8,
'RAM174_RAMDATA', 'MODEM.RAM174_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM174_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM175_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM175_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6BC,
'RAM175_RAMDATA', 'MODEM.RAM175_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM175_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM176_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM176_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6C0,
'RAM176_RAMDATA', 'MODEM.RAM176_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM176_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM177_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM177_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6C4,
'RAM177_RAMDATA', 'MODEM.RAM177_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM177_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM178_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM178_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6C8,
'RAM178_RAMDATA', 'MODEM.RAM178_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM178_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM179_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM179_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6CC,
'RAM179_RAMDATA', 'MODEM.RAM179_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM179_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM180_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM180_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6D0,
'RAM180_RAMDATA', 'MODEM.RAM180_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM180_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM181_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM181_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6D4,
'RAM181_RAMDATA', 'MODEM.RAM181_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM181_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM182_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM182_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6D8,
'RAM182_RAMDATA', 'MODEM.RAM182_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM182_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM183_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM183_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6DC,
'RAM183_RAMDATA', 'MODEM.RAM183_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM183_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM184_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM184_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6E0,
'RAM184_RAMDATA', 'MODEM.RAM184_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM184_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM185_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM185_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6E4,
'RAM185_RAMDATA', 'MODEM.RAM185_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM185_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM186_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM186_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6E8,
'RAM186_RAMDATA', 'MODEM.RAM186_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM186_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM187_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM187_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6EC,
'RAM187_RAMDATA', 'MODEM.RAM187_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM187_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM188_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM188_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6F0,
'RAM188_RAMDATA', 'MODEM.RAM188_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM188_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM189_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM189_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6F4,
'RAM189_RAMDATA', 'MODEM.RAM189_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM189_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM190_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM190_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6F8,
'RAM190_RAMDATA', 'MODEM.RAM190_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM190_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM191_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM191_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x6FC,
'RAM191_RAMDATA', 'MODEM.RAM191_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM191_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM192_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM192_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x700,
'RAM192_RAMDATA', 'MODEM.RAM192_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM192_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM193_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM193_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x704,
'RAM193_RAMDATA', 'MODEM.RAM193_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM193_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM194_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM194_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x708,
'RAM194_RAMDATA', 'MODEM.RAM194_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM194_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM195_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM195_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x70C,
'RAM195_RAMDATA', 'MODEM.RAM195_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM195_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM196_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM196_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x710,
'RAM196_RAMDATA', 'MODEM.RAM196_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM196_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM197_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM197_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x714,
'RAM197_RAMDATA', 'MODEM.RAM197_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM197_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM198_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM198_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x718,
'RAM198_RAMDATA', 'MODEM.RAM198_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM198_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM199_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM199_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x71C,
'RAM199_RAMDATA', 'MODEM.RAM199_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM199_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM200_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM200_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x720,
'RAM200_RAMDATA', 'MODEM.RAM200_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM200_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM201_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM201_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x724,
'RAM201_RAMDATA', 'MODEM.RAM201_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM201_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM202_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM202_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x728,
'RAM202_RAMDATA', 'MODEM.RAM202_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM202_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM203_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM203_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x72C,
'RAM203_RAMDATA', 'MODEM.RAM203_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM203_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM204_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM204_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x730,
'RAM204_RAMDATA', 'MODEM.RAM204_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM204_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM205_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM205_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x734,
'RAM205_RAMDATA', 'MODEM.RAM205_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM205_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM206_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM206_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x738,
'RAM206_RAMDATA', 'MODEM.RAM206_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM206_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM207_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM207_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x73C,
'RAM207_RAMDATA', 'MODEM.RAM207_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM207_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM208_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM208_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x740,
'RAM208_RAMDATA', 'MODEM.RAM208_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM208_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM209_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM209_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x744,
'RAM209_RAMDATA', 'MODEM.RAM209_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM209_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM210_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM210_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x748,
'RAM210_RAMDATA', 'MODEM.RAM210_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM210_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM211_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM211_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x74C,
'RAM211_RAMDATA', 'MODEM.RAM211_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM211_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM212_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM212_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x750,
'RAM212_RAMDATA', 'MODEM.RAM212_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM212_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM213_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM213_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x754,
'RAM213_RAMDATA', 'MODEM.RAM213_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM213_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM214_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM214_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x758,
'RAM214_RAMDATA', 'MODEM.RAM214_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM214_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM215_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM215_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x75C,
'RAM215_RAMDATA', 'MODEM.RAM215_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM215_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM216_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM216_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x760,
'RAM216_RAMDATA', 'MODEM.RAM216_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM216_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM217_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM217_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x764,
'RAM217_RAMDATA', 'MODEM.RAM217_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM217_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM218_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM218_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x768,
'RAM218_RAMDATA', 'MODEM.RAM218_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM218_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM219_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM219_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x76C,
'RAM219_RAMDATA', 'MODEM.RAM219_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM219_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM220_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM220_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x770,
'RAM220_RAMDATA', 'MODEM.RAM220_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM220_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM221_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM221_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x774,
'RAM221_RAMDATA', 'MODEM.RAM221_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM221_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM222_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM222_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x778,
'RAM222_RAMDATA', 'MODEM.RAM222_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM222_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM223_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM223_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x77C,
'RAM223_RAMDATA', 'MODEM.RAM223_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM223_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM224_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM224_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x780,
'RAM224_RAMDATA', 'MODEM.RAM224_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM224_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM225_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM225_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x784,
'RAM225_RAMDATA', 'MODEM.RAM225_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM225_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM226_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM226_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x788,
'RAM226_RAMDATA', 'MODEM.RAM226_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM226_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM227_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM227_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x78C,
'RAM227_RAMDATA', 'MODEM.RAM227_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM227_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM228_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM228_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x790,
'RAM228_RAMDATA', 'MODEM.RAM228_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM228_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM229_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM229_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x794,
'RAM229_RAMDATA', 'MODEM.RAM229_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM229_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM230_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM230_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x798,
'RAM230_RAMDATA', 'MODEM.RAM230_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM230_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM231_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM231_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x79C,
'RAM231_RAMDATA', 'MODEM.RAM231_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM231_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM232_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM232_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7A0,
'RAM232_RAMDATA', 'MODEM.RAM232_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM232_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM233_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM233_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7A4,
'RAM233_RAMDATA', 'MODEM.RAM233_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM233_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM234_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM234_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7A8,
'RAM234_RAMDATA', 'MODEM.RAM234_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM234_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM235_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM235_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7AC,
'RAM235_RAMDATA', 'MODEM.RAM235_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM235_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM236_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM236_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7B0,
'RAM236_RAMDATA', 'MODEM.RAM236_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM236_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM237_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM237_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7B4,
'RAM237_RAMDATA', 'MODEM.RAM237_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM237_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM238_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM238_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7B8,
'RAM238_RAMDATA', 'MODEM.RAM238_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM238_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM239_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM239_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7BC,
'RAM239_RAMDATA', 'MODEM.RAM239_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM239_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM240_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM240_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7C0,
'RAM240_RAMDATA', 'MODEM.RAM240_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM240_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM241_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM241_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7C4,
'RAM241_RAMDATA', 'MODEM.RAM241_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM241_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM242_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM242_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7C8,
'RAM242_RAMDATA', 'MODEM.RAM242_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM242_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM243_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM243_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7CC,
'RAM243_RAMDATA', 'MODEM.RAM243_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM243_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM244_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM244_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7D0,
'RAM244_RAMDATA', 'MODEM.RAM244_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM244_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM245_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM245_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7D4,
'RAM245_RAMDATA', 'MODEM.RAM245_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM245_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM246_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM246_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7D8,
'RAM246_RAMDATA', 'MODEM.RAM246_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM246_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM247_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM247_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7DC,
'RAM247_RAMDATA', 'MODEM.RAM247_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM247_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM248_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM248_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7E0,
'RAM248_RAMDATA', 'MODEM.RAM248_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM248_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM249_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM249_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7E4,
'RAM249_RAMDATA', 'MODEM.RAM249_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM249_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM250_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM250_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7E8,
'RAM250_RAMDATA', 'MODEM.RAM250_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM250_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM251_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM251_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7EC,
'RAM251_RAMDATA', 'MODEM.RAM251_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM251_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM252_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM252_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7F0,
'RAM252_RAMDATA', 'MODEM.RAM252_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM252_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM253_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM253_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7F4,
'RAM253_RAMDATA', 'MODEM.RAM253_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM253_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM254_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM254_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7F8,
'RAM254_RAMDATA', 'MODEM.RAM254_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM254_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
class RM_Register_MODEM_RAM255_RAMDATA(Base_RM_Register):
def __init__(self, rmio, label):
self.__dict__['zz_frozen'] = False
super(RM_Register_MODEM_RAM255_RAMDATA, self).__init__(rmio, label,
0x40086000, 0x7FC,
'RAM255_RAMDATA', 'MODEM.RAM255_RAMDATA', 'read-write',
"",
0x00000000, 0x000000FF)
self.DATA = RM_Field_MODEM_RAM255_RAMDATA_DATA(self)
self.zz_fdict['DATA'] = self.DATA
self.__dict__['zz_frozen'] = True
| [
"acvilla@bu.edu"
] | acvilla@bu.edu |
9ccc834aebd99d7f4a512631c7877a943ff2424a | 11a24575d88d01238edf40ad75dcc45cb148a578 | /RNASeq.py | 7168f26a710c0cb2251c218d715fc44f9fb5d195 | [
"Apache-2.0"
] | permissive | warrenmcg/altanalyze | 2b20b7b830ff5a2f938a6f596a2349bcaa51d0be | b132f0bca3baaeab4afbe5475f6e47a496b79d46 | refs/heads/master | 2020-04-06T04:12:51.598146 | 2017-02-27T08:37:29 | 2017-02-27T08:37:29 | 83,016,611 | 0 | 0 | null | 2017-02-24T07:58:47 | 2017-02-24T07:58:47 | null | UTF-8 | Python | false | false | 281,168 | py | ###RNASeq
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string, os
import statistics
import math
import os.path
import unique
import update
import copy
import time
import export
import EnsemblImport; reload(EnsemblImport)
import JunctionArrayEnsemblRules
import JunctionArray; reload(JunctionArray)
import ExonArrayEnsemblRules
import multiprocessing
import logging
import traceback
import warnings
import bisect
import clustering; reload(clustering)
try:
import scipy
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
except Exception: pass
try: import numpy
except Exception: pass
LegacyMode = True
try:
from scipy import average as Average
from scipy import stats
except Exception:
from statistics import avg as Average
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
######### Below code deals with building the AltDatabase #########
def collapseNoveExonBoundaries(novel_exon_coordinates,dataset_dir):
""" Merge exon predictions based on junction measurments from TopHat. The predicted exons are
bound by the identified splice site and the consensus length of reads in that sample"""
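    # Illustrative sketch of the merge rule implemented below (hypothetical coordinates):
    # if one junction predicts a novel exon boundary at chr1:1000 (its 'right' side) and a
    # second junction predicts a boundary at chr1:1250 (its 'left' side) within the same
    # intron, the two predictions are within 300 nt of each other and on opposite sides,
    # so they are collapsed into a single novel exon reported as chr1:1000-1250.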
dataset_dir = string.replace(dataset_dir,'exp.','ExpressionInput/novel.')
export_data,status = AppendOrWrite(dataset_dir) ### Export all novel exons
if status == 'not found':
export_data.write('GeneID\tStrand\tExonID\tCoordinates\n')
novel_gene_exon_db={}
for (chr,coord) in novel_exon_coordinates:
key = (chr,coord)
ji,side,coord2 = novel_exon_coordinates[(chr,coord)]
try:
if side == 'left': ### left corresponds to the position of coord
intron = string.split(ji.ExonRegionID(),'-')[1][:2]
else:
intron = string.split(ji.ExonRegionID(),'-')[0][:2]
ls = [coord,coord2]
ls.sort() ### The order of this is variable
if ji.Strand() == '-':
coord2,coord = ls
else: coord,coord2 = ls
if 'I' in intron and ji.Novel() == 'side':
#if 'ENSG00000221983' == ji.GeneID():
try: novel_gene_exon_db[ji.GeneID(),ji.Strand(),intron].append((coord,coord2,ji,key,side))
except Exception: novel_gene_exon_db[ji.GeneID(),ji.Strand(),intron] = [(coord,coord2,ji,key,side)]
except Exception: pass
outdatedExons={} ### merging novel exons, delete one of the two original
for key in novel_gene_exon_db:
firstNovel=True ### First putative novel exon coordinates examined for that gene
novel_gene_exon_db[key].sort()
if key[1]=='-':
novel_gene_exon_db[key].reverse()
for (c1,c2,ji,k,s) in novel_gene_exon_db[key]:
if firstNovel==False:
#print [c1,l2] #abs(c1-l2);sys.exit()
### see if the difference between the start position of the second exon is less than 300 nt away from the end of the last
if abs(c2-l1) < 300 and os!=s: ### 80% of human exons are less than 200nt - PMID: 15217358
proceed = True
#if key[1]=='-':
if c2 in k:
novel_exon_coordinates[k] = ji,s,l1
outdatedExons[ok]=None ### merged out entry
elif l1 in ok:
novel_exon_coordinates[ok] = li,os,c2
outdatedExons[k]=None ### merged out entry
else:
                        proceed = False ### Hence, the two splice-site ends point to two distinct exons rather than one common exon
"""
if c2 == 18683670 or l1 == 18683670:
print key,abs(c2-l1), c1, c2, l1, l2, li.ExonRegionID(), ji.ExonRegionID();
print k,novel_exon_coordinates[k]
print ok,novel_exon_coordinates[ok]
"""
if proceed:
values = string.join([ji.GeneID(),ji.Strand(),key[2],ji.Chr()+':'+str(l1)+'-'+str(c2)],'\t')+'\n'
export_data.write(values)
### For negative strand genes, c1 is larger than c2 but is the 5' begining of the exon
l1,l2,li,ok,os = c1,c2,ji,k,s ### record the last entry
firstNovel=False
for key in outdatedExons: ### Delete the non-merged entry
del novel_exon_coordinates[key]
export_data.close()
return novel_exon_coordinates
def exportNovelExonToBedCoordinates(species,novel_exon_coordinates,chr_status,searchChr=None):
### Export the novel exon coordinates based on those in the junction BED file to examine the differential expression of the predicted novel exon
#bamToBed -i accepted_hits.bam -split| coverageBed -a stdin -b /home/databases/hESC_differentiation_exons.bed > day20_7B__exons-novel.bed
bed_export_path = filepath('AltDatabase/'+species+'/RNASeq/chr/'+species + '_Ensembl_exons'+searchChr+'.bed')
bed_data = open(bed_export_path,'w') ### Appends to existing file
for (chr,coord) in novel_exon_coordinates:
ji,side,coord2 = novel_exon_coordinates[(chr,coord)]
if side == 'left': start,stop = coord,coord2
if side == 'right': start,stop = coord2,coord
try: gene = ji.GeneID()
except Exception: gene = 'NA'
        if gene == None: gene = 'NA'
if gene != 'NA': ### Including these has no benefit for AltAnalyze (just slows down alignment and piles up memory)
if ji.Strand() == '-': stop,start=start,stop
if chr_status == False:
chr = string.replace(chr,'chr','') ### This will thus match up to the BAM files
a = [start,stop]; a.sort(); start,stop = a
bed_values = [chr,str(start),str(stop),gene,'0',str(ji.Strand())]
bed_values = cleanUpLine(string.join(bed_values,'\t'))+'\n'
bed_data.write(bed_values)
bed_data.close()
return bed_export_path
def moveBAMtoBEDFile(species,dataset_name,root_dir):
bed_export_path = filepath('AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.bed')
dataset_name = string.replace(dataset_name,'exp.','')
new_fn = root_dir+'/BAMtoBED/'+species + '_'+dataset_name+'_exons.bed'
new_fn = string.replace(new_fn,'.txt','')
print 'Writing exon-level coordinates to BED file:'
print new_fn
    catFiles(bed_export_path,'chr') ### concatenate the files to the main AltDatabase directory then move
export.customFileMove(bed_export_path,new_fn)
return new_fn
def reformatExonFile(species,type,chr_status):
if type == 'exon':
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
export_path = 'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.txt'
### Used by BEDTools to get counts per specific AltAnalyze exon region (should augment with de novo regions identified from junction analyses)
bed_export_path = 'AltDatabase/'+species+'/RNASeq/chr/'+species + '_Ensembl_exons.bed'
bed_data = export.ExportFile(bed_export_path)
else:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_junction.txt'
export_path = 'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
print 'Writing',export_path
export_data = export.ExportFile(export_path)
fn=filepath(filename); x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
x+=1
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['affy_class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
else:
try: gene, exonid, chr, strand, start, stop, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t
except Exception: print t;kill
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention,
if constitutive_call == 'yes': ens_constitutive_status = '1'
else: ens_constitutive_status = '0'
export_values = [gene+':'+exonid, exonid, gene, '', chr, strand, start, stop, 'known', constitutive_call, ens_exon_ids, ens_constitutive_status]
export_values+= [exonid, start, stop, splice_events, splice_junctions]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
if type == 'exon':
if chr_status == False:
chr = string.replace(chr,'chr','') ### This will thus match up to the BAM files
bed_values = [chr,start,stop,gene+':'+exonid+'_'+ens_exon_ids,'0',strand]
bed_values = string.join(bed_values,'\t')+'\n'; bed_data.write(bed_values)
export_data.close()
if type == 'exon': bed_data.close()
def importExonAnnotations(species,type,search_chr):
if 'exon' in type:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt'
else:
filename = 'AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_junction.txt'
fn=filepath(filename); x=0; exon_annotation_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1
else:
gene, exonid, chr, strand, start, stop, constitutive_call, ens_exon_ids, splice_events, splice_junctions = t; proceed = 'yes'
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if len(search_chr)>0:
if chr != search_chr: proceed = 'no'
if proceed == 'yes':
if type == 'exon': start = int(start); stop = int(stop)
ea = EnsemblImport.ExonAnnotationsSimple(chr, strand, start, stop, gene, ens_exon_ids, constitutive_call, exonid, splice_events, splice_junctions)
if type == 'junction_coordinates':
exon1_start,exon1_stop = string.split(start,'|')
exon2_start,exon2_stop = string.split(stop,'|')
if strand == '-':
exon1_stop,exon1_start = exon1_start,exon1_stop
exon2_stop,exon2_start = exon2_start,exon2_stop
#if gene == 'ENSMUSG00000027340': print chr,int(exon1_stop),int(exon2_start)
exon_annotation_db[chr,int(exon1_stop),int(exon2_start)]=ea
elif type == 'distal-exon':
exon_annotation_db[gene] = exonid
else:
try: exon_annotation_db[gene].append(ea)
except KeyError: exon_annotation_db[gene]=[ea]
return exon_annotation_db
def exportKnownJunctionComparisons(species):
gene_junction_db = JunctionArrayEnsemblRules.importEnsemblUCSCAltJunctions(species,'standard')
gene_intronjunction_db = JunctionArrayEnsemblRules.importEnsemblUCSCAltJunctions(species,'_intronic')
for i in gene_intronjunction_db: gene_junction_db[i]=[]
gene_junction_db2={}
for (gene,critical_exon,incl_junction,excl_junction) in gene_junction_db:
critical_exons = string.split(critical_exon,'|')
for critical_exon in critical_exons:
try: gene_junction_db2[gene,incl_junction,excl_junction].append(critical_exon)
except Exception: gene_junction_db2[gene,incl_junction,excl_junction] = [critical_exon]
gene_junction_db = gene_junction_db2; gene_junction_db2=[]
junction_export = 'AltDatabase/' + species + '/RNASeq/'+ species + '_junction_comps.txt'
fn=filepath(junction_export); data = open(fn,'w')
print "Exporting",junction_export
title = 'gene'+'\t'+'critical_exon'+'\t'+'exclusion_junction_region'+'\t'+'inclusion_junction_region'+'\t'+'exclusion_probeset'+'\t'+'inclusion_probeset'+'\t'+'data_source'+'\n'
data.write(title); temp_list=[]
for (gene,incl_junction,excl_junction) in gene_junction_db:
critical_exons = unique.unique(gene_junction_db[(gene,incl_junction,excl_junction)])
critical_exon = string.join(critical_exons,'|')
temp_list.append(string.join([gene,critical_exon,excl_junction,incl_junction,gene+':'+excl_junction,gene+':'+incl_junction,'AltAnalyze'],'\t')+'\n')
temp_list = unique.unique(temp_list)
for i in temp_list: data.write(i)
data.close()
def getExonAndJunctionSequences(species):
export_exon_filename = 'AltDatabase/'+species+'/RNASeq/'+species+'_Ensembl_exons.txt'
ensembl_exon_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'null',{})
### Import just the probeset region for mRNA alignment analysis
analysis_type = ('region_only','get_sequence'); array_type = 'RNASeq'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_exon_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_exon_db,species,analysis_type)
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
getCriticalJunctionSequences(critical_exon_file,species,ensembl_exon_db)
"""
### Import the full Ensembl exon sequence (not just the probeset region) for miRNA binding site analysis
analysis_type = 'get_sequence'; array_type = 'RNASeq'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_exon_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_exon_db,species,analysis_type)
"""
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
updateCriticalExonSequences(critical_exon_file, ensembl_exon_db)
def updateCriticalExonSequences(filename,ensembl_exon_db):
exon_seq_db_filename = filename[:-4]+'_updated.txt'
exonseq_data = export.ExportFile(exon_seq_db_filename)
critical_exon_seq_db={}; null_count={}
for gene in ensembl_exon_db:
gene_exon_data={}
for probe_data in ensembl_exon_db[gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: gene_exon_data[probeset_id] = ed.ExonSeq()
except Exception: null_count[gene]=[] ### Occurs for non-chromosomal DNA (could also download this sequence though)
if len(gene_exon_data)>0: critical_exon_seq_db[gene] = gene_exon_data
    print len(null_count),'genes not assigned sequence (e.g., non-chromosomal)'
ensembl_exon_db=[]
### Export exon sequences
for gene in critical_exon_seq_db:
gene_exon_data = critical_exon_seq_db[gene]
for probeset in gene_exon_data:
critical_exon_seq = gene_exon_data[probeset]
values = [probeset,'',critical_exon_seq]
values = string.join(values,'\t')+'\n'
exonseq_data.write(values)
exonseq_data.close()
print exon_seq_db_filename, 'exported....'
def getCriticalJunctionSequences(filename,species,ensembl_exon_db):
### Assemble and export junction sequences
junction_seq_db_filename = string.replace(filename,'exon-seq','junction-seq')
junctionseq_data = export.ExportFile(junction_seq_db_filename)
critical_exon_seq_db={}; null_count={}
for gene in ensembl_exon_db:
gene_exon_data={}
for probe_data in ensembl_exon_db[gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: gene_exon_data[probeset_id] = ed.ExonSeq()
except Exception: null_count[gene]=[] ### Occurs for non-chromosomal DNA (could also download this sequence though)
if len(gene_exon_data)>0: critical_exon_seq_db[gene] = gene_exon_data
    print len(null_count),'genes not assigned sequence (e.g., non-chromosomal)'
ensembl_exon_db=[]
junction_annotation_db = importExonAnnotations(species,'junction',[])
for gene in junction_annotation_db:
if gene in critical_exon_seq_db:
gene_exon_data = critical_exon_seq_db[gene]
for jd in junction_annotation_db[gene]:
exon1,exon2=string.split(jd.ExonRegionIDs(),'-')
p1=gene+':'+exon1
p2=gene+':'+exon2
p1_seq=gene_exon_data[p1][-15:]
p2_seq=gene_exon_data[p2][:15]
junction_seq = p1_seq+'|'+p2_seq
junctionseq_data.write(gene+':'+jd.ExonRegionIDs()+'\t'+junction_seq+'\t\n')
junctionseq_data.close()
print junction_seq_db_filename, 'exported....'
def getEnsemblAssociations(species,data_type,test_status,force):
### Get UCSC associations (download databases if necessary)
import UCSCImport
mRNA_Type = 'mrna'; run_from_scratch = 'yes'
export_all_associations = 'no' ### YES only for protein prediction analysis
update.buildUCSCAnnoationFiles(species,mRNA_Type,export_all_associations,run_from_scratch,force)
null = EnsemblImport.getEnsemblAssociations(species,data_type,test_status); null=[]
reformatExonFile(species,'exon',True); reformatExonFile(species,'junction',True)
exportKnownJunctionComparisons(species)
getExonAndJunctionSequences(species)
######### Below code deals with user read alignment as opposed to building the AltDatabase #########
class ExonInfo:
def __init__(self,start,unique_id,annotation):
self.start = start; self.unique_id = unique_id; self.annotation = annotation
def ReadStart(self): return self.start
def UniqueID(self): return self.unique_id
def Annotation(self): return self.annotation
def setExonRegionData(self,rd): self.rd = rd
def ExonRegionData(self): return self.rd
def setExonRegionID(self,region_id): self.region_id = region_id
def ExonRegionID(self): return self.region_id
def setAlignmentRegion(self,region_type): self.region_type = region_type
def AlignmentRegion(self): return self.region_type
def __repr__(self): return "ExonData values"
class JunctionData:
def __init__(self,chr,strand,exon1_stop,exon2_start,junction_id,biotype):
self.chr = chr; self.strand = strand; self._chr = chr
self.exon1_stop = exon1_stop; self.exon2_start = exon2_start
self.junction_id = junction_id; self.biotype = biotype
#self.reads = reads; self.condition = condition
self.left_exon = None; self.right_exon = None; self.jd = None; self.gene_id = None
self.trans_splicing = None
self.splice_events=''
self.splice_junctions=''
self.seq_length=''
self.uid = None
def Chr(self): return self.chr
def Strand(self): return self.strand
def Exon1Stop(self): return self.exon1_stop
def Exon2Start(self): return self.exon2_start
def setExon1Stop(self,exon1_stop): self.exon1_stop = exon1_stop
def setExon2Start(self,exon2_start): self.exon2_start = exon2_start
def setSeqLength(self,seq_length): self.seq_length = seq_length
def SeqLength(self): return self.seq_length
def BioType(self): return self.biotype
def checkExonPosition(self,exon_pos):
if exon_pos == self.Exon1Stop(): return 'left'
else: return 'right'
### These are used to report novel exon boundaries
def setExon1Start(self,exon1_start): self.exon1_start = exon1_start
def setExon2Stop(self,exon2_stop): self.exon2_stop = exon2_stop
def Exon1Start(self): return self.exon1_start
def Exon2Stop(self): return self.exon2_stop
def Reads(self): return self.reads
def JunctionID(self): return self.junction_id
def Condition(self): return self.condition
def setExonAnnotations(self,jd):
self.jd = jd
self.splice_events = jd.AssociatedSplicingEvent()
self.splice_junctions = jd.AssociatedSplicingJunctions()
self.exon_region = jd.ExonRegionIDs()
self.exonid = jd.ExonID()
self.gene_id = jd.GeneID()
self.uid = jd.GeneID()+':'+jd.ExonRegionIDs()
def ExonAnnotations(self): return self.jd
def setLeftExonAnnotations(self,ld): self.gene_id,self.left_exon = ld
def LeftExonAnnotations(self): return self.left_exon
def setRightExonAnnotations(self,rd): self.secondary_geneid,self.right_exon = rd
def RightExonAnnotations(self): return self.right_exon
def setGeneID(self,geneid): self.gene_id = geneid
def GeneID(self): return self.gene_id
def setSecondaryGeneID(self,secondary_geneid): self.secondary_geneid = secondary_geneid
def SecondaryGeneID(self): return self.secondary_geneid
def setTransSplicing(self): self.trans_splicing = 'yes'
def TransSplicing(self): return self.trans_splicing
def SpliceSitesFound(self):
if self.jd != None: sites_found = 'both'
elif self.left_exon != None and self.right_exon != None: sites_found = 'both'
elif self.left_exon != None: sites_found = 'left'
elif self.right_exon != None: sites_found = 'right'
else: sites_found = None
return sites_found
def setConstitutive(self,constitutive): self.constitutive = constitutive
def Constitutive(self): return self.constitutive
def setAssociatedSplicingEvent(self,splice_events): self.splice_events = splice_events
def AssociatedSplicingEvent(self): return self.splice_events
def setAssociatedSplicingJunctions(self,splice_junctions): self.splice_junctions = splice_junctions
def AssociatedSplicingJunctions(self): return self.splice_junctions
def setExonID(self,exonid): self.exonid = exonid
def ExonID(self): return self.exonid
def setExonRegionID(self,exon_region): self.exon_region = exon_region
def ExonRegionID(self): return self.exon_region
def setUniqueID(self,uid): self.uid = uid
def UniqueID(self): return self.uid
def setLeftExonRegionData(self,li): self.li = li
def LeftExonRegionData(self): return self.li
def setRightExonRegionData(self,ri): self.ri = ri
def RightExonRegionData(self): return self.ri
def setNovel(self, side): self.side = side
def Novel(self): return self.side
def __repr__(self): return "JunctionData values"
def checkBEDFileFormat(bed_dir,root_dir):
""" This method checks to see if the BED files (junction or exon) have 'chr' proceeding the chr number.
It also checks to see if some files have two underscores and one has none or if double underscores are missing from all."""
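    # Expected naming convention (from the error messages below): when both exon and junction
    # BED files are supplied for the same sample, a double underscore links them to that sample,
    # e.g. cancer1__exon.bed and cancer1__junction.bed; mixing this style with un-linked file
    # names across samples is treated as an error.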
dir_list = read_directory(bed_dir)
x=0
break_now = False
chr_present = False
condition_db={}
for filename in dir_list:
fn=filepath(bed_dir+filename)
#if ('.bed' in fn or '.BED' in fn): delim = 'r'
delim = 'rU'
if '.tab' in string.lower(filename) or '.bed' in string.lower(filename) or '.junction_quantification.txt' in string.lower(filename):
condition_db[filename]=[]
for line in open(fn,delim).xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
if line[0] == '#': x=0 ### BioScope
elif x == 0: x=1 ###skip the first line
elif x < 10: ### Only check the first 10 lines
if 'chr' in line: ### Need to look at multiple input formats (chr could be in t[0] or t[1])
chr_present = True
x+=1
else:
break_now = True
break
if break_now == True:
break
### Check to see if exon.bed and junction.bed file names are propper or faulty (which will result in downstream errors)
double_underscores=[]
no_doubles=[]
for condition in condition_db:
if '__' in condition:
double_underscores.append(condition)
else:
no_doubles.append(condition)
exon_beds=[]
junctions_beds=[]
if len(double_underscores)>0 and len(no_doubles)>0:
### Hence, a problem is likely due to inconsistent naming
        print 'The input files appear to have inconsistent naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print 'Exiting AltAnalyze'; forceError
elif len(no_doubles)>0:
for condition in no_doubles:
condition = string.lower(condition)
if 'exon' in condition:
exon_beds.append(condition)
if 'junction' in condition:
junctions_beds.append(condition)
if len(exon_beds)>0 and len(junctions_beds)>0:
            print 'The input files appear to have inconsistent naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print 'Exiting AltAnalyze'; forceError
return chr_present
def getStrandMappingData(species):
splicesite_db={}
refExonCoordinateFile = unique.filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
firstLine=True
for line in open(refExonCoordinateFile,'rU').xreadlines():
if firstLine: firstLine=False
else:
line = line.rstrip('\n')
t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
geneID, exon, chr, strand, start, stop = t[:6]
splicesite_db[chr,int(start)]=strand
splicesite_db[chr,int(stop)]=strand
return splicesite_db
def importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,getReads=False,searchChr=None,getBiotype=None,testImport=False,filteredJunctions=None):
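    # Input formats recognized by the parsing below (detected from headers/extensions):
    # TopHat, HMMSplicer and SpliceMap junction BED files, BioScope junction and exon tab
    # files, STAR junction .tab files, TCGA .junction_quantification.txt files and
    # BEDTools coverageBed exon BED files (labelled 'TopHat-exon').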
dir_list = read_directory(bed_dir)
begin_time = time.time()
if 'chr' not in searchChr:
searchChr = 'chr'+searchChr
condition_count_db={}; neg_count=0; pos_count=0; junction_db={}; biotypes={}; algorithms={}; exon_len_db={}; splicesite_db={}
if testImport == 'yes': print "Reading user RNA-seq input data files"
for filename in dir_list:
count_db={}; rows=0
fn=filepath(bed_dir+filename)
condition = export.findFilename(fn)
if '__' in condition:
### Allow multiple junction files per sample to be combined (e.g. canonical and non-canonical junction alignments)
condition=string.split(condition,'__')[0]+filename[-4:]
if ('.bed' in fn or '.BED' in fn or '.tab' in fn or '.TAB' in fn or '.junction_quantification.txt' in fn) and '._' not in condition:
if ('.bed' in fn or '.BED' in fn): delim = 'r'
else: delim = 'rU'
### The below code removes .txt if still in the filename along with .tab or .bed
if '.tab' in fn: condition = string.replace(condition,'.txt','.tab')
elif '.bed' in fn: condition = string.replace(condition,'.txt','.bed')
if '.TAB' in fn: condition = string.replace(condition,'.txt','.TAB')
elif '.BED' in fn: condition = string.replace(condition,'.txt','.BED')
if testImport == 'yes': print "Reading the bed file", [fn], condition
            ### If the BED was manually created on a Mac, will need 'rU' - test this
for line in open(fn,delim).xreadlines(): break
if len(line)>500: delim = 'rU'
for line in open(fn,delim).xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
t = string.split(data,'\t')
rows+=1
if rows==1 or '#' == data[0]:
format_description = data
algorithm = 'Unknown'
if 'TopHat' in format_description: algorithm = 'TopHat'
elif 'HMMSplicer' in format_description: algorithm = 'HMMSplicer'
elif 'SpliceMap junctions' in format_description: algorithm = 'SpliceMap'
elif t[0] == 'E1': algorithm = 'BioScope-junction'
elif '# filterOrphanedMates=' in data or 'alignmentFilteringMode=' in data or '#number_of_mapped_reads=' in data:
algorithm = 'BioScope-exon'
elif '.junction_quantification.txt' in fn:
algorithm = 'TCGA format'
if 'barcode' in t: junction_position = 1
else: junction_position = 0
elif '.tab' in fn and len(t)==9:
try: start = float(t[1]) ### expect this to be a numerical coordinate
except Exception: continue
algorithm = 'STAR'
strand = '-' ### If no strand exists
rows=2 ### allows this first row to be processed
if len(splicesite_db)==0: ### get strand to pos info
splicesite_db = getStrandMappingData(species)
if testImport == 'yes': print condition, algorithm
if rows>1:
try:
if ':' in t[0]:
chr = string.split(t[0],':')[0]
else: chr = t[0]
if 'chr' not in chr:
chr = 'chr'+chr
if searchChr == chr or ('BioScope' in algorithm and searchChr == t[1]): proceed = True
elif searchChr == 'chrMT' and ('BioScope' not in algorithm):
if 'M' in chr: proceed = True
else: proceed = False
else: proceed = False
except IndexError:
print 'The input file:\n',filename
print 'is not formated as expected (format='+algorithm+').'
print 'search chromosome:',searchChr
print t; force_bad_exit
if proceed:
proceed = False
if '.tab' in fn or '.TAB' in fn:
### Applies to non-BED format Junction and Exon inputs (BioScope)
if 'BioScope' in algorithm:
if algorithm == 'BioScope-exon': ### Not BED format
chr,source,data_type,start,end,reads,strand,null,gene_info=t[:9]
if 'chr' not in chr: chr = 'chr'+chr
if data_type == 'exon': ### Can also be CDS
gene_info,test,rpkm_info,null = string.split(gene_info,';')
symbol = string.split(gene_info,' ')[-1]
#refseq = string.split(transcript_info,' ')[-1]
rpkm = string.split(rpkm_info,' ')[-1]
#if normalize_feature_exp == 'RPKM': reads = rpkm ### The RPKM should be adjusted +1 counts, so don't use this
biotype = 'exon'; biotypes[biotype]=[]
exon1_stop,exon2_start = int(start),int(end); junction_id=''
### Adjust exon positions - not ideal but necessary. Needed as a result of exon regions overlapping by 1nt (due to build process)
exon1_stop+=1; exon2_start-=1
#if float(reads)>4 or getReads:
proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
seq_length = abs(exon1_stop-exon2_start)
if algorithm == 'BioScope-junction':
chr = t[1]; strand = t[2]; exon1_stop = int(t[4]); exon2_start = int(t[8]); count_paired = t[17]; count_single = t[19]; score=t[21]
if 'chr' not in chr: chr = 'chr'+chr
try: exon1_start = int(t[3]); exon2_stop = int(t[9])
except Exception: pass ### If missing, these are not assigned
reads = str(int(float(count_paired))+int(float(count_single))) ### Users will either have paired or single read (this uses either)
biotype = 'junction'; biotypes[biotype]=[]; junction_id=''
if float(reads)>4 or getReads: proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
seq_length = abs(float(exon1_stop-exon2_start))
if 'STAR' in algorithm:
chr = t[0]; exon1_stop = int(t[1])-1; exon2_start = int(t[2])+1; strand=''
if 'chr' not in chr: chr = 'chr'+chr
reads = str(int(t[7])+int(t[6]))
biotype = 'junction'; biotypes[biotype]=[]; junction_id=''
if float(reads)>4 or getReads: proceed = True ### Added in version 2.0.9 to remove rare novel isoforms
if (chr,exon1_stop) in splicesite_db:
strand = splicesite_db[chr,exon1_stop]
elif (chr,exon2_start) in splicesite_db:
strand = splicesite_db[chr,exon2_start]
#else: proceed = False
seq_length = abs(float(exon1_stop-exon2_start))
if strand == '-': ### switch the orientation of the positions
exon1_stop,exon2_start=exon2_start,exon1_stop
exon1_start = exon1_stop; exon2_stop = exon2_start
#if 9996685==exon1_stop and 10002682==exon2_stop:
#print chr, strand, reads, exon1_stop, exon2_start,proceed;sys.exit()
else:
try:
if algorithm == 'TCGA format':
coordinates = string.split(t[junction_position],',')
try: chr,pos1,strand = string.split(coordinates[0],':')
except Exception: print t;sys.exit()
chr,pos2,strand = string.split(coordinates[1],':')
if 'chr' not in chr: chr = 'chr'+chr
pos2 = str(int(pos2)-1) ### This is the bed format conversion with exons of 0 length
exon1_start, exon2_stop = pos1, pos2
reads = t[junction_position+1]
junction_id = t[junction_position]
exon1_len=0; exon2_len=0
else:
### Applies to BED format Junction input
chr, exon1_start, exon2_stop, junction_id, reads, strand, null, null, null, null, lengths, null = t
if 'chr' not in chr: chr = 'chr'+chr
exon1_len,exon2_len=string.split(lengths,',')[:2]; exon1_len = int(exon1_len); exon2_len = int(exon2_len)
exon1_start = int(exon1_start); exon2_stop = int(exon2_stop)
biotype = 'junction'; biotypes[biotype]=[]
if strand == '-':
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
### Exons have the opposite order
a = exon1_start,exon1_stop; b = exon2_start,exon2_stop
exon1_stop,exon1_start = b; exon2_stop,exon2_start = a
else:
exon1_stop = exon1_start+exon1_len; exon2_start=exon2_stop-exon2_len+1
if float(reads)>4 or getReads: proceed = True
if algorithm == 'HMMSplicer':
if '|junc=' in junction_id: reads = string.split(junction_id,'|junc=')[-1]
else: proceed = False
if algorithm == 'SpliceMap':
if ')' in junction_id and len(junction_id)>1: reads = string.split(junction_id,')')[0][1:]
else: proceed = False
seq_length = abs(float(exon1_stop-exon2_start)) ### Junction distance
except Exception,e:
#print traceback.format_exc();sys.exit()
### Applies to BED format exon input (BEDTools export)
# bamToBed -i accepted_hits.bam -split| coverageBed -a stdin -b /home/nsalomonis/databases/Mm_Ensembl_exons.bed > day0_8B__exons.bed
try: chr, start, end, exon_id, null, strand, reads, bp_coverage, bp_total, percent_coverage = t
except Exception:
                                    print 'The file',fn,'does not appear to be properly formatted as input.'
print t; force_exception
if 'chr' not in chr: chr = 'chr'+chr
algorithm = 'TopHat-exon'; biotype = 'exon'; biotypes[biotype]=[]
exon1_stop,exon2_start = int(start),int(end); junction_id=exon_id; seq_length = float(bp_total)
if seq_length == 0:
seq_length = abs(float(exon1_stop-exon2_start))
### Adjust exon positions - not ideal but necessary. Needed as a result of exon regions overlapping by 1nt (due to build process)
exon1_stop+=1; exon2_start-=1
#if float(reads)>4 or getReads: ### Added in version 2.0.9 to remove rare novel isoforms
proceed = True
#else: proceed = False
if proceed:
if 'chr' not in chr:
chr = 'chr'+chr ### Add the chromosome prefix
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chr == 'M': chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if strand == '+': pos_count+=1
else: neg_count+=1
if getReads and seq_length>0:
if getBiotype == biotype:
if biotype == 'junction':
### We filtered for junctions>4 reads before, now we include all reads for expressed junctions
if (chr,exon1_stop,exon2_start) in filteredJunctions:
count_db[chr,exon1_stop,exon2_start] = reads
try: exon_len_db[chr,exon1_stop,exon2_start] = seq_length
except Exception: exon_len_db[chr,exon1_stop,exon2_start] = []
else:
count_db[chr,exon1_stop,exon2_start] = reads
try: exon_len_db[chr,exon1_stop,exon2_start] = seq_length
except Exception: exon_len_db[chr,exon1_stop,exon2_start] = []
elif seq_length>0:
if (chr,exon1_stop,exon2_start) not in junction_db:
ji = JunctionData(chr,strand,exon1_stop,exon2_start,junction_id,biotype)
junction_db[chr,exon1_stop,exon2_start] = ji
try: ji.setSeqLength(seq_length) ### If RPKM imported or calculated
except Exception: null=[]
try: ji.setExon1Start(exon1_start);ji.setExon2Stop(exon2_stop)
except Exception: null=[]
key = chr,exon1_stop,exon2_start
algorithms[algorithm]=[]
if getReads:
if condition in condition_count_db:
### combine the data from the different files for the same sample junction alignments
count_db1 = condition_count_db[condition]
for key in count_db:
if key not in count_db1: count_db1[key] = count_db[key]
else:
combined_counts = int(count_db1[key])+int(count_db[key])
count_db1[key] = str(combined_counts)
condition_count_db[condition]=count_db1
else:
try: condition_count_db[condition] = count_db
except Exception: null=[] ### Occurs for other text files in the directory that are not used for the analysis
end_time = time.time()
if testImport == 'yes': print 'Read coordinates imported in',int(end_time-begin_time),'seconds'
if getReads:
#print len(exon_len_db), getBiotype, 'read counts present for',algorithm
return condition_count_db,exon_len_db,biotypes,algorithms
else:
if testImport == 'yes':
if 'exon' not in biotypes and 'BioScope' not in algorithm:
print len(junction_db),'junctions present in',algorithm,'format BED files.' # ('+str(pos_count),str(neg_count)+' by strand).'
elif 'exon' in biotypes and 'BioScope' not in algorithm:
print len(junction_db),'sequence identifiers present in input files.'
else: print len(junction_db),'sequence identifiers present in BioScope input files.'
return junction_db,biotypes,algorithms
def importExonCoordinates(probeCoordinateFile,search_chr,getBiotype):
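    # Expected tab-delimited columns in the probe coordinate file (based on the parsing below):
    # probe_id, probeset_id, chromosome, strand, start, end and an optional biotype
    # ('exon' or 'junction'); when the biotype column is absent it is inferred from the
    # start-end span (a span greater than 25 nt is treated as a junction).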
probe_coordinate_db={}
junction_db={}
biotypes={}
x=0
fn=filepath(probeCoordinateFile)
for line in open(fn,'rU').xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
if x==0: x=1
else:
t = string.split(data,'\t')
probe_id = t[0]; probeset_id=t[1]; chr=t[2]; strand=t[3]; start=t[4]; end=t[5]
exon1_stop,exon2_start = int(start),int(end)
seq_length = abs(float(exon1_stop-exon2_start))
if 'chr' not in chr:
chr = 'chr'+chr ### Add the chromosome prefix
if chr == 'chrM': chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if search_chr == chr or search_chr == None:
try: biotype = t[6]
except Exception:
if seq_length>25:biotype = 'junction'
else: biotype = 'exon'
if strand == '-':
exon1_stop,exon2_start = exon2_start, exon1_stop ### this is their actual 5' -> 3' orientation
if biotype == 'junction':
exon1_start,exon2_stop = exon1_stop,exon2_start
else:
exon1_stop+=1; exon2_start-=1
biotypes[biotype]=[]
if getBiotype == biotype or getBiotype == None:
ji = JunctionData(chr,strand,exon1_stop,exon2_start,probe_id,biotype)
junction_db[chr,exon1_stop,exon2_start] = ji
try: ji.setSeqLength(seq_length) ### If RPKM imported or calculated
except Exception: null=[]
try: ji.setExon1Start(exon1_start);ji.setExon2Stop(exon2_stop)
except Exception: null=[]
probe_coordinate_db[probe_id] = chr,exon1_stop,exon2_start ### Import the expression data for the correct chromosomes with these IDs
return probe_coordinate_db, junction_db, biotypes
def importExpressionMatrix(exp_dir,root_dir,species,fl,getReads,search_chr=None,getBiotype=None):
""" Non-RNA-Seq expression data (typically Affymetrix microarray) import and mapping to an external probe-coordinate database """
begin_time = time.time()
condition_count_db={}; neg_count=0; pos_count=0; algorithms={}; exon_len_db={}
probe_coordinate_db, junction_db, biotypes = importExonCoordinates(fl.ExonMapFile(),search_chr,getBiotype)
x=0
fn=filepath(exp_dir)[:-1]
condition = export.findFilename(fn)
    ### If the BED was manually created on a Mac, will need 'rU' - test this
for line in open(fn,'rU').xreadlines(): ### changed rU to r to remove \r effectively, rather than read as end-lines
data = cleanUpLine(line)
t = string.split(data,'\t')
if '#' == data[0]: None
elif x==0:
if 'block' in t:
start_index = 7
else:
start_index = 1
headers = t[start_index:]
x=1
else:
proceed = 'yes' ### restrict by chromosome with minimum line parsing (unless we want counts instead)
probe_id=t[0]
if probe_id in probe_coordinate_db:
key = probe_coordinate_db[probe_id]
if getReads == 'no':
pass
else:
expression_data = t[start_index:]
i=0
for sample in headers:
if sample in condition_count_db:
count_db = condition_count_db[sample]
count_db[key] = expression_data[i]
exon_len_db[key]=[]
else:
count_db={}
count_db[key] = expression_data[i]
condition_count_db[sample] = count_db
exon_len_db[key]=[]
i+=1
algorithms['ProbeData']=[]
end_time = time.time()
if testImport == 'yes': print 'Probe data imported in',int(end_time-begin_time),'seconds'
if getReads == 'yes':
return condition_count_db,exon_len_db,biotypes,algorithms
else:
return junction_db,biotypes,algorithms
def adjustCounts(condition_count_db,exon_len_db):
for key in exon_len_db:
try:
null=exon_len_db[key]
for condition in condition_count_db:
count_db = condition_count_db[condition]
                try: read_count = float(count_db[key])+1 ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
except KeyError: read_count = 1 ###Was zero, but needs to be one for more realistic log2 fold calculations
count_db[key] = str(read_count) ### Replace original counts with adjusted counts
except Exception: null=[]
return condition_count_db
def calculateRPKM(condition_count_db,exon_len_db,biotype_to_examine):
"""Determines the total number of reads in a sample and then calculates RPMK relative to a pre-determined junction length (60).
60 was choosen, based on Illumina single-end read lengths of 35 (5 nt allowed overhand on either side of the junction)"""
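    # Worked example of the formula applied below (hypothetical numbers): a junction with an
    # adjusted read count of 20 in a sample with 10 million total counted reads gives
    #   RPKM = 1e9 * 20 / (1e7 * 60) = 33.3
    # since junctions use a fixed effective length of 60 nt; exons use their actual length.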
### Get the total number of mapped reads
mapped_reads={}
for condition in condition_count_db:
mapped_reads[condition]=0
count_db = condition_count_db[condition]
for key in count_db:
read_count = count_db[key]
mapped_reads[condition]+=float(read_count)
### Use the average_total_reads when no counts reported such that 0 counts are comparable
average_total_reads = 0
for i in mapped_reads:
average_total_reads+=mapped_reads[i]
if testImport == 'yes':
print 'condition:',i,'total reads:',mapped_reads[i]
average_total_reads = average_total_reads/len(condition_count_db)
if testImport == 'yes':
print 'average_total_reads:',average_total_reads
k=0
c=math.pow(10.0,9.0)
for key in exon_len_db:
try:
for condition in condition_count_db:
total_mapped_reads = mapped_reads[condition]
                try: read_count = float(condition_count_db[condition][key])+1 ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
except KeyError: read_count = 1 ###Was zero, but needs to be one for more realistic log2 fold calculations
if biotype_to_examine == 'junction': region_length = 60.0
else:
try: region_length = exon_len_db[key]
except Exception: continue ### This should only occur during testing (when restricting to one or few chromosomes)
                if read_count == 1: ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
rpkm = c*(float(read_count)/(float(average_total_reads)*region_length))
try:
if region_length == 0:
region_length = abs(int(key[2]-key[1]))
rpkm = c*(read_count/(float(total_mapped_reads)*region_length))
except Exception:
print condition, key
                    print 'Error Encountered... Exon or Junction of zero length encountered... RPKM failed... Exiting AltAnalyze.'
                    print 'This error may be due to inconsistent file naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print [read_count,total_mapped_reads,region_length];k=1; forceError
                condition_count_db[condition][key] = str(rpkm) ### Replace original counts with RPKM
except Exception:
if k == 1: kill
null=[]
return condition_count_db
def calculateGeneLevelStatistics(steady_state_export,species,expressed_gene_exon_db,normalize_feature_exp,array_names,fl,excludeLowExp=True,exportRPKMs=False):
global UserOptions; UserOptions = fl
exp_file = string.replace(steady_state_export,'-steady-state','')
if normalize_feature_exp == 'RPKM':
exp_dbase, all_exp_features, array_count = importRawCountData(exp_file,expressed_gene_exon_db,excludeLowExp=excludeLowExp)
steady_state_db = obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp,excludeLowExp=excludeLowExp); exp_dbase=[]
exportGeneCounts(steady_state_export,array_names,steady_state_db)
steady_state_db = calculateGeneRPKM(steady_state_db)
if exportRPKMs:
exportGeneCounts(steady_state_export,array_names,steady_state_db,dataType='RPKMs')
else:
exp_dbase, all_exp_features, array_count = importNormalizedCountData(exp_file,expressed_gene_exon_db)
steady_state_db = obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp); exp_dbase=[]
exportGeneCounts(steady_state_export,array_names,steady_state_db)
return steady_state_db, all_exp_features
def exportGeneCounts(steady_state_export,headers,gene_count_db,dataType='counts'):
### In addition to RPKM gene-level data, export gene level counts and lengths (should be able to calculate gene RPKMs from this file)
if dataType=='counts':
export_path = string.replace(steady_state_export,'exp.','counts.')
else:
export_path = steady_state_export
export_data = export.ExportFile(export_path)
title = string.join(['Ensembl']+headers,'\t')+'\n'
export_data.write(title)
for gene in gene_count_db:
sample_counts=[]
for count_data in gene_count_db[gene]:
try: read_count,region_length = count_data
except Exception: read_count = count_data
sample_counts.append(str(read_count))
sample_counts = string.join([gene]+sample_counts,'\t')+'\n'
export_data.write(sample_counts)
export_data.close()
def importGeneCounts(filename,import_type):
### Import non-normalized original counts and return the max value
counts_filename = string.replace(filename,'exp.','counts.')
status = verifyFile(counts_filename)
if status == 'not found': ### Occurs for non-normalized counts
counts_filename = filename
fn=filepath(counts_filename); x=0; count_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
gene = t[0]
if import_type == 'max':
count_db[gene] = str(max(map(float,t[1:])))
else:
count_db[gene] = map(float,t[1:])
return count_db,array_names
def calculateGeneRPKM(gene_count_db):
"""Determines the total number of reads in a sample and then calculates RPMK relative to a pre-determined junction length (60).
60 was choosen, based on Illumina single-end read lengths of 35 (5 nt allowed overhand on either side of the junction)"""
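    # Note on inputs (as produced by obtainGeneCounts): gene_count_db[gene] holds one
    # (read_count, region_length) tuple per sample; the loop below replaces these tuples with
    # per-sample gene RPKM values using the same 1e9 * reads / (total_reads * length) formula.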
### Get the total number of mapped reads (relative to all gene aligned rather than genome aligned exon reads)
mapped_reads={}
for gene in gene_count_db:
index=0
for (read_count,total_len) in gene_count_db[gene]:
try: mapped_reads[index]+=float(read_count)
except Exception: mapped_reads[index]=float(read_count)
index+=1
### Use the average_total_reads when no counts reported such that 0 counts are comparable
average_total_reads = 0
for i in mapped_reads: average_total_reads+=mapped_reads[i]
average_total_reads = average_total_reads/(index+1) ###
c=math.pow(10.0,9.0)
for gene in gene_count_db:
index=0; rpkms = []
for (read_count,region_length) in gene_count_db[gene]:
total_mapped_reads = mapped_reads[index]
#print [read_count],[region_length],[total_mapped_reads]
#if gene == 'ENSMUSG00000028186': print [read_count, index, total_mapped_reads,average_total_reads,region_length]
            if read_count == 0: read_count=1; rpkm = c*(float(read_count)/(float(average_total_reads)*region_length)) ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
else:
try: rpkm = c*(float(read_count+1)/(float(total_mapped_reads)*region_length)) ### read count is incremented +1 (see next line)
                except Exception: read_count=1; rpkm = c*(float(read_count)/(float(average_total_reads)*region_length)) ###This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
#if gene == 'ENSMUSG00000028186': print rpkm,read_count,index,total_mapped_reads,average_total_reads,region_length
#if gene == 'ENSMUSG00000026049': print gene_count_db[gene], mapped_reads[index], rpkm
rpkms.append(rpkm)
index+=1
        gene_count_db[gene] = rpkms ### Replace original counts with RPKM
return gene_count_db
def deleteOldAnnotations(species,root_dir,dataset_name):
db_dir = root_dir+'AltDatabase/'+species
try:
status = export.deleteFolder(db_dir)
if status == 'success':
print "...Previous experiment database deleted"
except Exception: null=[]
count_dir = root_dir+'ExpressionInput/Counts'
try: status = export.deleteFolder(count_dir)
except Exception: pass
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
export_path = root_dir+'ExpressionInput/'+dataset_name
try: os.remove(filepath(export_path))
except Exception: null=[]
try: os.remove(filepath(string.replace(export_path,'exp.','counts.')))
except Exception: null=[]
try: os.remove(filepath(string.replace(export_path,'exp.','novel.')))
except Exception: null=[]
from copy_reg import pickle
from types import MethodType
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
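# Note: these helpers only take effect once instance methods are registered with copy_reg,
# e.g. pickle(MethodType, _pickle_method, _unpickle_method); that registration is assumed to
# occur elsewhere in this module so that bound methods can be passed to worker processes.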
def call_it(instance, name, args=(), kwargs=None):
"indirect caller for instance methods and multiprocessing"
if kwargs is None:
kwargs = {}
return getattr(instance, name)(*args, **kwargs)
def alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset_name,Multi=None):
fl = exp_file_location_db[dataset_name]
try: multiThreading = fl.multiThreading()
except Exception: multiThreading = True
print 'multiThreading:',multiThreading
normalize_feature_exp = fl.FeatureNormalization()
testImport='no'
rnaseq_begin_time = time.time()
p = AlignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset_name,testImport)
chromosomes = p.getChromosomes()
### The following files need to be produced from chromosome specific sets later
countsFile = p.countsFile()
exonFile = p.exonFile()
junctionFile = p.junctionFile()
junctionCompFile = p.junctionCompFile()
novelJunctionAnnotations = p.novelJunctionAnnotations()
#chromosomes = ['chr1']
#p('chrY'); p('chr1'); p('chr2')
#chromosomes = ['chr8','chr17']
multiprocessing_pipe = True
if 'exp.' not in dataset_name:
dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name:
dataset_name+='.txt'
try:
mlp=Multi
pool_size = mlp.cpu_count()
print 'Using %d processes' % pool_size
if multiprocessing_pipe and multiThreading:
### This is like pool, but less efficient (needed to get print outs)
s = pool_size; b=0
chr_blocks=[]
while s<len(chromosomes):
chr_blocks.append(chromosomes[b:s])
b+=pool_size; s+=pool_size
chr_blocks.append(chromosomes[b:s])
queue = mlp.Queue()
results=[]
#parent_conn, child_conn=multiprocessing.Pipe()
for chromosomes in chr_blocks:
procs=list()
#print 'Block size:',len(chromosomes)
for search_chr in chromosomes:
proc = mlp.Process(target=p, args=(queue,search_chr)) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
procs.append(proc)
proc.start()
for _ in procs:
val = queue.get()
if p.AnalysisMode() == 'GUI': print '*',
results.append(val)
for proc in procs:
proc.join()
elif multiThreading:
pool = mlp.Pool(processes=pool_size)
chr_vars=[]
for search_chr in chromosomes:
chr_vars.append(([],search_chr)) ### As an alternative for the pipe version above, pass an empty list rather than queue
results = pool.map(p, chr_vars) ### worker jobs initiated in tandem
try:pool.close(); pool.join(); pool = None
except Exception: pass
else:
forceThreadingError
print 'Read exon and junction mapping complete'
except Exception,e:
#print e
        print 'Proceeding with single-processor alignment...'
try: proc.close; proc.join; proc = None
except Exception: pass
try: pool.close(); pool.join(); pool = None
except Exception: pass
results=[] ### For single-thread compatible versions of Python
for search_chr in chromosomes:
result = p([],search_chr)
results.append(result)
results_organized=[]
for result_set in results:
        if len(result_set[0])>0: ### Sometimes chromosomes are missing
biotypes = result_set[0]
results_organized.append(list(result_set[1:]))
pooled_results = [sum(value) for value in zip(*results_organized)] # combine these counts
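    # e.g. per-chromosome count lists [[10, 2, 0], [4, 1, 1]] are combined element-wise into
    # [14, 3, 1] (hypothetical values), giving genome-wide totals for the counts overview.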
pooled_results = [biotypes]+pooled_results
    p.setCountsOverview(pooled_results) # store as retrievable objects
catFiles(countsFile,'Counts')
catFiles(junctionFile,'junctions')
catFiles(exonFile,'exons')
catFiles(junctionCompFile,'comps')
catFiles(novelJunctionAnnotations,'denovo')
if normalize_feature_exp == 'RPKM':
fastRPKMCalculate(countsFile)
rnaseq_end_time = time.time()
print '...RNA-seq import completed in',int(rnaseq_end_time-rnaseq_begin_time),'seconds\n'
biotypes = p.outputResults()
return biotypes
def alignCoordinatesToGeneExternal(species,coordinates_to_annotate):
chr_strand_gene_dbs,location_gene_db,chromosomes,gene_location_db = getChromosomeStrandCoordinates(species,'no')
read_aligned_to_gene=0
for (chr,strand) in coordinates_to_annotate:
if (chr,strand) in chr_strand_gene_dbs:
chr_gene_locations = chr_strand_gene_dbs[chr,strand]
chr_reads = coordinates_to_annotate[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
            ### Set GeneID for each coordinate object (primary and secondary GeneIDs)
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,location_gene_db,chr_reads,'no',read_aligned_to_gene)
### Gene objects will be updated
def catFiles(outFileDir,folder):
""" Concatenate all the chromosomal files but retain only the first header """
root_dir = export.findParentDir(outFileDir)+folder+'/'
dir_list = read_directory(root_dir)
firstFile=True
with open(filepath(outFileDir), 'w') as outfile:
for fname in dir_list:
chr_file = root_dir+fname
header=True
with open(filepath(chr_file)) as infile:
for line in infile:
if header:
header=False
if firstFile:
outfile.write(line)
firstFile=False
else: outfile.write(line)
export.deleteFolder(root_dir)
def error(msg, *args):
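    """Route an error message through the multiprocessing logger (used to capture worker tracebacks)."""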
return multiprocessing.get_logger().error(msg, *args)
class AlignExonsAndJunctionsToEnsembl:
def setCountsOverview(self, overview):
self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count = overview
def getChromosomes(self):
chr_list=list()
for c in self.chromosomes:
            ### Sort chromosomes by integer number
ci=string.replace(c,'chr','')
try: ci = int(ci)
except Exception: pass
chr_list.append((ci,c))
chr_list.sort()
chr_list2=list()
for (i,c) in chr_list: chr_list2.append(c) ### sorted
return chr_list2
def countsFile(self):
return string.replace(self.expfile,'exp.','counts.')
def junctionFile(self):
junction_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_Ensembl_junctions.txt'
return junction_file
def exonFile(self):
exon_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_Ensembl_exons.txt'
return exon_file
def junctionCompFile(self):
junction_comp_file = self.root_dir+'AltDatabase/'+self.species+'/RNASeq/'+self.species + '_junction_comps_updated.txt'
return junction_comp_file
def novelJunctionAnnotations(self):
junction_annotation_file = self.root_dir+'AltDatabase/ensembl/'+self.species+'/'+self.species + '_alternative_junctions_de-novo.txt'
return junction_annotation_file
def AnalysisMode(self): return self.analysisMode
def __init__(self,species,exp_file_location_db,dataset_name,testImport):
self.species = species; self.dataset_name = dataset_name
self.testImport = testImport
fl = exp_file_location_db[dataset_name]
bed_dir=fl.BEDFileDir()
root_dir=fl.RootDir()
#self.stdout = fl.STDOUT()
try: platformType = fl.PlatformType()
except Exception: platformType = 'RNASeq'
try: analysisMode = fl.AnalysisMode()
except Exception: analysisMode = 'GUI'
### This occurs when run using the BAMtoBED pipeline in the GUI
if 'exp.' not in dataset_name:
dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name:
dataset_name+='.txt'
self.dataset_name = dataset_name
### Import experimentally identified junction splice-sites
normalize_feature_exp = fl.FeatureNormalization()
if platformType == 'RNASeq':
chr_status = checkBEDFileFormat(bed_dir,root_dir) ### If false, need to remove 'chr' from the search_chr
else:
chr_status = True
#self.fl = fl # Can not pass this object in pool or it breaks
self.platformType = platformType
self.analysisMode = analysisMode
self.root_dir = root_dir
self.normalize_feature_exp = normalize_feature_exp
self.bed_dir = bed_dir
self.chr_status = chr_status
self.exonBedBuildStatus = fl.ExonBedBuildStatus()
self.expfile = root_dir+'ExpressionInput/'+dataset_name
if testImport == 'yes':
print 'Chromosome annotation detected =',chr_status
#if self.exonBedBuildStatus == 'yes':
reformatExonFile(species,'exon',chr_status) ### exports BED format exons for exon expression extraction
"""
Strategies to reduce memory in RNASeq:
1) (done)Delete old AltDatabase-local version if it exists before starting
2) (done)Check to see if a file exists before writing it and if so append rather than create
3) (done)Get counts last and normalize last in for exons and junctions separately.
4) (done)Delete objects explicitly before importing any new data (define a new function that just does this).
5) (done)Get all chromosomes first then parse exon and junction coordinate data on a per known chromosome basis.
        6) (done)Prior to deleting all junction/exon object info for each chromosome, save the coordinate(key)-to-annotation information for the read count export file."""
### Delete any existing annotation databases that currently exist (redundant with below)
deleteOldAnnotations(species,root_dir,dataset_name)
###Define variables to report once reads for all chromosomes have been aligned
#global self.known_count; global self.novel_junction_count; global self.one_found; global self.not_found; global self.both_found; global self.trans_splicing_reads
#global self.junctions_without_exon_gene_alignments; global self.exons_without_gene_alignment_count; global self.junction_simple_db; global self.chr_strand_gene_dbs
self.known_count=0; self.novel_junction_count=0; self.one_found=0; self.not_found=0; self.both_found=0; self.trans_splicing_reads=0
self.junctions_without_exon_gene_alignments=0; self.exons_without_gene_alignment_count=0; self.junction_simple_db={}
###Begin Chromosome specific read to exon alignments
self.chr_strand_gene_dbs,self.location_gene_db,chromosomes,self.gene_location_db = getChromosomeStrandCoordinates(species,testImport)
self.chromosomes = chromosomes
print "Processing exon/junction coordinates sequentially by chromosome"
        print "Note: this step is time intensive (can be hours) and no print statements may appear for a while"
def outputResults(self):
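        """Write the gene-level entries needed for RPKM gene expression, print alignment summary statistics
        and return the biotypes analyzed (or the updated BED file path when novel exon BED building is enabled)."""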
exportDatasetLinkedGenes(self.species,self.gene_location_db,self.root_dir) ### Include an entry for gene IDs to include constitutive expression for RPKM normalized data
chr_gene_locations=[]; self.location_gene_db=[]; self.chr_strand_gene_dbs=[]
#print 'user coordinates imported/processed'
#print 'Importing read counts from coordinate data...'
biotypes = self.biotypes_store
### Output summary statistics
if self.normalize_feature_exp != 'none':
print self.normalize_feature_exp, 'normalization complete'
if 'junction' in biotypes:
print 'Imported Junction Statistics:'
print ' ',self.known_count, 'junctions found in Ensembl/UCSC and',self.novel_junction_count,'are novel'
print ' ',self.trans_splicing_reads,'trans-splicing junctions found (two aligning Ensembl genes)'
print ' ',self.junctions_without_exon_gene_alignments, 'junctions where neither splice-site aligned to a gene'
if (float(self.known_count)*10)<float(self.novel_junction_count):
print '\nWARNING!!!!! Few junctions aligned to known exons. Ensure that the AltAnalyze Ensembl database\nversion matches the genome build aligned to!\n'
if 'exon' in biotypes:
print 'Imported Exon Statistics:'
            print ' ',self.exons_without_gene_alignment_count, 'exons where neither end aligned to a gene'
print 'User databases and read counts written to:', self.root_dir[:-1]+'ExpressionInput'
### END CHROMOSOME SPECIFIC ANALYSES
if self.exonBedBuildStatus == 'yes':
bedfile = moveBAMtoBEDFile(self.species,self.dataset_name,self.root_dir)
print 'Exon BED file updated with novel exon predictions from junction file'
return bedfile; sys.exit()
clearObjectsFromMemory(self.junction_simple_db); self.junction_simple_db=[]
return biotypes
def test(self, search_chr):
print search_chr
def __call__(self, queue, search_chr):
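        """Process a single chromosome: import its BED coordinates, align junctions/exons to Ensembl
        annotations, export chromosome-specific annotation and count files, and report summary counts
        (returned via the multiprocessing queue when one is supplied)."""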
try:
#sys.stdout = self.stdout
platformType = self.platformType
testImport = self.testImport
species = self.species
dataset_name = self.dataset_name
platformType = self.platformType
analysisMode = self.analysisMode
root_dir = self.root_dir
normalize_feature_exp = self.normalize_feature_exp
bed_dir = self.bed_dir
chr_status = self.chr_status
junction_annotations={}
if chr_status == False:
searchchr = string.replace(search_chr,'chr','')
else:
searchchr = search_chr
if platformType == 'RNASeq':
junction_db,biotypes,algorithms = importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,searchChr=searchchr,testImport=testImport)
else:
normalize_feature_exp = 'quantile'
junction_db,biotypes,algorithms = importExpressionMatrix(bed_dir,root_dir,species,fl,'no',search_chr=searchchr)
self.biotypes_store = biotypes
if len(junction_db)>0:
### Determine which kind of data is being imported, junctions, exons or both
unmapped_exon_db={}
if 'junction' in biotypes:
### Get all known junction splice-sites
ens_junction_coord_db = importExonAnnotations(species,'junction_coordinates',search_chr)
if testImport == 'yes':
print len(ens_junction_coord_db),'Ensembl/UCSC junctions imported'
### Identify known junctions sites found in the experimental dataset (perfect match)
novel_junction_db={}; novel_exon_db={}
for key in junction_db:
ji=junction_db[key]
if ji.BioType()=='junction':
if key in ens_junction_coord_db:
jd=ens_junction_coord_db[key]
ji.setExonAnnotations(jd)
self.known_count+=1
else:
novel_junction_db[key]=junction_db[key]; self.novel_junction_count+=1
#if 75953254 in key: print key; sys.exit()
else:
unmapped_exon_db[key]=junction_db[key]
ens_exon_db = importExonAnnotations(species,'exon',search_chr)
if 'junction' in biotypes:
if testImport == 'yes':
print self.known_count, 'junctions found in Ensembl/UCSC and',len(novel_junction_db),'are novel.'
### Separate each junction into a 5' and 3' splice site (exon1_coord_db and exon2_coord_db)
exon1_coord_db={}; exon2_coord_db={}
for (chr,exon1_stop,exon2_start) in ens_junction_coord_db:
jd = ens_junction_coord_db[(chr,exon1_stop,exon2_start)]
exon1_coord_db[chr,exon1_stop] = jd.GeneID(),string.split(jd.ExonRegionIDs(),'-')[0]
exon2_coord_db[chr,exon2_start] = jd.GeneID(),string.split(jd.ExonRegionIDs(),'-')[1]
clearObjectsFromMemory(ens_junction_coord_db); ens_junction_coord_db=[] ### Clear object from memory
### Get and re-format individual exon info
exon_region_db={}
#if 'exon' not in biotypes:
for gene in ens_exon_db:
for rd in ens_exon_db[gene]:
exon_region_db[gene,rd.ExonRegionIDs()]=rd
### Add the exon annotations from the known junctions to the exons to export dictionary
exons_to_export={}
for key in junction_db:
ji=junction_db[key]
if ji.ExonAnnotations() != None:
jd = ji.ExonAnnotations()
exon1, exon2 = string.split(jd.ExonRegionIDs(),'-')
key1 = jd.GeneID(),exon1; key2 = jd.GeneID(),exon2
exons_to_export[key1] = exon_region_db[key1]
exons_to_export[key2] = exon_region_db[key2]
### For novel experimental junctions, identify those with at least one matching known 5' or 3' site
exons_not_identified = {}; novel_exon_coordinates={}
for (chr,exon1_stop,exon2_start) in novel_junction_db:
ji = novel_junction_db[(chr,exon1_stop,exon2_start)]
coord = [exon1_stop,exon2_start]; coord.sort()
if (chr,exon1_stop) in exon1_coord_db and (chr,exon2_start) in exon2_coord_db:
### Assign exon annotations to junctions where both splice-sites are known in Ensembl/UCSC
### Store the exon objects, genes and regions (le is a tuple of gene and exon region ID)
### Do this later for the below un-assigned exons
le=exon1_coord_db[(chr,exon1_stop)]; ji.setLeftExonAnnotations(le); ji.setLeftExonRegionData(exon_region_db[le])
re=exon2_coord_db[(chr,exon2_start)]; ji.setRightExonAnnotations(re); ji.setRightExonRegionData(exon_region_db[re])
if le[0] != re[0]: ### Indicates Trans-splicing (e.g., chr7:52,677,568-52,711,750 mouse mm9)
ji.setTransSplicing(); #print exon1_stop,le,exon2_start,re,ji.Chr(),ji.Strand()
self.both_found+=1; #print 'five',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
else:
if (chr,exon1_stop) in exon1_coord_db: ### hence, exon1_stop is known, so report the coordinates of exon2 as novel
le=exon1_coord_db[(chr,exon1_stop)]; ji.setLeftExonAnnotations(le)
self.one_found+=1; #print 'three',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
novel_exon_coordinates[ji.Chr(),exon2_start] = ji,'left',ji.Exon2Stop() ### Employ this strategy to avoid duplicate exons with differing lengths (mainly an issue if analyzing only exons results)
ji.setNovel('side')
elif (chr,exon2_start) in exon2_coord_db: ### hence, exon2_start is known, so report the coordinates of exon1 as novel
re=exon2_coord_db[(chr,exon2_start)]; ji.setRightExonAnnotations(re) ### In very rare cases, a gene can be assigned here, even though the splice-site is on the opposite strand (not worthwhile filtering out)
self.one_found+=1; #print 'three',(chr,exon1_stop,exon2_start),exon1_coord_db[(chr,exon1_stop)]
novel_exon_coordinates[ji.Chr(),exon1_stop] = ji,'right',ji.Exon1Start()
ji.setNovel('side')
else:
self.not_found+=1; #if self.not_found < 10: print (chr,exon1_stop,exon2_start)
novel_exon_coordinates[ji.Chr(),exon1_stop] = ji,'right',ji.Exon1Start()
novel_exon_coordinates[ji.Chr(),exon2_start] = ji,'left',ji.Exon2Stop()
ji.setNovel('both')
                            ### We examine reads where one splice-site aligns to a known exon but the other does not, to determine if trans-splicing occurs
try: exons_not_identified[chr,ji.Strand()].append((coord,ji))
except KeyError: exons_not_identified[chr,ji.Strand()] = [(coord,ji)]
"""
if fl.ExonBedBuildStatus() == 'no':
exportNovelJunctions(species,novel_junction_db,condition_count_db,root_dir,dataset_name,'junction') ### Includes known exons
"""
#print self.both_found, ' where both and', self.one_found, 'where one splice-site are known out of',self.both_found+self.one_found+self.not_found
#print 'Novel junctions where both splice-sites are known:',self.both_found
#print 'Novel junctions where one splice-site is known:',self.one_found
#print 'Novel junctions where the splice-sites are not known:',self.not_found
clearObjectsFromMemory(exon_region_db); exon_region_db=[] ### Clear memory of this object
read_aligned_to_gene=0
for (chr,strand) in exons_not_identified:
if (chr,strand) in self.chr_strand_gene_dbs:
chr_gene_locations = self.chr_strand_gene_dbs[chr,strand]
chr_reads = exons_not_identified[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
                            ### Set GeneID for each coordinate object (primary and secondary GeneIDs)
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,self.location_gene_db,chr_reads,'no',read_aligned_to_gene)
#print read_aligned_to_gene, 'novel junctions aligned to Ensembl genes out of',self.one_found+self.not_found
clearObjectsFromMemory(exons_not_identified); exons_not_identified=[] ## Clear memory of this object
for key in novel_junction_db:
(chr,exon1_stop,exon2_start) = key
ji=novel_junction_db[key]
if ji.GeneID() == None:
try:
if ji.SecondaryGeneID() != None:
### Occurs if mapping is to the 5'UTR of a gene for the left splice-site (novel alternative promoter)
ji.setGeneID(ji.SecondaryGeneID()); ji.setSecondaryGeneID(''); #print key, ji.GeneID(), ji.Strand(), ji.SecondaryGeneID()
except Exception: null=[]
if ji.GeneID() != None:
geneid = ji.GeneID()
proceed = 'no'
if ji.SpliceSitesFound() == None: proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
elif ji.SpliceSitesFound() == 'left': proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
elif ji.SpliceSitesFound() == 'right': proceed = 'yes'; coordinates = [exon1_stop,exon2_start]
if proceed == 'yes':
for coordinate in coordinates:
if ji.TransSplicing() == 'yes':
#print ji.Chr(),ji.GeneID(), ji.SecondaryGeneID(), ji.Exon1Stop(), ji.Exon2Start()
self.trans_splicing_reads+=1
if ji.checkExonPosition(coordinate) == 'right': geneid = ji.SecondaryGeneID()
exon_data = (coordinate,ji.Chr()+'-'+str(coordinate),'novel')
try: novel_exon_db[geneid].append(exon_data)
except KeyError: novel_exon_db[geneid] = [exon_data]
else:
### write these out
self.junctions_without_exon_gene_alignments+=1
### Remove redundant exon entries and store objects
for key in novel_exon_db:
exon_data_objects=[]
exon_data_list = unique.unique(novel_exon_db[key])
exon_data_list.sort()
for e in exon_data_list:
ed = ExonInfo(e[0],e[1],e[2])
exon_data_objects.append(ed)
novel_exon_db[key] = exon_data_objects
#print self.trans_splicing_reads,'trans-splicing junctions found (two aligning Ensembl genes).'
#print self.junctions_without_exon_gene_alignments, 'junctions where neither splice-site aligned to a gene'
#if 'X' in search_chr: print len(ens_exon_db),len(ens_exon_db['ENSMUSG00000044424'])
alignReadsToExons(novel_exon_db,ens_exon_db,testImport=testImport)
### Link exon annotations up with novel junctions
junction_region_db,exons_to_export = annotateNovelJunctions(novel_junction_db,novel_exon_db,exons_to_export)
                    ### Add the exon region data from known Ensembl/UCSC matched junctions to junction_region_db for reciprocal junction analysis
for key in junction_db:
ji=junction_db[key]; jd = ji.ExonAnnotations()
try:
uid = jd.GeneID()+':'+jd.ExonRegionIDs(); ji.setUniqueID(uid)
try: junction_region_db[jd.GeneID()].append((formatID(uid),jd.ExonRegionIDs()))
except KeyError: junction_region_db[jd.GeneID()] = [(formatID(uid),jd.ExonRegionIDs())]
except AttributeError: null=[] ### Occurs since not all entries in the dictionary are perfect junction matches
try: novel_exon_coordinates = collapseNoveExonBoundaries(novel_exon_coordinates,root_dir+dataset_name) ### Joins inferred novel exon-IDs (5' and 3' splice sites) from adjacent and close junction predictions
except Exception: pass ### No errors encountered before
#if self.exonBedBuildStatus == 'yes':
### Append to the exported BED format exon coordinate file
bedfile = exportNovelExonToBedCoordinates(species,novel_exon_coordinates,chr_status,searchChr=searchchr)
                    ### Identify reciprocal junctions and retrieve splice-event annotations for exons and inclusion junctions
junction_annotations,critical_exon_annotations = JunctionArray.inferJunctionComps(species,('RNASeq',junction_region_db,root_dir),searchChr=searchchr)
clearObjectsFromMemory(junction_region_db); junction_region_db=[]
                    ### Reformat these dictionaries to combine annotations from multiple reciprocal junctions
junction_annotations = combineExonAnnotations(junction_annotations)
critical_exon_annotations = combineExonAnnotations(critical_exon_annotations)
if 'exon' in biotypes:
if testImport == 'yes':
print len(unmapped_exon_db),'exon genomic locations imported.'
### Create a new dictionary keyed by chromosome and strand
exons_not_aligned={}
for (chr,exon1_stop,exon2_start) in unmapped_exon_db:
ji = unmapped_exon_db[(chr,exon1_stop,exon2_start)]
coord = [exon1_stop,exon2_start]; coord.sort()
try: exons_not_aligned[chr,ji.Strand()].append((coord,ji))
except KeyError: exons_not_aligned[chr,ji.Strand()] = [(coord,ji)]
read_aligned_to_gene=0
for (chr,strand) in exons_not_aligned:
if (chr,strand) in self.chr_strand_gene_dbs:
chr_gene_locations = self.chr_strand_gene_dbs[chr,strand]
chr_reads = exons_not_aligned[chr,strand]
chr_gene_locations.sort(); chr_reads.sort()
read_aligned_to_gene=geneAlign(chr,chr_gene_locations,self.location_gene_db,chr_reads,'no',read_aligned_to_gene)
#print read_aligned_to_gene, 'exons aligned to Ensembl genes out of',self.one_found+self.not_found
align_exon_db={}; exons_without_gene_alignments={}; multigene_exon=0
for key in unmapped_exon_db:
(chr,exon1_stop,exon2_start) = key
ji=unmapped_exon_db[key]
if ji.GeneID() == None:
try:
if ji.SecondaryGeneID() != None:
### Occurs if mapping outside known exon boundaries for one side of the exon
ji.setGeneID(ji.SecondaryGeneID()); ji.setSecondaryGeneID(''); #print key, ji.GeneID(), ji.Strand(), ji.SecondaryGeneID()
except Exception: null=[]
else:
if 'ENS' in ji.JunctionID():
                                if ji.GeneID() not in ji.JunctionID(): ### Hence, there were probably two overlapping Ensembl genes and the wrong one was assigned based on the initial annotations
original_geneid = string.split(ji.JunctionID(),':')[0]
if original_geneid in ens_exon_db: ji.setGeneID(original_geneid) #check if in ens_exon_db (since chromosome specific)
if ji.GeneID() != None:
geneid = ji.GeneID()
coordinates = [exon1_stop,exon2_start]
for coordinate in coordinates:
if ji.TransSplicing() != 'yes': ### This shouldn't occur for exons
exon_data = (coordinate,ji.Chr()+'-'+str(coordinate),'novel')
try: align_exon_db[geneid].append(exon_data)
except KeyError: align_exon_db[geneid] = [exon_data]
else:
                                    multigene_exon+=1 ### Shouldn't occur due to a fix in the gene-alignment method which will find the correct gene on the 2nd iteration
else: exons_without_gene_alignments[key]=ji; self.exons_without_gene_alignment_count+=1
### Remove redundant exon entries and store objects (this step may be unnecessary)
for key in align_exon_db:
exon_data_objects=[]
exon_data_list = unique.unique(align_exon_db[key])
exon_data_list.sort()
for e in exon_data_list:
ed = ExonInfo(e[0],e[1],e[2])
exon_data_objects.append(ed)
align_exon_db[key] = exon_data_objects
#print self.exons_without_gene_alignment_count, 'exons where neither aligned to a gene'
#if self.exons_without_gene_alignment_count>3000: print 'NOTE: Poor mapping of these exons may be due to an older build of\nEnsembl than the current version. Update BAMtoBED mappings to correct.'
begin_time = time.time()
alignReadsToExons(align_exon_db,ens_exon_db)
end_time = time.time()
if testImport == 'yes':
print 'Exon sequences aligned to exon regions in',int(end_time-begin_time),'seconds'
### Combine the start and end region alignments into a single exon annotation entry
combineDetectedExons(unmapped_exon_db,align_exon_db,novel_exon_db)
clearObjectsFromMemory(unmapped_exon_db); clearObjectsFromMemory(align_exon_db); clearObjectsFromMemory(novel_exon_db)
unmapped_exon_db=[]; align_exon_db=[]; novel_exon_db=[]
"""
if fl.ExonBedBuildStatus() == 'no':
exportNovelJunctions(species,exons_without_gene_alignments,condition_count_db,root_dir,dataset_name,'exon') ### Includes known exons
"""
clearObjectsFromMemory(exons_without_gene_alignments); exons_without_gene_alignments=[]
### Export both exon and junction annotations
if 'junction' in biotypes:
### Export the novel user exon annotations
exportDatasetLinkedExons(species,exons_to_export,critical_exon_annotations,root_dir,testImport=testImport,searchChr=searchchr)
### Export the novel user exon-junction annotations (original junction_db objects updated by above processing)
exportDatasetLinkedJunctions(species,junction_db,junction_annotations,root_dir,testImport=testImport,searchChr=searchchr)
### Clear memory once results are exported (don't want to delete actively used objects)
if 'junction' in biotypes:
clearObjectsFromMemory(exons_to_export); clearObjectsFromMemory(critical_exon_annotations)
clearObjectsFromMemory(novel_junction_db); novel_junction_db=[]
clearObjectsFromMemory(novel_exon_coordinates); novel_exon_coordinates=[]
exons_to_export=[]; critical_exon_annotations=[]
clearObjectsFromMemory(exon1_coord_db); clearObjectsFromMemory(exon2_coord_db)
exon1_coord_db=[]; exon2_coord_db=[]
if 'exon' in biotypes:
clearObjectsFromMemory(exons_not_aligned); exons_not_aligned=[]
clearObjectsFromMemory(ens_exon_db); ens_exon_db=[]
                ### Add chromosome-specific junction_db data to a simple whole-genome dictionary
for key in junction_db:
ji = junction_db[key]
if ji.GeneID()!=None and ji.UniqueID()!=None: self.junction_simple_db[key]=ji.UniqueID()
#returnLargeGlobalVars()
clearObjectsFromMemory(junction_db); clearObjectsFromMemory(junction_annotations)
junction_db=[]; junction_annotations=[]; chr_reads=[]
for biotype in biotypes:
### Import Read Counts (do this last to conserve memory)
if platformType == 'RNASeq':
condition_count_db,exon_len_db,biotypes2,algorithms = importBEDFile(bed_dir,root_dir,species,normalize_feature_exp,getReads=True,searchChr=searchchr,getBiotype=biotype,testImport=testImport,filteredJunctions=self.junction_simple_db)
else:
condition_count_db,exon_len_db,biotypes2,algorithms = importExpressionMatrix(bed_dir,root_dir,species,fl,'yes',getBiotype=biotype)
###First export original counts, rather than quantile normalized or RPKM
self.exportJunctionCounts(species,self.junction_simple_db,exon_len_db,condition_count_db,root_dir,dataset_name,biotype,'counts',searchChr=searchchr)
clearObjectsFromMemory(condition_count_db); clearObjectsFromMemory(exon_len_db); condition_count_db=[]; exon_len_db=[]
if analysisMode == 'commandline':
print 'finished parsing data for chromosome:',search_chr ### Unix platforms are not displaying the progress in real-time
else:
pass #print "*",
try: queue.put([self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count])
except Exception:
### If queue is not a multiprocessing object
queue = [self.biotypes_store, self.known_count, self.novel_junction_count, self.trans_splicing_reads, self.junctions_without_exon_gene_alignments, self.exons_without_gene_alignment_count]
return queue
except Exception:
print traceback.format_exc()
error(traceback.format_exc())
multiprocessing.log_to_stderr().setLevel(logging.DEBUG)
raise
def exportJunctionCounts(self,species,junction_simple_db,exon_len_db,condition_count_db,root_dir,dataset_name,biotype,count_type,searchChr=None):
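        """Write per-sample read counts for each aligned feature to the (chromosome-specific) counts file,
        appending the genomic coordinates to the feature ID when exporting raw counts."""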
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
export_path = root_dir+'ExpressionInput/'+dataset_name
if count_type == 'counts':
export_path = string.replace(export_path,'exp.','counts.') ### separately export counts
if searchChr !=None:
export_path = string.replace(export_path,'ExpressionInput','ExpressionInput/Counts')
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
self.countsFile = export_path
if self.testImport == 'yes':
print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
title = ['AltAnalyze_ID']
for condition in condition_count_db: title.append(condition)
export_data.write(string.join(title,'\t')+'\n')
for key in self.junction_simple_db:
chr,exon1_stop,exon2_start = key
if biotype == 'junction':
coordinates = chr+':'+str(exon1_stop)+'-'+str(exon2_start)
elif biotype == 'exon':
coordinates = chr+':'+str(exon1_stop-1)+'-'+str(exon2_start+1)
try:
null=exon_len_db[key]
if count_type == 'counts': values = [self.junction_simple_db[key]+'='+coordinates]
else: values = [self.junction_simple_db[key]]
for condition in condition_count_db: ###Memory crash here
count_db = condition_count_db[condition]
try: read_count = count_db[key]
except KeyError: read_count = '0'
values.append(read_count)
export_data.write(string.join(values,'\t')+'\n')
except Exception: null=[]
export_data.close()
def countsDir(self):
return self.countsFile
def calculateRPKMsFromGeneCounts(filename,species,AdjustExpression):
""" Manual way of calculating gene RPKMs from gene counts only """
gene_lengths = getGeneExonLengths(species)
fastRPKMCalculate(filename,GeneLengths=gene_lengths,AdjustExpression=AdjustExpression)
def fastRPKMCalculate(counts_file,GeneLengths=None,AdjustExpression=True):
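    """Convert a counts file into RPKM values written to the matching exp. file.
    For each feature, RPKM = 1e9*r/(t*l), where r is the feature read count (offset by 1 when
    AdjustExpression is True), t is the sample's total feature counts (junction and exon totals are
    tracked separately) and l is the feature length in bases (fixed at 60 for junctions). When a
    feature's adjusted count is 1, the average total across samples is substituted for t to give more
    stable fold changes. Illustrative example (not from any dataset): 500 reads on a 1,000 bp exon in a
    sample with 10 million exon reads gives 1e9*500/(1e7*1000) = 50 RPKM."""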
export_path = string.replace(counts_file,'counts.','exp.')
export_data = export.ExportFile(export_path) ### Write this new file
fn=filepath(counts_file); header=True
exon_sum_array=[]; junction_sum_array=[]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
exon_sum_array=[0]*len(samples)
junction_sum_array=[0]*len(samples)
else:
try: values = map(float,t[1:])
except Exception:
print traceback.format_exc()
print t
badCountsLine
### get the total reads/sample
if '-' in string.split(t[0],'=')[0]:
junction_sum_array = [sum(value) for value in zip(*[junction_sum_array,values])]
else:
exon_sum_array = [sum(value) for value in zip(*[exon_sum_array,values])]
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides warnings associated with Scipy for n=1 sample comparisons
        jatr=Average(junction_sum_array) # Average of the total mapped reads
        eatr=Average(exon_sum_array) # Average of the total mapped reads
if AdjustExpression:
offset = 1
else:
offset = 0
header=True
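    ### 10^9 scaling factor for RPKM: 1,000 (per kilobase of feature) x 1,000,000 (per million mapped reads)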
c=math.pow(10.0,9.0)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
export_data.write(line) ### Write header
header=False
else:
try:
exon_id,coordinates = string.split(t[0],'=')
coordinates = string.split(coordinates,':')[1]
coordinates = string.split(coordinates,'-')
                l=abs(int(coordinates[1])-int(coordinates[0])) ### feature length from the coordinates (overridden to 60 for junctions below)
except Exception: ### Manual way of calculating gene RPKMs from gene counts only
exon_id = t[0]
try: l = GeneLengths[exon_id]
except Exception: continue #Occurs when Ensembl genes supplied from an external analysis
try: read_counts = map(lambda x: int(x)+offset, t[1:])
except Exception: read_counts = map(lambda x: int(float(x))+offset, t[1:])
if '-' in exon_id:
count_stats = zip(read_counts,junction_sum_array)
atr = jatr
l=60
else:
count_stats = zip(read_counts,exon_sum_array)
atr = eatr
values=[]
            #rpkm = map(lambda (r,t): c*(r/(t*l)), count_stats) ### Efficient way to convert to rpkm, but doesn't work for 0 counts
for (r,t) in count_stats:
                if r == 1: ### This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
t = atr
try:
rpkm = str(c*(r/(t*l)))
#print c,r,t,l,exon_id,rpkm;sys.exit()
values.append(rpkm)
except Exception,e:
print e
print t[0]
                    print 'Error Encountered... Exon or Junction of zero length encountered... RPKM failed... Exiting AltAnalyze.'
                    print 'This error may be due to inconsistent file naming. If both exon and junction sample data are present, make sure they are named properly.'
print 'For example: cancer1__exon.bed, cancer1__junction.bed (double underscore required to match these samples up)!'
print [r,t,l];k=1; forceError
values = string.join([exon_id]+values,'\t')+'\n'
export_data.write(values)
export_data.close()
def mergeCountFiles(counts_file1,counts_file2):
### Used internally to merge count files that are very large and too time-consuming to recreate (regenerate them)
export_path = string.replace(counts_file2,'counts.','temp-counts.')
export_data = export.ExportFile(export_path) ### Write this new file
fn=filepath(counts_file1); header=True
count_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
si = samples.index('H9.102.2.5.bed')+1
else:
try: value = t[si]
except Exception: print t; sys.exit()
### get the total reads/sample
count_db[t[0]] = value
fn=filepath(counts_file2); header=True
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if header:
samples = t[1:]
header=False
si = samples.index('H9.102.2.5.bed')+1
export_data.write(line)
else:
try: t[si] = count_db[t[0]]
except Exception: pass ### keep the current value
export_data.write(string.join(t,'\t')+'\n')
export_data.close()
def getGeneExonLengths(species):
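    """Sum the lengths of all annotated exon regions (IDs containing 'E') per Ensembl gene from the
    species exon annotation file; used when computing gene-level RPKMs from gene counts."""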
gene_lengths={}
filename = 'AltDatabase/'+species+'/RNASeq/'+species+'_Ensembl_exons.txt'
fn=filepath(filename)
firstLine=True
for line in open(fn,'rU').xreadlines():
line = line.rstrip('\n')
if firstLine:
firstLine=False
else:
t = string.split(line,'\t')
geneID = t[2]; start = int(t[6]); end = int(t[7]); exonID = t[1]
if 'E' in exonID:
try: gene_lengths[geneID]+=abs(end-start)
except Exception: gene_lengths[geneID]=abs(end-start)
return gene_lengths
def importRawCountData(filename,expressed_gene_exon_db,excludeLowExp=True):
""" Identifies exons or junctions to evaluate gene-level expression. This function, as it is currently written:
1) examines the RPKM and original read counts associated with all exons
2) removes exons/junctions that do not meet their respective RPKM AND read count cutoffs
3) returns ONLY those exons and genes deemed expressed, whether constitutive selected or all exons
"""
### Get expression values for exon/junctions to analyze
seq_ids_to_import={}
for gene in expressed_gene_exon_db:
for exonid in expressed_gene_exon_db[gene]: seq_ids_to_import[exonid]=[]
### Define thresholds
exon_exp_threshold = UserOptions.ExonExpThreshold()
junction_exp_threshold = UserOptions.JunctionExpThreshold()
exon_rpkm_threshold = UserOptions.ExonRPKMThreshold()
gene_rpkm_threshold = UserOptions.RPKMThreshold()
gene_exp_threshold = UserOptions.GeneExpThreshold()
### Import RPKM normalized expression values
fn=filepath(filename); x=0; rpkm_dbase={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id=t[0]
max_count=max(map(float,t[1:]))
if max_count>=exon_rpkm_threshold or excludeLowExp==False: rpkm_dbase[exon_id]=[] ### Only retain exons/junctions meeting the RPKM threshold
### Import non-normalized original counts
counts_filename = string.replace(filename,'exp.','counts.')
fn=filepath(counts_filename); x=0; exp_dbase={}
all_exp_features={} ### Don't filter for only gene-expression reporting
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id,coordinates = string.split(t[0],'=')
coordinates = string.split(coordinates,':')[1]
coordinates = string.split(coordinates,'-')
length=abs(int(coordinates[1])-int(coordinates[0]))
max_count=max(map(float,t[1:])); proceed = 'no'
if '-' in exon_id:
length = 60.0
if max_count>=junction_exp_threshold or excludeLowExp==False:
### Only considered when exon data is not present in the analysis
proceed = 'yes'
elif max_count>=exon_exp_threshold or excludeLowExp==False: proceed = 'yes'
if proceed == 'yes' and exon_id in rpkm_dbase: ### Ensures that the maximum sample (not group) user defined count threshold is achieved at the exon or junction-level
all_exp_features[exon_id]=None
if exon_id in seq_ids_to_import:### Forces an error if not in the steady-state pre-determined set (CS or all-exons) - INCLUDE HERE TO FILTER ALL FEATURES
exp_dbase[exon_id] = t[1:],length ### Include sequence length for normalization
for exon in exp_dbase: array_count = len(exp_dbase[exon][0]); break
try:null=array_count
except Exception:
print 'No exons or junctions considered expressed (based user thresholds). Exiting analysis.'; force_exit
return exp_dbase, all_exp_features, array_count
def importNormalizedCountData(filename,expressed_gene_exon_db):
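    """Import already-normalized feature expression values, retaining only exons/junctions that meet the
    user-defined expression thresholds (RPKM thresholds are not applied here, unlike importRawCountData)."""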
### Get expression values for exon/junctions to analyze
seq_ids_to_import={}
for gene in expressed_gene_exon_db:
for exonid in expressed_gene_exon_db[gene]: seq_ids_to_import[exonid]=[]
### Define thresholds
exon_exp_threshold = UserOptions.ExonExpThreshold()
junction_exp_threshold = UserOptions.JunctionExpThreshold()
exon_rpkm_threshold = UserOptions.ExonRPKMThreshold()
gene_rpkm_threshold = UserOptions.RPKMThreshold()
gene_exp_threshold = UserOptions.GeneExpThreshold()
### Import non-normalized original counts
fn=filepath(filename); x=0; exp_dbase={}
all_exp_features={} ### Don't filter for only gene-expression reporting
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: array_names = t[1:]; x=1
else:
exon_id=t[0]; proceed = 'no'
max_count=max(map(float,t[1:]))
if '-' in exon_id:
if max_count>=junction_exp_threshold: proceed = 'yes'
elif max_count>=exon_exp_threshold: proceed = 'yes'
if proceed == 'yes': ### Ensures that the maximum sample (not group) user defined count threshold is achieved at the exon or junction-level
all_exp_features[exon_id]=None
if exon_id in seq_ids_to_import: ### If a "constitutive" or exon-level feature (filter missing prior to 2.0.8 - bug)
exp_dbase[exon_id] = t[1:],0 ### Add the zero just to comply with the raw count input format (indicates exon length)
for exon in exp_dbase: array_count = len(exp_dbase[exon][0]); break
return exp_dbase, all_exp_features, array_count
def obtainGeneCounts(expressed_gene_exon_db,species,exp_dbase,array_count,normalize_feature_exp,excludeLowExp=True):
###Calculate avg expression for each sample for each exon (using constitutive or all exon values)
if excludeLowExp == False:
gene_lengths = getGeneExonLengths(species)
steady_state_db={}
for gene in expressed_gene_exon_db:
x = 0; gene_sum=0
exon_list = expressed_gene_exon_db[gene]
while x < array_count:
exp_list=[]; len_list=[]
for exon in exon_list:
try:
exp_val = exp_dbase[exon][0][x]
if normalize_feature_exp == 'RPKM':
### Decided to include all exons, expressed or not to prevent including lowly expressed exons that are long, that can bias the expression call
#if float(exp_val) != 0: ### Here, we use the original raw count data, whereas above is the adjusted quantile or raw count data
exp_list.append(exp_val); len_list.append(exp_dbase[exon][1]) ### This is for RNASeq -> don't include undetected exons - made in v.204
else: exp_list.append(exp_val) #elif float(exp_val) != 1:
except KeyError: null =[] ###occurs if the expression exon list is missing some of these exons
try:
if len(exp_list)==0:
for exon in exon_list:
try:
exp_list.append(exp_dbase[exon][0][x]); len_list.append(exp_dbase[exon][1])
#kill
except KeyError: null=[] ### Gene entries will cause this error, since they are in the database but not in the count file
if normalize_feature_exp == 'RPKM':
sum_const_exp=sum(map(float,exp_list)); gene_sum+=sum_const_exp
sum_length=sum(len_list) ### can have different lengths for each sample, since only expressed exons are considered
if excludeLowExp == False:
sum_length = gene_lengths[gene] ### Uses the all annotated exon lengths
### Add only one avg-expression value for each array, this loop
try: steady_state_db[gene].append((sum_const_exp,sum_length))
except KeyError: steady_state_db[gene] = [(sum_const_exp,sum_length)]
else:
avg_const_exp=Average(exp_list)
if avg_const_exp != 1: gene_sum+=avg_const_exp
### Add only one avg-expression value for each array, this loop
try: steady_state_db[gene].append(avg_const_exp)
except KeyError: steady_state_db[gene] = [avg_const_exp]
except Exception: null=[] ### Occurs when processing a truncated dataset (for testing usually) - no values for the gene should be included
x += 1
if gene_sum==0:
try:
del steady_state_db[gene] ### Hence, no genes showed evidence of expression (most critical for RNA-Seq)
except Exception: null=[] ### Error occurs when a gene is added to the database from self.location_gene_db, but is not expressed
return steady_state_db
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var])>1:
print var, len(globals()[var])
except Exception: null=[]
def clearObjectsFromMemory(db_to_clear):
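    """Explicitly delete the contents of a large dictionary (or list) to encourage garbage collection."""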
db_keys={}
for key in db_to_clear: db_keys[key]=[]
for key in db_keys:
try: del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception: del key ### For plain lists
def verifyFile(filename):
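    """Return 'found' if the file exists and contains at least one line, otherwise 'not found'."""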
status = 'not found'
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = 'found';break
except Exception: status = 'not found'
return status
def AppendOrWrite(export_path):
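    """Open export_path for writing if it does not yet exist, otherwise open it in append mode;
    returns the file handle and the pre-existing file status."""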
export_path = filepath(export_path)
status = verifyFile(export_path)
if status == 'not found':
export_data = export.ExportFile(export_path) ### Write this new file
else:
export_data = open(export_path,'a') ### Appends to existing file
return export_data, status
def quantileNormalizationSimple(condition_count_db):
### Basic quantile normalization method (average ranked expression values)
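    ### Each condition's (count+1) values are sorted; the value at every rank is then replaced by the
    ### mean of that rank across all conditions (features missing in a condition are treated as a count of 1)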
### Get all junction or exon entries
key_db={}
for condition in condition_count_db:
count_db = condition_count_db[condition]
for key in count_db: key_db[key]=[]
condition_unnormalized_db={}
for key in key_db:
### Only look at the specific biotype of interest for each normalization
for condition in condition_count_db:
count_db = condition_count_db[condition]
try:
                count = float(count_db[key])+1 ### This adjustment allows us to obtain more realistic folds where 0 is compared and use log2
count_db[key] = [] ### Set equal to null as a temporary measure to save memory
except KeyError: count = 1.00 ###Was zero, but needs to be one for more realistic log2 fold calculations
### store the minimal information to recover the original count and ID data prior to quantile normalization
try: condition_unnormalized_db[condition].append([count,key])
except Exception: condition_unnormalized_db[condition]=[[count,key]]
quantile_normalize_db={}; key_db={}
for condition in condition_unnormalized_db:
condition_unnormalized_db[condition].sort() ### Sort lists by count number
rank=0 ### thus, the ID is the rank order of counts
for (count,key) in condition_unnormalized_db[condition]:
try: quantile_normalize_db[rank].append(count)
except KeyError: quantile_normalize_db[rank] = [count]
rank+=1
### Get the average value for each index
for rank in quantile_normalize_db:
quantile_normalize_db[rank] = Average(quantile_normalize_db[rank])
for condition in condition_unnormalized_db:
rank=0
count_db = condition_count_db[condition]
for (count,key) in condition_unnormalized_db[condition]:
avg_count = quantile_normalize_db[rank]
rank+=1
count_db[key] = str(avg_count) ### re-set this value to the normalized value
try:
clearObjectsFromMemory(condition_unnormalized_db); condition_unnormalized_db = []
clearObjectsFromMemory(quantile_normalize_db); quantile_normalize_db = []
except Exception: None
return condition_count_db
def combineExonAnnotations(db):
for i in db:
list1=[]; list2=[]
for (junctions,splice_event) in db[i]:
list1.append(junctions); list2.append(splice_event)
junctions = EnsemblImport.combineAnnotations(list1)
splice_event = EnsemblImport.combineAnnotations(list2)
db[i] = junctions,splice_event
return db
def formatID(id):
### JunctionArray methods handle IDs with ":" different than those that lack this
return string.replace(id,':','@')
def getChromosomeStrandCoordinates(species,testImport):
### For novel junctions with no known-splice site, map to genes
gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
chr_strand_gene_db = {}; location_gene_db = {}; chromosome_names={}; all_chromosomes={}
for gene in gene_location_db:
chr,strand,start,end = gene_location_db[gene]
location_gene_db[chr,int(start),int(end)] = gene,strand
try: chr_strand_gene_db[chr,strand].append((int(start),int(end)))
except KeyError: chr_strand_gene_db[chr,strand] = [(int(start),int(end))]
if testImport == 'yes':
if chr=='chr1': chromosome_names[chr]=[]
if chr=='chr19': chromosome_names[chr]=[] ### Gene rich chromosome
if chr=='chrMT': chromosome_names[chr]=[] ### Gene rich chromosome
elif len(chr)<7: chromosome_names[chr]=[]
all_chromosomes[chr]=[]
### Some organisms aren't organized into classical chromosomes (why I don't know)
if len(chromosome_names)<10 and len(all_chromosomes)>9 and testImport=='no': chromosome_names = all_chromosomes
return chr_strand_gene_db,location_gene_db,chromosome_names,gene_location_db
def exportDatasetLinkedExons(species,exons_to_export,critical_exon_annotations,root_dir,testImport=None,searchChr=None):
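    """Append known and novel exon-region annotations for this dataset to the species Ensembl_exons file
    (written per-chromosome when searchChr is supplied)."""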
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_exons.txt'
if searchChr != None:
export_path = string.replace(export_path,'RNASeq/'+species,'RNASeq/exons/'+species)
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
if testImport == 'yes': print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
### We stored these in a dictionary to make sure each exon is written only once and so we can organize by gene
exons_to_export_list=[]
for key in exons_to_export:
ed = exons_to_export[key]
exons_to_export_list.append((key,ed))
exons_to_export_list.sort()
for (key,ed) in exons_to_export_list:
constitutive_call = 'no'; ens_constitutive_status = '0'
try:
red = ed.ExonRegionData()
exon_region = ed.ExonRegionID()
start = str(ed.ReadStart()); stop = start
if '-' not in exon_region and '_' not in exon_region: annotation = 'known'
else: annotation = 'novel'
except Exception:
red = ed ### For annotated exons, no difference in the annotations
exon_region = ed.ExonRegionIDs()
start = str(red.ExonStart()); stop = str(red.ExonStop())
constitutive_call = red.Constitutive()
if constitutive_call == 'yes': ens_constitutive_status = '1'
annotation = 'known'
uid = red.GeneID()+':'+exon_region
splice_events = red.AssociatedSplicingEvent(); splice_junctions = red.AssociatedSplicingJunctions()
if uid in critical_exon_annotations:
splice_junctions,splice_events = critical_exon_annotations[uid]
export_values = [uid, exon_region, red.GeneID(), '', red.Chr(), red.Strand(), start, stop, annotation, constitutive_call, red.ExonID(), ens_constitutive_status]
export_values+= [exon_region, str(red.ExonStart()), str(red.ExonStop()), splice_events, splice_junctions]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def exportNovelJunctions(species,novel_junction_db,condition_count_db,root_dir,dataset_name,biotype):
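    """Write novel junction or exon coordinates, their associated genes and per-condition read counts to a
    'novel.' results file in the ExpressionInput directory."""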
if 'exp.' not in dataset_name: dataset_name = 'exp.'+dataset_name
if '.txt' not in dataset_name: dataset_name+='.txt'
dataset_name = string.replace(dataset_name,'exp','novel')
dataset_name = string.replace(dataset_name,'.txt','.'+biotype+'.txt')
export_path = root_dir+'ExpressionInput/'+dataset_name
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
title = ['chr','strand','start','stop','start Ensembl','end Ensembl','known start', 'known end']
for condition in condition_count_db: title.append(condition)
export_data.write(string.join(title,'\t')+'\n')
for key in novel_junction_db:
ji = novel_junction_db[key]
try: gene1 = str(ji.GeneID())
except Exception: gene1=''
try: gene2 = str(ji.SecondaryGeneID())
except Exception: gene2 = 'None'
try: le = str(ji.LeftExonAnnotations())
except Exception: le = ''
try: re = str(ji.RightExonAnnotations())
except Exception: re = ''
if biotype == 'junction':
values = [ji.Chr(), ji.Strand(), str(ji.Exon1Stop()), str(ji.Exon2Start())]
elif biotype == 'exon':
values = [ji.Chr(), ji.Strand(), str(ji.Exon1Stop()-1), str(ji.Exon2Start()+1)] ### correct for initial adjustment
values += [gene1,gene2,le,re]
for condition in condition_count_db:
count_db = condition_count_db[condition]
try: read_count = count_db[key]
except KeyError: read_count = '0'
values.append(read_count)
export_data.write(string.join(values,'\t')+'\n')
export_data.close()
def exportDatasetLinkedGenes(species,gene_location_db,root_dir):
"""Include an entry for gene IDs to include constitutive expression for RPKM normalized data"""
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
export_data,status = AppendOrWrite(export_path)
for gene in gene_location_db:
chr,strand,start,end = gene_location_db[gene]
export_values = [gene, 'E0.1',gene, '', chr, strand, str(start), str(end), 'known', 'yes', gene, '1']
export_values+= ['E0.1', str(start), str(end), '', '']
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def exportDatasetLinkedJunctions(species,junction_db,junction_annotations,root_dir,testImport=False,searchChr=None):
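    """Append junction annotations for this dataset (including splicing-event and trans-splicing calls) to
    the species Ensembl_junctions file (written per-chromosome when searchChr is supplied)."""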
export_path = root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt'
if searchChr != None:
export_path = string.replace(export_path,'RNASeq/'+species,'RNASeq/junctions/'+species)
export_path = string.replace(export_path,'.txt','.'+searchChr+'.txt')
if testImport == 'yes': print 'Writing',export_path
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
export_title = ['AltAnalyzeID','exon_id','ensembl_gene_id','transcript_cluster_id','chromosome','strand','probeset_start','probeset_stop']
export_title +=['class','constitutive_probeset','ens_exon_ids','ens_constitutive_status','exon_region','exon-region-start(s)','exon-region-stop(s)','splice_events','splice_junctions']
export_title = string.join(export_title,'\t')+'\n'; export_data.write(export_title)
for key in junction_db:
(chr,exon1_stop,exon2_start) = key
ji=junction_db[key]
#print key, ji.UniqueID(), ji.GeneID()
if ji.GeneID()!=None and ji.UniqueID()!=None:
if ji.UniqueID() in junction_annotations: ### Obtained from JunctionArray.inferJunctionComps()
junctions,splice_events = junction_annotations[ji.UniqueID()]
if ji.TransSplicing() == 'yes':
if len(splice_events)>0: splice_events+= '|trans-splicing'
else: splice_events = 'trans-splicing'
ji.setAssociatedSplicingEvent(splice_events); ji.setAssociatedSplicingJunctions(junctions)
elif ji.TransSplicing() == 'yes':
ji.setAssociatedSplicingEvent('trans-splicing')
try:
try: constitutive_call = ji.Constitutive()
except Exception:
jd = ji.ExonAnnotations()
constitutive_call = jd.Constitutive()
if constitutive_call == 'yes': ens_constitutive_status = '1'
else: ens_constitutive_status = '0'
annotation = 'known'
except Exception:
constitutive_call = 'no'; ens_constitutive_status = '0'; annotation = 'novel'
if 'I' in ji.ExonRegionID() or 'U' in ji.ExonRegionID() or '_' in ji.ExonRegionID():
annotation = 'novel' ### Not previously indicated well (as I remember) for exon-level reads - so do this
export_values = [ji.UniqueID(), ji.ExonRegionID(), ji.GeneID(), '', ji.Chr(), ji.Strand(), str(ji.Exon1Stop()), str(ji.Exon2Start()), annotation, constitutive_call, ji.ExonID(), ens_constitutive_status]
export_values+= [ji.ExonRegionID(), str(ji.Exon1Stop()), str(ji.Exon2Start()), ji.AssociatedSplicingEvent(), ji.AssociatedSplicingJunctions()]
export_values = string.join(export_values,'\t')+'\n'; export_data.write(export_values)
export_data.close()
def combineDetectedExons(unmapped_exon_db,align_exon_db,novel_exon_db):
### Used for exon alignments (both start position and end position aligned to exon/intron/UTR regions)
### Reformat align_exon_db to easily lookup exon data
aligned_exon_lookup_db={}
for gene in align_exon_db:
for ed in align_exon_db[gene]:
aligned_exon_lookup_db[gene,ed.ReadStart()]=ed
#if gene == 'ENSMUSG00000064181': print ed.ReadStart(),ed.ExonRegionID()
### Reformat novel_exon_db to easily lookup exon data - created from junction analysis (rename above exons to match novel junctions)
novel_exon_lookup_db={}
for gene in novel_exon_db:
for ed in novel_exon_db[gene]:
try:
### Only store exons that are found in the novel exon file
null = aligned_exon_lookup_db[gene,ed.ReadStart()+1] ### offset introduced on import
novel_exon_lookup_db[gene,ed.ReadStart()+1]=ed
except Exception: null=[]
try:
### Only store exons that are found in the novel exon file
null = aligned_exon_lookup_db[gene,ed.ReadStart()-1] ### offset introduced on import
novel_exon_lookup_db[gene,ed.ReadStart()-1]=ed
except Exception: null=[]
    ### Look up the proper exon region ID and gene ID to format the unique ID and export coordinates
x = 0
for key in unmapped_exon_db:
(chr,exon1_stop,exon2_start) = key
ji=unmapped_exon_db[key]
proceed = 'no'
if ji.GeneID() != None:
e1 = (ji.GeneID(),exon1_stop)
e2 = (ji.GeneID(),exon2_start)
exon_info=[]; override_annotation = None; found=[]
try: null = aligned_exon_lookup_db[e1]; found.append(1)
except Exception: null=[]
try: null = aligned_exon_lookup_db[e2]; found.append(2)
except Exception: null=[]
try: null = novel_exon_lookup_db[e1]; override_annotation = 1
except Exception:
try: null = novel_exon_lookup_db[e2]; override_annotation = 2
except Exception: null=[]
if len(found)>0:
                ### Below is not the simplest way to do this, but should be the fastest
if 1 in found: exon_info.append(aligned_exon_lookup_db[e1])
if 2 in found: exon_info.append(aligned_exon_lookup_db[e2])
if len(exon_info) == 2: ed1,ed2 = exon_info
else:
ed1 = exon_info[0]; ed2 = ed1; x+=1 ### if only one splice site aligned to a gene region (shouldn't occur)
if x == 2: null=[]; #print 'SOME EXONS FOUND WITH ONLY ONE ALIGNING POSITION...',key,ji.GeneID(),ed1.ExonRegionID(),e1,e2
try: red1 = ed1.ExonRegionData(); red2 = ed2.ExonRegionData()
except Exception:
"""
print [ji.GeneID(), ji.Chr(), key]
print e1, e2
try: print ed1.ExonRegionData()
except Exception: 'ed1 failed'
try: print ed2.ExonRegionData()
except Exception: 'ed2 failed'
"""
continue
region1 = ed1.ExonRegionID(); region2 = ed2.ExonRegionID()
#print region1,region2,ji.GeneID(),ji.Chr(),ji.Strand()
try: splice_junctions = EnsemblImport.combineAnnotations([red1.AssociatedSplicingJunctions(),red2.AssociatedSplicingJunctions()])
except Exception: print red1, red2;sys.exit()
splice_events = EnsemblImport.combineAnnotations([red1.AssociatedSplicingEvent(),red2.AssociatedSplicingEvent()])
ji.setAssociatedSplicingJunctions(splice_junctions)
ji.setAssociatedSplicingEvent(splice_events)
ens_exon_ids = EnsemblImport.combineAnnotations([red1.ExonID(),red2.ExonID()])
ji.setExonID(ens_exon_ids)
if red1.Constitutive() == 'yes' or red2.Constitutive() == 'yes': constitutive_call = 'yes'
else: constitutive_call = 'no'
ji.setConstitutive(constitutive_call)
report_both_regions = 'no'
try:
### If the annotations are from a BED file produced by AltAnalyze, novel alternative splice sites may be present
### if the below variable is not created, then this exon may over-ride the annotated exon region (e.g., E15.1 is over-written by E15.1_1234;E15.1_1256)
if 'ENS' in ji.JunctionID() and ':' not in ji.JunctionID(): report_both_regions = 'yes'
except Exception: null=[]
try:
                    ### If the annotations are from a BED file produced by AltAnalyze, it is possible for a known exon to share a splice-site coordinate
### with a novel junction exon. This will cause both to have the same override_annotation. Prevent this with the below 2nd override
if 'ENS' in ji.JunctionID() and ':' in ji.JunctionID(): override_annotation = None
except Exception: null=[]
if override_annotation != None:
if '_' in region1: region1 = string.split(region1,'_')[0]+'_'+str(int(string.split(region1,'_')[-1])-1)
if '_' in region2: region2 = string.split(region2,'_')[0]+'_'+str(int(string.split(region2,'_')[-1])+1)
if override_annotation == 1: region_id = region1 ### This forces a TopHat exon to be named for the splice-site position
else: region_id = region2
else:
if report_both_regions == 'no':
### Don't include specific start and end coordinates if inside a known exon
if ed1.AlignmentRegion() == 'exon': region1 = string.split(region1,'_')[0]
if ed2.AlignmentRegion() == 'exon': region2 = string.split(region2,'_')[0]
if ed1.AlignmentRegion() == 'full-intron' and ed2.AlignmentRegion() == 'full-intron':
region1 = string.split(region1,'_')[0]; region2 = string.split(region2,'_')[0]
                    ### Below adjustments need to compensate for adjustments made upon import
if '_' in region1: region1 = string.split(region1,'_')[0]+'_'+str(int(string.split(region1,'_')[-1])-1)
if '_' in region2: region2 = string.split(region2,'_')[0]+'_'+str(int(string.split(region2,'_')[-1])+1)
ji.setExon1Stop(ji.Exon1Stop()-1); ji.setExon2Start(ji.Exon2Start()+1)
if override_annotation != None: null=[] ### It is already assigned above
elif region1 == region2: region_id = region1
elif ji.Strand() == '+': region_id = region1+';'+region2
else: region_id = region2+';'+region1 ### start and stop or genomically assigned
uid = ji.GeneID()+':'+region_id
#try: exon_region_db[ji.GeneID()].append((formatID(uid),region_id))
#except KeyError: exon_region_db[ji.GeneID()]=[(formatID(uid),region_id)]
ji.setExonRegionID(region_id)
ji.setUniqueID(uid) ### hgu133
### Export format for new exons to add to the existing critical exon database (those in exon_region_db are combined with analyzed junctions)
#exons_to_export[ji.GeneID(),region_id] = ji
else:
#print key, ji.GeneID(), ji.JunctionID(); sys.exit()
null=[] ### Occurs because two genes are overlapping
#return exons_to_export
def annotateNovelJunctions(novel_junction_db,novel_exon_db,exons_to_export):
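    """Assign exon-region annotations to novel junctions using their aligned splice-site exons, build the
    junction_region_db used for reciprocal-junction inference and collect novel exons for export."""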
### Reformat novel_exon_db to easily lookup exon data
novel_exon_lookup_db={}
for gene in novel_exon_db:
for ed in novel_exon_db[gene]:
novel_exon_lookup_db[gene,ed.ReadStart()]=ed
    ### Look up the proper exon region ID and gene ID to format the unique ID and export coordinates
junction_region_db={}
unknown_gene_junctions={}
for key in novel_junction_db:
(chr,exon1_stop,exon2_start) = key
ji=novel_junction_db[key]
proceed = 'no'
if ji.GeneID() != None:
if ji.SpliceSitesFound() != 'both':
e1 = (ji.GeneID(),exon1_stop)
if ji.TransSplicing() == 'yes':
e2 = (ji.SecondaryGeneID(),exon2_start)
else: e2 = (ji.GeneID(),exon2_start)
if e1 in novel_exon_lookup_db and e2 in novel_exon_lookup_db:
proceed = 'yes'
try: ed1 = novel_exon_lookup_db[e1]; red1 = ed1.ExonRegionData(); gene1 = e1[0]
except Exception: print e1; kill
ed2 = novel_exon_lookup_db[e2]; red2 = ed2.ExonRegionData(); gene2 = e2[0]
                    ### If the splice-site was a match to a known junction splice site, use it instead of the one identified by exon-region location overlap
if ji.LeftExonAnnotations() != None: region1 = ji.LeftExonAnnotations()
else: region1 = ed1.ExonRegionID(); exons_to_export[gene1,region1] = ed1
if ji.RightExonAnnotations() != None: region2 = ji.RightExonAnnotations()
else: region2 = ed2.ExonRegionID(); exons_to_export[gene2,region2] = ed2
#print region1,region2,ji.GeneID(),ji.Chr(),ji.Strand(), ji.LeftExonAnnotations(), ji.RightExonAnnotations()
else:
proceed = 'yes'
region1 = ji.LeftExonAnnotations()
region2 = ji.RightExonAnnotations()
red1 = ji.LeftExonRegionData()
red2 = ji.RightExonRegionData()
### Store the individual exons for export
gene1 = ji.GeneID()
if ji.TransSplicing() == 'yes': gene2 = ji.SecondaryGeneID()
else: gene2 = ji.GeneID()
exons_to_export[gene1,region1] = red1
exons_to_export[gene2,region2] = red2
if proceed == 'yes':
try: splice_junctions = EnsemblImport.combineAnnotations([red1.AssociatedSplicingJunctions(),red2.AssociatedSplicingJunctions()])
except Exception: print red1, red2;sys.exit()
splice_events = EnsemblImport.combineAnnotations([red1.AssociatedSplicingEvent(),red2.AssociatedSplicingEvent()])
ji.setAssociatedSplicingJunctions(splice_junctions)
ji.setAssociatedSplicingEvent(splice_events)
ens_exon_ids = EnsemblImport.combineAnnotations([red1.ExonID(),red2.ExonID()])
ji.setExonID(ens_exon_ids)
if ji.TransSplicing() == 'yes':
uid = ji.GeneID()+':'+region1+'-'+ji.SecondaryGeneID()+':'+region2
region_id = uid
### When trans-splicing occurs, add the data twice to junction_region_db for the two different genes
### in JunctionArray.inferJunctionComps, establish two separate gene junctions with a unique ID for the non-gene exon
try: junction_region_db[ji.GeneID()].append((formatID(uid),region1+'-'+'U1000.1_'+str(ji.Exon2Start())))
except KeyError: junction_region_db[ji.GeneID()]=[(formatID(uid),region1+'-'+'U1000.1_'+str(ji.Exon2Start()))]
try: junction_region_db[ji.SecondaryGeneID()].append((formatID(uid),'U0.1_'+str(ji.Exon1Stop())+'-'+region2))
except KeyError: junction_region_db[ji.SecondaryGeneID()]=[(formatID(uid),'U0.1_'+str(ji.Exon1Stop())+'-'+region2)]
else:
uid = ji.GeneID()+':'+region1+'-'+region2
region_id = region1+'-'+region2
try: junction_region_db[ji.GeneID()].append((formatID(uid),region_id))
except KeyError: junction_region_db[ji.GeneID()]=[(formatID(uid),region_id)]
ji.setExonRegionID(region_id)
ji.setUniqueID(uid)
else:
unknown_gene_junctions[key]=[]
return junction_region_db,exons_to_export
def alignReadsToExons(novel_exon_db,ens_exon_db,testImport=False):
### Simple method for aligning a single coordinate to an exon/intron region of an already matched gene
examined_exons=0; aligned_exons=0
for gene in ens_exon_db: #novel_exon_db
try:
region_numbers=[]; region_starts=[]; region_stops=[]
for ed in novel_exon_db[gene]:
examined_exons+=1; aligned_status=0; index=-1
for rd in ens_exon_db[gene]:
index+=1 ### keep track of exon/intron we are in
region_numbers.append(int(string.split(rd.ExonRegionIDs()[1:],'.')[0]))
if rd.Strand() == '-': region_starts.append(rd.ExonStop()); region_stops.append(rd.ExonStart())
else: region_starts.append(rd.ExonStart()); region_stops.append(rd.ExonStop())
#print [rd.ExonStart(),rd.ExonStop(), rd.Strand()]
#print [ed.ReadStart(),rd.ExonStart(),rd.ExonStop()]
if ed.ReadStart()>=rd.ExonStart() and ed.ReadStart()<=rd.ExonStop():
ed.setAlignmentRegion('exon')
if 'I' in rd.ExonRegionIDs(): ### In an annotated intron
ed.setAlignmentRegion('intron')
ord = rd; updated = None
try: ### If the splice site is a novel 3' splice site then annotate as the 3' exon (less than 50nt away)
nrd = ens_exon_db[gene][index+1]
if (abs(ed.ReadStart()-nrd.ExonStart())<3) or (abs(ed.ReadStart()-nrd.ExonStop())<3):
ed.setAlignmentRegion('full-intron') ### this is the start/end of intron coordinates
elif (abs(ed.ReadStart()-nrd.ExonStart())<50) or (abs(ed.ReadStart()-nrd.ExonStop())<50): rd = nrd; updated = 1
except Exception: null=[]
try:
prd = ens_exon_db[gene][index-1]
if (abs(ed.ReadStart()-prd.ExonStart())<3) or (abs(ed.ReadStart()-prd.ExonStop())<3):
ed.setAlignmentRegion('full-intron')### this is the start/end of intron coordinates
elif (abs(ed.ReadStart()-prd.ExonStart())<50) or (abs(ed.ReadStart()-prd.ExonStop())<50):
                                    if updated==1: rd = ord; ###Hence the intron is too small to discriminate between alt5' and alt3' exons
else: rd = prd
except Exception: null=[]
ed.setExonRegionData(rd); aligned_exons+=1; aligned_status=1
ed.setExonRegionID(rd.ExonRegionIDs()+'_'+str(ed.ReadStart()))
#print rd.ExonRegionIDs()+'_'+str(ed.ReadStart())
break
                if aligned_status == 0: ### non-exon/intron aligning sequences
region_numbers.sort(); region_starts.sort(); region_stops.sort()
if (rd.Strand() == '+' and ed.ReadStart()>=rd.ExonStop()) or (rd.Strand() == '-' and rd.ExonStop()>=ed.ReadStart()):
### Applicable to 3'UTR (or other trans-splicing) aligning
utr_id = 'U'+str(region_numbers[-1])+'.1_'+str(ed.ReadStart())
ud = EnsemblImport.ExonAnnotationsSimple(rd.Chr(),rd.Strand(),region_stops[-1],region_stops[-1],gene,'','no',utr_id,'','')
ed.setExonRegionID(utr_id)
else:
### Applicable to 5'UTR (or other trans-splicing) aligning
utr_id = 'U0.1'+'_'+str(ed.ReadStart())
ud = EnsemblImport.ExonAnnotationsSimple(rd.Chr(),rd.Strand(),region_starts[0],region_starts[0],gene,'','no',utr_id,'','')
ed.setExonRegionID(utr_id)
ed.setExonRegionData(ud)
ed.setAlignmentRegion('UTR')
except Exception: null=[]
if testImport == 'yes': print aligned_exons, 'splice sites aligned to exon region out of', examined_exons
def geneAlign(chr,chr_gene_locations,location_gene_db,chr_reads,switch_coord,read_aligned_to_gene):
""" This function aligns the start or end position for each feature (junction or exon) to a gene, in two
    steps by calling this function twice. In the second iteration, the coordinates are reversed """
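    ### Sketch of the logic (assuming chr_gene_locations is sorted by genomic position, as the ranked-gene comment below implies):
    ### walk the sorted gene intervals with a persistent index; if the first coordinate (rs) falls inside [cs,ce] the feature is
    ### assigned to that gene, and if the second coordinate (re) falls outside that gene, the feature is queued in trans_splicing
    ### for a second, coordinate-reversed pass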
index = 0 ### Don't examine genes already looked at
genes_assigned = 0; trans_splicing=[]
for (coord,ji) in chr_reads: ### junction coordinates or exon coordinates with gene object
if index >5: index -=5 ### It is possible for some genes to overlap, so set back the index of genomically ranked genes each time
gene_id_obtained = 'no'
        if switch_coord == 'no': rs,re=coord ### first-iteration coordinates (start and end)
        else: re,rs=coord ### reverse the coordinates for the second iteration
while index < len(chr_gene_locations):
cs,ce = chr_gene_locations[index]
#print [re,rs,cs,ce, ji.Chromosome()];sys.exit()
### Determine if the first listed coordinate lies within the gene
if cs <= rs and ce >= rs:
### Yes, it does
gene,strand = location_gene_db[chr,cs,ce]
if switch_coord == 'yes': ### Only applies to coordinates, where the end-position didn't lie in the same gene as the start-position
if cs <= re and ce >= re:
### This occurs when the first iteration detects a partial overlap, but the gene containing both coordinates is downstream
### Hence, not trans-splicing
ji.setGeneID(gene)
break
first_geneid = ji.GeneID() ### see what gene was assigned in the first iteration (start position only)
#print ['trans',coord, first_geneid, gene] ### Note: in rare cases, an exon can overlap with two genes (bad Ensembl annotations?)
ji.setTransSplicing()
side = ji.checkExonPosition(rs)
if side == 'left':
ji.setGeneID(gene)
ji.setSecondaryGeneID(first_geneid)
else:
ji.setSecondaryGeneID(gene)
#if ji.GeneID() == None: print 'B',coord, ji.GeneID(), secondaryGeneID()
#print ji.GeneID(), ji.SecondaryGeneID();kill
genes_assigned+=1; gene_id_obtained = 'yes'
### Check to see if this gene represents a multi-gene spanning region (overlaps with multiple gene loci)
try:
                        ### This code was used to check whether the gene is multi-spanning. The '<' sign appears to be wrong; in any case, never advance to the next gene until the next read has passed it
#cs2,ce2 = chr_gene_locations[index+1]
#if cs2 < ce: index+=1 ### Continue analysis (if above is correct, the gene will have already been assigned)
#else: break
break
except Exception: break
else:
### First iteration, store the identified gene ID (only looking at the start position)
ji.setGeneID(gene); gene_id_obtained = 'yes'
#print gene, rs, re, cs, ce
### Check the end position, to ensure it is also lies within the gene region
if cs <= re and ce >= re:
genes_assigned+=1
else:
### Hence, the end lies outside the gene region
trans_splicing.append((coord,ji))
### Check to see if this gene represents a multi-gene spanning region (overlaps with multiple gene loci)
try:
                            ### This code was used to check whether the gene is multi-spanning. The '<' sign appears to be wrong; in any case, never advance to the next gene until the next read has passed it
#cs2,ce2 = chr_gene_locations[index+1]
#if cs2 < ce: index+=1 ### Continue analysis (if above is correct, the gene will have already been assigned)
#else: break
break
except Exception: break
else:
if rs < ce and re < ce: break
elif switch_coord == 'no' and cs <= re and ce >= re:
### This can occur if the left junction splice site is in an exon and the other is the UTR as opposed to another gene
gene,strand = location_gene_db[chr,cs,ce]
ji.setSecondaryGeneID(gene); gene_id_obtained = 'yes'
#print gene, coord, ji.Strand(), ji.GeneID()
index+=1
if gene_id_obtained == 'no':
### These often appear to be genes predicted by tBLASTn at UCSC but not by Ensembl (e.g., chr17:27,089,652-27,092,318 mouse mm9)
null=[]
#ji.setGeneID(None) ### This is not necessary, since if one exon does not align to a gene it is still a valid alignment
#print chr,coord
read_aligned_to_gene += genes_assigned
#print genes_assigned, chr, 'Gene IDs assigned out of', len(chr_reads)
#print len(trans_splicing),'reads with evidence of trans-splicing'
### For any coordinate-pair where the end-position doesn't lie within the same gene as the start, re-run for those to see which gene they are in
if switch_coord == 'no' and len(trans_splicing)>0:
read_aligned_to_gene = geneAlign(chr,chr_gene_locations,location_gene_db,trans_splicing,'yes',read_aligned_to_gene)
return read_aligned_to_gene
def getNovelExonCoordinates(species,root_dir):
""" Currently, any novel exon determined during initial RNA-Seq read annotation with defined start and end coordinates, only has
    the exon-end coordinate, not start, in its name. However, the start and stop are indicated in the counts.Experiment.txt file.
To get this, we parse that file and only store exons with an I or U in them and then correct for this in the matching function below """
exp_dir = root_dir+'/ExpressionInput/'
dir_list = read_directory(exp_dir)
counts_file = None
for file in dir_list:
if 'counts.' in file and 'steady' not in file:
counts_file = file
### Example
#ENSG00000137076:I17.1_35718353=chr9:35718353-35718403 (novel exon coordinates - just sorted, not necessarily in the correct order)
#ENSG00000137076:E17.1-I17.1_35718403=chr9:35718809-35718403 (5' supporting junction)
#ENSG00000137076:I17.1_35718353-E18.1=chr9:35718353-35717783 (3' supporting junction)
    #here, once we see that I17.1_35718353 is the exon ID, we know we need to get the junction with -I17.1_35718403 (always the second value)
if counts_file!=None:
fn=filepath(exp_dir+counts_file)
print 'Reading counts file'
novel_exon_db = parseCountFile(fn,'exons',{}) ### Get novel exons
print 'Reading counts file'
novel_exon_db = parseCountFile(fn,'junctions',novel_exon_db) ### Get novel exons
return novel_exon_db
def getMaxCounts(fn,cutoff,filterExport=False,filterExportDir=False):
firstLine=True
expressed_uids={}
if filterExport != False:
eo=export.ExportFile(filterExportDir)
for line in open(fn,'rU').xreadlines():
Line = line.rstrip('\n')
t = string.split(Line,'\t')
key = t[0]
if firstLine:
firstLine = False
if filterExport != False:
eo.write(line)
else:
if filterExport != False:
if key in filterExport:
eo.write(line)
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: maxExp = max(map(lambda x: float(x), t[1:]))
except Exception:
if 'NA' in t[1:]:
tn = [0 if x=='NA' else x for x in t[1:]] ### Replace NAs
maxExp = max(map(lambda x: float(x), tn))
elif '' in t[1:]:
tn = [0 if x=='' else x for x in t[1:]] ### Replace blanks
maxExp = max(map(lambda x: float(x), tn))
else:
maxExp=cutoff+1
#gene = string.split(uid,':')[0]
if maxExp > cutoff:
expressed_uids[uid] = []
return expressed_uids
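### Usage sketch (hypothetical file name): getMaxCounts('ExpressionInput/exp.MyExperiment.txt',5.0) returns a dictionary keyed by
### feature UID for rows whose maximum value across samples exceeds the cutoff; 'NA' and blank values are treated as zero first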
def importBiologicalRelationships(species):
### Combine non-coding Ensembl gene annotations with UniProt functional annotations
import ExpressionBuilder
custom_annotation_dbase={}
try: coding_db = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
except Exception: coding_db = {}
try: gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
except Exception: gene_to_symbol_db = {}
for gene in coding_db:
#coding_type = string.split(coding_db[gene][-1],'|')
coding_type = coding_db[gene][-1]
if 'protein_coding' in coding_type:
coding_type = 'protein_coding'
else:
coding_type = 'ncRNA'
if gene in gene_to_symbol_db:
symbol = string.lower(gene_to_symbol_db[gene][0])
### The below genes cause issues with many single cell datasets in terms of being highly correlated
if 'rpl'==symbol[:3] or 'rps'==symbol[:3] or 'mt-'==symbol[:3] or '.' in symbol or 'gm'==symbol[:2]:
coding_type = 'ncRNA'
try: gene_db = custom_annotation_dbase[coding_type]; gene_db[gene]=[]
except Exception: custom_annotation_dbase[coding_type] = {gene:[]}
filename = 'AltDatabase/uniprot/'+species+'/custom_annotations.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
ens_gene,compartment,custom_class = t[:3]
if 'GPCR' in custom_class:
custom_class = ['GPCR']
else:
custom_class = string.split(custom_class,'|')
custom_class = string.split(compartment,'|')+custom_class
for cc in custom_class:
try: gene_db = custom_annotation_dbase[cc]; gene_db[ens_gene]=[]
except Exception: custom_annotation_dbase[cc] = {ens_gene:[]}
#custom_annotation_dbase={}
try:
filename = 'AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-BioMarkers.txt'
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
gene,null,celltype = t[:3]
try: gene_db = custom_annotation_dbase['BioMarker']; gene_db[gene]=[]
except Exception: custom_annotation_dbase['BioMarker'] = {gene:[]}
print len(custom_annotation_dbase), 'gene classes imported'
except Exception: pass
return custom_annotation_dbase
def importGeneSets(geneSetType,filterType=None,geneAnnotations=None):
gene_db={}
if 'Ontology' in geneSetType:
filename = 'AltDatabase/goelite/'+species+'/nested/Ensembl_to_Nested-GO.txt'
ontology=True
else:
filename = 'AltDatabase/goelite/'+species+'/gene-mapp/Ensembl-'+geneSetType+'.txt'
ontology=False
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if ontology:
gene,category = t
else: gene,null,category = t[:3]
if filterType==None:
try: gene_db[gene].append(category)
except Exception: gene_db[gene] = [category]
elif filterType in category:
if gene in geneAnnotations:
gene = geneAnnotations[gene][0]
gene_db[gene]=[]
return gene_db
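### Usage sketch: importGeneSets('Biotypes',filterType='transcription regulator',geneAnnotations=gene_to_symbol_db) returns a
### dictionary keyed by gene symbol (or the original ID when no symbol is available) restricted to that category, while
### importGeneSets('GeneOntology',filterType='GO:0022402',...) reads from the nested Ontology file instead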
def singleCellRNASeqWorkflow(Species, platform, expFile, mlp, exp_threshold=5, rpkm_threshold=5, drivers=False, parameters = None, reportOnly=False):
global species
global rho_cutoff
species = Species
removeOutliers = False
if parameters != None:
rpkm_threshold = parameters.ExpressionCutoff()
exp_threshold = parameters.CountsCutoff()
rho_cutoff = parameters.RhoCutoff()
restrictBy = parameters.RestrictBy()
try: removeOutliers = parameters.RemoveOutliers()
except Exception: pass
if platform == 'exons':
rpkm_threshold=0
exp_threshold=0
else:
rho_cutoff = 0.4
restrictBy = 'protein_coding'
onlyIncludeDrivers=True
if platform != 'exons':
platform = checkExpressionFileFormat(expFile,platform)
if platform != 'RNASeq':
if rpkm_threshold>1.9999:
rpkm_threshold = math.log(rpkm_threshold,2) ### log2 transform
if removeOutliers:
### Remove samples with low relative number of genes expressed
try:
import shutil
print '***Removing outlier samples***'
import sampleIndexSelection
output_file = expFile[:-4]+'-OutliersRemoved.txt'
sampleIndexSelection.statisticallyFilterFile(expFile,output_file,rpkm_threshold)
if 'exp.' in expFile:
### move the original groups and comps files
groups_file = string.replace(expFile,'exp.','groups.')
groups_file = string.replace(groups_file,'-steady-state','')
groups_filtered_file = groups_file[:-4]+'-OutliersRemoved.txt'
comps_file = string.replace(groups_file,'groups.','comps.')
comps_filtered_file = string.replace(groups_filtered_file,'groups.','comps.')
counts_file = string.replace(expFile,'exp.','counts.')
counts_filtered_file = string.replace(output_file,'exp.','counts.')
try: os.rename(groups_file,groups_filtered_file) ### if present copy over
except Exception: pass
try: os.rename(comps_file,comps_filtered_file) ### if present copy over
except Exception: pass
try: shutil.copyfile(counts_file,counts_filtered_file) ### if present copy over
except Exception: pass
expFile = output_file
print ''
except Exception:
print '***Filtering FAILED***'
print traceback.format_exc()
expressed_uids_rpkm = getMaxCounts(expFile,rpkm_threshold)
try: expressed_uids_counts = getMaxCounts(string.replace(expFile,'exp.','counts.'),exp_threshold)
except Exception: expressed_uids_counts=expressed_uids_rpkm
if len(expressed_uids_counts) > 0:
try: expressed_uids = expressed_uids_rpkm.viewkeys() & expressed_uids_counts.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(expressed_uids_rpkm,expressed_uids_counts)
else:
expressed_uids = expressed_uids_rpkm
print 'Genes filtered by counts:',len(expressed_uids_counts)
print 'Genes filtered by expression:',len(expressed_uids_rpkm),len(expressed_uids)
#expressed_uids = filterByProteinAnnotation(species,expressed_uids)
print len(expressed_uids), 'expressed genes by RPKM/TPM (%d) and counts (%d)' % (rpkm_threshold,exp_threshold)
#"""
import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
try: biological_categories = importBiologicalRelationships(species)
except Exception:
restrictBy = None
biological_categories={}
print 'Missing annotation file in:','AltDatabase/uniprot/'+species+'/custom_annotations.txt !!!!!'
if restrictBy !=None:
genes = biological_categories['protein_coding']
genes_temp=dict(genes)
for gene in genes_temp:
if gene in gene_to_symbol_db:
genes[gene_to_symbol_db[gene][0]]=[] ### add symbols
genes_temp={}
else:
genes = {}
for i in expressed_uids: genes[i]=[]
"""
genes.update(biological_categories['BioMarker'])
genes.update(biological_categories['transcription regulator'])
genes.update(biological_categories['splicing regulator'])
genes.update(biological_categories['kinase'])
genes.update(biological_categories['GPCR'])
"""
expressed_uids_db={}; guide_genes={}
for id in expressed_uids: expressed_uids_db[id]=[]
if platform == 'exons': ### For splicing-index value filtering
expressed_uids=[]
for uid in expressed_uids_db:
geneID = string.split(uid,':')[0]
geneID = string.split(geneID,' ')[-1]
if geneID in genes: expressed_uids.append(uid)
else:
try: expressed_uids = genes.viewkeys() & expressed_uids_db.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(genes,expressed_uids_db)
#print len(expressed_uids)
expressed_uids_db2={}
for id in expressed_uids: expressed_uids_db2[id]=[]
if drivers != False:
guide_genes = getDrivers(drivers)
if onlyIncludeDrivers:
try: expressed_uids = guide_genes.viewkeys() & expressed_uids_db2.viewkeys() ### common
except Exception: expressed_uids = getOverlappingKeys(guide_genes,expressed_uids_db2)
if len(expressed_uids)<10:
expressed_uids=[]
for uid in expressed_uids_db:
expressed_uids.append(uid)
print len(expressed_uids), 'expressed IDs being further analyzed'
#sys.exit()
print_out = findCommonExpressionProfiles(expFile,species,platform,expressed_uids,guide_genes,mlp,parameters=parameters,reportOnly=reportOnly)
return print_out
def getOverlappingKeys(db1,db2):
db3=[]
for key in db1:
if key in db2:
db3.append(key)
return db3
def getDrivers(filename):
fn = filepath(filename)
firstLine=True
drivers={}
for line in open(fn,'rU').xreadlines():
line = line.rstrip()
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
gene = t[0]
drivers[gene]=[]
print 'Imported %d guide genes' % len(drivers)
return drivers
def filterByProteinAnnotation(species,expressed_uids):
import ExpressionBuilder
custom_annotation_dbase = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
expressed_uids_protein=[]
for gene in expressed_uids:
if gene in custom_annotation_dbase:
compartment,custom_class = custom_annotation_dbase[gene]
if 'protein_coding' in custom_class:
expressed_uids_protein.append(gene)
if len(expressed_uids_protein)>10:
return expressed_uids_protein
else:
return expressed_uids
def CoeffVar(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=[]):
firstLine=True
expressed_values={}
expressed_values_filtered={}
cv_list=[]
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
values = map(lambda x: float(x), t[1:])
#gene = string.split(uid,':')[0]
if uid in expressed_uids:
vs = list(values); vs.sort()
cv = statistics.stdev(values)/statistics.avg(values)
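                ### cv is the coefficient of variation (stdev/mean); e.g., values with mean 10 and stdev 5 give cv = 0.5,
                ### so a higher cv indicates a more variable gene across samples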
if samplesDiffering<1: samplesDiffering=1
if platform == 'RNASeq':
                    if (vs[-1*samplesDiffering]/vs[samplesDiffering])>fold: ### Ensures that at least 4 samples are significantly different in the set
expressed_values[uid] = values
cv_list.append((cv,uid))
else:
                    if (vs[-1*samplesDiffering]-vs[samplesDiffering])>fold: ### Ensures that at least 4 samples are significantly different in the set
expressed_values[uid] = values
cv_list.append((cv,uid))
if uid in guideGenes:
expressed_values[uid] = values
cv_list.append((10000,uid)) ### Very high CV
cv_list.sort()
cv_list.reverse()
x=0
for (cv,uid) in cv_list:
x+=1
"""
if uid == 'ENSMUSG00000003882':
print x, 'ilr7'
"""
for (cv,uid) in cv_list[:5000]:
expressed_values_filtered[uid] = expressed_values[uid]
return expressed_values_filtered, fold, samplesDiffering, headers
def determinePattern(vs):
max_vs = max(vs)
min_vs = min(vs)
lower_max = max_vs - (max_vs*0.01)
upper_min = abs(max_vs)*0.01
s = bisect.bisect_right(vs,upper_min) ### starting low 15% index position
e = bisect.bisect_left(vs,lower_max) ### ending upper 85% index position
#print vs
#print max_vs, min_vs
#print lower_max, upper_min
#print s, e
avg = statistics.avg(vs[s:e+1])
m = bisect.bisect_left(vs,avg)
ratio = vs[m]/vs[((e-s)/2)+s-2] ### If the ratio is close to 1, a sigmoidal or linear pattern likely exists
print ratio
#sys.exit()
return ratio
def checkExpressionFileFormat(expFile,platform):
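    ### Heuristic: scan all values and, if the maximum exceeds 100, treat the data as non-log values (returning 'RNASeq' so the
    ### values are log2-transformed downstream); otherwise assume the values are already on a log scale and return "3'array"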
firstLine=True
inputMax=0; inputMin=10000
expressed_values={}
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: values = map(lambda x: float(x), t[1:])
except Exception:
values=[]
for value in t[1:]:
try: values.append(float(value))
except Exception:pass
try:
if max(values)>inputMax: inputMax = max(values)
except Exception:
pass
if inputMax>100: ### Thus, not log values
platform = 'RNASeq'
else:
platform = "3'array"
return platform
def optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=[]):
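    ### Strategy: keep genes that differ by at least 'fold' between the top and bottom 'samplesDiffering' samples, then
    ### recursively tighten or relax fold/samplesDiffering until roughly 50-14,000 genes remain (with safeguards that stop
    ### adjusting once fold reaches 1.2 with a single differing sample)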
firstLine=True
expressed_values={}
for line in open(expFile,'rU').xreadlines():
key = string.split(line,'\t')[0]
t = string.split(line,'\t')
if firstLine:
headers = line
firstLine = False
else:
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
try: values = map(lambda x: float(x), t[1:])
except Exception:
values = t[1:]
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: float(x), values)
else:
values=[]
for value in t[1:]:
try: values.append(float(value))
except Exception: values.append(-9999)
values = numpy.ma.masked_values(values, -9999.)
#gene = string.split(uid,':')[0]
#if uid == 'ENSMUSG00000041515': print 'IRF8'
if uid in expressed_uids:
#slope_exp_ratio = determinePattern(vs)
#if slope_exp_ratio<2 and slope_exp_ratio>0.5:
if platform == 'RNASeq':
try: values = map(lambda x: math.log(x+1,2),values)
except Exception:
if 'NA' in values:
values = [0 if x=='NA' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
elif '' in values:
values = [0 if x=='' else x for x in values] ### Replace NAs
values = map(lambda x: math.log(x+1,2),values)
vs = list(values); vs.sort()
                    if (vs[-1*samplesDiffering]-vs[samplesDiffering-1])>math.log(fold,2): ### Ensures that at least 4 samples are significantly different in the set
expressed_values[uid] = values
else:
vs = list(values); vs.sort()
                    if (vs[-1*samplesDiffering]-vs[samplesDiffering-1])>math.log(fold,2): ### Ensures that at least 4 samples are significantly different in the set
expressed_values[uid] = values
if uid in guideGenes:
expressed_values[uid] = values
#if uid == 'ENSMUSG00000062825': print (vs[-1*samplesDiffering]-vs[samplesDiffering]),math.log(fold,2);sys.exit()
print len(expressed_uids),'genes examined and', len(expressed_values),'genes expressed for a fold cutoff of', fold
if len(expressed_uids)==0 or len(expressed_values)==0:
print options_result_in_no_genes
elif len(expressed_uids) < 50 and len(expressed_values)>0:
return expressed_values, fold, samplesDiffering, headers
elif len(expressed_values)>14000:
if platform == 'exons':
fold+=0.1
else:
fold+=1
samplesDiffering+=1
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guideGenes)
elif fold == 1.2 and samplesDiffering == 1:
return expressed_values, fold, samplesDiffering, headers
elif len(expressed_values)<50:
fold-=0.2
samplesDiffering-=1
if samplesDiffering<1: samplesDiffering = 1
if fold < 1.1: fold = 1.2
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guideGenes)
else:
return expressed_values, fold, samplesDiffering, headers
return expressed_values, fold, samplesDiffering, headers
def intraCorrelation(expressed_values,mlp):
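    ### Splits the expressed_values dictionary into roughly equal chunks (one per worker) and correlates each chunk against the
    ### full gene set in parallel; mlp is expected to be the multiprocessing module passed in by the caller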
if mlp.cpu_count() < 3:
processors = mlp.cpu_count()
else: processors = 8
pool = mlp.Pool(processes=processors)
si = (len(expressed_values)/processors)
s = si; b=0
db_ls=[]
    if len(expressed_values)<10: forceError ### si would be zero here, causing an infinite loop below
while s<len(expressed_values):
db_ls.append(dict(expressed_values.items()[b:s]))
b+=si; s+=si
db_ls.append(dict(expressed_values.items()[b:s]))
    ### Create an instance of MultiCorrelatePatterns (store the variables to save memory)
workerMulti = MultiCorrelatePatterns(expressed_values)
results = pool.map(workerMulti,db_ls)
#for i in db_ls: workerMulti(i)
pool.close(); pool.join(); pool = None
correlated_genes={}
for a in results:
for k in a: correlated_genes[k] = a[k]
return correlated_genes
def findCommonExpressionProfiles(expFile,species,platform,expressed_uids,guide_genes,mlp,fold=2,samplesDiffering=2,parameters=None,reportOnly=False):
use_CV=False
row_metric = 'correlation'; row_method = 'average'
column_metric = 'cosine'; column_method = 'hopach'
original_column_metric = column_metric
original_column_method = column_method
color_gradient = 'yellow_black_blue'; transpose = False; graphic_links=[]
if parameters != None:
fold = parameters.FoldDiff()
samplesDiffering = parameters.SamplesDiffering()
amplifyGenes = parameters.amplifyGenes()
if 'Guide' in parameters.GeneSelection():
amplifyGenes = False ### This occurs when running ICGS with the BOTH option, in which Guide3 genes are retained - ignore these
parameters.setGeneSelection('')
parameters.setClusterGOElite('')
excludeCellCycle = parameters.ExcludeCellCycle()
import clustering
row_metric = 'correlation'; row_method = 'average'
column_metric = parameters.ColumnMetric(); column_method = parameters.ColumnMethod()
original_column_metric = column_metric
original_column_method = column_method
color_gradient = 'yellow_black_blue'; graphic_links=[]
if platform == 'exons': color_gradient = 'yellow_black_blue'
guide_genes = parameters.JustShowTheseIDs()
cell_cycle_id_list = []
else:
amplifyGenes = False
excludeCellCycle = False
if platform != 'exons':
platform = checkExpressionFileFormat(expFile,platform)
else:
if LegacyMode: pass
else:
fold = math.pow(2,0.5)
fold = 1.25
#"""
if use_CV:
expressed_values, fold, samplesDiffering, headers = CoeffVar(expFile,platform,expressed_uids,fold=2,samplesDiffering=2,guideGenes=guide_genes)
else:
print 'Finding an optimal number of genes based on differing thresholds to include for clustering...'
#fold=1; samplesDiffering=1
expressed_values, fold, samplesDiffering, headers = optimizeNumberOfGenesForDiscovery(expFile,platform,expressed_uids,fold=fold,samplesDiffering=samplesDiffering,guideGenes=guide_genes) #fold=2,samplesDiffering=2
print 'Evaluating',len(expressed_values),'genes, differentially expressed',fold,'fold for at least',samplesDiffering*2,'samples'
#sys.exit()
import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol_db)
areYouSure=False
if (excludeCellCycle == 'strict' or excludeCellCycle == True) and areYouSure:
cc_param = copy.deepcopy(parameters)
cc_param.setPathwaySelect('cell cycle')
cc_param.setGeneSet('GeneOntology')
cc_param.setGeneSelection('amplify')
transpose = cc_param
filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
writeFilteredFile(filtered_file,platform,headers,{},expressed_values,[])
if len(expressed_values)<1000:
row_method = 'hopach'; row_metric = 'correlation'
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
if len(headers)>7000: ### For very ultra-large datasets
column_method = 'average'
cc_graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cell_cycle_id_list = genericRowIDImport(string.replace(cc_graphic_links[0][-1],'.png','.txt'))
expressed_values2 = {}
for id in expressed_values:
try: symbolID = gene_to_symbol_db[id][0]
except Exception: symbolID = id
if id not in cell_cycle_id_list and symbolID not in cell_cycle_id_list:
expressed_values2[id]=expressed_values[id]
print len(expressed_values)-len(expressed_values2),'cell-cycle associated genes removed for cluster discovery'
expressed_values = expressed_values2
print 'amplifyGenes:',amplifyGenes
### Write out filtered list to amplify and to filtered.YourExperiment.txt
filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
groups_file = string.replace(expFile,'exp.','groups.')
groups_filtered_file = string.replace(filtered_file,'exp.','groups.')
groups_file = string.replace(groups_file,'-steady-state','')
groups_filtered_file = string.replace(groups_filtered_file,'-steady-state','')
try: export.customFileCopy(groups_file,groups_filtered_file) ### if present copy over
except Exception: pass
writeFilteredFile(filtered_file,platform,headers,{},expressed_values,[])
filtered_file_new = string.replace(expFile,'exp.','filteredExp.')
try: export.customFileCopy(filtered_file,filtered_file_new) ### if present copy over
except Exception: pass
if reportOnly:
print_out = '%d genes, differentially expressed %d fold for at least %d samples' % (len(expressed_values), fold, samplesDiffering*2)
return print_out
if len(expressed_values)<1400 and column_method == 'hopach':
row_method = 'hopach'; row_metric = 'correlation'
else:
row_method = 'weighted'; row_metric = 'cosine'
if amplifyGenes:
transpose = parameters
try:
if len(parameters.GeneSelection())>0:
parameters.setGeneSelection(parameters.GeneSelection()+' amplify')
print 'Finding correlated genes to the input geneset(s)...'
else:
print 'Finding intra-correlated genes from the input geneset(s)...'
parameters.setGeneSelection(parameters.GeneSelection()+' IntraCorrelatedOnly amplify')
except Exception:
parameters.setGeneSelection(parameters.GeneSelection()+' IntraCorrelatedOnly amplify')
print 'Finding intra-correlated genes from the input geneset(s)...'
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
#return graphic_links
import clustering
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(graphic_links[-1][-1][:-4]+'.txt')
headers = ['UID']+column_header
expressed_values2={}
for i in row_header: ### Filter the expressed values for the intra-correlated queried gene set and replace
try: expressed_values2[i]=expressed_values[i]
except Exception:
try:
e = symbol_to_gene[i][0]
expressed_values2[e]=expressed_values[e]
except Exception:
pass
expressed_values = expressed_values2
print 'Looking for common gene expression profiles for class assignment...',
begin_time = time.time()
useNumpyCorr=True
negative_rho = rho_cutoff*-1
#results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
#eo = export.ExportFile(results_file[:-4]+'-genes.txt')
if useNumpyCorr:
row_ids=[]
x = []
for id in expressed_values:
row_ids.append(id)
x.append(expressed_values[id])
#if id== 'Bcl2l11': print expressed_values[id];sys.exit()
D1 = numpy.corrcoef(x)
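        ### D1 is a symmetric genes x genes matrix of Pearson correlation coefficients; row/column i corresponds to row_ids[i],
        ### so each score_ls below holds the correlations of one gene against every other gene (including itself, rho = 1)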
print 'initial correlations obtained'
i=0
correlated_genes={}
if 'exons' == platform or 'AltExon' == platform:
for score_ls in D1:
correlated = []
geneID = row_ids[i]
refgene = string.split(geneID,':')[0]
k=0
for v in score_ls:
if v>rho_cutoff:# or v<negative_rho:
if refgene not in row_ids[k]:
correlated.append((v,row_ids[k]))
k+=1
correlated.sort()
if LegacyMode == False:
correlated.reverse()
correlated = map(lambda x:x[1],correlated)
correlated_genes[geneID] = correlated
i+=1
else:
for score_ls in D1:
correlated = []
geneID = row_ids[i]
k=0; temp=[]
for v in score_ls:
if v>rho_cutoff:# or v<negative_rho:
#scores.append((v,row_ids[k]))
correlated.append((v,row_ids[k]))
#temp.append((geneID,row_ids[k],str(v)))
k+=1
correlated.sort()
if LegacyMode == False:
correlated.reverse()
correlated = map(lambda x:x[1],correlated)
if len(correlated)>0:
correlated_genes[geneID] = correlated
#for (a,b,c) in temp: eo.write(a+'\t'+b+'\t'+c+'\n')
i+=1
else:
### Find common patterns now
performAllPairwiseComparisons = True
if performAllPairwiseComparisons:
correlated_genes = intraCorrelation(expressed_values,mlp)
print len(correlated_genes), 'highly correlated genes found for downstream clustering.'
else: correlated_genes={}
atleast_10={}
if len(correlated_genes)<70: connections = 0
elif len(correlated_genes)<110: connections = 4
else: connections = 5
numb_corr=[]
for i in correlated_genes:
if len(correlated_genes[i])>connections:
numb_corr.append([len(correlated_genes[i]),i])
            atleast_10[i]=correlated_genes[i] ### if at least 10 genes are part of this pattern
x=0
for k in correlated_genes[i]:
if x<30: ### cap it at 30
atleast_10[k]=correlated_genes[k] ### add all correlated keys and values
x+=1
if len(atleast_10)<30:
print 'Initial correlated set too small, getting anything correlated'
for i in correlated_genes:
if len(correlated_genes[i])>0:
numb_corr.append([len(correlated_genes[i]),i])
                atleast_10[i]=correlated_genes[i] ### if at least 10 genes are part of this pattern
for k in correlated_genes[i]:
atleast_10[k]=correlated_genes[k] ### add all correlated keys and values
if len(atleast_10) == 0:
atleast_10 = expressed_values
#eo.close()
print len(atleast_10), 'genes correlated to multiple other members (initial filtering)'
    ### go through the list from the most linked to the least linked genes, only reporting the most linked partners
removeOutlierDrivenCorrelations=True
exclude_corr=[]
numb_corr.sort(); numb_corr.reverse()
numb_corr2=[]
#print len(numb_corr)
if removeOutlierDrivenCorrelations and samplesDiffering != 1:
for key in numb_corr: ### key gene
associations,gene = key
temp_corr_matrix_db={}; rows=[]; temp_corr_matrix=[]
gene_exp_vals = list(expressed_values[gene]) ### copy the list
max_index = gene_exp_vals.index(max(gene_exp_vals))
del gene_exp_vals[max_index]
#temp_corr_matrix.append(exp_vals); rows.append(gene)
#if 'ENSG00000016082' in correlated_genes[gene] or 'ENSG00000016082' == gene: print gene_to_symbol_db[gene],associations
if gene not in exclude_corr:
#print len(correlated_genes[gene])
for k in correlated_genes[gene]:
exp_vals = list(expressed_values[k]) ### copy the list
#print exp_vals
del exp_vals[max_index]
#temp_corr_matrix.append(exp_vals); rows.append(gene)
#print exp_vals,'\n'
temp_corr_matrix_db[k]=exp_vals
temp_corr_matrix.append(exp_vals); rows.append(gene)
correlated_hits = pearsonCorrelations(gene_exp_vals,temp_corr_matrix_db)
try: avg_corr = numpyCorrelationMatrix(temp_corr_matrix,rows,gene)
except Exception: avg_corr = 0
#if gene_to_symbol_db[gene][0] == 'ISL1' or gene_to_symbol_db[gene][0] == 'CD10' or gene_to_symbol_db[gene][0] == 'POU3F2':
if len(correlated_hits)>0:
if LegacyMode:
if (float(len(correlated_hits))+1)/len(correlated_genes[gene])<0.5 or avg_corr<rho_cutoff: ### compare to the below
pass
else:
numb_corr2.append([len(correlated_hits),gene])
else:
if (float(len(correlated_hits))+1)/len(correlated_genes[gene])<0.5 or avg_corr<(rho_cutoff-0.1):
#exclude_corr.append(key)
#if gene == 'XXX': print len(correlated_hits),len(correlated_genes[gene]), avg_corr, rho_cutoff-0.1
pass
else:
numb_corr2.append([len(correlated_hits),gene])
#print (float(len(correlated_hits))+1)/len(correlated_genes[gene]), len(correlated_genes[gene]), key
numb_corr = numb_corr2
numb_corr.sort(); numb_corr.reverse()
#print len(numb_corr)
exclude_corr={}; new_filtered_set={}
limit=0
for key in numb_corr: ### key gene
associations,gene = key
#if 'ENSG00000016082' in correlated_genes[gene] or 'ENSG00000016082' == gene: print gene_to_symbol_db[gene],associations
if gene not in exclude_corr:
for k in correlated_genes[gene]:
exclude_corr[k]=[]
new_filtered_set[k]=[]
new_filtered_set[gene]=[]
limit+=1
#print key
#if limit==1: break
atleast_10 = new_filtered_set
addMultipleDrivers=True
    if len(guide_genes)>0 and addMultipleDrivers: ### Artificially weight the correlated genes with known biological drivers
for gene in guide_genes:
y=1
while y<2:
if y==1:
try: atleast_10[gene]=expressed_values[gene]
except Exception: break
else:
try: atleast_10[gene+'-'+str(y)]=expressed_values[gene]
except Exception: break
expressed_values[gene+'-'+str(y)]=expressed_values[gene] ### Add this new ID to the database
#print gene+'-'+str(y)
y+=1
#atleast_10 = expressed_values
results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
writeFilteredFile(results_file,platform,headers,gene_to_symbol_db,expressed_values,atleast_10)
print len(atleast_10),'final correlated genes'
end_time = time.time()
print 'Initial clustering completed in',int(end_time-begin_time),'seconds'
results_file = string.replace(expFile[:-4]+'-CORRELATED-FEATURES.txt','exp.','/SamplePrediction/')
if len(atleast_10)<1200 and column_method == 'hopach':
row_method = 'hopach'; row_metric = 'correlation'
else:
if LegacyMode:
row_method = 'average'; row_metric = 'euclidean'
else:
row_method = 'weighted'; row_metric = 'cosine'
#print row_method, row_metric
correlateByArrayDirectly = False
if correlateByArrayDirectly:
import clustering
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(results_file)
new_column_header = map(lambda x: int(x[5:]),column_header)
matrix = [new_column_header]+matrix
matrix = zip(*matrix) ### transpose
exp_sample_db={}
for sample_data in matrix:
exp_sample_db[sample_data[0]] = sample_data[1:]
        correlated_arrays = intraCorrelation(exp_sample_db,mlp)
print len(correlated_arrays), 'highly correlated arrays from gene subsets.'
mimum_corr_arrays={}
for i in correlated_arrays:
if len(correlated_arrays[i])>1:
linked_lists=correlated_arrays[i]+[i]
for k in correlated_arrays[i]:
linked_lists+=correlated_arrays[k]
linked_lists = unique.unique(linked_lists)
linked_lists.sort()
# print len(linked_lists), linked_lists
else:
try:
import clustering
if platform == 'exons': color_gradient = 'yellow_black_blue'
transpose = False
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors (possibly outside of LegacyMode)
graphic_links = clustering.runHCexplicit(results_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
if len(graphic_links)==0:
graphic_links = clustering.runHCexplicit(results_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cluster_file = string.replace(graphic_links[0][1],'.png','.txt')
except Exception: pass
#exportGroupsFromClusters(cluster_file,expFile,platform)
#"""
#filtered_file = export.findParentDir(expFile)+'/amplify/'+export.findFilename(expFile)
#graphic_links = [(1,'/Users/saljh8/Desktop/Grimes/KashishNormalization/test/ExpressionInput/SamplePrediction/DataPlots/Clustering-CombinedSingleCell_March_15_2015-CORRELATED-FEATURES-hierarchical_cosine_euclidean.txt')]
try: graphic_links,new_results_file = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,graphics=graphic_links,ColumnMethod=column_method)
except Exception: print traceback.format_exc()
row_metric = 'correlation'; row_method = 'hopach'
#column_metric = 'cosine'
#if LegacyMode: column_method = 'hopach'
cellCycleRemove1=[]; cellCycleRemove2=[]
try:
newDriverGenes1, cellCycleRemove1 = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',stringency='strict',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
newDriverGenes1_str = 'Guide1 '+string.join(newDriverGenes1.keys(),' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes1_str) ### force correlation to these targetGenes
parameters.setGeneSet('None Selected') ### silence this
parameters.setPathwaySelect('None Selected')
if column_method != 'hopach': row_method = 'average' ### needed due to PC errors
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
newDriverGenes2, cellCycleRemove2 = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',stringency='strict',numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,ColumnMethod=column_method)
newDriverGenes2_str = 'Guide2 '+string.join(newDriverGenes2.keys(),' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes2_str) ### force correlation to these targetGenes
parameters.setGeneSet('None Selected') ### silence this
parameters.setPathwaySelect('None Selected')
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
newDriverGenes3 = unique.unique(newDriverGenes1.keys()+newDriverGenes2.keys())
cellCycleRemove=cellCycleRemove1+cellCycleRemove2 ### It is possible for a cell cycle guide-gene to be reported in both guide1 and 2, but only as cell cycle associated in one of them
newDriverGenes3_filtered=[]
for i in newDriverGenes3:
if not i in cellCycleRemove:
newDriverGenes3_filtered.append(i)
newDriverGenes3_str = 'Guide3 '+string.join(newDriverGenes3_filtered,' ')+' amplify positive'
parameters.setGeneSelection(newDriverGenes3_str)
try:
parameters.setClusterGOElite('BioMarkers')
"""
if species == 'Mm' or species == 'Hs' or species == 'Rn':
parameters.setClusterGOElite('BioMarkers')
else:
parameters.setClusterGOElite('GeneOntology')
"""
except Exception, e:
print e
graphic_links = clustering.runHCexplicit(filtered_file, graphic_links, row_method, row_metric, column_method, column_metric, color_gradient, parameters, display=False, Normalize=True)
except Exception:
print traceback.format_exc()
try: copyICGSfiles(expFile,graphic_links)
except Exception: pass
return graphic_links
def copyICGSfiles(expFile,graphic_links):
if 'ExpressionInput' in expFile:
root_dir = string.split(expFile,'ExpressionInput')[0]
else:
root_dir = string.split(expFile,'AltResults')[0]
import shutil
destination_folder = root_dir+'/ICGS'
try: os.mkdir(destination_folder)
except Exception: pass
for (order,png) in graphic_links:
file = export.findFilename(png)
txt = string.replace(file,'.png','.txt')
pdf = string.replace(file,'.png','.pdf')
dest_png = destination_folder+'/'+file
dest_txt = destination_folder+'/'+txt
dest_pdf = destination_folder+'/'+pdf
shutil.copy(png, dest_png)
shutil.copy(png[:-4]+'.txt', dest_txt)
shutil.copy(png[:-4]+'.pdf', dest_pdf)
def pearsonCorrelations(ref_gene_exp,exp_value_db):
correlated=[]
for gene in exp_value_db:
rho,p = stats.pearsonr(ref_gene_exp,exp_value_db[gene])
if rho>rho_cutoff or rho<(rho_cutoff*-1):
if rho!= 1:
correlated.append(gene)
#print len(exp_value_db),len(correlated);sys.exit()
return correlated
def numpyCorrelationMatrix(x,rows,gene):
D1 = numpy.corrcoef(x)
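    ### Note: this returns the average of every pairwise correlation in the block (self-correlations of 1.0 included); it is used
    ### by the outlier-removal step in findCommonExpressionProfiles as a rough measure of how coherent a correlated gene set
    ### remains after the gene's maximum-expression sample is removed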
gene_correlations={}
i=0
scores = []
for score_ls in D1:
for v in score_ls:
scores.append(v)
return numpy.average(scores)
def numpyCorrelationMatrixCount(x,rows,cutoff=0.4,geneTypeReport=None):
### Find which genes are most correlated
D1 = numpy.corrcoef(x)
gene_correlation_counts={}
i=0
for score_ls in D1:
correlated_genes=[]
geneID = rows[i]
k=0; genes_to_report=[]
for rho in score_ls:
if rho>cutoff:
correlated_genes.append(rows[k])
if rows[k] in geneTypeReport:
genes_to_report.append(rows[k])
k+=1
gene_correlation_counts[geneID]=len(correlated_genes),genes_to_report
i+=1
return gene_correlation_counts
def numpyCorrelationMatrixGene(x,rows,gene):
D1 = numpy.corrcoef(x)
gene_correlations={}
i=0
for score_ls in D1:
scores = []
geneID = rows[i]
k=0
for v in score_ls:
scores.append((v,rows[k]))
k+=1
scores.sort()
gene_correlations[geneID] = scores
i+=1
correlated_genes={}
rho_values = map(lambda (r,g): r,gene_correlations[gene])
genes = map(lambda (r,g): g,gene_correlations[gene])
s1 = bisect.bisect_right(rho_values,rho_cutoff)
s2 = bisect.bisect_left(rho_values,-1*rho_cutoff)
correlated = genes[:s2] ### for the right bisect, remove self correlations with -1
correlated = genes[s1:] ### for the left bisect, remove self correlations with -1
#print len(rows), len(correlated);sys.exit()
return len(correlated)/len(rows)
def numpyCorrelationMatrixGeneAlt(x,rows,genes,gene_to_symbol,rho_cutoff):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=RuntimeWarning) ### hides import warnings
D1 = numpy.ma.corrcoef(x)
i=0
gene_correlations={}
for score_ls in D1:
scores = []
try: symbol = gene_to_symbol[rows[i]][0]
except Exception: symbol = '$'
if rows[i] in genes or symbol in genes:
k=0
for v in score_ls:
if str(v)!='nan':
if v > rho_cutoff:
uid = rows[k]
if uid in gene_to_symbol: uid = gene_to_symbol[uid][0]
scores.append((v,uid))
k+=1
scores.sort()
scores.reverse()
scores = map(lambda x: x[1], scores[:140]) ### grab the top 140 correlated gene symbols only
if len(symbol)==1: symbol = rows[i]
gene_correlations[symbol] = scores
i+=1
return gene_correlations
def genericRowIDImport(filename):
id_list=[]
for line in open(filename,'rU').xreadlines():
uid = string.split(line,'\t')[0]
if ' ' in uid:
for id in string.split(uid,' '):
id_list.append(id)
else:
id_list.append(uid)
return id_list
def writeFilteredFile(results_file,platform,headers,gene_to_symbol_db,expressed_values,atleast_10,excludeGenes=[]):
eo = export.ExportFile(results_file)
try: headers = string.replace(headers,'row_clusters-flat','UID')
except Exception:
headers = string.join(headers,'\t')+'\n'
headers = string.replace(headers,'row_clusters-flat','UID')
eo.write(headers)
keep=[]; sort_genes=False
e=0
if len(atleast_10)==0:
atleast_10 = expressed_values
sort_genes = True
for i in atleast_10:
if i in gene_to_symbol_db:
symbol = gene_to_symbol_db[i][0]
else: symbol = i
if i not in excludeGenes and symbol not in excludeGenes:
if i not in keep:
keep.append((symbol,i))
if sort_genes:
keep.sort(); keep.reverse()
for (symbol,i) in keep:
"""
if platform == 'RNASeq':
values = map(lambda x: logTransform(x), expressed_values[i])
else:
"""
values = map(str,expressed_values[i])
eo.write(string.join([symbol]+values,'\t')+'\n')
e+=1
eo.close()
def remoteGetDriverGenes(Species,platform,results_file,numSamplesClustered=3,excludeCellCycle=False,ColumnMethod='hopach'):
global species
species = Species
guideGenes, cellCycleRemove = correlateClusteredGenes(platform,results_file,stringency='strict',excludeCellCycle=excludeCellCycle,ColumnMethod=ColumnMethod)
guideGenes = string.join(guideGenes.keys(),' ')+' amplify positive'
return guideGenes
def correlateClusteredGenes(platform,results_file,stringency='medium',numSamplesClustered=3,
excludeCellCycle=False,graphics=[],ColumnMethod='hopach',rhoCuttOff=0.2, transpose=False,
includeMoreCells=False):
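    ### Two modes: 'medium' scans the clustered matrix at several rho/complexity thresholds, writes a '-filtered.txt' matrix and
    ### re-clusters it, whereas 'strict' uses tighter thresholds and returns one representative guide (driver) gene per correlated
    ### block, plus any cell-cycle-associated guides flagged for removal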
if numSamplesClustered<1: numSamplesClustered=1
    ### Get all highly variable but low complexity differences, typically one or two samples that are really different
if stringency == 'medium':
new_results_file = string.replace(results_file,'.txt','-filtered.txt')
new_results_file = string.replace(new_results_file,'.cdt','-filtered.txt')
eo = export.ExportFile(new_results_file)
medVarHighComplexity=[]; medVarLowComplexity=[]; highVarHighComplexity=[]; highVarLowComplexity=[]
if transpose==False or includeMoreCells:
medVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=3,hits_to_report=6,transpose=transpose)
medVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=3,hits_to_report=6,transpose=transpose) #hits_cutoff=6
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=1,hits_to_report=4,transpose=transpose)
highVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.2,hits_cutoff=1,hits_to_report=6,filter=True,numSamplesClustered=numSamplesClustered,transpose=transpose)
else:
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=1,hits_to_report=4,transpose=transpose)
#combined_results = dict(medVarLowComplexity.items() + medVarLowComplexity.items() + highVarLowComplexity.items() + highVarHighComplexity.items())
combined_results={}
for i in medVarLowComplexity: combined_results[i]=[]
for i in medVarHighComplexity: combined_results[i]=[]
for i in highVarLowComplexity: combined_results[i]=[]
for i in highVarHighComplexity: combined_results[i]=[]
#combined_results = highVarHighComplexity
if stringency == 'strict':
medVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
medVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=4,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered) #hits_cutoff=6
highVarLowComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.5,hits_cutoff=3,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
highVarHighComplexity, column_header = correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=3,hits_to_report=50,filter=True,numSamplesClustered=numSamplesClustered)
#combined_results = dict(medVarLowComplexity.items() + medVarLowComplexity.items() + highVarLowComplexity.items() + highVarHighComplexity.items())
combined_results={}
for i in medVarLowComplexity: combined_results[i]=[]
for i in medVarHighComplexity: combined_results[i]=[]
for i in highVarLowComplexity: combined_results[i]=[]
for i in highVarHighComplexity: combined_results[i]=[]
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCuttOff,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle)
if guideGenes == 'TooFewBlocks':
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCuttOff+0.1,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle)
if guideGenes == 'TooFewBlocks':
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=rhoCuttOff+0.2,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle,forceOutput=True)
if len(guideGenes)>200:
print 'Too many drivers (>200)... performing more stringent filtering...'
guideGenes, addition_cell_cycle_associated = correlateClusteredGenesParameters(results_file,rho_cutoff=0.1,hits_cutoff=0,hits_to_report=1,geneFilter=combined_results,excludeCellCycle=excludeCellCycle,restrictTFs=True)
return guideGenes, addition_cell_cycle_associated
#B4galt6, Prom1
for tuple_ls in combined_results:
data_length = len(tuple_ls);break
if data_length == len(column_header):
eo.write(string.join(column_header,'\t')+'\n')
else:
eo.write(string.join(['UID']+column_header,'\t')+'\n')
#combined_results = highVarHighComplexity
for tuple_ls in combined_results:
eo.write(string.join(list(tuple_ls),'\t')+'\n')
eo.close()
cluster = True
if cluster == True and transpose==False:
import clustering
if ColumnMethod == 'hopach':
row_method = 'hopach'
column_method = 'hopach'
else:
column_method = ColumnMethod
row_method = 'average'
row_metric = 'correlation'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
if platform == 'exons': color_gradient = 'yellow_black_blue'
transpose = False
try:
len(guide_genes)
except Exception:
guide_genes = []
graphics = clustering.runHCexplicit(new_results_file, graphics, row_method, row_metric, column_method, column_metric, color_gradient, transpose, display=False, Normalize=True, JustShowTheseIDs=guide_genes)
cluster_file = string.replace(graphics[0][1],'.png','.txt')
#exportGroupsFromClusters(cluster_file,expFile,platform)
return graphics, new_results_file
def correlateClusteredGenesParameters(results_file,rho_cutoff=0.3,hits_cutoff=4,hits_to_report=5,
filter=False,geneFilter=None,numSamplesClustered=3,excludeCellCycle=False,restrictTFs=False,
forceOutput=False,transpose=False):
import clustering
addition_cell_cycle_associated=[]
if geneFilter != None:
geneFilter_db={}
for i in geneFilter:
geneFilter_db[i[0]]=[]
geneFilter=geneFilter_db
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(results_file,geneFilter=geneFilter)
if transpose: ### If performing reduce cluster heterogeneity on cells rather than on genes
#print 'Transposing matrix'
        matrix = map(numpy.array, zip(*matrix)) ### converts the transposed tuples to numpy arrays
column_header, row_header = row_header, column_header
Platform = None
for i in row_header:
if 'ENS' in i and '-' in i and ':' in i: Platform = 'exons'
if hits_to_report == 1:
### Select the best gene using correlation counts and TFs
try:
import OBO_import; import ExpressionBuilder
gene_to_symbol_db = ExpressionBuilder.importGeneAnnotations(species)
symbol_to_gene = OBO_import.swapKeyValues(gene_to_symbol_db)
TFs = importGeneSets('Biotypes',filterType='transcription regulator',geneAnnotations=gene_to_symbol_db)
if excludeCellCycle == True or excludeCellCycle == 'strict':
cell_cycle = importGeneSets('KEGG',filterType='Cell cycle:',geneAnnotations=gene_to_symbol_db)
cell_cycle_go = importGeneSets('GeneOntology',filterType='GO:0022402',geneAnnotations=gene_to_symbol_db)
for i in cell_cycle_go:
cell_cycle[i]=[]
print len(cell_cycle),'cell cycle genes being considered.'
else:
cell_cycle={}
except Exception:
symbol_to_gene={}; TFs={}; cell_cycle={}
gene_corr_counts = numpyCorrelationMatrixCount(matrix,row_header,cutoff=0.4,geneTypeReport=TFs)
#try: column_header = map(lambda x: string.split(x,':')[1],column_header[1:])
#except Exception: column_header = column_header[1:]
i=0
block=0
block_db={}
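    ### Walk down the clustered rows and group consecutive genes into 'blocks': a row joins the current block when its Pearson rho
    ### with the previous row exceeds rho_cutoff (rho > 0.95 is considered redundant and the row is skipped); otherwise a new block begins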
for row in matrix:
if i!=0:
rho,p = stats.pearsonr(row,matrix[i-1]) ### correlate to the last ordered row
#if row_header[i] == 'Pax6': print [block],row_header[i-1],rho,rho_cutoff
"""
try:
if row_header[i] in guide_genes: print row_header[i], rho
if row_header[i-1] in guide_genes: print row_header[i-1], rho
if row_header[i+1] in guide_genes: print row_header[i+1], rho
except Exception:
pass
"""
#if hits_to_report == 1: print [block],row_header[i], row_header[i-1],rho,rho_cutoff
#print rho
if rho>0.95:
pass ### don't store this
elif rho>rho_cutoff:
try:
block_db[block].append(i) ### store the row index
except Exception:
block_db[block] = [i] ### store the row index
else:
block+=1
block_db[block] = [i] ### store the row index
else:
block_db[block] = [i] ### store the row index
i+=1
if hits_to_report == 1:
if len(block_db)<4 and forceOutput==False:
return 'TooFewBlocks', None
guideGenes={}
### Select the top TFs or non-TFs with the most gene correlations
for b in block_db:
corr_counts_gene = []; cell_cycle_count=[]
#print len(block_db), b, map(lambda i: row_header[i],block_db[b])
for (gene,i) in map(lambda i: (row_header[i],i),block_db[b]):
corr_counts_gene.append((len(gene_corr_counts[gene][1]),gene_corr_counts[gene][0],gene))
if gene in cell_cycle:
cell_cycle_count.append(gene)
corr_counts_gene.sort(); tfs=[]
#print b, corr_counts_gene, '***',len(cell_cycle_count)
if (len(cell_cycle_count)>1) or (len(corr_counts_gene)<4 and (len(cell_cycle_count)>0)): pass
else:
tf_count=0
for (r,t, gene) in corr_counts_gene:
if gene in TFs:
if gene not in cell_cycle:
if restrictTFs==True and tf_count==0: pass
else:
guideGenes[gene]=[]
tf_count+=1
if len(tfs)==0:
gene = corr_counts_gene[-1][-1]
if gene in cell_cycle and LegacyMode: pass
else:
guideGenes[gene]=[]
#block_db[b]= [corr_counts_gene[-1][-1]] ### save just the selected gene indexes
### Additional filter to remove guides that will bring in cell cycle genes (the more guides the more likely)
if excludeCellCycle == 'strict':
#print 'guides',len(guideGenes)
guideCorrelated = numpyCorrelationMatrixGeneAlt(matrix,row_header,guideGenes,gene_to_symbol_db,rho_cutoff)
guideGenes={}
for gene in guideCorrelated:
cell_cycle_count=[]
for corr_gene in guideCorrelated[gene]:
if corr_gene in cell_cycle: cell_cycle_count.append(corr_gene)
#print gene, len(cell_cycle_count),len(guideCorrelated[gene])
if (float(len(cell_cycle_count))/len(guideCorrelated[gene]))>.15 or (len(guideCorrelated[gene])<4 and (len(cell_cycle_count)>0)):
print gene, cell_cycle_count
addition_cell_cycle_associated.append(gene)
pass
else:
guideGenes[gene]=[]
print 'additional Cell Cycle guide genes removed:',addition_cell_cycle_associated
print len(guideGenes), 'novel guide genes discovered:', guideGenes.keys()
return guideGenes,addition_cell_cycle_associated
def greaterThan(x,results_file,numSamplesClustered):
if 'alt_junctions' not in results_file and Platform == None:
if x>(numSamplesClustered-1): return 1
else: return 0
else:
return 1
max_block_size=0
### Sometimes the hits_cutoff is too stringent so take the largest size instead
for block in block_db:
indexes = len(block_db[block])
if indexes>max_block_size: max_block_size=indexes
max_block_size-=1
retained_ids={}; final_rows = {}
for block in block_db:
indexes = block_db[block]
#print [block], len(indexes),hits_cutoff,max_block_size
if len(indexes)>hits_cutoff or len(indexes)>max_block_size: ###Increasing this helps get rid of homogenous clusters of little significance
#if statistics.avg(matrix[indexes[0]][1:]) < -2: print statistics.avg(matrix[indexes[0]][1:]), len(indexes)
gene_names = map(lambda i: row_header[i], indexes)
#if 'Pax6' in gene_names or 'WNT8A' in gene_names: print '******',hits_to_report, gene_names
indexes = indexes[:hits_to_report]
if filter:
new_indexes = []
for index in indexes:
vs = list(matrix[index])
a = map(lambda x: greaterThan(x,results_file,numSamplesClustered),vs)
b=[1]*numSamplesClustered
c = [(i, i+len(b)) for i in range(len(a)) if a[i:i+len(b)] == b]
if len(c)>0: #http://stackoverflow.com/questions/10459493/find-indexes-of-sequence-in-list-in-python
new_indexes.append(index)
"""
vs.sort()
try:
if abs(vs[-5]-vs[5])>6: new_indexes.append(index)
except Exception:
if abs(vs[-1]-vs[1])>6: new_indexes.append(index)"""
indexes = new_indexes
#if block == 1: print map(lambda i:row_header[i],indexes)
#print indexes;sys.exit()
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
for i in indexes:
retained_ids[row_header[i]]=[]
if len(final_rows)==0:
for block in block_db:
indexes = block_db[block]
if len(indexes)>hits_cutoff or len(indexes)>max_block_size:
indexes = indexes[:hits_to_report]
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
if len(final_rows)==0:
for block in block_db:
indexes = block_db[block]
for ls in map(lambda i: [row_header[i]]+map(str,(matrix[i])), indexes):
final_rows[tuple(ls)]=[]
#print 'block length:',len(block_db), 'genes retained:',len(retained_ids)
return final_rows, column_header
def exportGroupsFromClusters(cluster_file,expFile,platform):
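    ### Reads the sample names (row 1) and cluster assignments (row 2) from a clustered heatmap text file and
    ### writes matching AltAnalyze groups. and comps. files alongside the expression file.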
lineNum=1
for line in open(cluster_file,'rU').xreadlines():
line = line[:-1]
t = string.split(line,'\t')
if lineNum==1: names = t[2:]; lineNum+=1
elif lineNum==2: clusters = t[2:]; lineNum+=1
else: break
unique_clusters=[] ### Export groups
out_obj = export.ExportFile(string.replace(expFile,'exp.','groups.'))
for name in names:
cluster = clusters[names.index(name)]
if platform == 'RNASeq':
if 'junction_quantification' not in name and '.bed' not in name:
name = name+'.bed'
elif 'junction_quantification.txt' not in name and '.txt' not in name and '.bed' not in name:
name = name+'.txt'
if ':' in name:
name = string.split(name,':')[1]
out_obj.write(name+'\t'+cluster+'\t'+cluster+'\n')
if cluster not in unique_clusters: unique_clusters.append(cluster)
out_obj.close()
comps=[] #Export comps
out_obj = export.ExportFile(string.replace(expFile,'exp.','comps.'))
for c1 in unique_clusters:
for c2 in unique_clusters:
temp=[int(c2),int(c1)]; temp.sort(); temp.reverse()
if c1 != c2 and temp not in comps:
out_obj.write(str(temp[0])+'\t'+str(temp[1])+'\n')
comps.append(temp)
out_obj.close()
def logTransform(value):
try: v = math.log(value,2)
except Exception: v = math.log(0.001,2)
return str(v)
class MultiCorrelatePatterns():
def __init__(self,expressed_values):
self.expressed_values = expressed_values
def __call__(self,features_to_correlate):
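        ### Invoked (e.g. via a multiprocessing pool) on a chunk of guide features; returns a dictionary of
        ### features whose expression correlates with each guide beyond the module-level rho_cutoff.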
from scipy import stats
correlated_genes={}
for uid in features_to_correlate:
ref_values = self.expressed_values[uid]
for uid2 in self.expressed_values:
values = self.expressed_values[uid2]
rho,p = stats.pearsonr(values,ref_values)
if rho>rho_cutoff or rho<-1*rho_cutoff:
if uid!=uid2 and rho != 1.0:
try: correlated_genes[uid].append(uid2)
                        except Exception: correlated_genes[uid] = [uid2]
return correlated_genes
def parseCountFile(fn,parseFeature,search_exon_db):
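    ### Scans a junction/exon counts file for novel features (IDs containing '_'). With parseFeature=='exons' it
    ### records the opposite boundary coordinate of each novel intronic exon per gene; otherwise it maps novel
    ### intron-containing junctions back to those exon IDs via the coordinates stored in search_exon_db.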
novel_exon_db={}; firstLine=True
unique_genes={}
for line in open(fn,'rU').xreadlines():
key = string.split(line,'\t')[0]
#t = string.split(line,'\t')
if firstLine: firstLine = False
else:
#uid, coordinates = string.split(key,'=')
#values = map(lambda x: float(x), t[1:])
#gene = string.split(uid,':')[0]
#if max(values)>5: unique_genes[gene] = []
if '_' in key: ### Only look at novel exons
#ENSG00000112695:I2.1_75953139=chr6:75953139-75953254
uid, coordinates = string.split(key,'=')
gene = string.split(uid,':')[0]
if parseFeature == 'exons':
if '-' not in uid:
chr,coordinates = string.split(coordinates,':') ### Exclude the chromosome
coord1,coord2 = string.split(coordinates,'-')
intron = string.split(uid,'_')[0]
intron = string.split(intron,':')[1]
first = intron+'_'+coord1
second = intron+'_'+coord2
proceed = True
if first in uid: search_uid = second ### if the first ID is already the one looked for, store the second with the exon ID
elif second in uid: search_uid = first
else:
proceed = False
#print uid, first, second; sys.exit()
#example: ENSG00000160785:E2.15_156170151;E2.16_156170178=chr1:156170151-156170178
if proceed:
try: novel_exon_db[gene].append((uid,search_uid))
except Exception: novel_exon_db[gene] = [(uid,search_uid)]
elif '-' in uid and 'I' in uid: ### get junctions
if gene in search_exon_db:
for (u,search_uid) in search_exon_db[gene]:
#if gene == 'ENSG00000137076': print u,search_uid,uid
if search_uid in uid:
novel_exon_db[uid] = u ### Relate the currently examined novel exon ID to the junction not current associated
#if gene == 'ENSG00000137076': print u, uid
#print uid;sys.exit()
#print len(unique_genes); sys.exit()
return novel_exon_db
def getJunctionType(species,fn):
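    ### Loads the species' Ensembl junction annotation file and returns a dictionary of junction ID -> junction
    ### type (e.g. 'novel', 'trans-splicing'), printing a summary of how many junctions fall into each type.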
root_dir = string.split(fn,'ExpressionInput')[0]
fn = filepath(root_dir+'AltDatabase/'+species+'/RNASeq/'+species + '_Ensembl_junctions.txt')
firstLine=True
junction_type_db={}; type_db={}
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
id=t[0]; junction_type = t[8]
if '-' in id:
if 'trans-splicing' in line:
junction_type = 'trans-splicing'
junction_type_db[id] = junction_type
try: type_db[junction_type]+=1
except Exception: type_db[junction_type]=1
print 'Breakdown of event types'
for type in type_db:
print type, type_db[type]
return junction_type_db
def maxCount(ls):
c=0
for i in ls:
if i>0.5: c+=1
return c
def getHighExpNovelExons(species,fn):
""" Idea - if the ranking of exons based on expression changes from one condition to another, alternative splicing is occuring """
junction_type_db = getJunctionType(species,fn)
### Possible issue detected with novel exon reads: ['ENSG00000121577'] ['119364543'] cardiac
exon_max_exp_db={}; uid_key_db={}; firstLine=True
novel_intronic_junctions = {}
novel_intronic_exons = {}
cutoff = 0.2
read_threshold = 0.5
expressed_junction_types={}
features_to_export={}
exon_coord_db={}
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
if firstLine: firstLine = False
else:
key=t[0]
#ENSG00000112695:I2.1_75953139=chr6:75953139-75953254
try: uid, coordinates = string.split(key,'=')
except Exception: uid = key
gene = string.split(uid,':')[0]
values = map(lambda x: float(x), t[1:])
max_read_counts = max(values)
try: exon_max_exp_db[gene].append((max_read_counts,uid))
except Exception: exon_max_exp_db[gene] = [(max_read_counts,uid)]
uid_key_db[uid] = key ### retain the coordinate info
if '-' in uid and (':E' in uid or '-E' in uid):
junction_type = junction_type_db[uid]
if max_read_counts>read_threshold:
samples_expressed = maxCount(values)
if samples_expressed>2:
try: expressed_junction_types[junction_type]+=1
except Exception: expressed_junction_types[junction_type]=1
                        if junction_type == 'trans-splicing' and '_' not in uid:
try: expressed_junction_types['known transplicing']+=1
except Exception: expressed_junction_types['known transplicing']=1
elif junction_type == 'novel' and '_' not in uid:
try: expressed_junction_types['novel but known sites']+=1
except Exception: expressed_junction_types['novel but known sites']=1
elif junction_type == 'novel' and 'I' not in uid:
try: expressed_junction_types['novel but within 50nt of a known sites']+=1
except Exception: expressed_junction_types['novel but within 50nt of a known sites']=1
elif 'I' in uid and '_' in uid and junction_type!='trans-splicing':
#print uid;sys.exit()
try: expressed_junction_types['novel intronic junctions']+=1
except Exception: expressed_junction_types['novel intronic junctions']=1
coord = string.split(uid,'_')[-1]
if '-' in coord:
coord = string.split(coord,'-')[0]
                            try: novel_intronic_junctions[gene].append(coord)
                            except Exception: novel_intronic_junctions[gene]=[coord]
elif ('I' in uid or 'U' in uid) and '_' in uid and max_read_counts>read_threshold:
if '-' not in uid:
samples_expressed = maxCount(values)
if samples_expressed>2:
try: expressed_junction_types['novel intronic exon']+=1
except Exception: expressed_junction_types['novel intronic exon']=1
coord = string.split(uid,'_')[-1]
#print uid, coord;sys.exit()
#if 'ENSG00000269897' in uid: print [gene,coord]
try: novel_intronic_exons[gene].append(coord)
except Exception: novel_intronic_exons[gene]=[coord]
exon_coord_db[gene,coord]=uid
print 'Expressed (count>%s for at least 3 samples) junctions' % read_threshold
for junction_type in expressed_junction_types:
print junction_type, expressed_junction_types[junction_type]
expressed_junction_types={}
#print len(novel_intronic_junctions)
#print len(novel_intronic_exons)
for gene in novel_intronic_junctions:
if gene in novel_intronic_exons:
for coord in novel_intronic_junctions[gene]:
if coord in novel_intronic_exons[gene]:
try: expressed_junction_types['confirmed novel intronic exons']+=1
except Exception: expressed_junction_types['confirmed novel intronic exons']=1
uid = exon_coord_db[gene,coord]
features_to_export[uid]=[]
#else: print [gene], novel_intronic_junctions[gene]; sys.exit()
for junction_type in expressed_junction_types:
print junction_type, expressed_junction_types[junction_type]
out_file = string.replace(fn,'.txt','-highExp.txt')
print 'Exporting the highest expressed exons to:', out_file
out_obj = export.ExportFile(out_file)
### Compare the relative expression of junctions and exons separately for each gene (junctions are more comparable)
for gene in exon_max_exp_db:
junction_set=[]; exon_set=[]; junction_exp=[]; exon_exp=[]
exon_max_exp_db[gene].sort()
exon_max_exp_db[gene].reverse()
for (exp,uid) in exon_max_exp_db[gene]:
if '-' in uid: junction_set.append((exp,uid)); junction_exp.append(exp)
else: exon_set.append((exp,uid)); exon_exp.append(exp)
if len(junction_set)>0:
maxJunctionExp = junction_set[0][0]
try: lower25th,median_val,upper75th,int_qrt_range = statistics.iqr(junction_exp)
except Exception: print junction_exp;sys.exit()
if int_qrt_range>0:
maxJunctionExp = int_qrt_range
junction_percent_exp = map(lambda x: (x[1],expThreshold(x[0]/maxJunctionExp,cutoff)), junction_set)
high_exp_junctions = []
for (uid,p) in junction_percent_exp: ### ID and percentage of expression
if p!='NA':
if uid in features_to_export: ### novel exons only right now
out_obj.write(uid_key_db[uid]+'\t'+p+'\n') ### write out the original ID with coordinates
if len(exon_set)>0:
maxExonExp = exon_set[0][0]
lower25th,median_val,upper75th,int_qrt_range = statistics.iqr(exon_exp)
if int_qrt_range>0:
maxExonExp = int_qrt_range
exon_percent_exp = map(lambda x: (x[1],expThreshold(x[0]/maxExonExp,cutoff)), exon_set)
high_exp_exons = []
for (uid,p) in exon_percent_exp: ### ID and percentage of expression
if p!='NA':
if uid in features_to_export:
out_obj.write(uid_key_db[uid]+'\t'+p+'\n')
out_obj.close()
def expThreshold(ratio,cutoff):
#print [ratio,cutoff]
if ratio>cutoff: return str(ratio)
else: return 'NA'
def compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir):
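    ### Matches each summary comparison to its exon-inclusion results files, merges reciprocal-junction and
    ### exon-level evidence into per-comparison and combined evidence tables, then clusters the high-confidence
    ### alternative exons (AltExonConfirmed/AltExon) into heatmaps via ExpressionBuilder.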
results_dir = root_dir +'AltResults/AlternativeOutput/'
dir_list = read_directory(results_dir)
filtered_dir_db={}
#"""
try: novel_exon_junction_db = getNovelExonCoordinates(species,root_dir)
except Exception:
#print traceback.format_exc()
print 'No counts file found.'
novel_exon_junction_db={} ### only relevant to RNA-Seq analyses
for comparison_file in summary_results_db:
for results_file in dir_list:
if (comparison_file in results_file and '-exon-inclusion-results.txt' in results_file) and ('comparison' not in results_file):
try: filtered_dir_db[comparison_file].append(results_file)
except Exception: filtered_dir_db[comparison_file] = [results_file]
try: os.remove(string.split(results_dir,'AltResults')[0]+'AltResults/Clustering/Combined-junction-exon-evidence.txt')
except Exception: pass
for comparison_file in filtered_dir_db:
alt_result_files = filtered_dir_db[comparison_file]
#print alt_result_files, comparison_file
importAltAnalyzeExonResults(alt_result_files,novel_exon_junction_db,results_dir)
#"""
### Build combined clusters of high-confidence exons
graphics2=[]; graphics=[]
import ExpressionBuilder
try:
input_dir = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExonConfirmed/'
cluster_file, rows_in_file = ExpressionBuilder.buildAltExonClusterInputs(input_dir,species,array_type,dataType='AltExonConfirmed')
if rows_in_file > 5000: useHOPACH = False
else: useHOPACH = True
if rows_in_file < 12000:
graphics = ExpressionBuilder.exportHeatmap(cluster_file,useHOPACH=useHOPACH)
except Exception: pass
try:
input_dir = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExon/'
cluster_file, rows_in_file = ExpressionBuilder.buildAltExonClusterInputs(input_dir,species,array_type,dataType='AltExon')
if rows_in_file > 5000: useHOPACH = False
else: useHOPACH = True
if rows_in_file < 12000:
graphics2 = ExpressionBuilder.exportHeatmap(cluster_file,useHOPACH=useHOPACH)
except Exception: pass
return graphics+graphics2
class SplicingData:
def __init__(self,score,symbol,description,exonid,probesets,direction,splicing_event,external_exon,genomic_loc,gene_exp,protein_annot,domain_inferred,domain_overlap,method,dataset):
self.score = score; self.dataset = dataset
self.symbol = symbol;
self.description=description;self.exonid=exonid;self.probesets=probesets;self.direction=direction
self.splicing_event=splicing_event;self.external_exon=external_exon;self.genomic_loc=genomic_loc;
self.gene_exp=gene_exp;self.protein_annot=protein_annot;self.domain_inferred=domain_inferred
self.domain_overlap=domain_overlap;self.method=method
def Score(self): return self.score
def setScore(self,score): self.score = score
def GeneExpression(self): return self.gene_exp
def Dataset(self): return self.dataset
def Symbol(self): return self.symbol
def Description(self): return self.description
def ExonID(self): return self.exonid
def appendExonID(self,exonid): self.exonid+='|'+exonid
def Probesets(self): return self.probesets
def ProbesetDisplay(self):
if len(self.Probesets()[1])>0:
return string.join(self.Probesets(),'-')
else:
return self.Probesets()[0]
def ProbesetsSorted(self):
### Don't sort the original list
a = [self.probesets[0],self.probesets[1]]
a.sort()
return a
def Direction(self): return self.direction
def setDirection(self,direction): self.direction = direction
def SplicingEvent(self): return self.splicing_event
def ProteinAnnotation(self): return self.protein_annot
def DomainInferred(self): return self.domain_inferred
def DomainOverlap(self): return self.domain_overlap
def Method(self): return self.method
def setEvidence(self,evidence): self.evidence = evidence
def Evidence(self): return self.evidence
def GenomicLocation(self): return self.genomic_loc
def setExonExpStatus(self, exon_expressed): self.exon_expressed = exon_expressed
def ExonExpStatus(self): return self.exon_expressed
def importAltAnalyzeExonResults(dir_list,novel_exon_junction_db,results_dir):
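    ### Parses ASPIRE/linearregres and splicing-index/FIRMA results files, collapses them onto gene:exon-region
    ### IDs, reconciles scores and directions across methods, and writes the comparison-evidence, combined-evidence
    ### and GO-Elite AltExonConfirmed output files.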
regulated_critical_exons={}; converted_db={}
includeExonJunctionComps=True ### Allow ASPIRE comparisons with the inclusion feature as an exon to count for additive reciprocal evidence
print "Reading AltAnalyze results file"
root_dir = string.split(results_dir,'AltResults')[0]
for filename in dir_list:
x=0; regulated_critical_exon_temp={}
fn=filepath(results_dir+filename)
new_filename = string.join(string.split(filename,'-')[:-5],'-')
if '_vs_' in filename and '_vs_' in new_filename: export_filename = new_filename
else: export_filename = string.join(string.split(filename,'-')[:-5],'-')
export_path = results_dir+export_filename+'-comparison-evidence.txt'
try: os.remove(filepath(export_path)) ### If we don't do this, the old results get added to the new
except Exception: null=[]
if 'AltMouse' in filename:
altmouse_ensembl_db = importAltMouseEnsembl()
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0: x=1; #print t[12],t[13],t[22],t[23]
else:
converted = False ### Indicates both junction sides were regulated
geneid = t[0]; exonid = t[4]; probeset1 = t[6]; probeset2 = ''; score = t[1][:4]; symbol = t[2]; description = t[3]; regions = t[-4]; direction = t[5]
genomic_loc = t[-1]; splicing_event = t[-3]; external_exon = t[-6]; gene_exp_fold = t[-8]; protein_annot = t[14]; domain_inferred = t[15]; domain_overlap = t[17]
expressed_exon = 'NA'
if 'RNASeq' in filename: expressed_exon = 'no' ### Set by default
if ':' in geneid: geneid = string.split(geneid,':')[0] ### User reported that gene:gene was appearing and not sure exactly where or why but added this to address it
if 'FIRMA' in fn: method = 'FIRMA'
elif 'splicing-index' in fn: method = 'splicing-index'
if 'ASPIRE' in filename or 'linearregres' in filename:
f1=float(t[12]); f2=float(t[13]); probeset1 = t[8]; probeset2 = t[10]; direction = t[6]; exonid2 = t[5]; splicing_event = t[-4]
protein_annot = t[19]; domain_inferred = t[20]; domain_overlap = t[24]; method = 'linearregres'; regions = t[-5]
exon1_exp=float(t[-15]); exon2_exp=float(t[-14]); fold1=float(t[12]); fold2=float(t[13])
if fold1<0: fold1 = 1 ### don't factor in negative changes
if fold2<0: fold2 = 1 ### don't factor in negative changes
"""
if 'RNASeq' not in filename:
exon1_exp = math.pow(2,exon1_exp)
exon2_exp = math.log(2,exon2_exp)
m1 = exon1_exp*fold1
m2 = exon2_exp*fold2
max_exp = max([m1,m2])
min_exp = min([m1,m2])
percent_exon_expression = str(min_exp/max_exp)
"""
if 'ASPIRE' in filename: method = 'ASPIRE'; score = t[1][:5]
if '-' not in exonid and includeExonJunctionComps == False:
exonid=None ### Occurs when the inclusion just in an exon (possibly won't indicate confirmation so exclude)
else: exonid = exonid+' vs. '+exonid2
if 'AltMouse' in filename:
try: geneid = altmouse_ensembl_db[geneid]
except Exception: geneid = geneid
if 'RNASeq' not in filename and 'junction' not in filename: regions = string.replace(regions,'-','.')
else:
if 'RNASeq' in filename and '-' not in exonid:
fold = float(t[10]); exon_exp = float(t[18]); gene_exp = float(t[19])
if fold < 0: fold = -1.0/fold
GE_fold = float(gene_exp_fold)
if GE_fold < 0: GE_fold = -1.0/float(gene_exp_fold)
exon_psi1 = abs(exon_exp)/(abs(gene_exp))
exon_psi2 = (abs(exon_exp)*fold)/(abs(gene_exp)*GE_fold)
max_incl_exon_exp = max([exon_psi1,exon_psi2])
#if max_incl_exon_exp>0.20: expressed_exon = 'yes'
expressed_exon = max_incl_exon_exp
#if 'I2.1_75953139' in probeset1:
#print [exon_exp,gene_exp,exon_exp*fold,gene_exp*GE_fold]
#print exon_psi1, exon_psi2;sys.exit()
probesets = [probeset1,probeset2]
if (method == 'splicing-index' or method == 'FIRMA') and ('-' in exonid) or exonid == None:
pass #exclude junction IDs
else:
regions = string.replace(regions,';','|')
regions = string.replace(regions,'-','|')
regions = string.split(regions,'|')
for region in regions:
if len(region) == 0:
try: region = t[17]+t[18] ### For junction introns where no region ID exists
except Exception: null=[]
if ':' in region: region = string.split(region,':')[-1] ### User reported that gene:gene was appearing and not sure exactly where or why but added this to address it
if probeset1 in novel_exon_junction_db:
uid = novel_exon_junction_db[probeset1] ### convert the uid (alternative exon) to the annotated ID for the novel exon
converted_db[uid] = probeset1
else:
uid = geneid+':'+region
ss = SplicingData(score,symbol,description,exonid,probesets,direction,splicing_event,external_exon,genomic_loc,gene_exp_fold,protein_annot,domain_inferred,domain_overlap,method,filename)
ss.setExonExpStatus(str(expressed_exon))
try: regulated_critical_exon_temp[uid].append(ss)
except Exception: regulated_critical_exon_temp[uid] = [ss]
#print filename, len(regulated_critical_exon_temp)
for uid in regulated_critical_exon_temp:
report=None
if len(regulated_critical_exon_temp[uid])>1:
### We are only reporting one here and that's OK, since we are only reporting the top scores... won't include all inclusion junctions.
scores=[]
for ss in regulated_critical_exon_temp[uid]: scores.append((float(ss.Score()),ss))
scores.sort()
if (scores[0][0]*scores[-1][0])<0:
ss1 = scores[0][1]; ss2 = scores[-1][1]
                if ss1.ProbesetsSorted() == ss2.ProbesetsSorted(): ss1.setDirection('mutual') ### same exons, hence, mutually exclusive event (or similar)
else: ss1.setDirection('both') ### opposite directions in the same comparison-file, hence, conflicting data
report=[ss1]
else:
if abs(scores[0][0])>abs(scores[-1][0]): report=[scores[0][1]]
else: report=[scores[-1][1]]
else:
report=regulated_critical_exon_temp[uid]
### Combine data from different analysis files
try: regulated_critical_exons[uid]+=report
except Exception: regulated_critical_exons[uid]=report
"""if 'ENSG00000204120' in uid:
print uid,
for i in regulated_critical_exon_temp[uid]:
print i.Probesets(),
print ''
"""
try: report[0].setEvidence(len(regulated_critical_exon_temp[uid])) ###set the number of exons demonstrating regulation of this exons
except Exception: null=[]
clearObjectsFromMemory(regulated_critical_exon_temp)
export_data,status = AppendOrWrite(export_path)
if status == 'not found':
header = string.join(['uid','source-IDs','symbol','description','exonids','independent confirmation','score','regulation direction','alternative exon annotations','associated isoforms','inferred regulated domains','overlapping domains','method','supporting evidence score','novel exon: high-confidence','percent exon expression of gene','differential gene-expression','genomic location'],'\t')+'\n'
export_data.write(header)
combined_export_path = string.split(results_dir,'AltResults')[0]+'AltResults/Clustering/Combined-junction-exon-evidence.txt'
combined_export_data, status= AppendOrWrite(combined_export_path)
if status == 'not found':
header = string.join(['uid','source-IDs','symbol','description','exonids','independent confirmation','score','regulation direction','alternative exon annotations','associated isoforms','inferred regulated domains','overlapping domains','method','supporting evidence score','novel exon: high-confidence','percent exon expression of gene','differential gene-expression','genomic location','comparison'],'\t')+'\n'
combined_export_data.write(header)
print len(regulated_critical_exons), 'regulated exon IDs imported.\n'
print 'writing:',export_path; n=0
# print [len(converted_db)]
### Check for alternative 3' or alternative 5' exon regions that were not matched to the right reciprocal junctions (occurs because only one of the exon regions is called alternative)
regulated_critical_exons_copy={}
for uid in regulated_critical_exons:
regulated_critical_exons_copy[uid]=regulated_critical_exons[uid]
u=0
### This is most applicable to RNA-Seq since the junction IDs correspond to the Exon Regions not the probeset Exon IDs
for uid in regulated_critical_exons_copy: ### Look through the copied version since we can't delete entries while iterating through
ls = regulated_critical_exons_copy[uid]
u+=1
#if u<20: print uid
for jd in ls:
if jd.Method() != 'splicing-index' and jd.Method() != 'FIRMA':
try: ### Applicable to RNA-Seq
gene,exonsEx = string.split(jd.Probesets()[1],':') ### Exclusion probeset will have the exon not annotated as the critical exon (although it should be as well)
gene,exonsIn = string.split(jd.Probesets()[0],':')
except Exception:
gene, ce = string.split(uid,':')
exonsIn, exonsEx = string.split(jd.ExonID(),'vs.')
if gene !=None:
critical_exon = None
five_prime,three_prime = string.split(exonsEx,'-')
try: five_primeIn,three_primeIn = string.split(exonsIn,'-')
except Exception: five_primeIn = exonsIn; three_primeIn = exonsIn ### Only should occur during testing when a exon rather than junction ID is considered
#if gene == 'ENSG00000133083': print five_prime,three_prime, five_primeIn,three_primeIn
if five_primeIn == five_prime: ### Hence, the exclusion 3' exon should be added
critical_exon = gene+':'+three_prime
exonid = three_prime
                    elif three_primeIn == three_prime: ### Hence, the exclusion 5' exon should be added
critical_exon = gene+':'+five_prime
exonid = five_prime
else:
if ('5' in jd.SplicingEvent()) or ('five' in jd.SplicingEvent()):
critical_exon = gene+':'+five_prime
exonid = five_prime
elif ('3' in jd.SplicingEvent()) or ('three' in jd.SplicingEvent()):
critical_exon = gene+':'+three_prime
exonid = three_prime
elif ('alt-N-term' in jd.SplicingEvent()) or ('altPromoter' in jd.SplicingEvent()):
critical_exon = gene+':'+five_prime
exonid = five_prime
elif ('alt-C-term' in jd.SplicingEvent()):
critical_exon = gene+':'+three_prime
exonid = three_prime
#print critical_exon, uid, jd.ExonID(),jd.SplicingEvent(); sys.exit()
if critical_exon != None:
if critical_exon in regulated_critical_exons:
#print uid, critical_exon; sys.exit()
if len(regulated_critical_exons[critical_exon]) == 1:
if len(ls)==1 and uid in regulated_critical_exons: ### Can be deleted by this method
if 'vs.' not in regulated_critical_exons[critical_exon][0].ExonID() and 'vs.' not in regulated_critical_exons[critical_exon][0].ExonID():
regulated_critical_exons[uid].append(regulated_critical_exons[critical_exon][0])
del regulated_critical_exons[critical_exon]
elif uid in regulated_critical_exons: ###If two entries already exit
ed = regulated_critical_exons[uid][1]
ed2 = regulated_critical_exons[critical_exon][0]
if 'vs.' not in ed.ExonID() and 'vs.' not in ed2.ExonID():
if ed.Direction() != ed2.Direction(): ### should be opposite directions
ed.appendExonID(exonid)
ed.setEvidence(ed.Evidence()+1)
ed.setScore(ed.Score()+'|'+ed2.Score())
del regulated_critical_exons[critical_exon]
firstEntry=True
for uid in regulated_critical_exons:
if uid in converted_db:
converted = True
else: converted = False
#if 'ENSG00000133083' in uid: print [uid]
exon_level_confirmation = 'no'
ls = regulated_critical_exons[uid]
jd = regulated_critical_exons[uid][0] ### We are only reporting one here and that's OK, since we are only reporting the top scores... won't include all inclusion junctions.
if len(ls)>1:
methods = []; scores = []; direction = []; exonids = []; probesets = []; evidence = 0; genomic_location = []
junctionids=[]
junction_data_found = 'no'; exon_data_found = 'no'
for jd in ls:
if jd.Method() == 'ASPIRE' or jd.Method() == 'linearregres':
junction_data_found = 'yes'
methods.append(jd.Method())
scores.append(jd.Score())
direction.append(jd.Direction())
exonids.append(jd.ExonID())
junctionids.append(jd.ExonID())
probesets.append(jd.ProbesetDisplay())
evidence+=jd.Evidence()
genomic_location.append(jd.GenomicLocation())
                    ### Preferentially obtain isoform annotations from the reciprocal analysis which is likely more accurate
isoform_annotations = [jd.ProteinAnnotation(), jd.DomainInferred(), jd.DomainOverlap()]
for ed in ls:
if ed.Method() == 'splicing-index' or ed.Method() == 'FIRMA':
exon_data_found = 'yes' ### pick one of them
methods.append(ed.Method())
scores.append(ed.Score())
direction.append(ed.Direction())
exonids.append(ed.ExonID())
probesets.append(ed.ProbesetDisplay())
evidence+=ed.Evidence()
genomic_location.append(ed.GenomicLocation())
#isoform_annotations = [ed.ProteinAnnotation(), ed.DomainInferred(), ed.DomainOverlap()]
if junction_data_found == 'yes' and exon_data_found == 'yes':
exon_level_confirmation = 'yes'
for junctions in junctionids:
if 'vs.' in junctions:
j1 = string.split(junctions,' vs. ')[0] ### inclusion exon or junction
if '-' not in j1: ### not a junction, hence, may not be sufficient to use for confirmation (see below)
if 'I' in j1: ### intron feature
if '_' in j1: ### novel predicted exon
exon_level_confirmation = 'no'
else:
exon_level_confirmation = 'yes'
else:
if '_' in j1:
exon_level_confirmation = 'no'
else:
exon_level_confirmation = 'partial'
method = string.join(methods,'|')
unique_direction = unique.unique(direction)
genomic_location = unique.unique(genomic_location)
if len(unique_direction) == 1: direction = unique_direction[0]
else: direction = string.join(direction,'|')
score = string.join(scores,'|')
probesets = string.join(probesets,'|')
exonids_unique = unique.unique(exonids)
if len(exonids_unique) == 1: exonids = exonids_unique[0]
else: exonids = string.join(exonids,'|')
if len(genomic_location) == 1: genomic_location = genomic_location[0]
else: genomic_location = string.join(genomic_location,'|')
evidence = str(evidence)
if 'mutual' in direction: direction = 'mutual'
if len(ls) == 1:
probesets = jd.ProbesetDisplay()
direction = jd.Direction()
score = jd.Score()
method = jd.Method()
exonids = jd.ExonID()
evidence = jd.Evidence()
genomic_location = jd.GenomicLocation()
isoform_annotations = [jd.ProteinAnnotation(), jd.DomainInferred(), jd.DomainOverlap()]
try:
#if int(evidence)>4 and 'I' in uid: novel_exon = 'yes' ### high-evidence novel exon
#else: novel_exon = 'no'
if converted == True:
novel_exon = 'yes'
splicing_event = 'cassette-exon'
else:
novel_exon = 'no'
splicing_event = jd.SplicingEvent()
values = [uid, probesets, jd.Symbol(), jd.Description(), exonids, exon_level_confirmation, score, direction, splicing_event]
values += isoform_annotations+[method, str(evidence),novel_exon,jd.ExonExpStatus(),jd.GeneExpression(),genomic_location]
values = string.join(values,'\t')+'\n'
#if 'yes' in exon_level_confirmation:
export_data.write(values); n+=1
if exon_level_confirmation != 'no' and ('|' not in direction):
geneID = string.split(uid,':')[0]
try: relative_exon_exp = float(jd.ExonExpStatus())
except Exception: relative_exon_exp = 1
if firstEntry:
### Also export high-confidence predictions for GO-Elite
elite_export_path = string.split(results_dir,'AltResults')[0]+'GO-Elite/AltExonConfirmed/'+export_filename+'-junction-exon-evidence.txt'
elite_export_data = export.ExportFile(elite_export_path)
elite_export_data.write('GeneID\tEn\tExonID\tScores\tGenomicLocation\n')
firstEntry = False
if relative_exon_exp>0.10:
elite_export_data.write(string.join([geneID,'En',uid,score,genomic_location],'\t')+'\n')
#if 'DNA' in isoform_annotations[-1]:
if '2moter' not in jd.SplicingEvent() and '2lt-N' not in jd.SplicingEvent():
values = [uid, probesets, jd.Symbol(), jd.Description(), exonids, exon_level_confirmation, score, direction, splicing_event]
values += isoform_annotations+[method, str(evidence),novel_exon,jd.ExonExpStatus(),jd.GeneExpression(),genomic_location,export_filename]
values = string.join(values,'\t')+'\n'
combined_export_data.write(values)
except Exception, e:
#print traceback.format_exc();sys.exit()
pass ### Unknown error - not evaluated in 2.0.8 - isoform_annotations not referenced
print n,'exon IDs written to file.'
export_data.close()
try: elite_export_data.close()
except Exception: pass
clearObjectsFromMemory(regulated_critical_exons)
clearObjectsFromMemory(regulated_critical_exons_copy)
#print '!!!!Within comparison evidence'
#returnLargeGlobalVars()
def runKallisto(species,dataset_name,root_dir,fastq_folder,returnSampleNames=False):
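    ### Quantifies FASTQ files with the bundled kallisto binary (building the transcriptome index on demand or
    ### reusing existing abundance.tsv output), then writes transcript- and gene-level TPM matrices plus a
    ### per-sample alignment summary into ExpressionInput/.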
#print 'Running Kallisto...please be patient'
import subprocess
#if '/bin' in kallisto_dir: kallisto_file = kallisto_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
kallisto_dir= 'AltDatabase/kallisto/0.42.1/'
if os.name == 'nt':
kallisto_file = kallisto_dir + 'PC/bin/kallisto.exe'; plat = 'Windows'
elif 'darwin' in sys.platform:
kallisto_file = kallisto_dir + 'Mac/bin/kallisto'; plat = 'MacOSX'
elif 'linux' in sys.platform:
kallisto_file = kallisto_dir + '/Linux/bin/kallisto'; plat = 'linux'
print 'Using',kallisto_file
kallisto_file = filepath(kallisto_file)
kallisto_root = string.split(kallisto_file,'bin/kallisto')[0]
fn = filepath(kallisto_file)
output_dir=root_dir+'/ExpressionInput/kallisto/'
try: os.mkdir(root_dir+'/ExpressionInput')
except Exception: pass
try: os.mkdir(root_dir+'/ExpressionInput/kallisto')
except Exception: pass
fastq_folder += '/'
dir_list = read_directory(fastq_folder)
fastq_paths = []
for file in dir_list:
file_lower = string.lower(file)
        if 'fastq' in file_lower and '._' not in file[:4]: ### skip hidden '._' files
fastq_paths.append(fastq_folder+file)
fastq_paths,paired = findPairs(fastq_paths)
### Check to see if Kallisto files already exist and use these if so (could be problematic but allows for outside quantification)
kallisto_tsv_paths=[]
dir_list = read_directory(output_dir)
for folder in dir_list:
kallisto_outdir = output_dir+folder+'/abundance.tsv'
status = os.path.isfile(kallisto_outdir)
if status:
kallisto_tsv_paths.append(fastq_folder+file)
if returnSampleNames:
return fastq_paths
indexFile = kallisto_root+species
indexStatus = os.path.isfile(indexFile)
if indexStatus == False:
try: fasta_file = getFASTAFile(species)
except Exception: fasta_file = None
if fasta_file==None:
###download Ensembl fasta file to the above directory
import EnsemblSQL
ensembl_version = string.replace(unique.getCurrentGeneDatabaseVersion(),'EnsMart','')
EnsemblSQL.getEnsemblTranscriptSequences(ensembl_version,species,restrictTo='cDNA')
fasta_file = getFASTAFile(species)
if fasta_file!=None:
print 'Building kallisto index file...'
try: retcode = subprocess.call([kallisto_file, "index","-i", kallisto_root+species, fasta_file])
except Exception:
print traceback.format_exc()
### If installed globally
retcode = subprocess.call(['kallisto', "index","-i", kallisto_root+species, fasta_file])
if len(kallisto_tsv_paths) == len(fastq_paths):
reimportExistingKallistoOutput = True
elif len(kallisto_tsv_paths) > len(fastq_paths):
reimportExistingKallistoOutput = True ### If working with a directory of kallisto results
else:
reimportExistingKallistoOutput = False
print reimportExistingKallistoOutput
if reimportExistingKallistoOutput:
### Just get the existing Kallisto output folders
fastq_paths = read_directory(output_dir)
kallisto_folders=[]
expMatrix={}
countMatrix={}
sample_total_counts={}
headers=['UID']
for n in fastq_paths:
output_path = output_dir+n
kallisto_folders.append(output_path)
if reimportExistingKallistoOutput == False:
begin_time = time.time()
print 'Running kallisto on:',n,
p=fastq_paths[n]
b=[" > "+n+'.sam']
#"""
if paired == 'paired':
try:
#retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path,"--pseudobam"]+p+b)
retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path]+p)
except Exception:
print traceback.format_exc()
retcode = subprocess.call(['kallisto', "quant","-i", indexFile, "-o", output_path]+p)
else:
if os.name == 'nt':
try:
try: retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path,"--single","-l","200"]+p)
except Exception: retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path,"--single","-l","200","-s","20"]+p)
except Exception:
try: retcode = subprocess.call(['kallisto', "quant","-i", indexFile, "-o", output_path,"--single","-l","200"]+p)
except Exception:
retcode = subprocess.call(['kallisto', "quant","-i", indexFile, "-o", output_path,"--single","-l","200","-s","20"]+p)
else:
try: retcode = subprocess.call([kallisto_file, "quant","-i", indexFile, "-o", output_path,"--single","-l","200","-s","20"]+p)
except Exception:
retcode = subprocess.call(['kallisto', "quant","-i", indexFile, "-o", output_path,"--single","-l","200","-s","20"]+p)
if retcode == 0: print 'completed in', int(time.time()-begin_time), 'seconds'
else: print 'kallisto failed due to an unknown error (report to altanalyze.org help).'
#"""
input_path = output_path+'/abundance.txt'
try:
try: expMatrix,countMatrix=importTPMs(n,input_path,expMatrix,countMatrix)
except Exception:
input_path = output_path+'/abundance.tsv'
expMatrix,countMatrix=importTPMs(n,input_path,expMatrix,countMatrix)
headers.append(n)
sample_total_counts = importTotalReadCounts(n,output_path+'/run_info.json',sample_total_counts)
except Exception:
print traceback.format_exc();sys.exit()
print n, 'TPM expression import failed'
if paired == 'paired':
print '\n...Make sure the paired-end samples were correctly assigned:'
for i in fastq_paths:
print 'Common name:',i,
for x in fastq_paths[i]:
print export.findParentDir(x),
print '\n'
### Summarize alignment information
for sample in countMatrix:
try: estCounts = int(float(countMatrix[sample]))
except Exception: estCounts='NA'
try: totalCounts = sample_total_counts[sample]
except Exception: totalCounts = 'NA'
try: aligned = str(100*estCounts/float(totalCounts))
except Exception: aligned = 'NA'
try: aligned = string.split(aligned,'.')[0]+'.'+string.split(aligned,'.')[1][:2]
except Exception: aligned = 'NA'
countMatrix[sample] = [str(estCounts),totalCounts,aligned]
dataset_name = string.replace(dataset_name,'exp.','')
to = export.ExportFile(root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt')
go = export.ExportFile(root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt')
so = export.ExportFile(root_dir+'/ExpressionInput/summary.'+dataset_name+'.txt')
exportMatrix(to,headers,expMatrix) ### Export transcript expression matrix
try:
geneMatrix = calculateGeneTPMs(species,expMatrix) ### calculate combined gene level TPMs
exportMatrix(go,headers,geneMatrix) ### export gene expression matrix
except Exception:
print 'AltAnalyze was unable to summarize gene TPMs from transcripts, proceeding with transcripts.'
export.copyFile(root_dir+'/ExpressionInput/transcript.'+dataset_name+'.txt',root_dir+'/ExpressionInput/exp.'+dataset_name+'.txt')
exportMatrix(so,['SampleID','Estimated Counts','Total Fragments','Percent Aligned'],countMatrix) ### export gene expression matrix
def calculateGeneTPMs(species,expMatrix):
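    ### Sums transcript TPMs to gene-level TPMs using Ensembl gene-to-transcript associations, downloading the
    ### association tables if they are missing from the local database.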
import gene_associations
try:
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
if len(gene_to_transcript_db)==0:
kill
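            ### 'kill' is intentionally undefined: it raises a NameError so the except block below rebuilds the
            ### missing transcript-to-gene associations.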
except Exception:
try:
print 'Missing transcript-to-gene associations... downloading from Ensembl.'
import EnsemblSQL
db_version = unique.getCurrentGeneDatabaseVersion()
EnsemblSQL.getGeneTranscriptOnly(species,'Basic',db_version,'yes')
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
except Exception:
import GeneSetDownloader
print 'Ensembl-EnsTranscripts required for gene conversion... downloading from the web...'
GeneSetDownloader.remoteDownloadEnsemblTranscriptAssocations(species)
gene_to_transcript_db = gene_associations.getGeneToUid(species,('hide','Ensembl-EnsTranscript'))
import OBO_import
transcript_to_gene_db = OBO_import.swapKeyValues(gene_to_transcript_db)
gene_matrix = {}
present_gene_transcripts={}
for transcript in expMatrix:
if transcript in transcript_to_gene_db:
gene = transcript_to_gene_db[transcript][0]
try: present_gene_transcripts[gene].append(transcript)
except Exception: present_gene_transcripts[gene] = [transcript]
else: pass ### could keep track of the missing transcripts
for gene in present_gene_transcripts:
gene_values = []
for transcript in present_gene_transcripts[gene]:
gene_values.append(map(float,expMatrix[transcript]))
gene_tpms = [sum(value) for value in zip(*gene_values)] ### sum of all transcript tmp's per sample
gene_tpms = map(str,gene_tpms)
gene_matrix[gene] = gene_tpms
return gene_matrix
def exportMatrix(eo,headers,matrix):
eo.write(string.join(headers,'\t')+'\n')
for gene in matrix:
eo.write(string.join([gene]+matrix[gene],'\t')+'\n')
eo.close()
def importTPMs(sample,input_path,expMatrix,countMatrix):
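    ### Parses a kallisto abundance.tsv/txt file, appending each transcript's TPM to expMatrix and accumulating
    ### the sample's total estimated counts in countMatrix.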
firstLine=True
for line in open(input_path,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine:
firstLine=False
header = string.split(data,'\t')
else:
target_id,length,eff_length,est_counts,tpm = string.split(data,'\t')
try: expMatrix[target_id].append(tpm)
except Exception: expMatrix[target_id]=[tpm]
try: countMatrix[sample]+=float(est_counts)
except Exception: countMatrix[sample]=float(est_counts)
return expMatrix,countMatrix
def importTotalReadCounts(sample,input_path,sample_total_counts):
### Import from Kallisto Json file
for line in open(input_path,'rU').xreadlines():
data = cleanUpLine(line)
if "n_processed: " in data:
total = string.split(data,"n_processed: ")[1]
total = string.split(total,',')[0]
sample_total_counts[sample]=total
return sample_total_counts
def findPairs(fastq_paths):
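    ### Infers sample names from FASTQ file names by stripping read/pair suffixes (read1/read2, _1./_2., R1/R2, ...)
    ### or '=' prefixes, grouping mate files together; returns ({sample: [paths]}, 'paired' or 'single').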
#fastq_paths = ['/Volumes/test/run0718_lane12_read1_index701=Kopan_RBP_02_14999.fastq.gz','/Volumes/run0718_lane12_read2_index701=Kopan_RBP_02_14999.fastq.gz']
import export
read_notation=0
under_suffix_notation=0
suffix_notation=0
equal_notation=0
suffix_db={}
for i in fastq_paths:
        if 'read1' in i or 'read2' in i or 'pair1' in i or 'pair2' in i or 'R1' in i or 'R2' in i:
read_notation+=1
f = export.findFilename(i)
if 'fastq' in f:
name = string.split(f,'fastq')[0]
elif 'FASTQ' in f:
name = string.split(f,'FASTQ')[0]
elif 'fq' in f:
name = string.split(f,'fq')[0]
if '_1.' in name or '_2.' in name:
under_suffix_notation+=1
elif '1.' in name or '2.' in name:
suffix_notation+=1
suffix_db[name[-2:]]=[]
if '=' in name:
equal_notation+=1
if read_notation==0 and suffix_notation==0 and under_suffix_notation==0:
new_names={}
for i in fastq_paths:
if '/' in i or '\\' in i:
n = export.findFilename(i)
if '=' in n:
n = string.split(n,'=')[1]
new_names[n] = [i]
### likely single-end samples
return new_names, 'single'
else:
new_names={}
paired = 'paired'
if equal_notation==len(fastq_paths):
for i in fastq_paths:
name = string.split(i,'=')[-1]
name = string.replace(name,'.fastq.gz','')
name = string.replace(name,'.fastq','')
name = string.replace(name,'.FASTQ.gz','')
name = string.replace(name,'.FASTQ','')
name = string.replace(name,'.fq.gz','')
name = string.replace(name,'.fq','')
if '/' in name or '\\' in name:
name = export.findFilename(name)
if '=' in name:
name = string.split(name,'=')[1]
try: new_names[name].append(i)
except Exception: new_names[name]=[i]
else:
for i in fastq_paths:
if suffix_notation == len(fastq_paths) and len(suffix_db)==2: ### requires that files end in both .1 and .2
pairs = ['1.','2.']
else:
pairs = ['-read1','-read2','-pair1','-pair2','_read1','_read2','_pair1','_pair2','read1','read2','pair1','pair2','_1.','_2.','_R1','_R2','-R1','-R2','R1','R2']
n=str(i)
n = string.replace(n,'fastq.gz','')
n = string.replace(n,'fastq','')
for p in pairs: n = string.replace(n,p,'')
if '/' in n or '\\' in n:
n = export.findFilename(n)
if '=' in n:
n = string.split(n,'=')[1]
if n[-1]=='.':
n = n[:-1] ###remove the last decimal
try: new_names[n].append(i)
except Exception: new_names[n]=[i]
for i in new_names:
if len(new_names[i])>1:
pass
else:
paired = 'single'
return new_names, paired
def getFASTAFile(species):
fasta_file=None
fasta_folder = 'AltDatabase/'+species+'/SequenceData/'
dir_list = read_directory(filepath(fasta_folder))
for file in dir_list:
if '.fa' in file: fasta_file = filepath(fasta_folder+file)
return fasta_file
if __name__ == '__main__':
samplesDiffering = 3
column_method = 'hopach'
species = 'Hs'
excludeCellCycle = False
platform = 'RNASeq'; graphic_links=[('','/Volumes/HomeBackup/CCHMC/PBMC-10X/ExpressionInput/SamplePrediction/DataPlots/Clustering-33k_CPTT_matrix-CORRELATED-FEATURES-iterFilt-hierarchical_cosine_cosine.txt')]
"""
graphic_links,new_results_file = correlateClusteredGenes(platform,graphic_links[-1][-1][:-4]+'.txt',
numSamplesClustered=samplesDiffering,excludeCellCycle=excludeCellCycle,graphics=graphic_links,
ColumnMethod=column_method, transpose=True, includeMoreCells=True)
"""
#sys.exit()
species='Hs'; platform = "3'array"; vendor = "3'array"
import UI; import multiprocessing as mlp
gsp = UI.GeneSelectionParameters(species,platform,vendor)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setJustShowTheseIDs('')
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(1,50,4,4,
True,'gene','protein_coding',False,'cosine','hopach',0.4)
#expFile = '/Users/saljh8/Desktop/Grimes/KashishNormalization/test/Original/ExpressionInput/exp.CombinedSingleCell_March_15_2015.txt'
expFile = '/Volumes/My Passport/salomonis2/SRP042161_GBM-single-cell/bams/ExpressionInput/exp.GBM_scRNA-Seq-steady-state.txt'
#singleCellRNASeqWorkflow('Hs', "RNASeq", expFile, mlp, parameters=gsp);sys.exit()
filename = '/Volumes/SEQ-DATA/Jared/ExpressionInput/counts.CM-steady-state.txt'
#fastRPKMCalculate(filename);sys.exit()
calculateRPKMsFromGeneCounts(filename,'Hs',AdjustExpression=True);sys.exit()
#copyICGSfiles('','');sys.exit()
runKallisto('Hs','scRNA-Seq','/Users/saljh8/kallisto_files/','/Users/saljh8/kallisto_files/');sys.exit()
import multiprocessing as mlp
import UI
species='Mm'; platform = "3'array"; vendor = 'Ensembl'
gsp = UI.GeneSelectionParameters(species,platform,vendor)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setJustShowTheseIDs('')
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(0,0,1.5,3,
False,'AltExon','protein_coding',False,'cosine','hopach',0.35)
#gsp.setSampleDiscoveryParameters(1,1,4,3, True,'Gene','protein_coding',False,'cosine','hopach',0.5)
filename = '/Volumes/SEQ-DATA/AML_junction/AltResults/AlternativeOutput/Hs_RNASeq_top_alt_junctions-PSI-clust.txt'
#fastRPKMCalculate(filename);sys.exit()
results_file = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/ExpressionInput/DataPlots/400 fold for at least 4 samples/Clustering-myeloblast-steady-state-correlated-features-hierarchical_euclidean_cosine-hopach.txt'
guideGeneFile = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/ExpressionInput/drivingTFs-symbol.txt'
expFile = '/Users/saljh8/Desktop/Grimes/KashishNormalization/3-25-2015/ExpressionInput/exp.CombinedSingleCell_March_15_2015.txt'
expFile = '/Users/saljh8/Desktop/dataAnalysis/Mm_Kiddney_tubual/ExpressionInput/exp.E15.5_Adult_IRI Data-output.txt'
expFile = '/Users/saljh8/Desktop/PCBC_MetaData_Comparisons/temp/C4Meth450-filtered-SC-3_regulated.txt'
expFile = '/Volumes/SEQ-DATA/Grimeslab/TopHat/AltResults/AlternativeOutput/Mm_RNASeq_top_alt_junctions-PSI-clust-filter.txt'
expFile = '/Users/saljh8/Documents/Leucegene_TargetPSIFiles/exp.TArget_psi_noif_uncorr_03-50missing-12high.txt'
expFile = '/Volumes/BOZEMAN2015/Hs_RNASeq_top_alt_junctions-PSI-clust-filter.txt'
singleCellRNASeqWorkflow('Hs', "exons", expFile, mlp, exp_threshold=0, rpkm_threshold=0, parameters=gsp);sys.exit()
#expFile = '/Users/saljh8/Desktop/Grimes/AltSplice/Gmp-cluster-filter.txt'
#singleCellRNASeqWorkflow('Mm', "exons", expFile, mlp, exp_threshold=0, rpkm_threshold=0, parameters=gsp);sys.exit()
#expFile = '/Users/saljh8/Downloads/methylation/ExpressionInput/exp.female-steady-state.txt'
#singleCellRNASeqWorkflow('Hs', 'RNASeq', expFile, mlp, exp_threshold=50, rpkm_threshold=5) # drivers=guideGeneFile)
#sys.exit()
#correlateClusteredGenes(results_file);sys.exit()
#reformatExonFile('Hs','exon',True);sys.exit()
filename = '/Volumes/Time Machine Backups/dataAnalysis/PCBC_Sep2013/C4-reference/ExpressionInput/counts.C4.txt'
#fastRPKMCalculate(filename);sys.exit()
file1 = '/Volumes/My Passport/dataAnalysis/CardiacRNASeq/BedFiles/ExpressionInput/exp.CardiacRNASeq.txt'
file2 = '/Volumes/Time Machine Backups/dataAnalysis/PCBC_Sep2013/C4-reference/ReferenceComps/ExpressionInput/counts.C4.txt'
#getHighExpNovelExons('Hs',file1);sys.exit()
#mergeCountFiles(file1,file2); sys.exit()
import UI
test_status = 'yes'
data_type = 'ncRNA'
data_type = 'mRNA'
array_type = 'RNASeq'
array_type = 'junction'
species = 'Hs' ### edit this
summary_results_db = {}
root_dir = '/Volumes/Time Machine Backups/dataAnalysis/Human Blood/Exon/Multiple Sclerosis/Untreated_MS-analysis/'
#root_dir = '/Volumes/Time Machine Backups/dataAnalysis/Human Blood/Exon/Multiple Sclerosis/2-3rds_training-untreated/'
root_dir = '/Volumes/SEQ-DATA/Grimes/14018_gmp-pro/400-original/'
#root_dir = '/Volumes/My Passport/dataAnalysis/PCBC_Dec2013/All/bedFiles/'
root_dir = '/Users/saljh8/Desktop/dataAnalysis/HTA2.0 Files/'
#summary_results_db['Hs_Junction_d14_vs_d7.p5_average-ASPIRE-exon-inclusion-results.txt'] = [] ### edit this
#summary_results_db['Hs_Junction_d14_vs_d7.p5_average-splicing-index-exon-inclusion-results.txt'] = [] ### edit this
results_dir = root_dir +'AltResults/AlternativeOutput/'
dir_list = read_directory(results_dir)
for i in dir_list:
if '_average' in i:
comparison, end = string.split(i,'_average')
if '-exon-inclusion-results.txt' in i: summary_results_db[comparison]=[]
compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir); sys.exit()
fl = UI.ExpressionFileLocationData('','','',''); fl.setCELFileDir(loc); fl.setRootDir(loc)
exp_file_location_db={}; exp_file_location_db['test']=fl
alignJunctionsToEnsembl(species,exp_file_location_db,'test'); sys.exit()
getEnsemblAssociations(species,data_type,test_status,'yes'); sys.exit()
| [
"nsalomonis@gmail.com"
] | nsalomonis@gmail.com |
c7a6d37da7b8e7990c140909ac230c2be4083302 | 31ee112d47d3a2b2383498646eff5eb8c7368465 | /collective/scss/stylesheet.py | b1f5bf14ca3b9447bed8f60b56a3b2af06a037bd | [] | no_license | collective/collective.scss | acf7f8313d4584e0a0e2c08756f5bf8ed639149c | b4d86613d3be1d433a118033395416e84df4ad54 | refs/heads/master | 2023-07-15T10:01:19.302332 | 2011-11-25T23:34:21 | 2011-11-25T23:34:21 | 2,681,508 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from Products.Five.browser import BrowserView
from scss import parser
class SCSSView(BrowserView):
"""SCSS base stylesheet view"""
def __call__(self):
# defer to index method, because that's what gets overridden by the template ZCML attribute
scss = self.index().encode('utf-8')
p = parser.Stylesheet()
css = str(p.loads(scss))
self.request.response.setHeader("Content-type", "text/css")
return css
| [
"toutpt@gmail.com"
] | toutpt@gmail.com |
6b5d92329e27f257d4c76bf12497abf7ddf3c060 | 2628a2f1723e19f3c88e40684f7ce7518470f358 | /product.py | fe651d0bd24e6c65b7e86b197c30a95dddbaf033 | [] | no_license | Somi-Singh/python_loop_Questions | 0f3dc5d821a3eb90a7bc47733a947921b2d89f90 | 67b224dbdc76f00de6d90216b99d26c7e7eed7d3 | refs/heads/main | 2023-06-08T07:03:46.624140 | 2021-07-01T10:48:49 | 2021-07-01T10:48:49 | 381,999,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | num=int(input("enter any num"))
i=1
product=1
while i<=num:
product=product*i
i+=1
print(product) | [
"noreply@github.com"
] | Somi-Singh.noreply@github.com |
4ae44a52ae0a90f5ce6d3046a79f21566eb04efa | 22dcd52b6a07e82e8db9bf8b7ad38711d12f69a8 | /venv/Lib/site-packages/sklearn/utils/seq_dataset.py | 738c8a6be16c864aebeade2eb9ded721332807e8 | [] | no_license | MrGreenPepper/music_cluster | 9060d44db68ae5e085a4f2c78d36868645432d43 | af5383a7b9c68d04c16c1086cac6d2d54c3e580c | refs/heads/main | 2023-08-15T09:14:50.630105 | 2021-10-01T09:45:47 | 2021-10-01T09:45:47 | 412,407,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
from . import _seq_dataset
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.utils.seq_dataset'
correct_import_path = 'sklearn.utils'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
return getattr(_seq_dataset, name)
if not sys.version_info >= (3, 7):
Pep562(__name__)
| [
"sebastian_truemper@posteo.de"
] | sebastian_truemper@posteo.de |
564fd5a85ad260577bd5d9828960654f8cfe79e5 | 0e448933dd67f9233e8dbd198ea6d27e6afd67d9 | /account/forms.py | 0a610959a986d3a1036d3f5c2db086249e5f414e | [] | no_license | achiengcindy/bookmarks | 1ef1d301508eb45775f724b6da2b343df8543fbb | e4c47b02f13bd55c2773775bc4843f63efa5264f | refs/heads/master | 2021-08-27T20:28:32.644574 | 2017-11-28T07:32:04 | 2017-11-28T07:32:04 | 111,144,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from django.contrib.auth.models import User
from django import forms
from .models import Profile
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class UserRegistrationForm(forms.ModelForm):
password = forms.CharField(label='Password',
widget=forms.PasswordInput)
password2 = forms.CharField(label='Repeat password',widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username', 'first_name', 'email')
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError('Passwords don\'t match.')
return cd['password2']
#user edit profile
class UserEditForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class ProfileEditForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('date_of_birth', 'photo') | [
"achiengcindy36@gmail.com"
] | achiengcindy36@gmail.com |
80d95487a6b28adfb22e35b787fdbcbc59a88fdc | c7edeca1b88a769247476368a25326d61b1d2b66 | /backend/captchapays_28298/settings.py | df19b6a88d56cb7013391b4fc6b76d8507bbeecb | [] | no_license | crowdbotics-apps/captchapays-28298 | 3bcb7847d7dd01de25a3364addf9e39d85df2600 | 6ac32f590a4a11d00ec2a0da5d19cbbd60ac704c | refs/heads/master | 2023-06-04T21:24:02.480428 | 2021-06-29T13:33:39 | 2021-06-29T13:33:39 | 381,376,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,117 | py | """
Django settings for captchapays_28298 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'captchapays_28298.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'captchapays_28298.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
bf7109f50229483de9c56643983853642b4a9be2 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/compute/azure-mgmt-compute/generated_samples/availability_set_list_minimum_set_gen.py | 48e735b6f464e2d63ad6b9743a6a88a861b86956 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 1,595 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-compute
# USAGE
python availability_set_list_minimum_set_gen.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ComputeManagementClient(
credential=DefaultAzureCredential(),
subscription_id="{subscription-id}",
)
response = client.availability_sets.list(
resource_group_name="rgcompute",
)
for item in response:
print(item)
# x-ms-original-file: specification/compute/resource-manager/Microsoft.Compute/ComputeRP/stable/2022-11-01/examples/availabilitySetExamples/AvailabilitySet_List_MinimumSet_Gen.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
fd236e862a9bfd7c3bfe8192e30f9f77cfa9536d | cbb450f658bec796e26061bdeafcd1cc44ee1159 | /fzhtzj/fjhtzj/apps/news/migrations/0001_initial.py | e8f29fe37d553fa16e883eac1c3fe7a915ccbd1c | [] | no_license | ylz1990/htzj | 4ddd4701ead6a49d254ff6df94d9db6a69066a79 | 6bf1192032b985a484dc9b2221f17410eb4475dc | refs/heads/master | 2022-01-06T16:11:46.918336 | 2019-07-10T06:33:34 | 2019-07-10T06:33:34 | 192,693,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # Generated by Django 2.2 on 2019-06-27 08:32
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='标题', max_length=150, verbose_name='标题')),
('tags', models.CharField(help_text='新闻类型', max_length=200, verbose_name='新闻类型')),
('content', tinymce.models.HTMLField()),
('create_date', models.DateTimeField(auto_now=True, help_text='发布时间', verbose_name='发布时间')),
],
options={
'ordering': ['-create_date', '-id'],
'verbose_name': '新闻',
'verbose_name_plural': '新闻',
'db_table': 'tb_news',
},
),
]
| [
"pyvip@Vip.tz.cn"
] | pyvip@Vip.tz.cn |
cdea9475506ef06acc12eeb3f1d3d8b27d47ca55 | f26937e8cd0b07589ba1cf6275596d97488cda7e | /scrapySpider/mongoTest/build/lib/mongoTest/items.py | 29119a5da459eab04fe94c97ff82d545bd0fe6ff | [] | no_license | HezhouW/hive | 4aa46a045d22de121e2903075e74c3c9fd75ec1f | 3a7de0c18cbe0ec81e0b40c3217dd5b1a15cf464 | refs/heads/master | 2022-02-27T04:52:42.704501 | 2019-05-24T02:40:49 | 2019-05-24T02:40:49 | 123,524,369 | 1 | 0 | null | 2018-03-02T03:18:07 | 2018-03-02T03:18:07 | null | UTF-8 | Python | false | false | 349 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MongotestItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
movieName = scrapy.Field()
pass
| [
"954316227@qq.com"
] | 954316227@qq.com |
bec4142b26da34cb0e079f5600d4f9ab3ce563bf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_070/ch32_2020_03_26_21_55_29_000008.py | 802d7b3dde9f7a743884523a2716fe29a7ee7fd9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def lista_primos(n):
a=2
i=3
x=0
lista=[0]*n
while x<n:
lista[x]=a
a+=1
if a%2==0:
while a%2==0 or (a%i==0 and a>i):
i=3
a+=1
while a%i!=0 and a>i:
i+=2
x+=1
else:
x+=1
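    # Illustrative expectation: lista_primos(5) returns [2, 3, 5, 7, 11].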
return lista | [
"you@example.com"
] | you@example.com |
bc264ae9d065b64acd9d84fbc3f04a67c2052ea3 | 048c4c7a0a7956e976a0cd0512ca9536c8aeb82d | /tefla/core/image_quality.py | 455ca6156c6fccba2695fb40016fc2844ff044e9 | [
"MIT"
] | permissive | mkulariya1/tefla | 40d41242f08b4431a08f7dc6680088a234da5191 | 8de25c1b67dcf025535f5e8c40539de59acd7fb8 | refs/heads/master | 2020-04-24T15:46:51.866942 | 2019-02-04T18:33:49 | 2019-02-04T18:33:49 | 172,082,029 | 0 | 0 | NOASSERTION | 2019-02-22T14:41:53 | 2019-02-22T14:41:53 | null | UTF-8 | Python | false | false | 5,967 | py | """Python implementation of MS-SSIM."""
import numpy as np
from scipy import signal
from scipy.ndimage.filters import convolve
def FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2) / (2.0 * sigma**2)))
return g / g.sum()
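# Quick illustrative check (added example): FSpecialGauss(11, 1.5) produces an 11x11
# kernel that is symmetric about its centre and sums to 1.0.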
def SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
"""Return the Structural Similarity Map between `img1` and `img2`. This
function attempts to match the functionality of ssim_index_new.m by Zhou
Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip.
Args:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
Returns:
Pair containing the mean SSIM and contrast sensitivity between `img1` and
`img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
    raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
if img1.ndim != 4:
    raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
_, height, width, _ = img1.shape
# Filter size can't be larger than height or width of images.
size = min(filter_size, height, width)
# Scale down sigma if a smaller filter size is used.
sigma = size * filter_sigma / filter_size if filter_size else 0
if filter_size:
window = np.reshape(FSpecialGauss(size, sigma), (1, size, size, 1))
mu1 = signal.fftconvolve(img1, window, mode='valid')
mu2 = signal.fftconvolve(img2, window, mode='valid')
sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
else:
# Empty blur kernel so no need to convolve.
mu1, mu2 = img1, img2
sigma11 = img1 * img1
sigma22 = img2 * img2
sigma12 = img1 * img2
mu11 = mu1 * mu1
mu22 = mu2 * mu2
mu12 = mu1 * mu2
sigma11 -= mu11
sigma22 -= mu22
sigma12 -= mu12
# Calculate intermediate values used by both ssim and cs_map.
c1 = (k1 * max_val)**2
c2 = (k2 * max_val)**2
v1 = 2.0 * sigma12 + c2
v2 = sigma11 + sigma22 + c2
ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
cs = np.mean(v1 / v2)
return ssim, cs
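# Sanity sketch (added example): for identical inputs every numerator equals its
# denominator, so SSIMForMultiScale(img, img) returns (1.0, 1.0) for any valid 4-D
# batch, e.g. img = np.zeros((1, 32, 32, 3)).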
def MultiScaleSSIM(img1,
img2,
max_val=255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03,
weights=None):
"""Return the MS-SSIM score between `img1` and `img2`.
This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
similarity for image quality assessment" (2003).
Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
Author's MATLAB implementation:
http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Args:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
weights: List of weights for each level; if none, use five levels and the
weights from the original paper.
Returns:
MS-SSIM score between `img1` and `img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
    raise RuntimeError('Input images must have the same shape (%s vs. %s).' % (img1.shape, img2.shape))
if img1.ndim != 4:
    raise RuntimeError('Input images must have four dimensions, not %d' % img1.ndim)
# Note: default weights don't sum to 1.0 but do match the paper / matlab code.
weights = np.array(weights if weights else [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
levels = weights.size
downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
mssim = np.array([])
mcs = np.array([])
for _ in range(levels):
ssim, cs = SSIMForMultiScale(
im1, im2, max_val=max_val, filter_size=filter_size, filter_sigma=filter_sigma, k1=k1, k2=k2)
mssim = np.append(mssim, ssim)
mcs = np.append(mcs, cs)
filtered = [convolve(im, downsample_filter, mode='reflect') for im in [im1, im2]]
im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
return (
np.prod(mcs[0:levels - 1]**weights[0:levels - 1]) * (mssim[levels - 1]**weights[levels - 1]))
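if __name__ == '__main__':
  # Illustrative usage added for clarity; the shapes and noise level below are
  # hypothetical. MS-SSIM of a batch with itself is 1.0; a noisy copy scores lower.
  rng = np.random.RandomState(0)
  batch = rng.randint(0, 256, size=(1, 64, 64, 3)).astype(np.float64)
  noisy = np.clip(batch + rng.normal(0.0, 10.0, size=batch.shape), 0, 255)
  print('MS-SSIM(batch, batch):', MultiScaleSSIM(batch, batch))
  print('MS-SSIM(batch, noisy):', MultiScaleSSIM(batch, noisy))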
| [
"mrinal.haloi11@gmail.com"
] | mrinal.haloi11@gmail.com |
4f32bf298e54f2f7987c626a660c4c19e9fadff2 | 9645bdfbb15742e0d94e3327f94471663f32061a | /Python/1034 - Coloring A Border/1034_coloring-a-border.py | 2b493c2baf0f2c8e7cf4f6570d80120a55d84abc | [] | no_license | aptend/leetcode-rua | f81c080b2260adb2da677612e5c437eda256781d | 80e44f4e9d3a5b592fdebe0bf16d1df54e99991e | refs/heads/master | 2023-06-22T00:40:05.533424 | 2021-03-17T13:51:28 | 2021-03-17T13:51:28 | 186,434,133 | 2 | 0 | null | 2023-06-21T22:12:51 | 2019-05-13T14:17:27 | HTML | UTF-8 | Python | false | false | 1,109 | py | from leezy import solution, Solution
class Q1034(Solution):
@solution
def colorBorder(self, grid, r0, c0, color):
# 152ms 87.36%
M, N = len(grid), len(grid[0])
old_c = grid[r0][c0]
dirs = [(1, 0), (-1, 0), (0, 1), (0, -1)]
def dfs(i, j, border):
is_border_cell = False
for di, dj in dirs:
ni, nj = i + di, j + dj
if not (0 <= ni < M and 0 <= nj < N):
is_border_cell = True
continue
if grid[ni][nj] != old_c:
if grid[ni][nj] != -1:
is_border_cell = True
continue
grid[i][j] = -1
dfs(ni, nj, border)
grid[i][j] = old_c
if is_border_cell:
border.append((i, j))
border = []
dfs(r0, c0, border)
for i, j in border:
grid[i][j] = color
return grid
def main():
q = Q1034()
q.add_args([[1, 1], [1, 2]], 0, 0, 3)
q.run()
if __name__ == '__main__':
main()
| [
"crescentwhale@hotmail.com"
] | crescentwhale@hotmail.com |
73a825ef1c4e66bae05685af1cb2546236b23287 | 651802447b606e46fe1aee0490458bf4261661a0 | /snafu/__main__.py | 9ef2a936acf4d4f5457ea42a60652dafd000886a | [
"ISC"
] | permissive | MysteriousSonOfGod/snafu | e90a9101dd41c0523c4da529c28fffbe1814b13f | ddcbf8dc8f26fbab6f352058d4b3e62fd01ea331 | refs/heads/master | 2022-03-01T15:19:37.822762 | 2019-10-28T06:31:03 | 2019-10-28T06:31:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | import click
class SnafuGroup(click.Group):
"""Force command name to 'snafu'.
"""
def make_context(self, info_name, *args, **kwargs):
return super().make_context('snafu', *args, **kwargs)
@click.group(cls=SnafuGroup, invoke_without_command=True)
@click.option('--version', is_flag=True, help='Print version and exit.')
@click.pass_context
def cli(ctx, version):
if ctx.invoked_subcommand is None:
if version:
from . import __version__
click.echo('SNAFU {}'.format(__version__))
else:
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit(1)
@cli.command(help='Install a Python version.')
@click.argument('version')
@click.option('--use', is_flag=True, help='Use version after installation.')
@click.option(
'--file', 'from_file', type=click.Path(exists=True),
help='Specify an installer to not downloading one.',
)
def install(**kwargs):
from .operations.install import install
install(**kwargs)
@cli.command(help='Uninstall a Python version.')
@click.argument('version')
@click.option(
'--file', 'from_file', type=click.Path(exists=True),
help='Specify an uninstaller to not relying on auto-discovery.',
)
def uninstall(**kwargs):
from .operations.install import uninstall
uninstall(**kwargs)
@cli.command(help='Upgrade an installed Python version.')
@click.argument('version')
@click.option('--pre', is_flag=True, help='Include pre-releases.')
@click.option(
'--file', 'from_file', type=click.Path(exists=True),
help='Specify path to installer to not downloading one.',
)
@click.pass_context
def upgrade(ctx, **kwargs):
from .operations.install import upgrade
upgrade(ctx, **kwargs)
@cli.command(help='Download installer of given Python version.')
@click.argument('version')
@click.option(
'--dest', 'dest_dir', type=click.Path(exists=True, file_okay=False),
help='Download installer to this directory.',
)
@click.option('--force', is_flag=True, help='Overwrite target if exists.')
@click.pass_context
def download(ctx, **kwargs):
from .operations.download import download
download(ctx, **kwargs)
@cli.command(help='Set active Python versions.')
@click.argument('version', nargs=-1)
@click.option(
'--add/--reset', default=None, help='Add version to use without removing.',
)
@click.pass_context
def use(ctx, **kwargs):
from .operations.link import use
use(ctx, **kwargs)
@cli.command(
help='Prints where the executable of Python version is.',
short_help='Print python.exe location.',
)
@click.argument('version')
def where(**kwargs):
from .operations.versions import where
where(**kwargs)
@cli.command(name='list', help='List Python versions.')
@click.option(
'--all', 'list_all', is_flag=True,
help='List all versions (instead of only installed ones).',
)
def list_(**kwargs):
from .operations.versions import list_
list_(**kwargs)
@cli.command(
short_help='Link a command from active versions.',
help=('Link a command, or all commands available based on the currently '
'used Python version(s).'),
)
@click.argument('command', required=False)
@click.option(
'--all', 'link_all', is_flag=True,
help='Link all available operations.',
)
@click.option(
'--overwrite',
type=click.Choice(['yes', 'no', 'smart']), default='yes',
help='What to do when the target exists.',
)
@click.pass_context
def link(ctx, overwrite, **kwargs):
from .operations.link import link, Overwrite
link(ctx, overwrite=Overwrite[overwrite], **kwargs)
if __name__ == '__main__':
cli()
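# Example invocations (version numbers are hypothetical):
#   snafu install 3.6.5 --use
#   snafu list --all
#   snafu link --all --overwrite smart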
| [
"uranusjr@gmail.com"
] | uranusjr@gmail.com |
c793e91c6245dd84a9885fc97963c0193af6dcff | 1ae95a907eda38bc49dba5ce24309a0d134a2fd8 | /vladetina1/asgi.py | ede8d3a4e475252fb3a279974f137e0c6ed195b8 | [] | no_license | ivanurban/vladetina_1-webapp | e43472edbf87485d1b606c9827988f7353adcf02 | c37eea232b2fde654cb2de006a2c3d2fea838047 | refs/heads/master | 2022-12-08T04:56:11.729653 | 2020-08-28T22:36:35 | 2020-08-28T22:36:35 | 289,579,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for vladetina1 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vladetina1.settings')
application = get_asgi_application()
| [
"ivanurban_bg@yahoo.com"
] | ivanurban_bg@yahoo.com |
e3860c147e56b05bdb47ca332ac3184f12e860cd | 3326e1455f857704d144d069ffd0291ef3da830e | /torch2trt_dynamic/plugins/create_gridsample_plugin.py | d42250f0bd94f558c33dc7cb0ab0ea86bf1ecba4 | [
"MIT"
] | permissive | AlanLu0808/torch2trt_dynamic | efc5b3d6cbaffffa43ad28f107ab3588bf135d5e | df864f906a8ae0b7b98680c1612903bdea58c744 | refs/heads/master | 2023-04-30T12:52:20.907104 | 2021-05-09T03:28:19 | 2021-05-09T03:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | import numpy as np
import tensorrt as trt
def create_gridsample_plugin(layer_name, mode, padding_mode, align_corners):
creator = trt.get_plugin_registry().get_plugin_creator(
'GridSamplePluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
pf_mode = trt.PluginField("mode", np.array([mode], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_mode)
pf_padding_mode = trt.PluginField("padding_mode",
np.array([padding_mode], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_padding_mode)
pf_align_corners = trt.PluginField(
"align_corners", np.array([align_corners], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_align_corners)
return creator.create_plugin(layer_name, pfc)
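# Hypothetical usage sketch (argument values are illustrative only):
#   plugin = create_gridsample_plugin('grid_sample_1', mode=0, padding_mode=0, align_corners=1)
# The returned plugin can then be attached to a TensorRT network, e.g. via add_plugin_v2.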
| [
"streetyao@live.com"
] | streetyao@live.com |
6164689de25188831b5f04895aff856313ea43e2 | 9495b91cbed933a55be172c2397c4083b5354faa | /app/user/models.py | 0049a926fd00e9db2e3cfc84072e7112ed029ab6 | [] | no_license | huyquyet/MMS_project | 2f20fff079d201716bdd3f38f204dc3d06f1bada | 01596fe39b41b4c1de29b15233fdf22639a21770 | refs/heads/master | 2021-01-10T10:09:44.045152 | 2015-11-16T10:37:41 | 2015-11-16T10:37:41 | 45,814,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from MMS_project import settings
from app.position.models import Position
from app.team.models import Team
class Profile(models.Model):
user = models.OneToOneField(User, related_name='profile')
avata = models.ImageField(upload_to=settings.AVATA_DIR, max_length=255, default='avata/default.jpg', blank=False)
description = models.TextField(default='', null=True)
team = models.ForeignKey(Team, related_name='user', default=4, null=True)
position = models.ForeignKey(Position, related_name='profile', default=1, null=True)
# def delete(self, *args, **kwargs):
# self.user.delete()
# return super(self.__class__, self).delete(*args, **kwargs)
| [
"nguyenhuyquyet90@gmail.com"
] | nguyenhuyquyet90@gmail.com |
378560693767fc3e496063fd398b2f9089fa2f87 | 677002b757c0a1a00b450d9710a8ec6aeb9b9e9a | /tiago_public_ws/build/pal_gazebo_plugins/catkin_generated/pkg.installspace.context.pc.py | aff47d5416fc2d4516ff0ba1d7c77a8e4c7b486c | [] | no_license | mrrocketraccoon/tiago_development | ce686c86459dbfe8623aa54cf4279021342887fb | a0539bdcf21b67ab902a4649b516dcb929c54042 | refs/heads/main | 2023-06-16T19:39:33.391293 | 2021-07-08T21:20:03 | 2021-07-08T21:20:03 | 384,249,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,163 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include;/usr/include;/usr/include/gazebo-9;/usr/include/bullet;/usr/include/simbody;/usr/include/sdformat-6.0;/usr/include/ignition/math4;/usr/include/OGRE;/usr/include/OGRE/Terrain;/usr/include/OGRE/Paging;/usr/include/ignition/transport4;/usr/include/ignition/msgs1;/usr/include/ignition/common1;/usr/include/ignition/fuel_tools1".split(';') if "${prefix}/include;/usr/include;/usr/include/gazebo-9;/usr/include/bullet;/usr/include/simbody;/usr/include/sdformat-6.0;/usr/include/ignition/math4;/usr/include/OGRE;/usr/include/OGRE/Terrain;/usr/include/OGRE/Paging;/usr/include/ignition/transport4;/usr/include/ignition/msgs1;/usr/include/ignition/common1;/usr/include/ignition/fuel_tools1" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;std_srvs;tf;pal_multirobot_msgs;roscpp;control_toolbox".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgazebo_ros_forcetorque;-lgazebo_pal_hand;-lgazebo_wifi_ap;-lgazebo_underactuated_finger;-lBulletSoftBody;-lBulletDynamics;-lBulletCollision;-lLinearMath;/usr/lib/x86_64-linux-gnu/libSimTKsimbody.so;/usr/lib/x86_64-linux-gnu/libSimTKmath.so;/usr/lib/x86_64-linux-gnu/libSimTKcommon.so;/usr/lib/x86_64-linux-gnu/liblapack.so;/usr/lib/x86_64-linux-gnu/libblas.so;-lpthread;-lrt;-ldl;-lm;/usr/lib/x86_64-linux-gnu/libgazebo.so;/usr/lib/x86_64-linux-gnu/libgazebo_client.so;/usr/lib/x86_64-linux-gnu/libgazebo_gui.so;/usr/lib/x86_64-linux-gnu/libgazebo_sensors.so;/usr/lib/x86_64-linux-gnu/libgazebo_rendering.so;/usr/lib/x86_64-linux-gnu/libgazebo_physics.so;/usr/lib/x86_64-linux-gnu/libgazebo_ode.so;/usr/lib/x86_64-linux-gnu/libgazebo_transport.so;/usr/lib/x86_64-linux-gnu/libgazebo_msgs.so;/usr/lib/x86_64-linux-gnu/libgazebo_util.so;/usr/lib/x86_64-linux-gnu/libgazebo_common.so;/usr/lib/x86_64-linux-gnu/libgazebo_gimpact.so;/usr/lib/x86_64-linux-gnu/libgazebo_opcode.so;/usr/lib/x86_64-linux-gnu/libgazebo_opende_ou.so;/usr/lib/x86_64-linux-gnu/libboost_signals.so;/usr/lib/x86_64-linux-gnu/libboost_filesystem.so;/usr/lib/x86_64-linux-gnu/libboost_program_options.so;/usr/lib/x86_64-linux-gnu/libboost_regex.so;/usr/lib/x86_64-linux-gnu/libboost_iostreams.so;/usr/lib/x86_64-linux-gnu/libprotobuf.so;/usr/lib/x86_64-linux-gnu/libsdformat.so;/usr/lib/x86_64-linux-gnu/libOgreMain.so;/usr/lib/x86_64-linux-gnu/libOgreTerrain.so;/usr/lib/x86_64-linux-gnu/libOgrePaging.so;/usr/lib/x86_64-linux-gnu/libignition-math4.so.4.0.0;/usr/lib/x86_64-linux-gnu/libignition-transport4.so.4.0.0;/usr/lib/x86_64-linux-gnu/libignition-msgs1.so.1.0.0;/usr/lib/x86_64-linux-gnu/libignition-common1.so.1.0.1;/usr/lib/x86_64-linux-gnu/libignition-fuel_tools1.so.1.0.0;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so".split(';') if 
"-lgazebo_ros_forcetorque;-lgazebo_pal_hand;-lgazebo_wifi_ap;-lgazebo_underactuated_finger;-lBulletSoftBody;-lBulletDynamics;-lBulletCollision;-lLinearMath;/usr/lib/x86_64-linux-gnu/libSimTKsimbody.so;/usr/lib/x86_64-linux-gnu/libSimTKmath.so;/usr/lib/x86_64-linux-gnu/libSimTKcommon.so;/usr/lib/x86_64-linux-gnu/liblapack.so;/usr/lib/x86_64-linux-gnu/libblas.so;-lpthread;-lrt;-ldl;-lm;/usr/lib/x86_64-linux-gnu/libgazebo.so;/usr/lib/x86_64-linux-gnu/libgazebo_client.so;/usr/lib/x86_64-linux-gnu/libgazebo_gui.so;/usr/lib/x86_64-linux-gnu/libgazebo_sensors.so;/usr/lib/x86_64-linux-gnu/libgazebo_rendering.so;/usr/lib/x86_64-linux-gnu/libgazebo_physics.so;/usr/lib/x86_64-linux-gnu/libgazebo_ode.so;/usr/lib/x86_64-linux-gnu/libgazebo_transport.so;/usr/lib/x86_64-linux-gnu/libgazebo_msgs.so;/usr/lib/x86_64-linux-gnu/libgazebo_util.so;/usr/lib/x86_64-linux-gnu/libgazebo_common.so;/usr/lib/x86_64-linux-gnu/libgazebo_gimpact.so;/usr/lib/x86_64-linux-gnu/libgazebo_opcode.so;/usr/lib/x86_64-linux-gnu/libgazebo_opende_ou.so;/usr/lib/x86_64-linux-gnu/libboost_signals.so;/usr/lib/x86_64-linux-gnu/libboost_filesystem.so;/usr/lib/x86_64-linux-gnu/libboost_program_options.so;/usr/lib/x86_64-linux-gnu/libboost_regex.so;/usr/lib/x86_64-linux-gnu/libboost_iostreams.so;/usr/lib/x86_64-linux-gnu/libprotobuf.so;/usr/lib/x86_64-linux-gnu/libsdformat.so;/usr/lib/x86_64-linux-gnu/libOgreMain.so;/usr/lib/x86_64-linux-gnu/libOgreTerrain.so;/usr/lib/x86_64-linux-gnu/libOgrePaging.so;/usr/lib/x86_64-linux-gnu/libignition-math4.so.4.0.0;/usr/lib/x86_64-linux-gnu/libignition-transport4.so.4.0.0;/usr/lib/x86_64-linux-gnu/libignition-msgs1.so.1.0.0;/usr/lib/x86_64-linux-gnu/libignition-common1.so.1.0.1;/usr/lib/x86_64-linux-gnu/libignition-fuel_tools1.so.1.0.0;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so" != "" else []
PROJECT_NAME = "pal_gazebo_plugins"
PROJECT_SPACE_DIR = "/tiago_public_ws/install"
PROJECT_VERSION = "2.0.0"
| [
"ricardoxcm@hotmail.com"
] | ricardoxcm@hotmail.com |
5c4f76c6e2b0ef09d415ea9640c17610cfa0689b | 01fa2aca31eb73a559d192fd29e44350f26a13a9 | /HAX/18.CocoJoe/script.module.civitasscrapers/lib/civitasscrapers/sources_civitasscrapers/en/reddit.py | 4cd237137522d81a2fb22aa55a6be1f0a9cdb1f0 | [] | no_license | RandomIntermition/k4y108837s | b4beedeff375645bd4fa9ad348631a9a9f3640b6 | e9115aad49795dfe30a96c278cedaf089abcc11d | refs/heads/master | 2022-05-01T18:45:57.298903 | 2022-03-30T03:41:08 | 2022-03-30T03:41:08 | 109,356,425 | 1 | 0 | null | 2019-11-08T02:20:47 | 2017-11-03T05:36:48 | Python | UTF-8 | Python | false | false | 2,249 | py | # -*- coding: utf-8 -*-
'''
Eggman Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle,client,proxy
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['reddit.com']
self.base_link = 'https://www.reddit.com/user/nbatman/m/streaming2/search?q=%s&restrict_sr=on'
def movie(self, imdb, title, localtitle, aliases, year):
try:
title = cleantitle.geturl(title)
title = title.replace('-','+')
query = '%s+%s' % (title,year)
url = self.base_link % query
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = client.request(url)
try:
match = re.compile('class="search-title may-blank" >(.+?)</a>.+?<span class="search-result-icon search-result-icon-external"></span><a href="(.+?)://(.+?)/(.+?)" class="search-link may-blank" >').findall(r)
for info,http,host,ext in match:
if '2160' in info: quality = '4K'
elif '1080' in info: quality = '1080p'
elif '720' in info: quality = 'HD'
elif '480' in info: quality = 'SD'
else: quality = 'SD'
url = '%s://%s/%s' % (http,host,ext)
                    if 'google' in host.lower(): host = 'GDrive'
sources.append({
'source': host,
'quality': quality,
'language': 'en',
'url': url,
'info': info,
'direct': False,
'debridonly': False
})
except:
return
except Exception:
return
return sources
def resolve(self, url):
return url
| [
"github+github@github.github"
] | github+github@github.github |
56b08623e6f1caaa20f3bd30c23264c4a592c151 | 5085dfd5517c891a1f5f8d99bf698cd4bf3bf419 | /087.py | 05cfccf10e07977157fcd34d680304b9ba743426 | [] | no_license | Lightwing-Ng/100ExamplesForPythonStarter | 01ffd4401fd88a0b997656c8c5f695c49f226557 | 56c493d38a2f1a1c8614350639d1929c474de4af | refs/heads/master | 2020-03-10T22:07:37.340512 | 2018-04-15T13:16:30 | 2018-04-15T13:16:30 | 129,611,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
"""
* @author: Lightwing Ng
* email: rodney_ng@iCloud.com
* created on Apr 15, 2018, 7:37 PM
* Software: PyCharm
* Project Name: Tutorial
Problem: print the result of passing a "struct"-like object (a class instance) to a function.
Program analysis: none.
"""
class student:
x, c = 0, 0
def f(stu):
stu.x = 20
stu.c = 'c'
a = student()
a.x = 3
a.c = 'a'
f(a)
print(a.x, a.c)
| [
"rodney_ng@icloud.com"
] | rodney_ng@icloud.com |
84edb83be95037a7358797df88f5c9ca2978d486 | 53312f6eea68e95990923f9159e721f1c018b630 | /app/services/company_services.py | 6e5e4e381b04cd4c20520d51516a4a6b66c59af5 | [] | no_license | BrunoGehlen/stocks_app | 22978ba22c48af73263ce4bd18a2f985609eefe7 | c496bafb8475f6557de29043fb98b366f1b01371 | refs/heads/master | 2023-04-14T20:46:45.786460 | 2021-05-03T20:35:42 | 2021-05-03T20:35:42 | 363,943,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | from . import datetime, timedelta, HTTPStatus
from app.serializers.company_schema import CompanySchema
from app.models.company_model import CompanyModel
class CompanyServices:
def __init__(self, session):
self.session = session
self.todays_datetime = datetime(
datetime.today().year, datetime.today().month, datetime.today().day
)
def get(self, request):
companies = CompanyModel.query.all()
with self.session.no_autoflush:
for company in companies:
company.transactions = [
transaction
for transaction in company.transactions
if (self.todays_datetime - transaction.transaction_date)
< timedelta(hours=1)
]
# companies = [
# company
# for company in companies
# if all(
# (self.todays_datetime - transaction.transaction_date)
# < timedelta(days=1)
# for transaction in company.transacions
# )
# ]
return {"companies": CompanySchema(many=True).dump(companies)}, HTTPStatus.OK
| [
"you@example.com"
] | you@example.com |
15a91ca627f134ace4c89c131bedcf65cb1b99c4 | 00a086a141acc551c9e3aa23356013cdc8d61b61 | /LeetCode/python/lc021.py | 088a2b04cf2168f0f69ff793b21152f69dd47441 | [] | no_license | ZwEin27/Coding-Training | f01cebbb041efda78bca4bf64e056133d7b7fad7 | 409109478f144791576ae6ca14e2756f8f2f5cb0 | refs/heads/master | 2021-01-18T12:25:06.081821 | 2016-09-04T17:43:44 | 2016-09-05T17:43:44 | 29,571,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | #!/usr/bin/env python
# Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
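# Example: merging 1->2->4 with 1->3->4 yields 1->1->2->3->4->4.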
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param {ListNode} l1
# @param {ListNode} l2
# @return {ListNode}
def mergeTwoLists(self, l1, l2):
if not l1 and not l2:
            return None;
elif not l1 and l2:
return l2;
elif l1 and not l2:
return l1;
result = [];
if l1.val <= l2.val:
result = l1;
result.next = self.mergeTwoLists(l1.next, l2);
elif l1.val > l2.val:
result = l2;
result.next = self.mergeTwoLists(l1, l2.next);
return result;
| [
"zwein27@gmail.com"
] | zwein27@gmail.com |
dfb5b716af7edd4891035a8b4764032f76c0481d | 42229d7c76c305cfde63659ad715a4e6bef0ea99 | /goods/util/kmean_util.py | 35e57660a3523cb8e084c78760968bd33856d814 | [] | no_license | LRJliurj/GoodsServer | 4a043d2f1195e4793aad327732201375495a88f9 | c8c1bbda4fa4ba2a0e8a4055a67b7278ddb15b03 | refs/heads/master | 2020-07-05T14:03:58.536658 | 2019-09-24T03:01:53 | 2019-09-24T03:01:53 | 202,668,466 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,076 | py | from set_config import config
import os
import numpy as np
from goods.util.distance_util import pdis
# Online util for fetching features of the saved kmeans model; after saving, the entries are re-sorted.
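# Hypothetical usage sketch (names follow this module's config layout):
#   upcs = online_util().get_topn_upc(cluter=3, img_feature=feat_vec)
# returns the de-duplicated UPC list stored in "3.txt" under kmean_predict_features_path.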
class online_util:
save_sort_feature_path = config.goods_params['kmean_params']['online']["kmean_predict_features_path"]
top_n = config.goods_params['kmean_params']['top_n']
    # Fetch the goods features of the specified cluster
def get_good_feature(self,cluter):
cluter_feature_file = os.path.join(self.save_sort_feature_path,str(cluter)+".txt")
# features = []
goods_upcs = []
# dises = []
with open(cluter_feature_file,'r') as f:
features = f.readlines()
for feature in features:
ft = feature.split(",")
good_upc = ft[0]
# dis = ft[-1]
goods_upcs.append(good_upc)
# dises.append(float(dis))
# return features,goods_upcs,dises
return goods_upcs
    # Fetch the top_n goods
def get_topn_upc(self,cluter,img_feature,top_n=top_n):
# upcs = []
# features, goods_upcs, dises = self.get_good_feature(cluter)
goods_upcs = self.get_good_feature(cluter)
return list(set(goods_upcs))
# cluter_dict = {}
# for i,good_upc,feature in zip(range(len(goods_upcs)),goods_upcs,features):
# featArr = feature.split(',')[2:-1]
# f1s = []
# for f1 in featArr:
# # print (f1)
# f1s.append(float(f1))
# upcs.append(good_upc)
# to_img_dis = pdis(f1s, img_feature)[0]
# cluter_dict[str(i)+"##"+str(good_upc)] = abs(to_img_dis)
# a = sorted(cluter_dict.items(), key=lambda x: x[1], reverse=False)
# for key in a:
# upc = key[0].split("##")[1]
# if (len(upcs) <= top_n) and (upc not in upcs):
# upcs.append(upc)
# elif (len(upcs) >= top_n):
# break
# return upcs
    # Save the cluster features of a newly created good
def save_new_goods_feature(self,cluter,to_cluter_dis,good_upc,good_feature,img_file_name):
fts = good_upc+","+img_file_name
for gf in good_feature:
fts = fts+","+str(float(gf))
fts=fts+","+str(to_cluter_dis)
cluter_feature_file = os.path.join(self.save_sort_feature_path, str(cluter) + ".txt")
with open(cluter_feature_file,'a+') as f:
f.write(fts)
f.write("\n")
    # Fetch all known features at the time of retraining
def get_all_features(self):
goods_upcs = []
all_features = []
img_file_names=[]
for cluter_feature_file in os.listdir(self.save_sort_feature_path):
cfile = os.path.join(self.save_sort_feature_path,cluter_feature_file)
with open(cfile, 'r') as f:
features = f.readlines()
for feature in features:
ft = feature.split(",")
good_upc = ft[0]
img_file = ft[1]
feats = ft[2:(len(ft)-1)]
feats = list(map(float, feats))
goods_upcs.append(good_upc)
all_features.append(feats)
img_file_names.append(img_file)
return goods_upcs,all_features,img_file_names
    # Save the sorted cluster features after online retraining completes
def write_sort_feature(self, all_features, label_centers, centers,goods_upcs,img_file_names):
for j, center in zip(range(len(centers)), centers):
center_dict = []
for i, img_feature,good_upc,img_file_name in zip(label_centers, all_features,goods_upcs,img_file_names):
if j == i:
dis = pdis(center, img_feature)[0]
file_feature = str(good_upc)+","+str(img_file_name)
for feat in img_feature:
file_feature = file_feature+","+str(float(feat))
center_dict.append(file_feature + "," + str(float(dis)))
# a = sorted(center_dict.items(), key=lambda x: x[1], reverse=True)
save_file = os.path.join(self.save_sort_feature_path, str(j) + ".txt")
with open(save_file, 'w') as f:
for key in center_dict:
f.write(key)
f.write("\n")
    # Delete the cluster vector of the specified goods image
def delete_feature(self,goods_shelfgoods_id):
upc_filename = goods_shelfgoods_id
files = os.listdir(self.save_sort_feature_path)
file_index_path = None
for file in files:
file_path = os.path.join(self.save_sort_feature_path,file)
with open(file_path,'r') as f:
lines = f.readlines()
for line in lines:
if upc_filename in line.split(","):
file_index_path = file_path
break
if file_index_path != None:
break
if file_index_path == None:
return -1
new_lines=[]
with open(file_index_path, 'r') as f:
lines = f.readlines()
for line in lines:
if upc_filename not in line:
new_lines.append(line)
if len(new_lines) < 1:
return -1
else:
with open(file_index_path, 'w') as f:
for line in new_lines:
                    f.write(line if line.endswith("\n") else line + "\n")
return 0
# Offline util: saves the kmeans model, extracts features, and stores the sorted cluster features.
class offline_util:
feature_path = config.goods_params['kmean_params']['offline']["vgg_predict_features_path"]
feature_path_file = config.goods_params['kmean_params']['offline']["vgg_predict_features_path1"]
save_sort_feature_path = config.goods_params['kmean_params']['online']["kmean_predict_features_path"]
img_features = []
X = []
def get_goods_features1(self):
with open(self.feature_path_file, 'r') as f:
lines = f.readlines()
for line in lines:
feature = line.split(",")
filename = feature[0]
upc = None
if "_" in filename:
upc = filename.split("_")[0]
else:
upc = filename.strip(".jpg")
feature = feature[1:]
feat = []
for fea in feature:
feat.append(float(fea))
# print (len(feat))
featArr = np.array(feat)
featArr.resize(512, 7)
f1s = []
f2s = upc + ',' + filename
for f1 in featArr:
f1s.append(float(np.sum(f1)))
f2s = f2s + "," + str(float(np.sum(f1)))
self.X.append(f1s)
self.img_features.append(f2s)
return self.img_features,self.X
def get_goods_features(self):
for good_feature_file in os.listdir(self.feature_path):
img_feature_path = os.path.join(self.feature_path, good_feature_file)
good_upc = str(good_feature_file).strip(".txt")
self.get_feature(good_upc, img_feature_path)
return self.img_features,self.X
def get_feature(self,goods_upc, file_feature):
with open(file_feature, 'r') as f:
lines = f.readlines()
for line in lines:
feature = line.split(",")
filename = feature[0]
if "train_augment0" not in filename:
continue
feature = feature[1:]
feat = []
for fea in feature:
feat.append(float(fea))
# print (len(feat))
featArr = np.array(feat)
featArr.resize(512, 7)
f1s = []
f2s = goods_upc + ',' + filename
for f1 in featArr:
f1s.append(float(np.sum(f1)))
f2s = f2s + "," + str(float(np.sum(f1)))
self.X.append(f1s)
self.img_features.append(f2s)
def write_sort_feature(self,img_features, label_center, centers):
for j, center in zip(range(len(centers)), centers):
center_dict = []
for i, img_feature in zip(label_center, img_features):
if j == i:
feature = str(img_feature).split(",")
feature_img = feature[2:]
feature_img = list(map(float,feature_img))
dis = pdis(center, feature_img)[0]
center_dict.append(img_feature+","+str(float(dis)))
# a = sorted(center_dict.items(), key=lambda x: x[1], reverse=True)
save_file = os.path.join(self.save_sort_feature_path,str(j)+".txt")
with open(save_file, 'w') as f:
for key in center_dict:
f.write(key)
f.write("\n") | [
"908601417@qq.com"
] | 908601417@qq.com |
3980853af39a2d0a86828f26258a712df25ceefd | a47e4026ab8f791518d0319c5f3ec8c5a8afec2e | /Terrain/midlout2h.py | 84d08a8f8039a4bade027f5f33d7513e5de75c2f | [] | no_license | bobbyrward/horrible-terrain-demo | 715064fd020a620751b0c99f0a324300dd4e387e | 55c9add73f5179b4272538950ec8a713dbed88b2 | refs/heads/master | 2016-09-06T08:29:53.623401 | 2009-10-28T19:20:24 | 2009-10-28T19:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | import re
import sys
hresult_re = re.compile(r'STDMETHOD\((.*?)\)\(\s*THIS_?\s*(.*?)\)\s*PURE\s*;\s*')
rval_re = re.compile(r'STDMETHOD_\((.*?), (.*?)\)\(\s*THIS_?\s*(.*?)\)\s*PURE\s*;\s*')
# this
"""STDMETHOD(EndStateBlock)(THIS_ IDirect3DStateBlock9** ppSB) PURE;
"""
# to this
"""HRESULT EndStateBlock(IDirect3DStateBlock9** ppSB) {
return (*this)->EndStateBlock(ppSB);
}
"""
def output_func_call(outfile, rval, name, params):
splitParams = [ x.strip().rsplit(' ', 1) for x in params.split(',') ]
if len(splitParams) == 1 and len(splitParams[0]) == 1:
outfile.write("\t\t%s %s() {\n" % (rval, name))
outfile.write("\t\t\treturn (*this)->%s();\n" % name)
outfile.write("\t\t}\n\n")
else:
outfile.write("\t\t%s %s(%s) {\n" %(rval, name, params))
param_names = ', '.join([ x[1].strip('*') for x in splitParams ])
outfile.write("\t\t\treturn (*this)->%s(%s);\n" % (name, param_names))
outfile.write("\t\t}\n\n")
with open('device_in.txt') as fd:
with open('device_method_calls.h', 'w') as outfile:
outfile.write('/*************************************************/\n')
outfile.write('/* This file is autogenerated by midlout2h. */\n')
outfile.write('/* DO NOT EDIT */\n')
outfile.write('/*************************************************/\n')
outfile.write('\n')
for line in fd:
print line
if hresult_re.match(line):
output_func_call(outfile, 'HRESULT', *hresult_re.match(line).groups())
elif rval_re.match(line):
output_func_call(outfile, *rval_re.match(line).groups())
else:
if(line.strip()):
raise RuntimeError('Unmatchable line "%s"' % line)
| [
"bobbyrward@gmail.com"
] | bobbyrward@gmail.com |
c905281a43641c6a9fe6fea83f5366746deb0ea9 | ffa8a728f43b6de2b9a4dbfda18f3eb8518fbbbd | /snmp-mibs/SOURCE-ROUTING-MIB.py | a5513535109769810f0bc8e436eb6afe1d580ff4 | [] | no_license | oriordan/pysnmp_mibs | 60e0d80e3f50490d9e6ab29d21627fec59ab0cfc | 92d39abf358a952e55a426e2a4658f4b0824182f | refs/heads/master | 2021-01-09T23:37:59.137750 | 2014-11-26T20:07:28 | 2014-11-26T20:07:28 | 20,253,987 | 11 | 15 | null | 2020-07-26T02:49:32 | 2014-05-28T10:43:18 | Python | UTF-8 | Python | false | false | 14,125 | py | # PySNMP SMI module. Autogenerated from smidump -f python SOURCE-ROUTING-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:58:14 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( dot1dBridge, dot1dSr, ) = mibBuilder.importSymbols("BRIDGE-MIB", "dot1dBridge", "dot1dSr")
( MibScalar, MibTable, MibTableRow, MibTableColumn, ) = mibBuilder.importSymbols("RFC-1212", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
( Counter, Gauge, ) = mibBuilder.importSymbols("RFC1155-SMI", "Counter", "Gauge")
( Bits, Integer32, MibIdentifier, TimeTicks, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "MibIdentifier", "TimeTicks")
# Objects
dot1dSrPortTable = MibTable((1, 3, 6, 1, 2, 1, 17, 3, 1))
if mibBuilder.loadTexts: dot1dSrPortTable.setDescription("A table that contains information about every\nport that is associated with this source route\nbridge.")
dot1dSrPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 3, 1, 1)).setIndexNames((0, "SOURCE-ROUTING-MIB", "dot1dSrPort"))
if mibBuilder.loadTexts: dot1dSrPortEntry.setDescription("A list of information for each port of a source\nroute bridge.")
dot1dSrPort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPort.setDescription("The port number of the port for which this entry\ncontains Source Route management information.")
dot1dSrPortHopCount = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dSrPortHopCount.setDescription("The maximum number of routing descriptors allowed\nin an All Paths or Spanning Tree Explorer frames.")
dot1dSrPortLocalSegment = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dSrPortLocalSegment.setDescription("The segment number that uniquely identifies the\nsegment to which this port is connected. Current\nsource routing protocols limit this value to the\nrange: 0 through 4095. (The value 0 is used by\nsome management applications for special test\ncases.) A value of 65535 signifies that no segment\nnumber is assigned to this port.")
dot1dSrPortBridgeNum = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dSrPortBridgeNum.setDescription("A bridge number uniquely identifies a bridge when\nmore than one bridge is used to span the same two\nsegments. Current source routing protocols limit\nthis value to the range: 0 through 15. A value of\n65535 signifies that no bridge number is assigned\nto this bridge.")
dot1dSrPortTargetSegment = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dSrPortTargetSegment.setDescription("The segment number that corresponds to the target\nsegment this port is considered to be connected to\nby the bridge. Current source routing protocols\nlimit this value to the range: 0 through 4095.\n(The value 0 is used by some management\napplications for special test cases.) A value of\n65535 signifies that no target segment is assigned\nto this port.")
dot1dSrPortLargestFrame = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dSrPortLargestFrame.setDescription("The maximum size of the INFO field (LLC and\nabove) that this port can send/receive. It does\nnot include any MAC level (framing) octets. The\nvalue of this object is used by this bridge to\ndetermine whether a modification of the\nLargestFrame (LF, see [14]) field of the Routing\nControl field of the Routing Information Field is\nnecessary.\n\n64 valid values are defined by the IEEE 802.5M SRT\nAddendum: 516, 635, 754, 873, 993, 1112, 1231,\n1350, 1470, 1542, 1615, 1688, 1761, 1833, 1906,\n1979, 2052, 2345, 2638, 2932, 3225, 3518, 3812,\n4105, 4399, 4865, 5331, 5798, 6264, 6730, 7197,\n7663, 8130, 8539, 8949, 9358, 9768, 10178, 10587,\n10997, 11407, 12199, 12992, 13785, 14578, 15370,\n16163, 16956, 17749, 20730, 23711, 26693, 29674,\n32655, 35637, 38618, 41600, 44591, 47583, 50575,\n53567, 56559, 59551, and 65535.\n\nAn illegal value will not be accepted by the\nbridge.")
dot1dSrPortSTESpanMode = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 7), Integer().subtype(subtypeSpec=SingleValueConstraint(2,3,1,)).subtype(namedValues=NamedValues(("auto-span", 1), ("disabled", 2), ("forced", 3), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dSrPortSTESpanMode.setDescription("Determines how this port behaves when presented\nwith a Spanning Tree Explorer frame. The value\n'disabled(2)' indicates that the port will not\naccept or send Spanning Tree Explorer packets; any\nSTE packets received will be silently discarded.\nThe value 'forced(3)' indicates the port will\nalways accept and propagate Spanning Tree Explorer\nframes. This allows a manually configured\nSpanning Tree for this class of packet to be\nconfigured. Note that unlike transparent\nbridging, this is not catastrophic to the network\nif there are loops. The value 'auto-span(1)' can\nonly be returned by a bridge that both implements\nthe Spanning Tree Protocol and has use of the\nprotocol enabled on this port. The behavior of the\nport for Spanning Tree Explorer frames is\ndetermined by the state of dot1dStpPortState. If\nthe port is in the 'forwarding' state, the frame\nwill be accepted or propagated. Otherwise, it\nwill be silently discarded.")
dot1dSrPortSpecInFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 8), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortSpecInFrames.setDescription("The number of Specifically Routed frames, also\nreferred to as Source Routed Frames, that have\nbeen received from this port's segment.")
dot1dSrPortSpecOutFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 9), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortSpecOutFrames.setDescription("The number of Specifically Routed frames, also\nreferred to as Source Routed Frames, that this\nport has transmitted on its segment.")
dot1dSrPortApeInFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 10), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortApeInFrames.setDescription("The number of All Paths Explorer frames, also\nreferred to as All Routes Explorer frames, that\nhave been received by this port from its segment.")
dot1dSrPortApeOutFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 11), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortApeOutFrames.setDescription("The number of all Paths Explorer Frames, also\nreferred to as All Routes Explorer frames, that\nhave been transmitted by this port on its\nsegment.")
dot1dSrPortSteInFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 12), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortSteInFrames.setDescription("The number of spanning tree explorer frames that\nhave been received by this port from its segment.")
dot1dSrPortSteOutFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 13), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortSteOutFrames.setDescription("The number of spanning tree explorer frames that\nhave been transmitted by this port on its\nsegment.")
dot1dSrPortSegmentMismatchDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 14), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortSegmentMismatchDiscards.setDescription("The number of explorer frames that have been\ndiscarded by this port because the routing\ndescriptor field contained an invalid adjacent\nsegment value.")
dot1dSrPortDuplicateSegmentDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 15), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortDuplicateSegmentDiscards.setDescription("The number of frames that have been discarded by\nthis port because the routing descriptor field\ncontained a duplicate segment identifier.")
dot1dSrPortHopCountExceededDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 16), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortHopCountExceededDiscards.setDescription("The number of explorer frames that have been\ndiscarded by this port because the Routing\nInformation Field has exceeded the maximum route\ndescriptor length.")
dot1dSrPortDupLanIdOrTreeErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 17), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortDupLanIdOrTreeErrors.setDescription("The number of duplicate LAN IDs or Tree errors.\nThis helps in detection of problems in networks\ncontaining older IBM Source Routing Bridges.")
dot1dSrPortLanIdMismatches = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 3, 1, 1, 18), Counter()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dSrPortLanIdMismatches.setDescription("The number of ARE and STE frames that were\ndiscarded because the last LAN ID in the routing\ninformation field did not equal the LAN-in ID.\nThis error can occur in implementations which do\nonly a LAN-in ID and Bridge Number check instead\nof a LAN-in ID, Bridge Number, and LAN-out ID\ncheck before they forward broadcast frames.")
dot1dSrBridgeLfMode = MibScalar((1, 3, 6, 1, 2, 1, 17, 3, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,)).subtype(namedValues=NamedValues(("mode3", 1), ("mode6", 2), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dSrBridgeLfMode.setDescription("Indicates whether the bridge operates using older\n3 bit length negotiation fields or the newer 6 bit\nlength field in its RIF.")
dot1dPortPair = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 10))
dot1dPortPairTableSize = MibScalar((1, 3, 6, 1, 2, 1, 17, 10, 1), Gauge()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dot1dPortPairTableSize.setDescription("The total number of entries in the Bridge Port\nPair Database.")
dot1dPortPairTable = MibTable((1, 3, 6, 1, 2, 1, 17, 10, 2))
if mibBuilder.loadTexts: dot1dPortPairTable.setDescription("A table that contains information about every\nport pair database entity associated with this\nsource routing bridge.")
dot1dPortPairEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 10, 2, 1)).setIndexNames((0, "SOURCE-ROUTING-MIB", "dot1dPortPairLowPort"), (0, "SOURCE-ROUTING-MIB", "dot1dPortPairHighPort"))
if mibBuilder.loadTexts: dot1dPortPairEntry.setDescription("A list of information for each port pair entity\nof a bridge.")
dot1dPortPairLowPort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 10, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dPortPairLowPort.setDescription("The port number of the lower numbered port for\nwhich this entry contains port pair database\ninformation.")
dot1dPortPairHighPort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 10, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dPortPairHighPort.setDescription("The port number of the higher numbered port for\nwhich this entry contains port pair database\ninformation.")
dot1dPortPairBridgeNum = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 10, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dPortPairBridgeNum.setDescription("A bridge number that uniquely identifies the path\nprovided by this source routing bridge between the\nsegments connected to dot1dPortPairLowPort and\ndot1dPortPairHighPort. The purpose of bridge\nnumber is to disambiguate between multiple paths\nconnecting the same two LANs.")
dot1dPortPairBridgeState = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 10, 2, 1, 4), Integer().subtype(subtypeSpec=SingleValueConstraint(2,1,3,)).subtype(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("invalid", 3), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: dot1dPortPairBridgeState.setDescription("The state of dot1dPortPairBridgeNum. Writing\n'invalid(3)' to this object removes the\ncorresponding entry.")
# Augmentations
# Exports
# Objects
mibBuilder.exportSymbols("SOURCE-ROUTING-MIB", dot1dSrPortTable=dot1dSrPortTable, dot1dSrPortEntry=dot1dSrPortEntry, dot1dSrPort=dot1dSrPort, dot1dSrPortHopCount=dot1dSrPortHopCount, dot1dSrPortLocalSegment=dot1dSrPortLocalSegment, dot1dSrPortBridgeNum=dot1dSrPortBridgeNum, dot1dSrPortTargetSegment=dot1dSrPortTargetSegment, dot1dSrPortLargestFrame=dot1dSrPortLargestFrame, dot1dSrPortSTESpanMode=dot1dSrPortSTESpanMode, dot1dSrPortSpecInFrames=dot1dSrPortSpecInFrames, dot1dSrPortSpecOutFrames=dot1dSrPortSpecOutFrames, dot1dSrPortApeInFrames=dot1dSrPortApeInFrames, dot1dSrPortApeOutFrames=dot1dSrPortApeOutFrames, dot1dSrPortSteInFrames=dot1dSrPortSteInFrames, dot1dSrPortSteOutFrames=dot1dSrPortSteOutFrames, dot1dSrPortSegmentMismatchDiscards=dot1dSrPortSegmentMismatchDiscards, dot1dSrPortDuplicateSegmentDiscards=dot1dSrPortDuplicateSegmentDiscards, dot1dSrPortHopCountExceededDiscards=dot1dSrPortHopCountExceededDiscards, dot1dSrPortDupLanIdOrTreeErrors=dot1dSrPortDupLanIdOrTreeErrors, dot1dSrPortLanIdMismatches=dot1dSrPortLanIdMismatches, dot1dSrBridgeLfMode=dot1dSrBridgeLfMode, dot1dPortPair=dot1dPortPair, dot1dPortPairTableSize=dot1dPortPairTableSize, dot1dPortPairTable=dot1dPortPairTable, dot1dPortPairEntry=dot1dPortPairEntry, dot1dPortPairLowPort=dot1dPortPairLowPort, dot1dPortPairHighPort=dot1dPortPairHighPort, dot1dPortPairBridgeNum=dot1dPortPairBridgeNum, dot1dPortPairBridgeState=dot1dPortPairBridgeState)
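# Hedged usage note (not part of the generated module): these MIB objects are
# normally read over SNMP rather than imported directly. For example, the port
# pair table defined above at OID 1.3.6.1.2.1.17.10.2 could be walked with the
# standard net-snmp tools (host and community string are placeholders):
#
#   snmpwalk -v2c -c public <bridge-host> 1.3.6.1.2.1.17.10.2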
| [
"oriordan@devel.hu"
] | oriordan@devel.hu |
64466beaf3a967d6e4a630cb489949ec77b7de52 | 17a655d21d7ddaf8cf60e23055e107cb602bd9bc | /project/bookmarker/signals.py | f25b97e867ff7e735283ada57d8066db7ebe124d | [] | no_license | geofferyj/YouTubeVideoBookmarker | fedb6913a8c5118c0a51f011244233630cf6f58c | fbf10230c5184cd1479dddafbcfd3609d5ac98f1 | refs/heads/master | 2023-08-04T22:30:37.636957 | 2021-03-01T08:09:46 | 2021-03-01T08:09:46 | 278,203,783 | 0 | 0 | null | 2021-09-22T19:46:09 | 2020-07-08T22:05:00 | JavaScript | UTF-8 | Python | false | false | 1,646 | py | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from bookmarker.models import Token, Video, ResetableViews, Subscription, VoicePause, VoicePlay
# VoicePause
@receiver(post_save, sender=User)
def create_voicepause(sender, instance, created, **kwargs):
if created:
VoicePause.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_voicepause(sender, instance, **kwargs):
instance.voice_pause.save()
# VoicePlay
@receiver(post_save, sender=User)
def create_voiceplay(sender, instance, created, **kwargs):
if created:
VoicePlay.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_voiceplay(sender, instance, **kwargs):
instance.voice_play.save()
# Token
@receiver(post_save, sender=User)
def create_token(sender, instance, created, **kwargs):
if created:
Token.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_token(sender, instance, **kwargs):
instance.tokens.save()
# Subscription
@receiver(post_save, sender=User)
def create_subscription(sender, instance, created, **kwargs):
if created:
Subscription.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_subscription(sender, instance, **kwargs):
instance.subscription.save()
# ResetableViews
@receiver(post_save, sender=Video)
def create_rviews(sender, instance, created, **kwargs):
if created:
ResetableViews.objects.create(video=instance)
@receiver(post_save, sender=Video)
def save_rviews(sender, instance, **kwargs):
instance.rviews.save()
| [
"geofferyjoseph1@gmail.com"
] | geofferyjoseph1@gmail.com |
0efe422b2d4a7ed61c38d320817a656491c43136 | 4809471274d6e136ac66d1998de5acb185d1164e | /pypureclient/flasharray/FA_2_5/models/alert_event_get_response.py | 277b06b6969aff3effbb0edb3555fb2a5e1c3824 | [
"BSD-2-Clause"
] | permissive | astrojuanlu/py-pure-client | 053fef697ad03b37ba7ae21a0bbb466abf978827 | 6fa605079950765c316eb21c3924e8329d5e3e8a | refs/heads/master | 2023-06-05T20:23:36.946023 | 2021-06-28T23:44:24 | 2021-06-28T23:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class AlertEventGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[AlertEvent]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.AlertEvent]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[AlertEvent])
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AlertEventGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AlertEventGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AlertEventGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
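# Minimal usage sketch (illustrative only, not part of the generated model):
# the keyword arguments mirror the paged-response fields documented in
# __init__, and to_dict() flattens any nested models.
#
#   resp = AlertEventGetResponse(continuation_token='token-from-previous-page',
#                                more_items_remaining=True,
#                                total_item_count=250,
#                                items=[])
#   page = resp.to_dict()
#   # A caller would keep requesting pages until 'continuation_token' is
#   # absent or 'more_items_remaining' is False.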
| [
"hubert.chan@purestorage.com"
] | hubert.chan@purestorage.com |
196be9077ae178f63ed463bc211d805e9e2ae2c5 | c8069f89af457ba9cea5f3e0306aecc289f53da6 | /backend/manage.py | f8fb179532211c6515a0bc0779af29a70e2390d1 | [] | no_license | crowdbotics-apps/bz-chat-24445 | 3f988ad908bb32c9bbaf147aff0f5360177c8ad2 | 464fe7a3ccd4105c020cdea1762b34be1aec38a1 | refs/heads/master | 2023-03-10T14:50:17.136207 | 2021-02-11T08:58:12 | 2021-02-11T08:58:12 | 337,975,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bz_chat_24445.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
fef2f7c9212ccab6017d6dd21a22597a88f51592 | fa6612470d814f365280b0480b14748f27c1333c | /Data Visualization with Matplotlib/23_sharexAxis.py | 5e67e5f9cf5ee0bce913dac588aef4e1792b6f97 | [] | no_license | SaretMagnoslove/Practical_Machine_Learning_with_python | ffca9da49774d6bf4c459960a691b8dc351f8f1f | 2e426cecb99831bba75ff2faad1d61f1b802dacb | refs/heads/master | 2020-03-23T02:16:24.274694 | 2018-09-25T23:19:51 | 2018-09-25T23:19:51 | 140,965,770 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,683 | py | import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib.dates import bytespdate2num
from matplotlib.finance import candlestick_ohlc
from matplotlib import style
import numpy as np
import urllib
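# Portability note (assumption about newer environments, not part of the
# original script): matplotlib.finance was deprecated and later removed from
# matplotlib itself, so on current installs candlestick_ohlc would instead be
# imported from the separate mpl_finance package, e.g.:
#
#   from mpl_finance import candlestick_ohlc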
# style.use('ggplot')
style.use('fivethirtyeight')
MA1 = 10
MA2 = 30
def moving_average(values, window):
weights = np.repeat(1.0, window) / window
smas = np.convolve(values, weights, 'valid')
return smas
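# Quick illustrative check (not in the original script): with a window of 3,
# np.convolve with uniform weights and mode='valid' returns
# len(values) - window + 1 averaged points, e.g.
#
#   moving_average(np.array([1., 2., 3., 4., 5.]), 3)  ->  array([2., 3., 4.])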
def highs_minus_lows(highs, lows):
    return highs - lows
def graph_data(stock):
fig = plt.figure()
ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan=1, colspan=1)
plt.title(stock)
plt.ylabel('H-l')
ax2 = plt.subplot2grid((6, 1), (1, 0), rowspan=4, colspan=1, sharex=ax1)
plt.ylabel('Price')
ax3 = plt.subplot2grid((6, 1), (5, 0), rowspan=1, colspan=1, sharex=ax1)
plt.ylabel('MovingAvg')
# Unfortunately, Yahoo's API is no longer available
# feel free to adapt the code to another source, or use this drop-in replacement.
stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement'
source_code = urllib.request.urlopen(stock_price_url).read().decode()
stock_data = []
split_source = source_code.split('\n')
for line in split_source[1:]:
split_line = line.split(',')
if len(split_line) == 7:
if 'values' not in line and 'labels' not in line:
stock_data.append(line)
date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(
stock_data,
delimiter=',',
unpack=True,
converters={0: bytespdate2num('%Y-%m-%d')})
x, y, ohlc = 0, len(date), []
while x < y:
append_me = date[x], openp[x], highp[x], lowp[x], closep[x], volume[x]
ohlc.append(append_me)
x += 1
ma1 = moving_average(closep, MA1)
ma2 = moving_average(closep, MA2)
start = len(date[MA2 - 1:])
    h_l = [highs_minus_lows(h, l) for h, l in zip(highp, lowp)]

    # h_l = list(map(highs_minus_lows, highp, lowp))
ax1.yaxis.set_major_locator(mticker.MaxNLocator(nbins=4, prune='lower'))
candlestick_ohlc(ax2, ohlc[-start:], width=0.4, colorup='g', colordown='r')
for label in ax2.xaxis.get_ticklabels():
label.set_rotation(45)
ax2.yaxis.set_major_locator(mticker.MaxNLocator(nbins=7, prune='upper'))
ax2.grid(True)
bbox_props = dict(boxstyle='larrow', fc='w', ec='k', lw=1)
ax2.annotate(
str(closep[-1]), (date[0], closep[-1]),
xytext=(date[0] + 400, closep[-1]),
bbox=bbox_props)
ax3.plot(date[-start:], ma1[-start:], linewidth=1)
ax3.plot(date[-start:], ma2[-start:], linewidth=1)
ax3.fill_between(
date[-start:],
ma2[-start:],
ma1[-start:],
where=(ma1[-start:] < ma2[-start:]),
facecolor='r',
edgecolor='r',
alpha=0.5)
ax3.fill_between(
date[-start:],
ma2[-start:],
ma1[-start:],
where=(ma1[-start:] > ma2[-start:]),
facecolor='g',
edgecolor='g',
alpha=0.5)
ax3.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax3.xaxis.set_major_locator(mticker.MaxNLocator(10))
ax3.yaxis.set_major_locator(mticker.MaxNLocator(nbins=4, prune='upper'))
for label in ax3.xaxis.get_ticklabels():
label.set_rotation(45)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.subplots_adjust(
left=0.11, bottom=0.24, right=0.90, top=0.90, wspace=0.2, hspace=0)
plt.show()
graph_data('EBAY')
| [
"magnoslove@gmail.com"
] | magnoslove@gmail.com |
8427a365418b51b3933a3c9fc1d994443f00f617 | a3eb732ead7e1d10a85a88e42dc639eb16a40265 | /instagram_api/response/archived_stories_feed.py | e0ea4adece55f9e2c80964fe8585b77857ed3be3 | [
"MIT"
] | permissive | carsam2021/instagram_api | 7654c0f485c22935cf478016e46e65acbeda9344 | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | refs/heads/master | 2023-03-16T14:06:27.515432 | 2020-10-17T04:39:19 | 2020-10-17T04:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
from .model import ArchivedStoriesFeedItem
__all__ = ['ArchivedStoriesFeedResponse']
class ArchivedStoriesFeedResponseInterface(ApiResponseInterface):
items: [ArchivedStoriesFeedItem]
num_results: int
more_available: bool
max_id: int
class ArchivedStoriesFeedResponse(ApiResponse, ArchivedStoriesFeedResponseInterface):
pass
| [
"root@proscript.ru"
] | root@proscript.ru |
d5639c623837332fced341fd3abdf47957f070cd | b24ce5acced59ef367a20706949953f3ea81d57a | /tensorflow/contrib/seq2seq/python/ops/basic_decoder.py | d19e2b0d5e469b484a16e9290a1cb09684c16638 | [
"Apache-2.0"
] | permissive | BoldizsarZopcsak/Image-Classifier | b57dd3b72cf368cc1d66a5e318003a2a2d8338a4 | c0d471a55a70b3118178488db3c005a9277baade | refs/heads/master | 2022-11-19T12:28:49.625532 | 2018-01-20T15:48:48 | 2018-01-20T15:48:48 | 118,253,026 | 1 | 1 | Apache-2.0 | 2022-11-01T09:24:24 | 2018-01-20T15:04:57 | Python | UTF-8 | Python | false | false | 5,584 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class of Decoders that may sample to generate the next input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.util import nest
__all__ = [
"BasicDecoderOutput",
"BasicDecoder",
]
class BasicDecoderOutput(
collections.namedtuple("BasicDecoderOutput", ("rnn_output", "sample_id"))):
pass
class BasicDecoder(decoder.Decoder):
"""Basic sampling decoder."""
def __init__(self, cell, helper, initial_state, output_layer=None):
"""Initialize BasicDecoder.
Args:
cell: An `RNNCell` instance.
helper: A `Helper` instance.
initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output prior
to storing the result or sampling.
Raises:
TypeError: if `cell` is not an instance of `RNNCell`, `helper`
is not an instance of `Helper`, or `output_layer` is not an instance
of `tf.layers.Layer`.
"""
if not isinstance(cell, core_rnn_cell.RNNCell):
raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
if not isinstance(helper, helper_py.Helper):
raise TypeError("helper must be a Helper, received: %s" % type(helper))
if (output_layer is not None
and not isinstance(output_layer, layers_base._Layer)): # pylint: disable=protected-access
raise TypeError(
"output_layer must be a Layer, received: %s" % type(output_layer))
self._cell = cell
self._helper = helper
self._initial_state = initial_state
self._output_layer = output_layer
@property
def batch_size(self):
return self._helper.batch_size
def _rnn_output_size(self):
size = self._cell.output_size
if self._output_layer is None:
return size
else:
# To use layer's compute_output_shape, we need to convert the
# RNNCell's output_size entries into shapes with an unknown
# batch size. We then pass this through the layer's
# compute_output_shape and read off all but the first (batch)
# dimensions to get the output size of the rnn with the layer
# applied to the top.
output_shape_with_unknown_batch = nest.map_structure(
lambda s: tensor_shape.TensorShape([None]).concatenate(s),
size)
layer_output_shape = self._output_layer._compute_output_shape( # pylint: disable=protected-access
output_shape_with_unknown_batch)
return nest.map_structure(lambda s: s[1:], layer_output_shape)
@property
def output_size(self):
# Return the cell output and the id
return BasicDecoderOutput(
rnn_output=self._rnn_output_size(),
sample_id=tensor_shape.TensorShape([]))
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and int32 (the id)
dtype = nest.flatten(self._initial_state)[0].dtype
return BasicDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size()),
dtypes.int32)
def initialize(self, name=None):
"""Initialize the decoder.
Args:
name: Name scope for any created operations.
Returns:
`(finished, first_inputs, initial_state)`.
"""
return self._helper.initialize() + (self._initial_state,)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
cell_outputs, cell_state = self._cell(inputs, state)
if self._output_layer is not None:
cell_outputs = self._output_layer(cell_outputs)
sample_ids = self._helper.sample(
time=time, outputs=cell_outputs, state=cell_state)
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids)
outputs = BasicDecoderOutput(cell_outputs, sample_ids)
return (outputs, next_state, next_inputs, finished)
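# Hedged usage sketch (not part of this module): a BasicDecoder is normally
# built from a cell plus a Helper and then driven by dynamic_decode, which is
# defined in decoder.py alongside this class. Roughly:
#
#   helper = helper_py.TrainingHelper(decoder_inputs, sequence_lengths)
#   basic_decoder = BasicDecoder(cell=rnn_cell, helper=helper,
#                                initial_state=encoder_state)
#   final_outputs, final_state = decoder.dynamic_decode(basic_decoder)
#
# `decoder_inputs`, `sequence_lengths`, `rnn_cell` and `encoder_state` are
# placeholder names rather than symbols defined in this file, and the exact
# return signature of dynamic_decode differs between contrib versions.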
| [
"zboldi@gmail.com"
] | zboldi@gmail.com |
c69bb2f790f1d53f574bc2e900a0ceffb5445294 | f7ee578df2ef14dca614ea2238520405a7bc1010 | /emission/tests/storageTests/TestMoveFilterField.py | c371fc9aefc1b17ab39a5500b2cbb2002ccf0933 | [
"BSD-3-Clause"
] | permissive | jeffdh5/e-mission-server | 5a02583d7a06b902fa4c1af7858df58c0bd65145 | c4e3ebdb77133a58b5e2796c8850b479ffa8a096 | refs/heads/master | 2020-12-11T07:38:18.620865 | 2015-12-31T01:05:37 | 2015-12-31T01:05:37 | 30,099,294 | 0 | 0 | null | 2015-01-31T02:14:15 | 2015-01-31T02:14:15 | null | UTF-8 | Python | false | false | 3,565 | py | # Standard imports
import unittest
import datetime as pydt
import logging
import json
# Our imports
import emission.core.get_database as edb
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
# Test imports
import emission.tests.common as etc
class TestTimeSeries(unittest.TestCase):
def setUp(self):
etc.setupRealExample(self, "emission/tests/data/real_examples/iphone_2015-11-06")
def tearDown(self):
edb.get_timeseries_db().remove({"user_id": self.testUUID})
def testMoveFilters(self):
# First, check that all filters are in metadata
for entry in edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "background/location"}):
del entry["_id"]
entry["metadata"]["key"] = "background/filtered_location"
edb.get_timeseries_db().insert(entry)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "background/location"}).count(), 474)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "background/filtered_location"}).count(), 474)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "background/motion_activity"}).count(), 594)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "statemachine/transition"}).count(), 20)
# Now, move all filters
estfm.move_all_filters_to_data()
# Finally, check that no filters are in metadata
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "background/location"}).count(), 0)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "background/filtered_location"}).count(), 0)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "background/motion_activity"}).count(), 0)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'metadata.filter': 'distance',
"metadata.key": "statemachine/transition"}).count(), 0)
# And that location filters are in data
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'data.filter': 'distance',
"metadata.key": "background/location"}).count(), 474)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'data.filter': 'distance',
"metadata.key": "background/filtered_location"}).count(), 474)
# But not in the others
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'data.filter': 'distance',
"metadata.key": "background/motion_activity"}).count(), 0)
self.assertEquals(edb.get_timeseries_db().find({'user_id': self.testUUID,
'data.filter': 'distance',
"metadata.key": "statemachine/transition"}).count(), 0)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
01d2e4527f7d2563f3902393cf341ec5f00e4969 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/nws/rtoverridefwpol.py | 307c8d55e48a35eeb21395c77fc39efe8dff02b8 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,958 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtOverrideFwPol(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.nws.RtOverrideFwPol", "cobra.model.infra.AttPolicyGroup")
meta.moClassName = "nwsRtOverrideFwPol"
meta.rnFormat = "rtinfraOverrideFwPol-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Access Attachable Policy Group"
meta.writeAccessMask = 0x2100000000001
meta.readAccessMask = 0x2300000000011
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.nws.FwPol")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.pol.NFromRef")
meta.rnPrefixes = [
('rtinfraOverrideFwPol-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 19097, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4453
prop.defaultValueStr = "infraAttPolicyGroup"
prop._addConstant("infraAttPolicyGroup", None, 4453)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 19096, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("nwsFwPolToPortGroups", "Portgroups", "cobra.model.vmm.EpPD"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("nwsFwPolToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
e046c899eb7005cc67025d7f39e0ab584c58c2a5 | 3e7a772a74dca544c241c2a4bf34790b7cb13b50 | /trig/tests/gitinterface_tests | 9055befdd3519434e2f15a03e1ee2dea0bf2ff31 | [] | no_license | rrdrake/vvtools | 2af490f2dff6462320c14caa8b78463c640a51f6 | 2d07558737127077f97e9347d84e6ca46885b0bc | refs/heads/master | 2022-10-29T11:14:30.640460 | 2020-06-13T12:25:06 | 2020-06-13T12:25:06 | 109,034,639 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,076 | #!/usr/bin/env python
# Copyright 2018 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#RUNTEST:
import sys
sys.dont_write_bytecode = True
sys.excepthook = sys.__excepthook__
import os
from os.path import abspath
import time
import shutil
import glob
import filecmp
import unittest
import trigtestutils as trigutil
import testutils as util
from testutils import print3
from gitinterface import GitInterfaceError, GitInterface
from gitinterface import set_environ, change_directory
from gitinterface import copy_path_to_current_directory
from gitinterface import runcmd
from gitinterface import safe_repository_mirror
from gitinterface import repository_url_match
from gitinterface import is_a_local_repository
from gitinterface import verify_repository_url
from gitinterface import repo_name_from_url
class with_set_environ( trigutil.trigTestCase ):
def setUp(self):
""
trigutil.trigTestCase.setUp( self, cleanout=False )
def test_setting_no_names_should_not_change_environ(self):
""
orig = dict( os.environ )
with set_environ():
state = dict( os.environ )
self.assertEqual( orig, os.environ )
self.assertEqual( orig, state )
def test_setting_a_new_name_should_get_set_then_unset(self):
""
orig = dict( os.environ )
assert 'MY_SPECIAL_NAME' not in os.environ
with set_environ( MY_SPECIAL_NAME='my special value' ):
state = dict( os.environ )
self.assertEqual( orig, os.environ )
assert state['MY_SPECIAL_NAME'] == 'my special value'
def test_a_value_of_None_causes_an_unset(self):
""
orig = dict( os.environ )
os.environ['MY_SPECIAL_NAME'] = 'my special value'
assert 'MY_SPECIAL_NAME' in os.environ
with set_environ( MY_SPECIAL_NAME=None ):
state = dict( os.environ )
assert os.environ['MY_SPECIAL_NAME'] == 'my special value'
assert 'MY_SPECIAL_NAME' not in state
del os.environ['MY_SPECIAL_NAME']
def test_unset_has_no_affect_if_not_already_defined(self):
""
orig = dict( os.environ )
assert 'MY_SPECIAL_NAME' not in os.environ
with set_environ( MY_SPECIAL_NAME=None ):
state = dict( os.environ )
assert 'MY_SPECIAL_NAME' not in state
self.assertEqual( orig, state )
class create_and_clone( trigutil.trigTestCase ):
def test_create_repository_in_existing_directory(self):
""
git = GitInterface()
git.create()
time.sleep(1)
assert os.path.exists( '.git/config' )
def test_create_repository_in_a_new_directory(self):
""
git = GitInterface()
git.create( 'newrepo' )
time.sleep(1)
assert not os.path.exists( '.git' )
assert os.path.exists( 'newrepo/.git/config' )
def write_git_wrapper(self):
""
touchfile = abspath( 'touchfile.txt' )
util.writescript( 'mygit/gitwrapper', """
#!"""+sys.executable+"""
import os, sys, subprocess
fp = open( '"""+touchfile+"""', 'w' )
prox = os.environ.get( 'https_proxy', 'None' )
fp.write( 'https_proxy=' + prox + os.linesep )
prox = os.environ.get( 'HTTPS_PROXY', 'None' )
fp.write( 'HTTPS_PROXY=' + prox + os.linesep )
fp.close()
x = subprocess.call( ' '.join( ['git']+sys.argv[1:] ), shell=True )
assert x == 0
""" )
time.sleep(1)
def test_specify_git_executable_to_use(self):
""
self.write_git_wrapper()
git = GitInterface( gitexe=abspath( 'mygit/gitwrapper' ) )
git.create( 'newrepo' )
time.sleep(1)
assert os.path.exists( 'touchfile.txt' )
assert os.path.exists( 'newrepo/.git/config' )
def test_specify_https_proxy(self):
""
self.write_git_wrapper()
with set_environ( https_proxy=None, HTTPS_PROXY=None ):
url = util.create_local_bare_repository( 'example' )
util.push_file_to_repo( url, 'file.txt', 'file contents' )
assert 'https_proxy' not in os.environ
assert 'HTTPS_PROXY' not in os.environ
git = GitInterface( gitexe=abspath( 'mygit/gitwrapper' ),
https_proxy='fakeurl://some/thing' )
git.clone( url )
git.currentBranch()
assert len( util.grepfiles( 'https_proxy=fakeurl://some/thing',
'touchfile.txt' ) ) == 1
assert len( util.grepfiles( 'HTTPS_PROXY=fakeurl://some/thing',
'touchfile.txt' ) ) == 1
def test_create_bare_repository(self):
""
git = GitInterface()
git.create( 'newrepo.git', bare=True )
assert git.isBare()
time.sleep(1)
lineL = util.grepfiles( 'bare', 'newrepo.git/config' )
assert len(lineL) == 1 and 'true' in lineL[0].lower()
def test_clone_a_local_repository(self):
""
url = util.create_local_bare_repository( 'example' )
util.push_file_to_repo( url, 'file.txt', 'file contents' )
git = GitInterface( origin_url=url )
time.sleep(1)
assert not git.isBare()
assert len( util.grepfiles( 'example', 'example/.git/config' ) ) > 0
assert len( util.grepfiles( 'file contents', 'example/file.txt' ) ) == 1
def test_setting_root_directory_in_constructor(self):
""
os.mkdir( 'adir' )
time.sleep(1)
bare_url = util.create_local_bare_repository( 'example' )
util.push_file_to_repo( bare_url, 'file.txt', 'file contents' )
with change_directory( 'adir' ):
GitInterface( bare_url )
git2 = GitInterface( rootdir='adir/example' )
assert not git2.isBare()
assert git2.currentBranch() == 'master'
def test_using_quiet_option_to_repress_clone_error_message(self):
""
url = 'file://'+abspath('fakerepo')
os.mkdir( 'curdir' )
time.sleep(1)
os.chdir( 'curdir' )
git = GitInterface()
redir = util.RedirectStdout( 'stdout.log', 'stderr.log' )
try:
git.clone( url, quiet=True )
except GitInterfaceError:
caught = True
except:
redir.close()
raise
redir.close()
assert caught
assert not util.readfile( 'stdout.log' ).strip()
assert not util.readfile( 'stderr.log' ).strip()
def test_clone_master_branch_only(self):
""
url = util.create_bare_repo_with_topic_branch( 'example' )
# default clone first; a checkout of the branch should succeed
git = GitInterface()
git.clone( url )
runcmd( 'git checkout topic', chdir='example' )
shutil.rmtree( 'example' )
time.sleep(1)
# clone with master only; a checkout of the branch should fail
git = GitInterface()
git.clone( url, branch='master' )
self.assertRaises( GitInterfaceError,
runcmd, 'git checkout topic', chdir='example' )
def test_clone_into_a_subdirectory(self):
""
url = util.create_bare_repo_with_topic_branch( 'example' )
git1 = GitInterface()
url1 = git1.clone( url, rootdir='ex1' )
git2 = GitInterface( url, rootdir='ex2' )
git3 = GitInterface()
url3 = git3.clone( url, rootdir='ex3', branch='topic' )
fL = glob.glob( 'ex*/.git/config' )
fL.sort()
assert fL == ['ex1/.git/config', 'ex2/.git/config', 'ex3/.git/config']
git = GitInterface()
assert len( git.listRemoteBranches( url1 ) ) > 0
assert len( git.listRemoteBranches( url3 ) ) > 0
def test_getting_root_directory_without_clone(self):
""
url = util.create_bare_repo_with_topic_branch( 'example' )
git1 = GitInterface( url, rootdir='ex1' )
util.writefile( 'ex1/subdir/afile', 'my file' )
git1.add( 'subdir' )
git1.commit( 'add subdir' )
git1.push()
GitInterface( url )
root = os.path.abspath( 'example' )
git = GitInterface()
os.chdir( 'example' )
assert os.path.samefile( git.getRootDir(), root )
os.chdir( 'subdir' )
assert os.path.samefile( git.getRootDir(), root )
os.chdir( '/' )
self.assertRaises( GitInterfaceError, git.getRootDir )
def test_a_bare_clone_can_be_cloned(self):
""
os.mkdir( 'baredir' )
time.sleep(1)
url = util.create_bare_repo_with_topic_branch( 'example' )
bare_url = self.make_bare_clone_in_subdirectory( url, 'baredir' )
assert os.path.isdir( 'baredir/example.git' )
assert not os.path.exists( 'baredir/example.git/.git' )
git = GitInterface( bare_url )
assert util.readfile( 'example/file.txt' ).strip() == 'file contents'
def make_bare_clone_in_subdirectory(self, origin_url, subdir):
""
git_bare = GitInterface()
with change_directory( subdir ):
git_bare.clone( origin_url, bare=True )
return 'file://'+git_bare.getRootDir()
def test_a_bare_clone_into_a_specified_directory(self):
""
url = util.create_bare_repo_with_topic_branch( 'example' )
git_bare = GitInterface()
git_bare.clone( url, rootdir='bare_clone_subdir', bare=True )
assert os.path.isdir( 'bare_clone_subdir' )
assert not os.path.exists( 'bare_clone_subdir/.git' )
bare_url = 'file://'+git_bare.getRootDir()
git = GitInterface( bare_url, rootdir='checkrepo' )
assert util.readfile( 'checkrepo/file.txt' ).strip() == 'file contents'
def test_can_push_to_a_bare_clone(self):
""
os.mkdir( 'baredir' )
time.sleep(1)
url = util.create_bare_repo_with_topic_branch( 'example' )
bare_url = self.make_bare_clone_in_subdirectory( url, 'baredir' )
git = GitInterface( bare_url )
assert 'baredir' in git.getRemoteURL()
util.writefile( 'example/file.txt', "yep ;)" )
git.add( 'file.txt' )
git.commit( 'cool message' )
git.push()
git2 = GitInterface( bare_url, rootdir='checkrepo' )
assert util.readfile( 'checkrepo/file.txt' ).strip() == 'yep ;)'
def test_can_push_from_a_bare_clone(self):
""
orig_url = util.create_bare_repo_with_topic_branch( 'example' )
bare_git = GitInterface()
bare_git.clone( orig_url, rootdir='bareclone', bare=True )
bare_url = 'file://'+bare_git.getRootDir()
git = GitInterface( bare_url, rootdir='pushclone' )
util.writefile( 'pushclone/file.txt', "make a mod" )
git.add( 'file.txt' )
git.commit( 'a msg' )
git.push()
bare_git.push()
GitInterface( orig_url, rootdir='checkrepo' )
assert util.readfile( 'checkrepo/file.txt' ).strip() == 'make a mod'
def test_a_bare_clone_gets_all_branches_and_tags(self):
""
orig_url = util.create_bare_repo_with_topic_branch( 'example', tag='atag' )
bare_git = GitInterface()
bare_git.clone( orig_url, rootdir='bareclone', bare=True )
bare_url = 'file://'+bare_git.getRootDir()
branchL = bare_git.listBranches()
assert branchL == [ 'master', 'topic' ]
tagL = bare_git.listTags()
assert tagL == [ 'atag' ]
def test_can_push_all_branches_from_a_bare_clone(self):
""
orig_url = util.create_bare_repo_with_topic_branch( 'example' )
bare_git = GitInterface()
bare_git.clone( orig_url, rootdir='bareclone', bare=True )
bare_url = 'file://'+bare_git.getRootDir()
util.push_new_branch_with_file( bare_url, 'whatever',
'file2.md', 'some content' )
bare_git.push( all_branches=True )
git2 = GitInterface( orig_url, rootdir='checkrepo' )
git2.checkoutBranch( 'whatever' )
assert util.readfile( 'checkrepo/file2.md' ).strip() == 'some content'
git2.checkoutBranch( 'topic' )
assert not os.path.exists( 'checkrepo/file2.md' )
assert os.path.exists( 'checkrepo/file.txt' )
def test_can_push_all_tags_from_a_bare_clone(self):
""
orig_url = util.create_bare_repo_with_topic_branch( 'example', tag='atag' )
bare_git = GitInterface()
bare_git.clone( orig_url, rootdir='bareclone', bare=True )
bare_url = 'file://'+bare_git.getRootDir()
util.push_file_to_repo( bare_url, 'newfile.txt', 'new junk' )
util.push_tag_to_repo( bare_url, 'sosad' )
bare_git.push( all_tags=True )
git2 = GitInterface( orig_url, rootdir='checkrepo' )
tagL = git2.listTags()
assert tagL == [ 'atag', 'sosad' ]
def test_pushing_to_a_different_repository(self):
""
orig_url = util.create_bare_repo_with_topic_branch( 'example' )
bare_git = GitInterface()
bare_git.clone( orig_url, rootdir='bareclone', bare=True )
bare_url = 'file://'+bare_git.getRootDir()
git = GitInterface( bare_url, rootdir='workclone' )
util.writefile( 'workclone/file.txt', 'modify this guy' )
git.add( 'file.txt' )
git.commit( 'a mod' )
git.push( repository=orig_url )
GitInterface( orig_url, rootdir='checkclone' )
assert util.readfile( 'checkclone/file.txt' ).strip() == 'modify this guy'
GitInterface( bare_url, rootdir='checktwo' )
assert util.readfile( 'checktwo/file.txt' ).strip() == 'file contents'
git.createRemoteBranch( 'newbranch' )
util.writefile( 'workclone/file.txt', 'branch mod' )
git.add( 'file.txt' )
git.commit( 'b mod' )
git.push( all_branches=True, repository=orig_url )
chkit = GitInterface( orig_url, rootdir='checkclone2' )
chkit.checkoutBranch( 'newbranch' )
assert util.readfile( 'checkclone2/file.txt' ).strip() == 'branch mod'
def test_verbose_prints_git_command_and_output(self):
""
orig_url = util.create_bare_repo_with_topic_branch( 'example' )
git,out,err = util.call_capture_output( GitInterface,
orig_url, verbose=True )
out += err
print3( out )
assert 'clone' in out and 'example' in out
rtn,out,err = util.call_capture_output( git.checkoutBranch, 'topic' )
out += err
print3( out )
assert 'checkout' in out and 'topic' in out
rtn,out,err = util.call_capture_output( git.currentBranch )
out += err
print3( out )
assert rtn == 'topic'
assert 'branch' in out and 'topic' in out
class commit_and_push( trigutil.trigTestCase ):
def test_create_repo_and_commit_a_file(self):
""
util.writefile( 'grepo/file.txt', "file contents" )
time.sleep(1)
git = GitInterface()
git.create( 'grepo' )
git.add( 'file.txt' )
git.commit( 'first commit message' )
def test_commit_and_push_a_new_file(self):
""
url = util.create_local_bare_repository( 'myrepo' )
util.push_file_to_repo( url, 'file.txt', 'file contents' )
time.sleep(1)
git = GitInterface( origin_url=url )
util.writefile( 'myrepo/another.txt', 'another contents' )
git.add( 'another.txt' )
git.commit( 'adding file' )
git.push()
assert len( util.grepfiles( 'another', 'myrepo/another.txt' ) ) == 1
shutil.rmtree( 'myrepo' )
time.sleep(1)
git = GitInterface( url )
assert len( util.grepfiles( 'file', 'myrepo/file.txt' ) ) == 1
assert len( util.grepfiles( 'another', 'myrepo/another.txt' ) ) == 1
def test_add_commit_push_every_changed_file_in_a_directory(self):
""
url = util.create_bare_repo_with_topic_branch( 'example' )
git = GitInterface( url )
util.writefile( 'example/adir/afile.txt', 'whatever' )
git.add( 'adir/afile.txt' )
git.commit( 'create directory' )
git.push()
util.writefile( 'example/adir/afile.txt', 'changed' )
util.writefile( 'example/adir/newfile.txt', 'brand spanking new' )
util.writefile( 'example/adir/deep/file.txt', 'further down' )
time.sleep(1)
git.add( 'adir' )
git.commit( 'add everything under adir' )
git.push()
time.sleep(1)
git2 = GitInterface( url, 'check' )
assert os.path.isfile( 'check/adir/deep/file.txt' )
assert util.readfile( 'check/adir/afile.txt' ).strip() == 'changed'
class branches( trigutil.trigTestCase ):
def setUp(self):
""
trigutil.trigTestCase.setUp( self )
self.url = util.create_bare_repo_with_topic_branch( 'example' )
time.sleep(1)
def test_listing_branches(self):
""
os.mkdir( 'default' )
os.mkdir( 'single' )
time.sleep(1)
git = GitInterface()
assert git.listRemoteBranches( self.url ) == [ 'master', 'topic' ]
self.assertRaises( GitInterfaceError, git.listRemoteBranches )
os.chdir( 'default' )
git = GitInterface( self.url )
assert git.listBranches() == [ 'master' ]
assert git.listBranches( remotes=True ) == [ 'master', 'topic' ]
assert git.listRemoteBranches( self.url ) == [ 'master', 'topic' ]
assert git.listRemoteBranches() == [ 'master', 'topic' ]
os.chdir( '../single' )
git = GitInterface()
git.clone( self.url, branch='topic' )
assert git.listBranches() == [ 'topic' ]
# the fetch entry in .git/config limits the remote listing
assert git.listBranches( remotes=True ) == [ 'topic' ]
# but listRemoteBranches() is immune
assert git.listRemoteBranches( self.url ) == [ 'master', 'topic' ]
assert git.listRemoteBranches() == [ 'master', 'topic' ]
def test_determine_current_branch(self):
""
git = GitInterface( self.url )
assert git.currentBranch() == 'master'
git.checkoutBranch( 'topic' )
assert git.currentBranch() == 'topic'
git = GitInterface()
os.chdir( 'example' )
git.currentBranch() == 'topic'
def test_getting_current_branch_fails_if_not_in_a_local_repository(self):
""
git = GitInterface()
self.assertRaises( GitInterfaceError, git.currentBranch )
def test_current_branch_returns_None_if_in_detached_HEAD_state(self):
""
util.push_file_to_repo( self.url, 'file.txt', 'new contents' )
git = GitInterface( self.url )
util.checkout_to_previous_sha1( git.getRootDir() )
assert git.currentBranch() == None
def test_current_branch_fails_if_done_right_after_git_init(self):
""
os.mkdir( 'arepo' )
time.sleep(1)
os.chdir( 'arepo' )
runcmd( 'git init' )
git = GitInterface()
self.assertRaises( GitInterfaceError, git.currentBranch )
def test_a_push_fails_if_not_on_a_branch(self):
""
util.push_file_to_repo( self.url, 'file.txt', 'new contents' )
git = GitInterface( self.url )
util.checkout_to_previous_sha1( git.getRootDir() )
assert git.currentBranch() == None
self.assertRaises( GitInterfaceError, git.push )
def test_clone_followed_by_a_new_branch_showing_up_on_remote(self):
""
git = GitInterface( self.url )
assert git.listBranches() == [ 'master' ]
assert git.listBranches( remotes=True ) == [ 'master', 'topic' ]
assert git.listRemoteBranches() == [ 'master', 'topic' ]
util.push_new_branch_with_file( self.url, 'newtopic',
'file.txt', 'redo' )
time.sleep(1)
assert git.listBranches() == [ 'master' ]
assert git.listBranches( remotes=True ) == [ 'master', 'topic' ]
assert git.listRemoteBranches() == [ 'master', 'newtopic', 'topic' ]
git.checkoutBranch( 'newtopic' )
assert git.currentBranch() == 'newtopic'
assert git.listBranches( remotes=True ) == [ 'master', 'newtopic', 'topic' ]
assert git.listRemoteBranches() == [ 'master', 'newtopic', 'topic' ]
def test_exception_if_checkout_branch_name_does_not_exist(self):
""
git = GitInterface( self.url )
assert git.listBranches() == [ 'master' ]
git.checkoutBranch( 'topic' )
self.assertRaises( GitInterfaceError, git.checkoutBranch, 'foobar' )
def test_creating_a_local_branch(self):
""
git = GitInterface( self.url )
git.createBranch( 'justme' )
assert git.currentBranch() == 'justme'
util.writefile( 'example/file.txt', 'branch contents' )
git.add( 'file.txt' )
git.commit( 'mod to file on branch' )
git.checkoutBranch( 'master' )
git2 = GitInterface( 'example', 'checkrepo' )
assert 'justme' not in git2.listBranches()
assert 'justme' in git2.listRemoteBranches()
git2.checkoutBranch( 'justme' )
assert util.readfile( 'checkrepo/file.txt' ).strip() == 'branch contents'
def test_create_local_branch_fails_if_branch_already_exists(self):
""
git = GitInterface( self.url )
git.checkoutBranch( 'topic' )
git.checkoutBranch( 'master' )
self.assertRaises( GitInterfaceError, git.createBranch, 'topic' )
assert git.currentBranch() == 'master'
def test_creating_a_remote_branch(self):
""
git = GitInterface( self.url )
git.createRemoteBranch( 'nasa' )
git = GitInterface( self.url, 'check' )
git.checkoutBranch( 'nasa' )
assert util.readfile( 'check/file.txt' ).strip() == 'file contents'
def create_a_remote_branch_and_push_a_change(self, git):
""
git.createRemoteBranch( 'nasa' )
util.writefile( 'example/newfile.txt', 'new contents' )
git.add( 'newfile.txt' )
git.commit( 'add file to new branch' )
git.push()
git = GitInterface( self.url, 'check' )
git.checkoutBranch( 'nasa' )
assert util.readfile( 'check/newfile.txt' ).strip() == 'new contents'
def test_creating_a_remote_branch_and_pushing_a_change(self):
""
git = GitInterface( self.url )
self.create_a_remote_branch_and_push_a_change( git )
def test_clone_specific_branch_then_create_a_remote_branch_and_push(self):
""
git = GitInterface()
git.clone( self.url, branch='master' )
self.create_a_remote_branch_and_push_a_change( git )
def test_create_remote_branch_does_not_push_local_changes(self):
""
git1 = GitInterface( self.url )
util.writefile( 'example/file.txt', 'modified contents' )
git1.add( 'file.txt' )
git1.commit( 'modify file' )
git1.createRemoteBranch( 'nasa' )
git2 = GitInterface( self.url, 'check1' )
git2.checkoutBranch( 'nasa' )
assert util.readfile( 'check1/file.txt' ).strip() == 'file contents'
git1.push()
git2.pull()
assert util.readfile( 'check1/file.txt' ).strip() == 'modified contents'
def test_create_remote_branch_within_a_single_branch_clone(self):
""
git1 = GitInterface()
git1.clone( self.url, branch='master' )
git1.createRemoteBranch( 'nasa' )
git2 = GitInterface( self.url, 'check1' )
git2.checkoutBranch( 'nasa' )
assert util.readfile( 'check1/file.txt' ).strip() == 'file contents'
util.writefile( 'example/file.txt', 'modified contents' )
git1.add( 'file.txt' )
git1.commit( 'modify file' )
git1.push()
git2.pull()
assert util.readfile( 'check1/file.txt' ).strip() == 'modified contents'
def test_create_remote_branch_that_already_exists_is_an_error(self):
""
git = GitInterface( self.url )
self.assertRaises( GitInterfaceError, git.createRemoteBranch, 'topic' )
def test_create_remote_branch_fails_if_current_branch_is_not_tracked(self):
""
git = GitInterface( self.url )
util.create_local_branch( 'example', 'proximus' )
assert git.currentBranch() == 'proximus'
self.assertRaises( GitInterfaceError, git.createRemoteBranch, 'nasa' )
def test_create_remote_branch_of_same_name_as_current_branch_is_an_error(self):
""
git = GitInterface( self.url )
git.checkoutBranch( 'topic' )
assert git.currentBranch() == 'topic'
self.assertRaises( GitInterfaceError, git.createRemoteBranch, 'topic' )
def test_create_remote_branch_with_added_files_should_succeed(self):
""
git = GitInterface( self.url )
util.writefile( 'example/file.txt', 'modified contents' )
git.add( 'file.txt' )
git.createRemoteBranch( 'nasa' )
git = GitInterface( self.url, 'check' )
git.checkoutBranch( 'nasa' )
assert util.readfile( 'check/file.txt' ).strip() == 'file contents'
def test_create_remote_branch_with_modified_but_not_added_files_should_succeed(self):
""
git = GitInterface( self.url )
util.writefile( 'example/file.txt', 'modified contents' )
git.createRemoteBranch( 'nasa' )
git = GitInterface( self.url, 'check' )
git.checkoutBranch( 'nasa' )
assert util.readfile( 'check/file.txt' ).strip() == 'file contents'
def test_delete_remote_branch_while_on_master(self):
""
git = GitInterface( self.url )
assert 'topic' in git.listBranches( remotes=True )
assert 'topic' in git.listRemoteBranches()
git.deleteRemoteBranch( 'topic' )
assert 'topic' not in git.listBranches( remotes=True )
assert 'topic' not in git.listRemoteBranches()
git = GitInterface( self.url, 'ex2' )
assert 'topic' not in git.listBranches( remotes=True )
assert 'topic' not in git.listRemoteBranches()
def test_delete_remote_branch_after_checking_it_out(self):
""
git = GitInterface( self.url )
git.checkoutBranch( 'topic' )
git.checkoutBranch( 'master' )
assert 'topic' in git.listBranches()
assert 'topic' in git.listBranches( remotes=True )
git.deleteRemoteBranch( 'topic' )
assert 'topic' not in git.listBranches()
assert 'topic' not in git.listBranches( remotes=True )
assert 'topic' not in git.listRemoteBranches()
git = GitInterface( self.url, 'ex2' )
assert 'topic' not in git.listBranches( remotes=True )
assert 'topic' not in git.listRemoteBranches()
def test_deleting_the_current_branch_is_an_error(self):
""
git = GitInterface( self.url )
git.checkoutBranch( 'topic' )
self.assertRaises( GitInterfaceError, git.deleteRemoteBranch, 'topic' )
class pulling( trigutil.trigTestCase ):
def setUp(self):
""
trigutil.trigTestCase.setUp( self )
self.url = util.create_bare_repo_with_topic_branch( 'example' )
time.sleep(1)
def clone_twice_and_modify_and_push_file_txt(self):
""
git1 = GitInterface( self.url, 'ex1' )
git2 = GitInterface( self.url, 'ex2' )
util.writefile( 'ex2/file.txt', 'modified contents' )
git2.add( 'file.txt' )
git2.commit( 'modify and push' )
git2.push()
return git1, git2
def test_push_in_one_repo_and_pull_in_another(self):
""
git1, git2 = self.clone_twice_and_modify_and_push_file_txt()
# pull with no changes
git1.pull()
util.writefile( 'ex1/filetwo.txt', 'file two contents' )
git1.add( 'filetwo.txt' )
git1.commit( 'adding another file' )
git1.push()
time.sleep(1)
assert util.readfile( 'ex1/file.txt' ).strip() == 'modified contents'
# pull with unstaged changes
util.writefile( 'ex2/something.txt', 'whatever' )
git2.pull()
git2.add( 'something.txt' )
git2.commit( 'adding something' )
git2.push()
time.sleep(1)
assert util.readfile( 'ex2/filetwo.txt' ).strip() == 'file two contents'
# pull with committed changes
util.writefile( 'ex1/filetwo.txt', 'for the third time!' )
git1.add( 'filetwo.txt' )
git1.commit( 'third change' )
git1.pull()
time.sleep(1)
assert util.readfile( 'ex1/something.txt' ).strip() == 'whatever'
def test_pull_will_fail_if_repo_is_currently_in_a_rebase_operation(self):
""
git1, git2 = self.clone_twice_and_modify_and_push_file_txt()
util.writefile( 'ex1/file.txt', 'also modified contents' )
git1.add( 'file.txt' )
git1.commit( 'this will conflict' )
# let this fail due to a conflict
x,out = runcmd( 'git pull', chdir='ex1', raise_on_error=False )
assert x != 0
# this should now fail because it is in the middle of a rebase operation
self.assertRaises( GitInterfaceError, git1.pull )
def test_the_repo_is_reset_after_a_pull_conflict(self):
""
git1, git2 = self.clone_twice_and_modify_and_push_file_txt()
util.writefile( 'ex1/file.txt', 'also modified contents' )
git1.add( 'file.txt' )
git1.commit( 'this will conflict' )
self.assertRaises( GitInterfaceError, git1.pull )
assert git1.currentBranch() == 'master'
assert util.readfile( 'ex1/file.txt' ).strip() == 'also modified contents'
class orphan_branches( trigutil.trigTestCase ):
def test_copy_file_or_directory_to_current_directory(self):
""
util.writefile( 'subdir/myfile.txt', 'hello my file' )
src = os.path.abspath( 'subdir' )
os.mkdir( 'destdir' ) ; dest = os.path.abspath( 'destdir' )
util.writefile( 'subdir/adir/another.txt', 'what' )
os.symlink( 'myfile.txt', 'subdir/linkfile' )
time.sleep(1)
cwd = os.getcwd()
os.chdir( dest )
f1 = copy_path_to_current_directory( src+'/adir' )
f2 = copy_path_to_current_directory( src+'/myfile.txt' )
f3 = copy_path_to_current_directory( src+'/linkfile' )
time.sleep(1)
assert util.readfile( 'adir/another.txt' ).strip() == 'what'
assert f1 == 'adir'
assert util.readfile( 'myfile.txt' ).strip() == 'hello my file'
assert f2 == 'myfile.txt'
assert os.path.islink( 'linkfile' )
assert os.readlink( 'linkfile' ) == 'myfile.txt'
assert f3 == 'linkfile'
def test_create_orphan_branch(self):
""
self.run_create_orphan_branch_test()
def test_clone_master_only_then_create_orphan_branch(self):
""
self.run_create_orphan_branch_test( 'master' )
def run_create_orphan_branch_test(self, initial_branchname=None):
""
url = util.create_bare_repo_with_topic_branch( 'example' )
util.writefile( 'readme.txt', 'this is adam' )
time.sleep(1)
git = GitInterface()
git.clone( url, 'ex', initial_branchname )
git.createRemoteOrphanBranch( 'loner', 'start fresh', 'readme.txt' )
assert git.currentBranch() == 'loner'
assert 'loner' in git.listRemoteBranches()
fL = glob.glob( 'ex/*' )
assert len( fL ) == 1 and fL[0] == 'ex/readme.txt'
git.checkoutBranch( 'master' )
assert not os.path.exists( 'ex/readme.txt' )
assert util.readfile( 'ex/file.txt' ).strip() == 'file contents'
git2 = GitInterface()
git2.clone( url, 'ex2', branch='loner' )
fL = glob.glob( 'ex2/*' )
assert len( fL ) == 1 and fL[0] == 'ex2/readme.txt'
def test_orphan_branch_creation_errors(self):
""
url = util.create_bare_repo_with_topic_branch( 'example' )
util.push_file_to_repo( url, 'file.txt', 'new contents' )
util.writefile( 'readme.txt', 'this is adam' )
time.sleep(1)
git = GitInterface( url )
util.checkout_to_previous_sha1( git.getRootDir() )
self.assertRaises( GitInterfaceError,
git.createRemoteOrphanBranch, 'loner', 'start it', 'readme.txt' )
git.clone( url, 'ex2' )
assert git.currentBranch() == 'master'
self.assertRaises( GitInterfaceError,
git.createRemoteOrphanBranch, 'topic', 'start it', 'readme.txt' )
class mirroring_repositories( trigutil.trigTestCase ):
def test_copy_one_repo_to_a_second_empty_repo(self):
""
src_url = util.create_local_bare_repository( 'foobar', 'src' )
util.push_file_to_repo( src_url, 'file.txt', 'file contents' )
cpy_url = util.create_local_bare_repository( 'example', 'cpy' )
safe_repository_mirror( src_url, cpy_url, verbose=True )
time.sleep(1)
git = GitInterface( cpy_url )
assert util.readfile( 'example/file.txt' ).strip() == 'file contents'
assert git.currentBranch() == 'master'
def test_update_a_second_repo(self):
""
src_url = util.create_local_bare_repository( 'example' )
util.push_file_to_repo( src_url, 'file.txt', 'file contents' )
git1 = GitInterface()
cpy_url = git1.clone( src_url, rootdir='cpy', bare=True )
util.push_file_to_repo( src_url, 'file.txt', 'new contents' )
safe_repository_mirror( src_url, cpy_url )
time.sleep(1)
git2 = GitInterface( cpy_url, rootdir='checkclone' )
assert util.readfile( 'checkclone/file.txt' ).strip() == 'new contents'
def test_that_branches_and_tags_are_copied(self):
""
src_url = util.create_bare_repo_with_topic_branch( 'example',
subdir='srcrepo',
tag='FANCYTAG' )
cpy_url = util.create_local_bare_repository( 'cpyrepo' )
safe_repository_mirror( src_url, cpy_url )
time.sleep(1)
git = GitInterface()
git.clone( cpy_url, rootdir='checkclone1', bare=True )
assert git.listBranches() == ['master', 'topic']
assert git.listTags() == ['FANCYTAG']
util.push_new_branch_with_file( src_url, 'coolbranch',
'file.txt', 'cool contents' )
util.push_tag_to_repo( src_url, 'COOLTAG' )
safe_repository_mirror( src_url, cpy_url )
time.sleep(1)
git = GitInterface()
git.clone( cpy_url, rootdir='checkclone2', bare=True )
assert git.listBranches() == ['coolbranch', 'master', 'topic']
assert git.listTags() == ['COOLTAG', 'FANCYTAG']
def test_update_a_second_repo_using_an_existing_working_repo(self):
""
src_url = util.create_bare_repo_with_topic_branch( 'example',
subdir='srcrepo',
tag='FANCYTAG' )
cpy_url = util.create_local_bare_repository( 'cpyrepo' )
git = GitInterface()
git.clone( src_url, rootdir='wrkclone', bare=True )
wrkdir = git.getRootDir()
# make work clone out-of-date
util.push_file_to_repo( src_url, 'file.txt', 'my contents' )
util.push_new_branch_with_file( src_url, 'coolbranch',
'file.txt', 'cool contents' )
util.push_tag_to_repo( src_url, 'NEWTAG' )
safe_repository_mirror( src_url, cpy_url, work_clone=wrkdir )
time.sleep(1)
git = GitInterface( cpy_url, rootdir='checkclone' )
assert git.listRemoteBranches() == ['coolbranch', 'master', 'topic']
assert git.listTags() == ['FANCYTAG', 'NEWTAG']
assert util.readfile( 'checkclone/file.txt' ).strip() == 'my contents'
def test_using_an_existing_working_non_bare_repo_is_an_error(self):
""
src_url = util.create_bare_repo_with_topic_branch( 'example',
subdir='srcrepo',
tag='FANCYTAG' )
cpy_url = util.create_local_bare_repository( 'cpyrepo' )
git = GitInterface( src_url, rootdir='wrkclone' )
wrkdir = git.getRootDir()
self.assertRaises( GitInterfaceError,
safe_repository_mirror, src_url, cpy_url, work_clone=wrkdir )
def test_the_work_dir_will_be_created_if_it_doesnt_exist(self):
""
src_url = util.create_bare_repo_with_topic_branch( 'example',
subdir='srcrepo',
tag='FANCYTAG' )
cpy_url = util.create_local_bare_repository( 'cpyrepo' )
wrkdir = 'workclone'
safe_repository_mirror( src_url, cpy_url, work_clone=wrkdir )
git = GitInterface( rootdir=wrkdir )
assert git.isBare()
def test_an_update_fails_if_history_would_be_changed(self):
""
src_url = util.create_bare_repo_with_topic_branch( 'example',
subdir='srcrepo',
tag='FANCYTAG' )
cpy_url = util.create_local_bare_repository( 'cpyrepo' )
safe_repository_mirror( src_url, cpy_url )
util.push_file_to_repo( cpy_url, 'file.txt', 'careful...' )
self.assertRaises( GitInterfaceError,
safe_repository_mirror, src_url, cpy_url )
class misc_functions( trigutil.trigTestCase ):
def test_repo_name_from_url(self):
""
assert repo_name_from_url( 'foo/bar.git' ) == 'bar'
assert repo_name_from_url( 'foo/bar' ) == 'bar'
assert repo_name_from_url( 'foo/bar.git/' ) == 'bar'
assert repo_name_from_url( 'foo/bar/' ) == 'bar'
def test_function_repository_url_match(self):
""
os.makedirs( 'subdir/deep' )
os.mkdir( 'sub:dir' )
url = util.create_bare_repo_with_topic_branch( 'cool', 'barerepo' )
GitInterface( url )
GitInterface( url, 'mrdir/.mrgit' )
git = GitInterface()
git.clone( url, 'bare_mrdir/.mrgit.git', bare=True )
time.sleep(1)
assert repository_url_match( 'file:///foo/bar' )
assert repository_url_match( 'http://host.xx/path' )
assert repository_url_match( 'https://host.xx/path' )
assert repository_url_match( 'ssh://host.xx/path' )
assert repository_url_match( 'git://host.xx/path' )
assert repository_url_match( 'ftp://host.xx/path' )
assert repository_url_match( 'ftps://host.xx/path' )
assert repository_url_match( 'sub:dir' )
assert not repository_url_match( './sub:dir' )
assert repository_url_match( 'host.xx:path/to/repo.git' )
assert repository_url_match( 'usrname@host.xx:/path/to/repo.git' )
assert not repository_url_match( 'barerepo/cool.git' )
assert not repository_url_match( abspath( 'barerepo/cool.git' ) )
assert not repository_url_match( 'cool' )
assert not repository_url_match( abspath( 'cool' ) )
assert not repository_url_match( 'subdir' )
assert not repository_url_match( 'subdir/deep' )
assert not os.path.exists( 'mrdir/.git' )
assert not os.path.exists( 'mrdir/config' )
assert not repository_url_match( 'mrdir' )
assert not os.path.exists( 'bare_mrdir/.git' )
assert not os.path.exists( 'bare_mrdir/config' )
assert not repository_url_match( 'bare_mrdir' )
def test_function_is_a_local_repository(self):
""
url = util.create_bare_repo_with_topic_branch( 'cool', 'barerepo' )
GitInterface( url )
time.sleep(1)
assert not is_a_local_repository( 'barerepo' )
assert is_a_local_repository( 'barerepo/cool.git' )
assert is_a_local_repository( abspath( 'barerepo/cool.git' ) )
assert is_a_local_repository( 'barerepo/cool' )
assert is_a_local_repository( abspath( 'barerepo/cool' ) )
assert is_a_local_repository( 'cool' )
assert is_a_local_repository( abspath( 'cool' ) )
def test_function_verify_repository_url(self):
""
url = util.create_bare_repo_with_topic_branch( 'cool', 'barerepo' )
git = GitInterface()
git.clone( url, rootdir='dir1/coolclone' )
git.clone( url, rootdir='dir2/bareclone.git', bare=True )
time.sleep(1)
assert os.path.isdir( 'dir1/coolclone' )
assert os.path.isdir( 'dir2/bareclone.git' )
pre = 'file://'+os.getcwd()
assert not verify_repository_url( 'barerepo' )
assert not verify_repository_url( pre+'/barerepo' )
assert not verify_repository_url( 'dir1' )
assert not verify_repository_url( pre+'/dir1' )
assert not verify_repository_url( 'dir2' )
assert not verify_repository_url( pre+'/dir2' )
assert verify_repository_url( 'dir1/coolclone' )
assert verify_repository_url( os.path.abspath('dir1/coolclone') )
assert verify_repository_url( 'dir2/bareclone' )
assert verify_repository_url( os.path.abspath('dir2/bareclone') )
assert verify_repository_url( pre+'/dir1/coolclone' )
assert verify_repository_url( pre+'/dir2/bareclone.git' )
assert verify_repository_url( pre+'/dir2/bareclone' )
#######################################################################
util.run_test_cases( sys.argv, sys.modules[__name__] )
| [
"rrdrake@sandia.gov"
] | rrdrake@sandia.gov | |
43351e79c4609abf8ba1a0ba88d5fa8762844870 | 9259591e4794aecd85c199b645ffb05ccbebd993 | /vendor/billiard/spawn.py | 9d04b23446e46a9a832530e23ef9003e84a9da00 | [
"Apache-2.0"
] | permissive | swdotcom/swdc-sublime | 000e20b2d2d9fa214480d2a94fcb97a88b83f67f | eab2007ab408e44d38163a121bf95c5d2018d6e4 | refs/heads/main | 2022-03-10T05:57:44.701151 | 2022-03-09T23:53:14 | 2022-03-09T23:53:14 | 127,207,312 | 14 | 5 | Apache-2.0 | 2022-03-09T23:53:15 | 2018-03-28T22:47:16 | Python | UTF-8 | Python | false | false | 11,664 | py | #
# Code used to start processes when using the spawn or forkserver
# start methods.
#
# multiprocessing/spawn.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import io
import os
import pickle
import sys
import runpy
import types
import warnings
from . import get_start_method, set_start_method
from . import process
from . import util
__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
'get_preparation_data', 'get_command_line', 'import_main_path']
W_OLD_DJANGO_LAYOUT = """\
Will add directory %r to path! This is necessary to accommodate \
pre-Django 1.4 layouts using setup_environ.
You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \
environment variable.
"""
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if sys.platform != 'win32':
WINEXE = False
WINSERVICE = False
else:
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def _module_parent_dir(mod):
dir, filename = os.path.split(_module_dir(mod))
if dir == os.curdir or not dir:
dir = os.getcwd()
return dir
def _module_dir(mod):
if '__init__.py' in mod.__file__:
return os.path.dirname(mod.__file__)
return mod.__file__
def _Django_old_layout_hack__save():
if 'DJANGO_PROJECT_DIR' not in os.environ:
try:
settings_name = os.environ['DJANGO_SETTINGS_MODULE']
except KeyError:
return # not using Django.
conf_settings = sys.modules.get('django.conf.settings')
configured = conf_settings and conf_settings.configured
try:
project_name, _ = settings_name.split('.', 1)
except ValueError:
return # not modified by setup_environ
project = __import__(project_name)
try:
project_dir = os.path.normpath(_module_parent_dir(project))
except AttributeError:
return # dynamically generated module (no __file__)
if configured:
warnings.warn(UserWarning(
W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir)
))
os.environ['DJANGO_PROJECT_DIR'] = project_dir
def _Django_old_layout_hack__load():
try:
sys.path.append(os.environ['DJANGO_PROJECT_DIR'])
except KeyError:
pass
def set_executable(exe):
global _python_exe
_python_exe = exe
def get_executable():
return _python_exe
#
#
#
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--billiard-fork':
return True
else:
return False
def freeze_support():
'''
Run code for process object if this in not the main process
'''
if is_forking(sys.argv):
kwds = {}
for arg in sys.argv[2:]:
name, value = arg.split('=')
if value == 'None':
kwds[name] = None
else:
kwds[name] = int(value)
spawn_main(**kwds)
sys.exit()
def get_command_line(**kwds):
'''
Returns prefix of command line used for spawning a child process
'''
if getattr(sys, 'frozen', False):
return ([sys.executable, '--billiard-fork'] +
['%s=%r' % item for item in kwds.items()])
else:
prog = 'from billiard.spawn import spawn_main; spawn_main(%s)'
prog %= ', '.join('%s=%r' % item for item in kwds.items())
opts = util._args_from_interpreter_flags()
return [_python_exe] + opts + ['-c', prog, '--billiard-fork']
def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
'''
Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
if sys.platform == 'win32':
import msvcrt
from .reduction import steal_handle
new_handle = steal_handle(parent_pid, pipe_handle)
fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
else:
from . import semaphore_tracker
semaphore_tracker._semaphore_tracker._fd = tracker_fd
fd = pipe_handle
exitcode = _main(fd)
sys.exit(exitcode)
def _setup_logging_in_child_hack():
# Huge hack to make logging before Process.run work.
try:
os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
except KeyError:
pass
except AttributeError:
pass
loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
format = os.environ.get("_MP_FORK_LOGFORMAT_")
if loglevel:
from . import util
import logging
logger = util.get_logger()
logger.setLevel(int(loglevel))
if not logger.handlers:
logger._rudimentary_setup = True
logfile = logfile or sys.__stderr__
if hasattr(logfile, "write"):
handler = logging.StreamHandler(logfile)
else:
handler = logging.FileHandler(logfile)
formatter = logging.Formatter(
format or util.DEFAULT_LOGGING_FORMAT,
)
handler.setFormatter(formatter)
logger.addHandler(handler)
def _main(fd):
_Django_old_layout_hack__load()
with io.open(fd, 'rb', closefd=True) as from_parent:
process.current_process()._inheriting = True
try:
preparation_data = pickle.load(from_parent)
prepare(preparation_data)
_setup_logging_in_child_hack()
self = pickle.load(from_parent)
finally:
del process.current_process()._inheriting
return self._bootstrap()
def _check_not_importing_main():
if getattr(process.current_process(), '_inheriting', False):
raise RuntimeError('''
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.''')
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
_check_not_importing_main()
d = dict(
log_to_stderr=util._log_to_stderr,
authkey=process.current_process().authkey,
)
if util._logger is not None:
d['log_level'] = util._logger.getEffectiveLevel()
sys_path = sys.path[:]
try:
i = sys_path.index('')
except ValueError:
pass
else:
sys_path[i] = process.ORIGINAL_DIR
d.update(
name=name,
sys_path=sys_path,
sys_argv=sys.argv,
orig_dir=process.ORIGINAL_DIR,
dir=os.getcwd(),
start_method=get_start_method(),
)
# Figure out whether to initialise main in the subprocess as a module
# or through direct execution (or to leave it alone entirely)
main_module = sys.modules['__main__']
try:
main_mod_name = main_module.__spec__.name
except AttributeError:
main_mod_name = main_module.__name__
if main_mod_name is not None:
d['init_main_from_name'] = main_mod_name
elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
main_path = getattr(main_module, '__file__', None)
if main_path is not None:
if (not os.path.isabs(main_path) and
process.ORIGINAL_DIR is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['init_main_from_path'] = os.path.normpath(main_path)
return d
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process().authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'start_method' in data:
set_start_method(data['start_method'])
if 'init_main_from_name' in data:
_fixup_main_from_name(data['init_main_from_name'])
elif 'init_main_from_path' in data:
_fixup_main_from_path(data['init_main_from_path'])
# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
# __main__.py files for packages, directories, zip archives, etc, run
# their "main only" code unconditionally, so we don't even try to
# populate anything in __main__, nor do we make any changes to
# __main__ attributes
current_main = sys.modules['__main__']
if mod_name == "__main__" or mod_name.endswith(".__main__"):
return
# If this process was forked, __main__ may already be populated
if getattr(current_main.__spec__, "name", None) == mod_name:
return
# Otherwise, __main__ may contain some non-main code where we need to
# support unpickling it properly. We rerun it as __mp_main__ and make
# the normal __main__ an alias to that
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_module(mod_name,
run_name="__mp_main__",
alter_sys=True)
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def _fixup_main_from_path(main_path):
# If this process was forked, __main__ may already be populated
current_main = sys.modules['__main__']
# Unfortunately, the main ipython launch script historically had no
# "if __name__ == '__main__'" guard, so we work around that
# by treating it like a __main__.py file
# See https://github.com/ipython/ipython/issues/4698
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == 'ipython':
return
# Otherwise, if __file__ already has the setting we expect,
# there's nothing more to do
if getattr(current_main, '__file__', None) == main_path:
return
# If the parent process has sent a path through rather than a module
# name we assume it is an executable script that may contain
# non-main code that needs to be executed
old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_path(main_path,
run_name="__mp_main__")
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
def import_main_path(main_path):
'''
Set sys.modules['__main__'] to module at main_path
'''
_fixup_main_from_path(main_path)
| [
"bo@software.com"
] | bo@software.com |
2e9f0ae01fbad3c4bae02bb7427a5b127c6d02ff | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /stypy/sgmc/sgmc_cache/taxonomy/builtin_functions/str/error_str_return_type.py | cffaa9f073b7b8d8146ea300f49c0b12c05ebde2 | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py |
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: # coding=utf-8
2: __doc__ = "str builtin is invoked and its return type is used to call an non existing method"
3:
4: if __name__ == '__main__':
5: # Call options
6: # () -> <type 'str'>
7: # (AnyType) -> <type 'str'>
8:
9:
10: # Call the builtin
11: ret = str(3)
12:
13: # Type error
14: ret.unexisting_method()
15:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
# Assigning a Str to a Name (line 2):
str_1 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 2, 10), 'str', 'str builtin is invoked and its return type is used to call an non existing method')
# Assigning a type to the variable '__doc__' (line 2)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 2, 0), '__doc__', str_1)
if (__name__ == '__main__'):
# Assigning a Call to a Name (line 11):
# Call to str(...): (line 11)
# Processing the call arguments (line 11)
int_3 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 11, 14), 'int')
# Processing the call keyword arguments (line 11)
kwargs_4 = {}
# Getting the type of 'str' (line 11)
str_2 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 11, 10), 'str', False)
# Calling str(args, kwargs) (line 11)
str_call_result_5 = invoke(stypy.reporting.localization.Localization(__file__, 11, 10), str_2, *[int_3], **kwargs_4)
# Assigning a type to the variable 'ret' (line 11)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 11, 4), 'ret', str_call_result_5)
# Call to unexisting_method(...): (line 14)
# Processing the call keyword arguments (line 14)
kwargs_8 = {}
# Getting the type of 'ret' (line 14)
ret_6 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 14, 4), 'ret', False)
# Obtaining the member 'unexisting_method' of a type (line 14)
unexisting_method_7 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 14, 4), ret_6, 'unexisting_method')
# Calling unexisting_method(args, kwargs) (line 14)
unexisting_method_call_result_9 = invoke(stypy.reporting.localization.Localization(__file__, 14, 4), unexisting_method_7, *[], **kwargs_8)
# ################# End of the type inference program ##################
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
| [
"redondojose@uniovi.es"
] | redondojose@uniovi.es |
f9c609f789f2003f98166ffdddef23ee00af7fa2 | a5909624f890a5f4d45c5c5400b18facfc4b14e5 | /torch/testing/_internal/opinfo/core.py | 8b1f3f69450903fd3e81a983f960ac19d6d640e4 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | yueyericardo/pytorch | cbd21ecb35553bb932655140f1965832a618c165 | a61c96492b042947467bb603244301400f0e8d1a | refs/heads/master | 2022-09-03T06:01:31.199274 | 2022-08-05T15:29:00 | 2022-08-07T03:04:46 | 136,817,183 | 0 | 0 | null | 2018-06-10T14:44:34 | 2018-06-10T14:44:34 | null | UTF-8 | Python | false | false | 99,198 | py | from dataclasses import dataclass, asdict
import collections.abc
import operator
from typing import Any, Callable, List, Optional, Tuple, Iterable
from enum import Enum
import unittest
import math
from functools import partial
from itertools import product
import torch
from torch.testing import make_tensor
from torch.testing._internal.opinfo import utils
from torchgen.utils import dataclass_repr
from torch.testing._internal.common_utils import (
is_iterable_of_tensors,
noncontiguous_like,
torch_to_numpy_dtype_dict,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import (
_dispatch_dtypes,
floating_and_complex_types_and,
floating_and_complex_types,
floating_types,
)
from torch.testing._internal.common_device_type import (
skipCPUIfNoFFT,
toleranceOverride,
tol,
)
# Reasonable testing sizes for dimensions
L = 20
M = 10
S = 5
XS = 3
# Unique value to distinguish default from anything else
_NOTHING = object()
# Extension of getattr to support qualified names
# e.g. _getattr_qual(torch, 'linalg.norm') -> torch.linalg.norm
def _getattr_qual(obj, name, default=_NOTHING):
try:
for path in name.split('.'):
obj = getattr(obj, path)
return obj
except AttributeError:
if default is not _NOTHING:
return default
else:
raise
class DecorateInfo(object):
"""Describes which test, or type of tests, should be wrapped in the given
decorators when testing an operator. Any test that matches all provided
arguments will be decorated. The decorators will only be applied if the
active_if argument is True."""
__slots__ = ['decorators', 'cls_name', 'test_name', 'device_type', 'dtypes', 'active_if']
def __init__(self, decorators, cls_name=None, test_name=None, *,
device_type=None, dtypes=None, active_if=True):
self.decorators = list(decorators) if isinstance(decorators, collections.abc.Sequence) else [decorators]
self.cls_name = cls_name
self.test_name = test_name
self.device_type = device_type
self.dtypes = dtypes
self.active_if = active_if
# Validate dtypes
if self.dtypes is not None:
for dtype in self.dtypes:
assert isinstance(dtype, torch.dtype)
def is_active(self, cls_name, test_name, device_type, dtype):
return (
self.active_if and
(self.cls_name is None or self.cls_name == cls_name) and
(self.test_name is None or self.test_name == test_name) and
(self.device_type is None or self.device_type == device_type) and
(self.dtypes is None or dtype in self.dtypes)
)
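# A hedged usage sketch (the class/test names below are illustrative of how
# DecorateInfos typically appear in an OpInfo's `skips`/`decorators` tuples,
# not an actual entry):
#
#   DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
#                device_type='cpu', dtypes=(torch.float32,)),
#
# Only tests matching every provided field are decorated; all other generated
# tests run unmodified.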
# FIXME
# Note: historically the 'input' kwarg had to be a Tensor or TensorList, but we are trying
# to support scalar inputs, too. Some tests still depend on 'input' being a Tensor
# or TensorList, however.
class SampleInput(object):
"""Represents sample inputs to a function."""
__slots__ = ['input', 'args', 'kwargs', 'output_process_fn_grad', 'broadcasts_input', 'name']
def __init__(self, input, *, args=tuple(), kwargs=None, output_process_fn_grad=lambda x: x, broadcasts_input=False, name=""):
# input is the first input to the op and is typically either a Tensor or TensorList (Sequence[Tensor]).
# This follows the typical pattern where for Tensor inputs op(t, ...) = t.op(...).
self.input = input
self.args = args
self.kwargs = kwargs if kwargs is not None else {}
self.output_process_fn_grad = output_process_fn_grad
self.name = name
# Specifies if `self.input` is broadcasted or not,
# given that the operator supports broadcasting.
# This field is used to verify the behavior for inplace variant.
#
# If a SampleInput is marked with `broadcasts_input=True`,
# it is verified that we get a `RuntimeError` when calling the inplace
# variant with this sample. Also, inplace grad{grad} tests are skipped
# for such inputs (as they would error out otherwise).
self.broadcasts_input = broadcasts_input
def _repr_helper(self, formatter):
# Helper function to return the details of the SampleInput as `str`
# It consolidates all the fields of SampleInput and allows
# formatting fields like `input`, `args`, etc. with the `formatter`
# callable to customize the representation.
# See the `summary` method for an example.
arguments = [
f'input={formatter(self.input)}',
f'args={formatter(self.args)}',
f'kwargs={formatter(self.kwargs)}',
f'output_process_fn_grad={self.output_process_fn_grad}',
f'broadcasts_input={self.broadcasts_input}',
f'name={repr(self.name)}']
return f'SampleInput({", ".join(a for a in arguments if a is not None)})'
def __repr__(self):
return self._repr_helper(lambda x: x)
def summary(self):
# Returns the SampleInput details in a more
# friendly format.
# It formats `Tensor` and `TensorList`
# in a more condensed representation.
def formatter(arg):
# Format any instance of `Tensor` (standalone, in list, or in dict)
# by Tensor[TensorShape]
# Eg. Tensor with shape (3, 4) is formatted as Tensor[3, 4]
if isinstance(arg, torch.Tensor):
shape = str(tuple(arg.shape)).replace('(', '').replace(')', '')
return f"Tensor[{shape}]"
elif isinstance(arg, dict):
return {k: formatter(v) for k, v in arg.items()}
elif is_iterable_of_tensors(arg):
return "TensorList[" + ", ".join(map(formatter, arg)) + "]"
elif isinstance(arg, (list, tuple)): # Handle list, tuple
return "(" + ",".join(map(formatter, arg)) + ")"
return repr(arg)
return self._repr_helper(formatter)
# Applies the transform f(t) -> t to each tensor and dtype in the SampleInput
def transform(self, f):
def tt(t):
def _tt(t):
with torch.no_grad():
return f(t)
if isinstance(t, torch.Tensor):
return _tt(t)
elif isinstance(t, torch.dtype):
return _tt(t)
elif isinstance(t, list):
return list(map(tt, t))
elif isinstance(t, tuple):
return tuple(map(tt, t))
elif isinstance(t, dict):
return {k: tt(v) for k, v in t.items()}
else:
return t
sample_tt_input, tt_args, tt_kwargs = tt(self.input), tt(self.args), tt(self.kwargs)
# Note the transformed SampleInput assumes metadata like output_process_fn_grad is still valid!
return SampleInput(
sample_tt_input,
args=tt_args,
kwargs=tt_kwargs,
output_process_fn_grad=self.output_process_fn_grad,
broadcasts_input=self.broadcasts_input,
name=self.name + "_transformed")
# Returns the NumPy version of the sample input object in the form of a tuple: (input, args, kwargs)
# Converts tensors to ndarrays by calling .detach().cpu().numpy() on them
# Converts dtypes by remapping them using torch_to_numpy_dtype_dict
def numpy(self):
def to_numpy(t):
if isinstance(t, torch.Tensor):
if t.dtype is torch.bfloat16:
return t.detach().cpu().to(torch.float32).numpy()
if t.dtype is torch.chalf:
return t.detach().cpu().to(torch.cfloat).numpy()
return t.detach().cpu().numpy()
elif isinstance(t, torch.dtype):
return torch_to_numpy_dtype_dict[t]
return t
return self.transform(to_numpy)
def noncontiguous(self):
def to_noncontiguous(t):
if isinstance(t, torch.Tensor):
return noncontiguous_like(t)
elif isinstance(t, torch.dtype):
return t
return t
return self.transform(to_noncontiguous)
class ErrorInput(object):
"""
A SampleInput that will cause the operation to throw an error plus information
about the resulting error.
"""
__slots__ = ['sample_input', 'error_type', 'error_regex']
def __init__(self, sample_input, *, error_type=RuntimeError, error_regex):
self.sample_input = sample_input
self.error_type = error_type
self.error_regex = error_regex
class AliasInfo(object):
"""Class holds alias information. For example, torch.abs ->
torch.absolute, torch.Tensor.absolute, torch.Tensor.absolute_
"""
def __init__(self, alias_name):
self.name = alias_name
self.op = _getattr_qual(torch, alias_name)
self.method_variant = getattr(torch.Tensor, alias_name, None)
self.inplace_variant = getattr(torch.Tensor, alias_name + "_", None)
def __call__(self, *args, **kwargs):
return self.op(*args, **kwargs)
# Note [OpInfos]
# ~~~~~~~~~~~~~~
#
# The majority of this note was written shortly after the PyTorch 1.9 release.
# If you notice it's out-of-date or think it could be improved then please
# file an issue.
#
# See also: the OpInfo tracker (https://github.com/pytorch/pytorch/issues/54261)
# See also: "Writing Test Templates" in common_device_type.py to learn how to
# parametrize a test template using OpInfos.
# See also: PyTorch's GitHub wiki on running and writing tests
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# See also: ModuleInfos, OpInfo's sister class, defined in common_modules.py
#
# An OpInfo is a collection of metadata related to a PyTorch operator. This
# metadata is used to generate tests that validate properties of the operator,
# like if it implements the correct gradient formula.
#
# WHY OPINFOS?
# ~~~~~~~~~~~~
#
# OpInfos are principally intended to do three things:
#
# 1) to allow systematic testing over all PyTorch's operators
# 2) to simplify operating testing by autogenerating many tests
# 3) to allow systems (like autograd, torchscript, fx, nnc...) to test
# against every PyTorch operator
#
# All these goals are still a work in progress. Not every operator has an
# OpInfo, and some operator tests that could be automatically generated
# still have to be written manually.
#
# It's helpful to understand that OpInfos are both about test simplification and
# modularity. PyTorch is a complicated framework with many interrelated systems,
# too many for any one person to keep track of. An OpInfo can be thought of as the
# interface between an operator implementer and those other systems. Instead of
# requiring the implementer of torch.foo to understand how to test its forward
# mode AD or NNC support, that's typically handled automatically just by
# defining an OpInfo.
#
# It's often surprising to OpInfo writers that just implementing an OpInfo
# typically can't verify an operator is actually implemented correctly:
#
# "If an OpInfo doesn't validate my op works as expected, what's the point
# of it?"
#
# But the point is the above. OpInfos are intended to let you focus on testing
# the operator logic you're familiar with instead of having to write tests for
# how the operator interacts with each of PyTorch's many systems.
#
# And, OK, it turns out that SOMETIMES just writing an OpInfo DOES
# validate your op works as expected, but that's only in special
# cases. See below for details.
#
# WHAT'S AN OPINFO?
# ~~~~~~~~~~~~~~~~~
#
# So what is an OpInfo? It's a Python class that describes an operator's properties,
# like which dtypes it supports on the CPU and whether it has any aliases.
# These properties can be divided into three categories:
#
# 1) Metadata describing the operator, like the operator's name and if it
# "supports" the out kwarg.
# 2) Test directives, like "skips" that tell the test suite to skip some
# tests.
# 3) A "sample inputs" function that generates valid inputs for the operator.
#
# OpInfo attributes are described in more detail below.
#
# THE SAMPLE INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The "sample inputs" function merits special elaboration. This function is
# crucial to testing with OpInfos. A typical OpInfo test has to treat the operator
# as a black box. There's no structure for the test to understand or exploit.
# Without "sample inputs" it wouldn't even know how to call the OpInfo's
# operator. The sample input function saves the day by providing different
# "SampleInputs" that can be used to call the operator. A sample input
# function should have the following signature:
#
# def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#
# And should return an iterable of SampleInputs (see the class description
# above). Each SampleInput defines an "input", "args", "kwargs", an
# "output_process_fn_grad" function, the "broadcasts_input" bool and a
# "name".
#
# All the "sample_inputs" functions are invoked within a `torch.no_grad()`
# environment for efficiency and correctness. As such remember to set the
# "requires_grad" flag on the inputs **after** performing any transformations
# on them.
#
# The "input" is the first argument to the operator, or the tensor that
# the method or inplace variants of the operator should be called on, and
# should be on the requested device, of the requested dtype, and its
# requires_grad attribute should be set to the requires_grad argument.
#
# "args" should contain positional arguments, and "kwargs" keyword arguments.
#
# "output_process_fn_grad" has an interesting name. It's a function that maps
# the operator's output (when given the input, args, and kwargs) to the
# portion of the output to gradcheck. For example, consider an operator
# like torch.linalg.slogdet
# (https://pytorch.org/docs/master/generated/torch.linalg.slogdet.html).
# This operator returns a tuple of two tensors, but the first tensor
# cannot be backwarded through. Its "output_process_fn_grad" filters
# this output tuple to just the second argument, which we can call backward
# on. Functions that produce a single tensor can ignore this argument.
#
# "broadcasts_input" is a bool indicated if the SampleInput causes the operator
# to broadcast the "input" argument. This is important for tests to understand
# because inplace variants of operations throw a runtime error if they
# would broadcast their input arguments, so tests that work with inplace
# variants filter SampleInputs that broadcast their input.
#
# "name" is a string that's just used for debugging. It appears when printing
# the SampleInput.
#
# Sample inputs are designed to be used with many tests, some
# that are very time consuming, so they should be a small
# set with small tensors. An elaborated set of sample inputs
# can be specified using the "reference_inputs_func" attribute.
# The "reference inputs" for an operation are an extended
# set of sample inputs that can more exhaustively test an
# operator. They are used by only a few tests that are careful
# not to take too long to run. Adding reference inputs
# is highly encouraged!
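#
# As a minimal sketch (the operator and helper names are illustrative only,
# not a real entry), a sample inputs function commonly looks like:
#
#   def sample_inputs_foo(op_info, device, dtype, requires_grad, **kwargs):
#       make_arg = partial(make_tensor, device=device, dtype=dtype,
#                          requires_grad=requires_grad)
#       yield SampleInput(make_arg((S, S)))
#       yield SampleInput(make_arg((1, S)), args=(make_arg((S, S)),),
#                         broadcasts_input=True)
#
# using the make_tensor, partial, and S helpers imported/defined at the top of
# this file.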
#
# THE (OPTIONAL) ERROR INPUTS FUNCTION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# OpInfos may optionally specify "error inputs" through an error function. If
# specified test_errors in test_ops.py will call the op with these inputs
# and validate that the desired error is thrown.
#
# Error inputs automate a common testing pattern where multiple inputs are
# passed to an operation and the errors they throw are reviewed. Tests
# written in this style should be ported to the new OpInfo pattern.
#
# Error inputs are specified using the ErrorInput class, which contains
# a SampleInput (see above) and data about the expected error.
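#
# A hedged sketch (the operator name and expected error message below are
# illustrative only) of an error inputs function:
#
#   def error_inputs_foo(op_info, device, **kwargs):
#       si = SampleInput(make_tensor((S,), device=device, dtype=torch.float32))
#       yield ErrorInput(si, error_type=RuntimeError,
#                        error_regex="expected a 2D tensor")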
#
# OPINFO FILE ORGANIZATION
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# All OpInfos are currently defined in this file. Most OpInfo tests are defined
# in test_ops.py, but some system-specific tests are defined in those
# systems' test files, and subclass-specific tests are defined in the test
# file that corresponds to that subclass (see the below).
# Expect a reorganization in the future.
#
# WHAT'S TESTED?
# ~~~~~~~~~~~~~~
#
# Every OpInfo in the op_db sequence has the following properties validated in
# test_ops.py:
#
# - that its supported dtypes are specified correctly
# - that the operation produces the same results when called with noncontiguous inputs
# - that it supports the out= argument properly (if it allows out=),
# see https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-does-out-work-in-pytorch
# - that it works with the conjugate view bit properly
# - that its function, method, and inplace variants perform the same operation
# (that is, that torch.add, torch.Tensor.add, and torch.Tensor.add_ all
# do the same thing).
# - that its inplace variant preserves the input's storage
# - that its gradient formula is implemented correctly, and that it supports
# gradgrad, complex grad and gradgrad, and forward mode AD properly for
# the op's function and inplace variants (method variants are skipped
# to reduce test time).
# - that the operation performs the same operation when traced or scripted
# using the jit
# - that the operation is autodifferentiated by the jit as expected
# - that the operator's aliases, if any, perform the same operation and that
# the jit understands the alias
# - that the operator throws the correct errors (if error_inputs is defined)
# - that the operator produces the same results as a NumPy reference (if ref is defined)
# - that the operator produces the same results as a NumPy reference on an extended
# set of "reference inputs" (if both ref and reference_inputs_func are defined)
# (NOTE: elementwise unary and elementwise binary OpInfos do this even if only
# ref is defined, because they effectively autogenerate reference inputs)
# - that the operator works on different CUDA devices
#
# Additional OpInfo tests are in test_jit_fuser_te.py, test_fx_experimental.py,
# and test_fx.py. These tests validate that operators work with NNC and FX
# as expected.
#
# For performance, some of the above tests may only run on the first
# SampleInput returned by an OpInfo's sample input function.
#
# In addition to these tests, some subclasses (discussed in the next section)
# define additional tests.
#
# Critically, as mentioned above, what's not necessarily tested is that the operator
# works as expected. When implementing an OpInfo an engineer must still
# typically write one or more tests validating the operator's behavior.
# The exception to this is if reference testing is sufficient, or if
# the operation belongs to an OpInfo subclass that has more exhaustive
# operator testing. Elementwise unary and elementwise binary operators,
# in particular, usually don't require additional testing beyond
# writing an OpInfo.
#
#
# OPINFO (SUB)CLASSES
# ~~~~~~~~~~~~~~~~~~~
#
# In addition to the OpInfo base class there are several specialized OpInfo
# subclasses. For example, the UnaryUfuncInfo subclass is used for
# unary elementwise operations. These operations have a common structure
# that test_unary_ufuncs.py exploits with additional automated testing.
# The automated testing in test_unary_ufuncs.py is so thorough, comparing
# the operator to a NumPy reference function on a plethora of values, that
# just implementing an OpInfo for a unary elementwise operation is often
# sufficient testing.
#
# The ForeachFuncInfo is another OpInfo subclass that is hyper-specialized to a
# very unique class of operations. These OpInfos aren't included in the
# op_db sequence and have their own tests.
#
# Other OpInfo subclasses, like SpectralFuncInfo, are just for convenience
# when writing OpInfos.
#
# TESTING A NEW OPERATOR
# ~~~~~~~~~~~~~~~~~~~~~~
#
# If you're adding a new operator to any of the following namespaces:
# - torch
# - torch.fft
# - torch.linalg,
# - torch.special
# - torch.nn.functional
# then you should typically add an OpInfo for it.
#
# As mentioned a couple times above, implementing an OpInfo is not
# usually sufficient testing (unless the operator is a unary or binary elementwise
# operator). The OpInfo will only test the properties described in the
# "WHAT'S TESTED" section. It DOES NOT necessarily verify that the operator is
# implemented correctly.
#
# TIPS FOR WRITING AN OPINFO AND OPINFO TESTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Writing an OpInfo can be a little daunting. Since the point of an OpInfo is to
# be consumed by a variety of systems it can be hard to understand how to
# deal with test failures or how to set the OpInfo metadata properly.
#
# Before adding an OpInfo it helps to look at other OpInfos. A sample inputs
# function must be defined, and the operator's dtypes must be specified.
# Once that's done you should run the operator's tests in test_ops.py
# (these can be filtered using the "-k" argument in pytest). Tests that
# fail should provide an error message that describes what to change about
# your OpInfo. You don't need to worry about changing an OpInfo's default
# values unless a test yells at you.
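#
# For orientation only (the name and sample inputs function are illustrative,
# not an actual entry), a bare-bones OpInfo entry often starts as small as:
#
#   OpInfo('foo',
#          dtypes=floating_types(),
#          sample_inputs_func=sample_inputs_foo),
#
# and grows skips, decorators, and autograd flags as the generated tests point
# out what else needs to be specified.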
#
# Similarly, if you're writing a test that consumes OpInfos then it's critical
# your test provides a clear error message describing what to do when it
# fails. You should not assume the OpInfo implementer is familiar with your
# system.
#
# If you see a confusing error message while developing an OpInfo then please
# file an issue describing what happened.
#
# This trial-and-error approach to writing an OpInfo can be frustrating,
# but it's probably necessary as long as OpInfos don't require
# learning about all the systems that consume them. One thing that can help
# is the get_supported_dtypes() function defined in utils.py. This
# function can be used to programmatically specify the dtypes an operator
# supports, and is especially useful if writing an OpInfo on a machine
# without a CUDA device. See its documentation for more details.
#
# THE FUTURE OF OPINFOS AND OPINFO TESTING
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# In the future we expect OpInfo coverage to improve and cover
# the great majority of PyTorch's (public) operators.
#
# Classes and methods for the operator database
@dataclass
class OpInfo(object):
"""Operator information and helper functions for acquiring it."""
# the string name of the function
name: str
# An optional reference function that accepts ndarrays (AKA "NumPy arrays").
# If given, the op will be compared with its reference on each of its sample inputs.
ref: Callable = None
# the following metadata describes the operator, its variants, and its aliases, if any
# iterable of aliases, e.g. ("absolute",) for torch.abs
aliases: Iterable = None
# additional string to include in the test name
# this is useful when an op needs multiple OpInfos,
# like divide does, often because it's really several
# different ops behind the scenes
variant_test_name: str = ''
# the function variant of the operation, populated as torch.<name> if None
op: Callable = None
# allows the method variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
# - if None, then the OpInfo explicitly specifies it has no associated method
# - if a Callable, then that callable should be the method associated with this operation
method_variant: Callable = _NOTHING
# allows the inplace variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
# - if None, then the OpInfo explicitly specifies it has no associated inplace variant
# - if a Callable, then that callable should be the inplace variant associated with this operation
inplace_variant: Callable = _NOTHING
# allows the operator variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
# - if None, then the OpInfo explicitly specifies it has no associated operator
# - if a Callable, then that callable should be the operator associated with this operation
operator_variant: Callable = _NOTHING
# allows the inplace operator variant of this operation to be specified as follows:
# - if _NOTHING (default), then the OpInfo attempts to discover the variant using its name
# - if None, then the OpInfo explicitly specifies it has no associated inplace operator
# - if a Callable, then that callable should be the inplace operator associated with this operation
inplace_operator_variant: Callable = _NOTHING
# the following metadata are test directives for skipping or modifying tests
# information about which tests to skip
skips: Tuple = tuple()
# decorators to apply to generated tests
decorators: Tuple = tuple()
# the following are pointers to functions to generate certain classes of inputs
# function to generate sample inputs with strided layouts
sample_inputs_func: Callable = None
# function to generate a more thorough set of samples inputs with strided layouts
reference_inputs_func: Callable = None
# function to generate inputs that will throw errors
error_inputs_func: Callable = None
# function to generate sample inputs with sparse coo layouts
sample_inputs_sparse_coo_func: Callable = None
# function to generate sample inputs with sparse csr layouts
sample_inputs_sparse_csr_func: Callable = None
# function to generate sample inputs with sparse csc layouts
sample_inputs_sparse_csc_func: Callable = None
# function to generate sample inputs with sparse bsr layouts
sample_inputs_sparse_bsr_func: Callable = None
# function to generate sample inputs with sparse bsc layouts
sample_inputs_sparse_bsc_func: Callable = None
# the following metadata relates to dtype support and is tested for correctness in test_ops.py
# dtypes this function works with on the CPU,
# inherited by other device types that don't specify their own dtypes
dtypes: _dispatch_dtypes = None
# the following dtypesIf... options override the dtypes value on their respective device types
# dtypes this function is expected to work with on CUDA
dtypesIfCUDA: _dispatch_dtypes = None
# dtypes this function is expected to work with on ROCM
dtypesIfROCM: _dispatch_dtypes = None
# backward dtypes this function is expected to work with
backward_dtypes: _dispatch_dtypes = None
# backward dtypes this function is expected to work with on CUDA
backward_dtypesIfCUDA: _dispatch_dtypes = None
# backward dtypes this function is expected to work with on ROCM
backward_dtypesIfROCM: _dispatch_dtypes = None
# the following metadata describes the operators out= support
# whether the op supports the out kwarg
# defaults to True. If the op does not allow the out kwarg or
# supports it incorrectly, then test_out in test_ops.py should fail
supports_out: bool = True
# the following metadata relates to autograd support
# whether the operation supports backward mode AD
# if true, gradient correctness is tested in test_ops.py
# using the op's sample inputs
supports_autograd: bool = True
# whether the op supports second order gradients
# if true, gradgrad correctness is tested in test_ops.py
# defaults to support_autograd's value
# TODO: rename this to supports_bwgrad_bwgrad to be consistent with below
supports_gradgrad: bool = None
# whether the ops supports second order gradients via
# forward-over-reverse. If True, forward-over-reverse gradgrad correctness
# is tested. If False, test that forward grad is not implemented.
# Defaults to False.
supports_fwgrad_bwgrad: bool = False
# whether the operation supports inplace autograd
# if true, tested in test_ops.py
# defaults to supports_autograd's value
supports_inplace_autograd: bool = None
# Whether the operation support forward mode AD
# If the value is True, we check that the gradients are correct
# If the value is False, we test that forward grad is not implemented
supports_forward_ad: bool = False
# wrapper function for gradcheck
gradcheck_wrapper: Callable = lambda op, *args, **kwargs: op(*args, **kwargs)
# whether to check batched grad when doing gradcheck
# defaults to support_autograd's value
check_batched_grad: bool = None
# whether to check batched grad grad when doing gradgradcheck
# default's to support_gradgrad's value
check_batched_gradgrad: bool = None
# whether to check batched forward grad when doing gradcheck
# defaults to the value of `supports_forward_ad`
check_batched_forward_grad: bool = None
# whether to check batched forward grad when doing gradcheck
# defaults to the value of `check_batched_forward_grad`
check_inplace_batched_forward_grad: bool = None
# tolerance for nondeterminism while performing gradcheck
gradcheck_nondet_tol: float = 0.0
# Whether to use the fast implmentation for gradcheck/gradgradcheck.
# When set to None, defers to the default value provided by the wrapper
# function around gradcheck (testing._internal.common_utils.gradcheck)
gradcheck_fast_mode: bool = None
# the following metadata relates to JIT support and is tested for correctness in test_ops.py
# name of the corresponding aten:: operator
aten_name: str = None
# if this is a composite implicit autograd op, the decomposed op
decomp_aten_name: Optional[str] = None
# name of the corresponding aten:: operator for backwards
aten_backward_name: Optional[str] = None
# if a op's aten::node is expected to be symbolically autodiffed
assert_autodiffed: bool = False
# a list of strings with node names that are expected to be in a
# DifferentiableGraph when autodiffed. Ex: ['aten::add', 'aten::mm'],
# default is populated to be ['aten::(name of Python operator)']
autodiff_nonfusible_nodes: List[str] = None
# a list of strings with node names that are expected to be in FusionGroups
# inside of DifferentiableGraphs when this operation is autodiffed.
# Ex: ['aten::add', 'aten::mm'], defaults to an empty list
# Note: currently no ops use fusible nodes
autodiff_fusible_nodes: List[str] = None
# the following metadata relates to sparse support and is used in test_sparse.py
# whether the op supports sparse inputs
supports_sparse: bool = False
# only run tracing tests
supports_scripting: bool = True
# if the operator can be traced
supports_tracing: bool = True
# the following metadata relates to sparse csr support and is used in test_sparse_csr.py
# whether the op supports sparse csr inputs
supports_sparse_csr: bool = False
# whether the op supports sparse csc inputs
supports_sparse_csc: bool = False
# whether the op supports sparse bsr inputs
supports_sparse_bsr: bool = False
# whether the op supports sparse bsc inputs
supports_sparse_bsc: bool = False
# the following metadata relates to complex support and is checked in test_ops.py
test_conjugated_samples: bool = True
test_neg_view: bool = True
# assert that jit shape analysis fully propagates shape
assert_jit_shape_analysis: bool = False
# the following metadata relates to ExpandedWeights support and is checked in test_expanded_weights.py
supports_expanded_weight: bool = False
is_factory_function: bool = False
def __post_init__(self):
self._original_opinfo_args = asdict(self).copy()
assert self.dtypes is not None, "OpInfo for {0} has no dtypes!".format(self.name)
dtypes_args = (self.dtypes, self.dtypesIfCUDA, self.dtypesIfROCM)
# Validates the dtypes are generated from the dispatch-related functions
for dtype_list in dtypes_args:
assert isinstance(dtype_list, (_dispatch_dtypes, type(None)))
if self.aten_name is None:
self.aten_name = self.name
# Attribute to verify dynamic_dtypes are used.
self.dynamic_dtypes = any(map(lambda dtypes: isinstance(
dtypes, utils._dynamic_dispatch_dtypes), dtypes_args))
if self.dynamic_dtypes:
# Make sure `dtypesIfCUDA` is dynamic if dynamic dispatch is used for CPU.
# This is because, below, we set dtypesIfCUDA to dtypes if they are None.
assert isinstance(self.dtypesIfCUDA, utils._dynamic_dispatch_dtypes), \
(f"To use dynamic dypes for operator {self.name}, "
"acquire the dtypes dynamically for argument `dtypesIfCUDA`."
"This is to ensure that CUDA dtypes are acquired correctly as they"
"differ from CPU dtypes occasionally")
self.dtypes = set(self.dtypes)
# NOTE: backward dtypes must be acquired before forward dtypes
# since they fallback to explicit (not implicit!) specifications of
# forward dtypes
self.backward_dtypesIfROCM = set(self.backward_dtypesIfROCM) if self.backward_dtypesIfROCM is not None else (
self.backward_dtypesIfCUDA if self.backward_dtypesIfCUDA is not None
else self.backward_dtypes if self.backward_dtypes is not None
else self.dtypesIfROCM if self.dtypesIfROCM is not None
else self.dtypesIfCUDA if self.dtypesIfCUDA is not None
else self.dtypes)
self.backward_dtypesIfCUDA = set(self.backward_dtypesIfCUDA) if self.backward_dtypesIfCUDA is not None else (
self.backward_dtypes if self.backward_dtypes is not None
else self.dtypesIfCUDA if self.dtypesIfCUDA is not None
else self.dtypes)
self.backward_dtypes = set(self.backward_dtypes) if self.backward_dtypes is not None else self.dtypes
self.dtypesIfCUDA = set(self.dtypesIfCUDA) if self.dtypesIfCUDA is not None else self.dtypes
self.dtypesIfROCM = set(self.dtypesIfROCM) if self.dtypesIfROCM is not None else self.dtypesIfCUDA
# NOTE: if the op is unspecified it is assumed to be under the torch namespace
if not self.op:
self.op = _getattr_qual(torch, self.name)
if self.method_variant is _NOTHING:
self.method_variant = getattr(torch.Tensor, self.name, None)
# attributes like real, imag are not callable
if not callable(self.method_variant):
self.method_variant = None
if self.inplace_variant is _NOTHING:
inplace_name = self.name + "_"
self.inplace_variant = getattr(torch.Tensor, inplace_name, None)
if self.operator_variant is _NOTHING:
self.operator_variant = getattr(operator, self.name, None)
if self.inplace_operator_variant is _NOTHING:
# Note: operator.i<op> will use operator.<op> and assign the result to the lhs when no
# __i<op>__ method is found. This results in the appearance of an inplace operator variant which
# does not have the correct inplace behavior. To avoid this, we guard automatic detection of the inplace
# operator with a check that an inplace variant exists.
if self.inplace_variant is not None:
inplace_operator_name = "i" + self.name
self.inplace_operator_variant = getattr(operator, inplace_operator_name, None)
else:
self.inplace_operator_variant = None
self.decorators = (*self.decorators, *self.skips)
# We run the sampling functions without tracking the gradients of the creation of inputs
self.sample_inputs_func = torch.no_grad()(self.sample_inputs_func)
self.sample_inputs_sparse_coo_func = torch.no_grad()(self.sample_inputs_sparse_coo_func)
self.sample_inputs_sparse_csr_func = torch.no_grad()(self.sample_inputs_sparse_csr_func)
self.sample_inputs_sparse_csc_func = torch.no_grad()(self.sample_inputs_sparse_csc_func)
self.sample_inputs_sparse_bsr_func = torch.no_grad()(self.sample_inputs_sparse_bsr_func)
self.sample_inputs_sparse_bsc_func = torch.no_grad()(self.sample_inputs_sparse_bsc_func)
if self.reference_inputs_func is not None:
self.reference_inputs_func = torch.no_grad()(self.reference_inputs_func)
if not self.autodiff_fusible_nodes:
self.autodiff_fusible_nodes = []
if self.autodiff_nonfusible_nodes is None:
self.autodiff_nonfusible_nodes = ['aten::' + self.name]
# Autograd support
# Autograd flags that depend on backward AD only
# - If setting has been explicitly set, raise error if inconsistent
if self.supports_gradgrad is None:
self.supports_gradgrad = self.supports_autograd
else:
assert not (self.supports_gradgrad and not self.supports_autograd), (
"supports_gradgrad refines the part of autograd is supported, so it should "
"not be set if supports_autograd is False")
if self.check_batched_grad is None:
self.check_batched_grad = self.supports_autograd or self.supports_forward_ad
else:
assert not (self.check_batched_grad and not (self.supports_autograd or self.supports_forward_ad)), (
"check_batched_grad refines the part of autograd that will be checked (by gradcheck), so "
"it should not be set if supports_autograd is False")
if self.check_batched_gradgrad is None:
self.check_batched_gradgrad = self.supports_gradgrad
else:
assert not (self.check_batched_gradgrad and not self.supports_gradgrad), (
"check_batched_gradgrad refines the part of autograd that will be checked (by "
"gradgradcheck), so it should not be set if either supports_gradgrad or supports_autograd "
"is False.")
if self.check_batched_forward_grad is None:
self.check_batched_forward_grad = self.supports_forward_ad
else:
assert not (self.check_batched_forward_grad and not self.supports_forward_ad), (
"check_batched_forward_grad should only be used when supports_forward_ad "
"is True. It is used to disable the test in the specific cases "
"where the op supports forward ad but fails to compute "
"batched forward grad.")
if self.check_inplace_batched_forward_grad is None:
self.check_inplace_batched_forward_grad = self.check_batched_forward_grad
else:
assert not (self.check_inplace_batched_forward_grad and not self.check_batched_forward_grad), (
"check_batched_forward_grad should only be used when check_batched_forward_grad "
"is True. It is used to disable the test in the specific cases "
"where the op supports batched forward grad but fails to compute batched forward "
"grad for the inplace variant of the op.")
assert not (self.supports_fwgrad_bwgrad and not self.supports_autograd), (
"supports_fwgrad_bwgrad enables forward-over-backward gradgrad checks and should only be "
"True if backward ad is also checked, i.e., supports_forward_ad should be True.", self.name)
# Autograd flags that depend on both forward AD and backward AD
if self.supports_inplace_autograd is None:
self.supports_inplace_autograd = self.supports_autograd or self.supports_forward_ad
else:
assert not (self.supports_inplace_autograd and not self.supports_autograd and not self.supports_forward_ad), (
"supports_inplace_autograd refines the part of autograd that is supported, so "
"it should not be set if both supports_autograd and supports_forward_ad are False")
if self.aliases is not None:
self.aliases = tuple(AliasInfo(a) for a in self.aliases) # type: ignore[assignment]
else:
self.aliases = ()
def __call__(self, *args, **kwargs):
"""Calls the function variant of the operator."""
return self.op(*args, **kwargs)
def __str__(self):
return dataclass_repr(self)
def get_op(self):
"""Returns the function variant of the operator, torch.<op_name>."""
return self.op
def get_method(self):
"""Returns the method variant of the operator, torch.Tensor.<op_name>.
Returns None if the operator has no method variant.
"""
return self.method_variant
def get_inplace(self):
"""Returns the inplace variant of the operator, torch.Tensor.<op_name>_.
Returns None if the operator has no inplace variant.
"""
return self.inplace_variant
def get_operator(self):
"""Returns operator variant of the operator, e.g. operator.neg
Returns None if the operator has no operator variant.
"""
return self.operator_variant
def get_inplace_operator(self):
"""Returns the inplace operator variant of the operator, e.g operator.iadd
Returns None if the operator has no inplace operator variant"""
return self.inplace_operator_variant
def conjugate_sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs but with the tensor input or first
tensor in a sequence input conjugated.
"""
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
conj_samples = list(samples)
def conjugate(tensor):
_requires_grad = tensor.requires_grad
tensor = tensor.conj()
return tensor.requires_grad_(_requires_grad)
for i, sample in enumerate(samples):
sample = conj_samples[i]
# Note: it is assumed that the input here is either a tensor or tensorlist
if isinstance(sample.input, torch.Tensor):
sample.input = conjugate(sample.input)
else:
sample.input[0] = conjugate(sample.input[0])
return tuple(conj_samples)
def sample_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""
Returns an iterable of SampleInputs.
These samples should be sufficient to test the function works correctly
with autograd, TorchScript, etc.
"""
samples = self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
if kwargs.get('include_conjugated_inputs', False):
conj_samples = self.conjugate_sample_inputs(device, dtype, requires_grad, **kwargs)
samples_list = list(samples)
samples_list.extend(conj_samples)
samples = tuple(samples_list)
return samples
def reference_inputs(self, device, dtype, requires_grad=False, **kwargs):
"""
Returns an iterable of SampleInputs.
Distinct from sample_inputs() above because this returns an expanded set
of inputs when reference_inputs_func is defined. If undefined this returns
the sample inputs.
"""
if self.reference_inputs_func is None:
return self.sample_inputs_func(self, device, dtype, requires_grad, **kwargs)
if kwargs.get('include_conjugated_inputs', False):
raise NotImplementedError
return self.reference_inputs_func(self, device, dtype, requires_grad, **kwargs)
def error_inputs(self, device, **kwargs):
"""
Returns an iterable of ErrorInputs.
"""
return self.error_inputs_func(self, device, **kwargs)
def sample_inputs_sparse_coo(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
coo layout.
"""
return self.sample_inputs_sparse_coo_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_csr(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
csr layout.
"""
return self.sample_inputs_sparse_csr_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_csc(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
csc layout.
"""
return self.sample_inputs_sparse_csc_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_bsr(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
bsr layout.
"""
return self.sample_inputs_sparse_bsr_func(self, device, dtype, requires_grad, **kwargs)
def sample_inputs_sparse_bsc(self, device, dtype, requires_grad=False, **kwargs):
"""Returns an iterable of SampleInputs that contain inputs with sparse
bsc layout.
"""
return self.sample_inputs_sparse_bsc_func(self, device, dtype, requires_grad, **kwargs)
def get_decorators(self, test_class, test_name, device, dtype):
'''Returns the decorators targeting the given test.'''
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(test_class, test_name, device, dtype):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
def supported_dtypes(self, device_type):
if device_type == 'cpu':
return self.dtypes
if device_type == 'cuda':
return self.dtypesIfROCM if TEST_WITH_ROCM else self.dtypesIfCUDA
else:
return self.dtypes
def supported_backward_dtypes(self, device_type):
if not self.supports_autograd:
return set()
backward_dtypes = None
if device_type == 'cpu':
backward_dtypes = self.backward_dtypes
elif device_type == 'cuda':
backward_dtypes = self.backward_dtypesIfROCM if TEST_WITH_ROCM else self.backward_dtypesIfCUDA
else:
backward_dtypes = self.backward_dtypes
allowed_backward_dtypes = floating_and_complex_types_and(torch.bfloat16, torch.float16, torch.complex32)
return set(allowed_backward_dtypes).intersection(backward_dtypes)
def supports_dtype(self, dtype, device_type):
return dtype in self.supported_dtypes(device_type)
@property
def formatted_name(self):
"""Returns a formatted full name for this OpInfo that can be used in test names."""
variant = '_' + self.variant_test_name.replace('.', '_') if self.variant_test_name else ''
return '{}{}'.format(self.name.replace('.', '_'), variant)
# NOTE [Python References]
# Python References emulate existing PyTorch operations, but can ultimately
# be expressed in terms of "primitive" operations from torch._prims.
#
# These references are experimental.
# See https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-0/577
# for additional context.
#
# Python Reference OpInfos should be added to the python_ref_db list below.
# Tests can opt-into running on these references by including
# that list in the Sequence they pass to the @ops decorator.
#
# When a Python Reference OpInfo is constructed a pointer to an
# existing OpInfo must be provided using the torch_opinfo_name kwarg.
# The existing OpInfo with that name and no variant will be found
# to inherit from.
#
# Instead of just inheriting the existing OpInfo's metadata, the
# Python Reference OpInfos inherit the existing OpInfo's
# construction arguments. These arguments can be overridden
# by adding kwargs to the constructor.
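# A hedged illustration of the pattern described above (sketch only; the
# "_refs.neg" name is an assumption, not taken from this file). A Python
# reference entry names its callable and points back at the torch OpInfo it
# inherits construction arguments from:
#
#     PythonRefInfo(
#         "_refs.neg",               # the callable Python reference under test
#         torch_opinfo_name="neg",   # inherit the construction args of OpInfo("neg")
#     )
#
# Any keyword argument passed here overrides the inherited value.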
def _find_referenced_opinfo(referenced_name, variant_name):
'''
Finds the OpInfo with the given name and variant name.
'''
from torch.testing._internal.common_methods_invocations import op_db
for opinfo in op_db:
if opinfo.name == referenced_name and opinfo.variant_test_name == variant_name:
return opinfo
def _inherit_constructor_args(name, op, inherited, overrides):
# inherits metadata
common_kwargs = {
'name': name,
'op': op,
'aliases': None, # TODO add a check for alias coverage
'method_variant': None,
'inplace_variant': None, # TODO: add a check for inplace coverage
'supports_scripting': False,
}
# Acquires inherited kwargs
kwargs = inherited.copy()
# Fixes metadata
if 'kwargs' in kwargs:
kwargs.update(kwargs['kwargs'])
del kwargs['kwargs']
if 'self' in kwargs:
del kwargs['self']
if '__class__' in kwargs:
del kwargs['__class__']
if 'skips' in kwargs:
del kwargs['skips']
if 'decorators' in kwargs:
del kwargs['decorators']
# Overrides metadata
kwargs.update(common_kwargs)
kwargs.update(overrides)
# At the moment no prims support autograd, so we must not run autograd
# tests e.g. when testing dtype support. Once we start writing autograd
# formulas for prims this can be removed.
kwargs['supports_autograd'] = False
kwargs['supports_gradgrad'] = False
kwargs['supports_fwgrad_bwgrad'] = False
kwargs['supports_inplace_autograd'] = False
kwargs['supports_forward_ad'] = False
return kwargs
class PythonRefInfo(OpInfo):
'''
An OpInfo for a Python reference of an OpInfo base class operation.
'''
def __init__(
self,
name, # the stringname of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name='', # the variant name for corresponding torch opinfo
validate_view_consistency=True,
supports_nvfuser=True,
**kwargs): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(torch_opinfo_name, torch_opinfo_variant_name)
self.validate_view_consistency = validate_view_consistency
self.supports_nvfuser = supports_nvfuser
assert isinstance(self.torch_opinfo, OpInfo)
inherited = self.torch_opinfo._original_opinfo_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super(PythonRefInfo, self).__init__(**ukwargs)
def _generate_reduction_inputs(device, dtype, requires_grad, **kwargs):
"""Generates input tensors for testing reduction operators"""
yield make_tensor([], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([2], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([3, 5], dtype=dtype, device=device, requires_grad=requires_grad)
yield make_tensor([3, 2, 1, 2], dtype=dtype, device=device, requires_grad=requires_grad)
def _generate_reduction_kwargs(ndim, supports_multiple_dims=True):
"""Generates a subset of all valid dim and keepdim kwargs given ndim that
is appropriate for testing reduction operators.
"""
# Test default dim and keepdim
yield {}
# Test reducing inner and outer most dimensions
yield {'dim': 0, 'keepdim': True}
yield {'dim': -1, 'keepdim': False}
# Test reducing middle dimension
if ndim > 2:
yield {'dim': ndim // 2, 'keepdim': True}
if supports_multiple_dims:
# Test reducing all dimensions
yield {'dim': tuple(range(ndim)), 'keepdim': False}
# Test reducing both first and last dimensions
if ndim > 1:
yield {'dim': (0, -1), 'keepdim': True}
# Test reducing every other dimension starting with the second
if ndim > 3:
yield {'dim': tuple(range(1, ndim, 2)), 'keepdim': False}
def sample_inputs_reduction(op_info, device, dtype, requires_grad, **kwargs):
"""Sample inputs for reduction operators."""
# TODO(@heitorschueroff) Once all reduction operators are using
# ReductionOpInfo use op_info.supports_multiple_dims directly.
supports_multiple_dims: bool = kwargs.get('supports_multiple_dims', True)
# TODO(@heitorschueroff) Once all reduction operators are using ReductionOpInfo
# use op_info.generate_args_kwargs directly.
generate_args_kwargs = kwargs.get('generate_args_kwargs', lambda *args, **kwargs: (yield tuple(), {}))
for t in _generate_reduction_inputs(device, dtype, requires_grad):
for reduction_kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
for args, kwargs in generate_args_kwargs(t, **reduction_kwargs):
kwargs.update(reduction_kwargs)
yield SampleInput(t.detach().requires_grad_(requires_grad), args=args, kwargs=kwargs)
# NOTE [Reductions]:
#
# For testing purposes, we relax the definition of a reduction operator
# as defined in the docstring below. We do this to capture operators with
# a similar API so they can be tested automatically. However...
#
# Strictly speaking a reduction operator is an operator that can reduce an
# array to a single scalar value and that can be computed from the partial
# result of reducing subarrays. This usually means that the reduction operation
# should be commutative and associative. This definition is important when it
# comes to implementation as it determines how a reduction can be parallelized.
#
# For example, many summary statistics such as median, mode and quantile cannot
# be computed from partial results because these are sorting and counting based
# algorithms that need information that would be lost in the reduced value.
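# A hedged, concrete illustration of the distinction above (plain Python, not
# part of the class that follows): a sum can be reassembled from partial
# reductions, while a median cannot.
#
#     data = [3, 1, 4, 1, 5, 9]
#     assert sum(data) == sum(data[:3]) + sum(data[3:])   # associative + commutative
#     # statistics.median(data) cannot generally be recovered from
#     # statistics.median(data[:3]) and statistics.median(data[3:]) alone,
#     # because the partial results discard the ordering/count information needed.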
class ReductionOpInfo(OpInfo):
"""Reduction operator information.
An operator is a reduction operator if it reduces one or more dimensions of
the input tensor to a single value. Reduction operators must implement the
following signature:
- `op(input, *args, *, dim=None, keepdim=False, **kwargs) -> Tensor`
ReductionOpInfo tests that reduction operators implement a consistent API.
Optional features such as reducing over multiple dimensions are captured in
the optional keyword parameters of the ReductionOpInfo constructor.
If a reduction operator does not yet implement the full required API of
reduction operators, this should be documented by xfailing the failing
tests rather than adding optional parameters to ReductionOpInfo.
NOTE
The API for reduction operators has not yet been finalized and some
requirements may change.
See tests in test/test_reductions.py
"""
def __init__(
self, name, *,
# The identity value for the operator if it has one.
identity: Optional[Any] = None,
# The nan policy for the operator if it implements one.
# - propagate: NaN values are propagated to the output
# - omit: NaN values are discarded during the reduction
nan_policy: Optional[str] = None,
# Whether the operator supports reducing multiple dimensions.
supports_multiple_dims: bool = True,
# Whether the operator promotes integral to floating point dtypes.
promotes_int_to_float: bool = False,
# Whether the operator promotes all integral dtypes to int64.
promotes_int_to_int64: bool = False,
# If a specific dtype is given, then the operator always returns that
# dtype irrespective of the input dtype. If None, the operator returns
# the dtype according to the type promotion rules above.
result_dtype: Optional[torch.dtype] = None,
# Casts complex results to real (e.g. linalg.norm or torch.var)
complex_to_real: bool = False,
# ReductionOpInfo tests generate their own input, dim and keepdim
# arguments and call this function to generate tuples of extra args and
# kwargs to use when calling the op. This is required for operators that
# have other required parameters besides the input tensor.
generate_args_kwargs: Callable = lambda t, dim=None, keepdim=False: (yield tuple(), {}),
# Options from the OpInfo base class
**kwargs,
):
self._original_reduction_args = locals().copy()
assert nan_policy in (None, 'propagate', 'omit')
# These are mutually exclusive options
assert not (result_dtype and promotes_int_to_float)
assert not (result_dtype and promotes_int_to_int64)
assert not (result_dtype and complex_to_real)
assert not (promotes_int_to_float and promotes_int_to_int64)
# Default sample_inputs_func for ReductionOpInfo which augments sample
# inputs from sample_inputs_reduction with the args and kwargs from
# generate_args_kwargs. This is only used if sample_inputs_func is None.
def sample_inputs_func(*args, **kwargs):
kwargs['supports_multiple_dims'] = supports_multiple_dims
kwargs['generate_args_kwargs'] = generate_args_kwargs
yield from sample_inputs_reduction(*args, **kwargs)
# Override OpInfo defaults and call base class __init__
kwargs.setdefault('inplace_variant', None)
kwargs.setdefault('sample_inputs_func', sample_inputs_func)
super().__init__(name, **kwargs)
self.identity = identity
self.nan_policy = nan_policy
self.supports_multiple_dims = supports_multiple_dims
self.promotes_int_to_float = promotes_int_to_float
self.promotes_int_to_int64 = promotes_int_to_int64
self.complex_to_real = complex_to_real
self.result_dtype = result_dtype
self.generate_args_kwargs = generate_args_kwargs
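# Hedged sketch of how an entry is typically declared with this class (the
# values shown for a sum-like reduction are illustrative assumptions, not
# authoritative metadata for torch.sum):
#
#     ReductionOpInfo(
#         "sum",
#         identity=0,
#         nan_policy="propagate",
#         supports_multiple_dims=True,
#         promotes_int_to_int64=True,
#     )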
# The base reference input generation for elementwise binary operations
def _reference_inputs_elementwise_binary(op, device, dtype, requires_grad, exclude_zero, **kwargs):
yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
yield from generate_elementwise_binary_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
if dtype is not torch.bool:
yield from generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
if dtype not in (torch.bool, torch.uint8, torch.int8):
yield from generate_elementwise_binary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
yield from generate_elementwise_binary_broadcasting_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
yield from generate_elementwise_binary_with_scalar_samples(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
yield from generate_elementwise_binary_with_scalar_and_type_promotion_samples(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
if dtype.is_floating_point or dtype.is_complex:
yield from generate_elementwise_binary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
)
# Note that these reference inputs use scalars for the SampleInput.input value,
# and many tests require SampleInput.input to be a tensor or a list of tensors
def reference_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
# Default to False so the name is always bound, even when the op carries no rhs kwargs
exclude_zero = False
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
gen = partial(
_reference_inputs_elementwise_binary, op, device, dtype, requires_grad, exclude_zero, **kwargs
)
# yields "normal" samples
yield from gen()
# yields noncontiguous samples
for sample in gen():
yield sample.noncontiguous()
yield from generate_elementwise_binary_noncontiguous_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
yield from generate_elementwise_binary_arbitrarily_strided_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
# A functional that extends an elementwise binary operator's bespoke error inputs
# with generic error inputs for the class of elementwise binary operations
def make_error_inputs_elementwise_binary(error_inputs_func):
def error_inputs_func_wrapper(op, device, **kwargs):
if error_inputs_func is not None:
yield from error_inputs_func(op, device, **kwargs)
if not op.supports_rhs_python_scalar:
si = SampleInput(torch.tensor((1, 2, 3), device=device), args=(2,))
yield ErrorInput(si, error_type=Exception, error_regex="")
if not op.supports_one_python_scalar:
si = SampleInput(2, args=(torch.tensor((1, 2, 3), device=device),))
yield ErrorInput(si, error_type=Exception, error_regex="")
if (
not kwargs.get("skip_two_python_scalars", False)
and not op.supports_two_python_scalars
):
si = SampleInput(2, args=(3,))
yield ErrorInput(si, error_type=Exception, error_regex="")
return error_inputs_func_wrapper
# The following functions and classes are for testing elementwise binary operators.
# Returns a generator of pairs of contiguous tensors on the requested device
# and with the requested dtype.
#
# This function is intended to test the non-vectorized and vectorized code
# paths of elementwise binary functions, as well as their handling of odd tensor
# sizes (like zero-dim tensors and tensors with zero elements).
#
# Each iterable will include a tensor with no elements,
# zero dim (scalar) tensors, small 1D tensors, a medium 1D tensor, and
# a large 2D tensor.
def generate_elementwise_binary_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False):
shapes = (
# tensors with no elements
(0,),
(1, 0, 3),
# zero dim (scalar) tensor
(),
# small 1D tensor
(20,),
# medium 1D tensor
(812,),
# large 2D tensor
(1029, 917),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
yield SampleInput(lhs, args=(rhs,))
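# Hedged usage sketch for the generator above (assumes `op` is an elementwise
# binary entry carrying lhs/rhs_make_tensor_kwargs, e.g. a BinaryUfuncInfo):
#
#     for sample in generate_elementwise_binary_tensors(
#             op, device="cpu", dtype=torch.float32, requires_grad=False):
#         lhs, (rhs,) = sample.input, sample.args
#         op.op(lhs, rhs)   # exercises both the scalar and vectorized code paths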
def generate_elementwise_binary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False, exclude_zero=False):
# shape, strides, offset
strided_cases = (
((5, 6, 2), (1, 1, 7), 2),
((5, 5, 4), (1, 1, 7), 2),
((5, 5, 2), (4, 5, 7), 3),
((5, 5, 2), (5, 5, 7), 3),
((5, 5, 2), (5, 5, 5), 3),
((9, 5, 2), (0, 1, 7), 3),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
for shape, strides, offset in strided_cases:
a = make_arg(500,).as_strided(shape, strides, offset)
b = make_arg(shape)
yield SampleInput(a, args=(b,))
# Returns a generator of pairs of contiguous tensors on the requested device and with
# the requested dtype.
#
# Unlike the previous function, the values in these tensors are specified manually.
def generate_elementwise_binary_small_value_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=None
):
if exclude_zero is None:
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
# defines interesting values
_unsigned_int_vals = (0, 1, 55, 127, 128, 190, 210, 220, 254)
_int_vals = (0, -1, 1, -55, 55, -127, 127, -128)
_float_vals = (
0.0,
-0.0,
-0.001,
0.001,
-0.25,
0.25,
-1.0,
1.0,
-math.pi / 2,
math.pi / 2,
-math.pi + 0.00001,
math.pi - 0.00001,
-math.pi,
math.pi,
-math.pi - 0.00001,
math.pi + 0.00001,
)
l_vals = []
r_vals = []
if dtype.is_floating_point:
prod = product(_float_vals, _float_vals)
elif dtype.is_complex:
complex_vals = product(_float_vals, _float_vals)
# Note the use of list is required here or the map generator will be
# emptied by the following product and it won't produce the desired cross-product
complex_vals = list(map(lambda x: complex(*x), complex_vals))
prod = product(complex_vals, complex_vals)
elif dtype in (torch.int8, torch.int16, torch.int32, torch.int64):
prod = product(_int_vals, _int_vals)
elif dtype is torch.uint8:
prod = product(_unsigned_int_vals, _unsigned_int_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
if r == 0 and exclude_zero:
r_vals.append(1)
else:
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,))
def generate_elementwise_binary_large_value_tensors(
op, *, device, dtype, requires_grad=False
):
_large_int_vals = (-1113, 1113, -10701, 10701)
_large_float16_vals = (-501, 501, -1001.2, 1001.2, -13437.7, 13437.7)
_large_float_vals = _large_float16_vals + (-4988429.2, 4988429.2, -1e20, 1e20)
l_vals = []
r_vals = []
if dtype == torch.float16:
prod = product(_large_float16_vals, _large_float16_vals)
elif dtype.is_floating_point:
prod = product(_large_float_vals, _large_float_vals)
elif dtype.is_complex:
complex_vals = product(_large_float_vals, _large_float_vals)
# Note the use of list is required here or the map generator will be
# emptied by the following product and it won't produce the desired cross-product
complex_vals = list(map(lambda x: complex(*x), complex_vals))
prod = product(complex_vals, complex_vals)
elif dtype in (torch.int16, torch.int32, torch.int64):
prod = product(_large_int_vals, _large_int_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,))
def generate_elementwise_binary_extremal_value_tensors(
op, *, device, dtype, requires_grad=False
):
_float_extremals = (float("inf"), float("-inf"), float("nan"))
l_vals = []
r_vals = []
if dtype.is_floating_point:
prod = product(_float_extremals, _float_extremals)
elif dtype.is_complex:
complex_vals = product(_float_extremals, _float_extremals)
# Note the use of list is required here or the map generator will be
# emptied by the following product and it won't produce the desired cross-product
complex_vals = list(map(lambda x: complex(*x), complex_vals))
prod = product(complex_vals, complex_vals)
else:
raise ValueError("Unsupported dtype!")
for l, r in prod:
l_vals.append(l)
r_vals.append(r)
lhs = torch.tensor(l_vals, device=device, dtype=dtype, requires_grad=requires_grad)
rhs = torch.tensor(r_vals, device=device, dtype=dtype, requires_grad=requires_grad)
yield SampleInput(lhs, args=(rhs,))
# Test case for NaN propagation
nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan'))
lhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad)
lhs.flatten()[::3] = nan
rhs = make_tensor((128, 128), device=device, dtype=dtype, requires_grad=requires_grad)
rhs.flatten()[::3] = nan
yield SampleInput(lhs, args=(rhs,))
# Returns a generator of pairs of contiguous and noncontiguous tensors that
# require broadcasting
def generate_elementwise_binary_broadcasting_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
shapes = (
((1,), ()),
((2,), ()),
((1,), (2,)),
((2, 1), (2,)),
((1, 2), (2,)),
((3, 2), (2,)),
((1, 3, 2), (2,)),
((1, 3, 2), (3, 2)),
((3, 1, 2), (3, 2)),
((2, 3, 2), ()),
((3, 1, 2), (1, 3, 2)),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
for shape, noncontiguous in product(shapes, [True, False]):
shape_lhs, shape_rhs = shape
lhs = make_arg(
shape_lhs, noncontiguous=noncontiguous, **op.lhs_make_tensor_kwargs
)
rhs = make_arg(
shape_rhs, noncontiguous=noncontiguous, **op.rhs_make_tensor_kwargs
)
yield SampleInput(lhs, args=(rhs,), broadcasts_input=True)
# Returns a generator of pairs of contiguous tensors and scalars
def generate_elementwise_binary_with_scalar_samples(
op, *, device, dtype, requires_grad=False
):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
shapes = ((), (3,), (5, 3), (0, 1, 3), (1, 5))
if op.supports_rhs_python_scalar:
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()
yield SampleInput(lhs, args=(rhs_scalar,))
# Extends with scalar lhs
if op.supports_one_python_scalar:
yield SampleInput(lhs_scalar, args=(rhs,))
if op.supports_two_python_scalars:
lhs_scalar = make_arg((), **op.lhs_make_tensor_kwargs).item()
rhs_scalar = make_arg((), **op.rhs_make_tensor_kwargs).item()
yield SampleInput(lhs_scalar, args=(rhs_scalar,))
# Returns a generator of pairs of contiguous tensors with 0d tensors and scalars that exercise type promotion
def generate_elementwise_binary_with_scalar_and_type_promotion_samples(
op, *, device, dtype, requires_grad=False
):
# Add these samples only for logical and comparison ops; arithmetic ops are not happy about extremal scalars
if op.name in ('eq', 'ne', 'gt', 'ge', 'lt', 'le', 'logical_and', 'logical_or', 'logical_xor'):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
shape = (23,) # this shape is big enough to trigger vectorization, and has non-vectorized tail
values = (float('nan'), float('inf'), -float('inf'))
scalar_tensors = tuple(torch.tensor(val) for val in values)
if op.supports_rhs_python_scalar:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
for scalar in values + scalar_tensors:
yield SampleInput(lhs, args=(scalar,))
# Extends with scalar lhs
if op.supports_one_python_scalar:
yield SampleInput(scalar, args=(rhs,))
# Returns a generator of pairs of noncontiguous tensors
def generate_elementwise_binary_noncontiguous_tensors(
op, *, device, dtype, requires_grad=False, exclude_zero=False
):
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
# Generic noncontiguity
lhs = make_arg((1026,), noncontiguous=True, **op.lhs_make_tensor_kwargs)
rhs = make_arg((1026,), noncontiguous=True, **op.rhs_make_tensor_kwargs)
yield SampleInput(lhs.clone(), args=(rhs.clone(),))
yield SampleInput(lhs.contiguous(), args=(rhs,))
# Transposed
lhs = make_arg((789, 357), **op.lhs_make_tensor_kwargs)
rhs = make_arg((789, 357), **op.rhs_make_tensor_kwargs)
yield SampleInput(lhs.T, args=(rhs.T,))
# More noncontiguity
shapes = ((5, 7), (1024,))
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
lhs_non_contig.copy_(lhs)
rhs_non_contig = torch.empty(shape + (2,), device=device, dtype=dtype)[..., 0]
rhs_non_contig.copy_(rhs)
yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),))
yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,))
# Noncontiguous indices
shape = (2, 2, 1, 2)
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = lhs[:, 1, ...]
rhs_non_contig = rhs[:, 1, ...]
yield SampleInput(lhs_non_contig.clone(), args=(rhs_non_contig.clone(),))
yield SampleInput(lhs_non_contig.contiguous(), args=(rhs_non_contig,))
# Expanded tensors
shapes = ((1, 3), (1, 7), (5, 7))
for shape in shapes:
lhs = make_arg(shape, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape, **op.rhs_make_tensor_kwargs)
lhs_non_contig = lhs.expand(3, -1, -1)
rhs_non_contig = rhs.expand(3, -1, -1)
yield SampleInput(lhs_non_contig, args=(rhs_non_contig,))
# Sample inputs for elementwise binary operators, like add
def sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs):
_M = S if kwargs.get("small_inputs_only", False) else M
_S = XS if kwargs.get("small_inputs_only", False) else S
# Default to False so the name is always bound, even when the op carries no rhs kwargs
exclude_zero = False
if hasattr(op, "rhs_make_tensor_kwargs"):
exclude_zero = op.rhs_make_tensor_kwargs.get("exclude_zero", False)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, exclude_zero=exclude_zero
)
shapes = (
((), ()),
((_S,), ()),
((_S, 1), (_S,)),
((_M, _S), ()),
((_S, _M, _S), (_M, _S)),
((_S, _M, _S), (_S, _M, _S)),
((_M, 1, _S), (_M, _S)),
((_M, 1, _S), (1, _M, _S)),
((0, 1, XS), (0, _M, XS)),
)
sample_kwargs = kwargs.get("sample_kwargs", {})
for shape_lhs, shape_rhs in shapes:
lhs = make_arg(shape_lhs, **op.lhs_make_tensor_kwargs)
rhs = make_arg(shape_rhs, **op.rhs_make_tensor_kwargs)
broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)
yield SampleInput(
lhs, args=(rhs,), kwargs=sample_kwargs, broadcasts_input=broadcasts_input
)
# Metadata class for binary "universal functions (ufuncs)" that accept two
# tensors and have common properties
class BinaryUfuncInfo(OpInfo):
"""Operator information for 'universal binary functions (binary ufuncs).'
These are functions of two tensors with common properties like:
- they are elementwise functions
- the output shape is determined by the input shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/stable/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(
self,
name,
*,
sample_inputs_func=sample_inputs_elementwise_binary,
reference_inputs_func=reference_inputs_elementwise_binary,
error_inputs_func=None,
lhs_make_tensor_kwargs=None,
rhs_make_tensor_kwargs=None,
promotes_int_to_float=False, # Set to true if the op promotes integer inputs to float
always_returns_bool=False, # Set to true if the op always returns bool tensors
supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs
supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs
supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs
**kwargs,
):
self._original_binary_ufunc_args = locals().copy()
# Elementwise binary operations perform the equivalent of test_numpy_refs
# in test_binary_ufuncs, but with additional test granularity. So the
# generic test_ops.py test is skipped because it's redundant.
common_skips = (
DecorateInfo(
unittest.skip("Skipping redundant test."),
"TestCommon",
"test_numpy_refs",
),
)
kwargs["skips"] = kwargs.get("skips", tuple()) + common_skips
super(BinaryUfuncInfo, self).__init__(
name,
sample_inputs_func=sample_inputs_func,
reference_inputs_func=reference_inputs_func,
error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func),
**kwargs,
)
# [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on.
if lhs_make_tensor_kwargs is None:
lhs_make_tensor_kwargs = {}
self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs
if rhs_make_tensor_kwargs is None:
rhs_make_tensor_kwargs = {}
self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs
self.promotes_int_to_float = promotes_int_to_float
self.always_returns_bool = always_returns_bool
self.supports_rhs_python_scalar = supports_rhs_python_scalar
self.supports_one_python_scalar = supports_one_python_scalar
self.supports_two_python_scalars = supports_two_python_scalars
if self.supports_two_python_scalars:
self.supports_one_python_scalar = True
if self.supports_one_python_scalar:
assert (
supports_rhs_python_scalar
), "Can't support lhs and rhs Python scalars but not rhs scalars!"
# The following functions and classes are for testing elementwise unary operators.
def sample_inputs_elementwise_unary(
op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs
):
if not op_kwargs:
op_kwargs = {}
_L = S if kwargs.get("small_inputs_only", False) else L
low, high = op_info.domain
low = low if low is None else low + op_info._domain_eps
high = high if high is None else high - op_info._domain_eps
if op_info.supports_sparse_csr or op_info.supports_sparse_csc or op_info.supports_sparse_bsr or op_info.supports_sparse_bsc:
# Tensors with dim=2 for sparse compressed testing
yield SampleInput(
make_tensor(
(_L, _L),
device=device,
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
),
kwargs=op_kwargs,
)
else:
# Creates a 1D, empty, and scalar tensor
for shape in ((_L,), (1, 0, 3), ()):
yield SampleInput(
make_tensor(
shape,
device=device,
dtype=dtype,
low=low,
high=high,
requires_grad=requires_grad,
),
kwargs=op_kwargs,
)
# Replace values satisfying condition with a safe value. This is used to block
# out values that could cause a singularity, like tan(pi/2)
def _replace_values_in_tensor(tensor, condition, safe_value):
mask = condition(tensor)
tensor.masked_fill_(mask, safe_value)
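# Hedged example of the filtering idea (values are assumptions): a tan-like op
# could set reference_numerics_filter so inputs near pi/2, where the function
# blows up, are swapped for a safe value before comparing against the reference.
#
#     t = torch.linspace(0, math.pi, 1000)
#     condition = lambda t: (t - math.pi / 2).abs() < 1e-2
#     _replace_values_in_tensor(t, condition, 0.0)   # masked_fill_ happens in place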
# Helper to create a unary elementwise tensor with valid inputs
def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs):
low, high = op.domain
low = low if low is None else low + op._domain_eps
high = high if high is None else high - op._domain_eps
a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs)
if op.reference_numerics_filter is not None and dtype is not torch.bool:
condition, safe_value = op.reference_numerics_filter
_replace_values_in_tensor(a, condition, safe_value)
return a
# Restricts the values in the tensor to the domain of the
# given elementwise unary operator
def _filter_unary_elementwise_tensor(a, *, op):
# short-circuits for boolean tensors
if a.dtype is torch.bool:
return a
low, high = op.domain
low = low if low is None else low + op._domain_eps
high = high if high is None else high - op._domain_eps
if a.dtype is torch.uint8 and low is not None:
low = max(low, 0)
if not a.dtype.is_floating_point and not a.dtype.is_complex:
low = math.ceil(low) if low is not None else None
high = math.floor(high) if high is not None else None
if op.reference_numerics_filter is not None:
condition, safe_value = op.reference_numerics_filter
_replace_values_in_tensor(a, condition, safe_value)
if low is not None or high is not None:
if a.dtype.is_complex:
a.real.clamp_(low, high)
a.imag.clamp_(low, high)
else:
a.clamp_(min=low, max=high)
return a
def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs):
# Special-cases bool
if dtype is torch.bool:
tensors = (
torch.empty(0, device=device, dtype=torch.bool),
torch.tensor(True, device=device),
torch.tensor(False, device=device),
torch.tensor((True, False), device=device),
make_tensor((812,), device=device, dtype=dtype),
make_tensor((1029, 917), device=device, dtype=dtype),
)
for a in tensors:
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
shapes = (
(1029, 917),
(812,),
# Empty sizes
(0,),
(0, 3, 3),
(1, 0, 5),
(6, 0, 0, 0),
(3, 0, 1, 0),
)
make_arg = partial(
_make_unary_elementwise_tensor,
op=op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
for shape in shapes:
a = make_arg(shape)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_small_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
a = _filter_unary_elementwise_tensor(sample.input, op=op)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_large_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
a = _filter_unary_elementwise_tensor(sample.input, op=op)
yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0])
def generate_elementwise_unary_extremal_value_tensors(
op, *, device, dtype, requires_grad=False
):
for sample in generate_elementwise_binary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad
):
yield SampleInput(
sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0]
)
def generate_elementwise_unary_noncontiguous_tensors(
op, *, device, dtype, requires_grad=False
):
low, high = op.domain
low = low if low is None else low + op._domain_eps
high = high if high is None else high - op._domain_eps
make_arg = partial(
_make_unary_elementwise_tensor,
op=op,
device=device,
dtype=dtype,
requires_grad=requires_grad,
)
# Generic noncontiguity
t = make_arg((1026,), noncontiguous=True)
yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0])
# Transposed
t = make_arg((1024, 1024)).T
yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0])
# Expanded tensors
shapes = ((1, 3), (1, 7), (5, 7))
for shape in shapes:
t = make_arg(shape)
t_non_contig = t.expand(3, -1, -1)
yield SampleInput(
t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0]
)
def generate_elementwise_unary_arbitrarily_strided_tensors(op, *, device, dtype, requires_grad=False):
# shape, strides, offset
strided_cases = (
((5, 6, 2), (1, 1, 7), 2),
((5, 5, 4), (1, 1, 7), 2),
((5, 5, 2), (4, 5, 7), 3),
((5, 5, 2), (5, 5, 7), 3),
((5, 5, 2), (5, 5, 5), 3),
((9, 5, 2), (0, 1, 7), 3),
)
make_arg = partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
for shape, strides, offset in strided_cases:
a = make_arg(500,).as_strided(shape, strides, offset)
yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0])
# Reuses the elementwise binary generators for consistency
# TODO: in the future generalize the reference generators to handle n-ary elementwise operations
def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs):
yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs)
yield from generate_elementwise_unary_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype is not torch.bool:
yield from generate_elementwise_unary_small_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype not in (torch.bool, torch.uint8, torch.int8) and (
op.handles_large_floats
or (not dtype.is_floating_point and not dtype.is_complex)
):
yield from generate_elementwise_unary_large_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
if dtype.is_floating_point or (op.handles_complex_extremal_values and dtype.is_complex):
yield from generate_elementwise_unary_extremal_value_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs):
gen = partial(
_reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs
)
# yields "normal" samples
yield from gen()
# yields noncontiguous samples
for sample in gen():
yield sample.noncontiguous()
yield from generate_elementwise_unary_noncontiguous_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
yield from generate_elementwise_unary_arbitrarily_strided_tensors(
op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs
)
# Metadata class for unary "universal functions (ufuncs)" that accept a single
# tensor and have common properties like:
class UnaryUfuncInfo(OpInfo):
"""Operator information for 'universal unary functions (unary ufuncs).'
These are functions of a single tensor with common properties like:
- they are elementwise functions
- the input shape is the output shape
- they typically have method and inplace variants
- they typically support the out kwarg
- they typically have NumPy or SciPy references
See NumPy's universal function documentation
(https://numpy.org/doc/1.18/reference/ufuncs.html) for more details
about the concept of ufuncs.
"""
def __init__(
self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
domain=(None, None), # the [low, high) domain of the function
handles_complex_extremal_values=True, # whether the op correctly handles extremal values (like nan/inf)
handles_large_floats=True, # whether the op correctly handles large float values (like 1e20)
supports_complex_to_float=False, # op supports casting from complex input to real output safely eg. angle
sample_inputs_func=sample_inputs_elementwise_unary,
reference_inputs_func=reference_inputs_elementwise_unary,
sample_kwargs=lambda device, dtype, input: ({}, {}),
supports_sparse=False,
reference_numerics_filter=None, # Filters values in the range of the domain specified above but that should not be tested
**kwargs,
):
self._original_unary_ufunc_args = locals().copy()
super(UnaryUfuncInfo, self).__init__(
name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
reference_inputs_func=reference_inputs_func,
supports_sparse=supports_sparse,
**kwargs,
)
self.ref = ref
self.domain = domain
self.handles_complex_extremal_values = handles_complex_extremal_values
self.handles_large_floats = handles_large_floats
self.supports_complex_to_float = supports_complex_to_float
self.reference_numerics_filter = reference_numerics_filter
# test_unary_ufuncs.py generates its own inputs to test the consistency
# of the operator on sliced tensors, non-contig tensors, etc.
# `sample_kwargs` is a utility function to provide kwargs
# along with those inputs if required (eg. clamp).
# It should return two dictionaries: the first holding kwargs for the
# torch operator and the second for the reference NumPy operator.
self.sample_kwargs = sample_kwargs
# Epsilon to ensure grad and gradgrad checks don't test values
# outside a function's domain.
self._domain_eps = 1e-5
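# Hedged sketch of the sample_kwargs contract described above (the clamp-style
# names are illustrative assumptions): the callable returns one kwargs dict for
# the torch operator and one for the NumPy reference.
#
#     def _clamp_sample_kwargs(device, dtype, input):
#         return {"min": -1, "max": 1}, {"a_min": -1, "a_max": 1}
#
#     UnaryUfuncInfo(
#         "clamp_like",
#         ref=np.clip,
#         sample_kwargs=_clamp_sample_kwargs,
#     )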
def sample_inputs_spectral_ops(self, device, dtype, requires_grad=False, **kwargs):
is_fp16_or_chalf = dtype == torch.complex32 or dtype == torch.half
if not is_fp16_or_chalf:
nd_tensor = partial(make_tensor, (S, S + 1, S + 2), device=device,
dtype=dtype, requires_grad=requires_grad)
oned_tensor = partial(make_tensor, (31,), device=device,
dtype=dtype, requires_grad=requires_grad)
else:
# cuFFT supports powers of 2 for half and complex half precision
# NOTE: For hfft, hfft2, hfftn, irfft, irfft2, irfftn with default args
# where output_size n=2*(input_size - 1), we make sure that logical fft size is a power of two
low = None
high = None
if self.name in ['fft.hfft', 'fft.irfft',
'_refs.fft.hfft', '_refs.fft.irfft']:
shapes = ((2, 9, 9), (33,))
elif self.name in ['fft.hfft2', 'fft.irfft2',
'_refs.fft.hfft2', '_refs.fft.irfft2']:
shapes = ((2, 8, 9), (33,))
elif self.name in ['fft.hfftn', 'fft.irfftn',
'_refs.fft.hfftn', '_refs.fft.irfftn']:
shapes = ((2, 2, 33), (33,))
# Adjusting the limits because the test would be flaky due to over-saturation of float16
# See: https://github.com/pytorch/pytorch/pull/81416
low = -1.0
high = 1.0
else:
shapes = ((2, 8, 16), (32,))
nd_tensor = partial(make_tensor, shapes[0], device=device, low=low, high=high,
dtype=dtype, requires_grad=requires_grad)
oned_tensor = partial(make_tensor, shapes[1], device=device, low=low, high=high,
dtype=dtype, requires_grad=requires_grad)
if self.ndimensional == SpectralFuncType.ND:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(8,))),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3, (0, -1)]),
]
elif self.ndimensional == SpectralFuncType.TwoD:
return [
SampleInput(nd_tensor(),
kwargs=dict(s=(3, 10) if not is_fp16_or_chalf else (4, 8), dim=(1, 2), norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(s=(6, 8) if not is_fp16_or_chalf else (4, 8))),
SampleInput(nd_tensor(),
kwargs=dict(dim=0)),
SampleInput(nd_tensor(),
kwargs=dict(dim=(0, -1))),
SampleInput(nd_tensor(),
kwargs=dict(dim=(-3, -2, -1))),
]
else:
return [
SampleInput(nd_tensor(),
kwargs=dict(n=10 if not is_fp16_or_chalf else 8, dim=1, norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(norm='ortho')),
SampleInput(nd_tensor(),
kwargs=dict(n=7 if not is_fp16_or_chalf else 8)
),
SampleInput(oned_tensor()),
*(SampleInput(nd_tensor(),
kwargs=dict(dim=dim))
for dim in [-1, -2, -3]),
]
SpectralFuncType = Enum('SpectralFuncType', ('OneD', 'TwoD', 'ND'))
# Metadata class for Fast Fourier Transforms in torch.fft.
class SpectralFuncInfo(OpInfo):
"""Operator information for torch.fft transforms. """
def __init__(self,
name, # the string name of the function
*,
ref=None, # Reference implementation (probably in np.fft namespace)
dtypes=floating_and_complex_types(),
ndimensional: SpectralFuncType,
sample_inputs_func=sample_inputs_spectral_ops,
decorators=None,
**kwargs):
self._original_spectral_func_args = dict(locals()).copy()
self._original_spectral_func_args.update(kwargs)
decorators = list(decorators) if decorators is not None else []
decorators += [
skipCPUIfNoFFT,
DecorateInfo(toleranceOverride({torch.chalf: tol(4e-2, 4e-2)}),
"TestCommon", "test_complex_half_reference_testing")
]
super().__init__(name=name,
dtypes=dtypes,
decorators=decorators,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
self.ndimensional = ndimensional
class ShapeFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for Shape manipulating operations like tile and roll"""
def __init__(self,
name, # the string name of the function
*,
ref, # a reference function
dtypes=floating_types(),
dtypesIfCUDA=None,
dtypesIfROCM=None,
sample_inputs_func=None,
**kwargs):
super(ShapeFuncInfo, self).__init__(name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
**kwargs)
self.ref = ref
def sample_inputs_foreach(self, device, dtype, N, *, noncontiguous=False, same_size=False, low=None, high=None):
if same_size:
return [make_tensor((N, N), dtype=dtype, device=device, noncontiguous=noncontiguous) for _ in range(N)]
else:
return [make_tensor((N - i, N - i), dtype=dtype, device=device, noncontiguous=noncontiguous) for i in range(N)]
def get_foreach_method_names(name):
# Resolve the foreach op, its inplace variant, and the torch reference functions
op_name = "_foreach_" + name
inplace_op_name = op_name + "_"
op = getattr(torch, op_name, None)
inplace_op = getattr(torch, inplace_op_name, None)
ref = getattr(torch, name, None)
ref_inplace = getattr(torch.Tensor, name + "_", None)
return op, inplace_op, ref, ref_inplace
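# Hedged illustration: for name="add" this is expected to resolve to
# (torch._foreach_add, torch._foreach_add_, torch.add, torch.Tensor.add_);
# each slot falls back to None when the attribute is missing from the build.
#
#     op, inplace_op, ref, ref_inplace = get_foreach_method_names("add")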
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions"""
def __init__(self,
name,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
dtypesIfROCM=None,
supports_alpha_param=False,
sample_inputs_func=sample_inputs_foreach,
**kwargs):
super().__init__(
"_foreach_" + name,
dtypes=dtypes,
dtypesIfCUDA=dtypesIfCUDA,
dtypesIfROCM=dtypesIfROCM,
sample_inputs_func=sample_inputs_func,
**kwargs
)
foreach_method, foreach_method_inplace, torch_ref_method, torch_ref_inplace = get_foreach_method_names(name)
self.method_variant = foreach_method
self.inplace_variant = foreach_method_inplace
self.ref = torch_ref_method
self.ref_inplace = torch_ref_inplace
self.supports_alpha_param = supports_alpha_param
if name == "norm":
self.ref = torch.linalg.vector_norm
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
d818f2b57247fda0d8236a1bd190811a8475a46a | 6f917fb1840ea950b21941995ea96c3ec17d0a18 | /Study/keras2/keras77_07_cifar10_InceptionV3.py | 06f20232131f71809ece548e20bab695e8a87d22 | [] | no_license | maiorem/Artificial-Intelligence | 504a1656921c6d206237f17cd053ae38e1b4705c | 367fbffb678bd1761105ae4f37f015f90120d3f0 | refs/heads/main | 2023-02-03T00:24:20.571707 | 2020-12-18T08:24:48 | 2020-12-18T08:24:48 | 311,254,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | import numpy as np
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.layers import Dense, Flatten, Input, BatchNormalization, Dropout, Activation
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
#1. 데이터
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_predict=x_test[:10, :, :, :]
x_train=x_train.astype('float32')/255.
x_test=x_test.astype('float32')/255.
x_predict=x_predict.astype('float32')/255.
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
# 2. Model
inceptionv3=InceptionV3(weights='imagenet', include_top=False, input_shape=(32, 32, 3)) # pretrained InceptionV3 convolutional base
inceptionv3.trainable=False
model=Sequential()
model.add(inceptionv3)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))
# 3. Compile, train
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
es=EarlyStopping(monitor='val_loss', patience=10, mode='auto')
reduce_lr=ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=1)
modelpath='./model/inceptionv3-{epoch:02d}-{val_loss:.4f}.hdf5'
cp=ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
model.fit(x_train, y_train, epochs=1000, batch_size=32, verbose=1, validation_split=0.2, callbacks=[es, cp, reduce_lr])
# 4. Evaluate, predict
loss, accuracy=model.evaluate(x_test, y_test, batch_size=32)
print('loss : ', loss)
print('accuracy : ', accuracy)
y_predict=model.predict(x_predict)
y_predict=np.argmax(y_predict, axis=1) # Use numpy's argmax to decode the one-hot encoding.
y_actually=np.argmax(y_test[:10, :], axis=1)
print('Actual values : ', y_actually)
print('Predicted values : ', y_predict)
'''
ValueError: Input size must be at least 75x75; got `input_shape=(32, 32, 3)`
''' | [
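# A hedged sketch of one way around the 75x75 minimum noted above (assumption:
# upsample the 32x32 CIFAR-10 images 3x to 96x96 before the backbone; this was
# not part of the original run):
#
#     from tensorflow.keras.layers import UpSampling2D
#     inceptionv3 = InceptionV3(weights='imagenet', include_top=False, input_shape=(96, 96, 3))
#     model = Sequential()
#     model.add(UpSampling2D(size=(3, 3), input_shape=(32, 32, 3)))  # 32x32 -> 96x96
#     model.add(inceptionv3)
#     model.add(Flatten())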
"maiorem00@gmail.com"
] | maiorem00@gmail.com |
c9638982979fe9123e4137a7d6a755d188484b69 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_DCHP_Server/test_c139270.py | 09792a397e1f38790b7d1ba0d50c6c908e764c4c | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_physical_interface import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_physical_interface import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def import *
test_id = 139270
def test_c139270(browser):
try:
login_web(browser, url=dev1)
a = Shell_SSH()
a.connect(dev1)
a.execute("en")
a.execute("conf t")
a.execute("interface gigabitethernet "+interface_name_3)
a.execute("ip address 131.1.1.1 255.255.255.0")
a.execute("exit")
dhcp_server_add(browser, interface=interface_name_3,
dhcp_type="dhcp_server", dhcp_gw="131.1.1.254", dhcp_sm="24",
dns_server1="114.114.114.114", wins_server1="115.115.115.115",
ip_range1_1="131.1.1.5", ip_range1_2="131.1.1.20")
time.sleep(1)
loginfo1 = get_log_info(browser, 管理日志)
# print(loginfo1)
dhcp_server_edit_or_delete(browser, fuction="edit", dhcp_type="server",
ip_range1_1="131.1.1.6", ip_range1_2="131.1.1.15")
time.sleep(1)
loginfo2 = get_log_info(browser, 管理日志)
browser.switch_to.default_content()
# print(loginfo2)
time.sleep(1)
dhcp_server_edit_or_delete(browser, fuction="delete")
loginfo3 = get_log_info(browser, 管理日志)
# print(loginfo3)
time.sleep(1)
a = Shell_SSH()
a.connect(dev1)
a.execute("en")
a.execute("conf t")
a.execute("interface gigabitethernet "+interface_name_3)
a.execute("no ip address 131.1.1.1")
a.execute("exit")
try:
assert "启动DHCP成功" in loginfo1
assert "设置DHCP成功" in loginfo2
assert "删除DHCP成功" in loginfo3
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "启动DHCP成功" in loginfo1
assert "设置DHCP成功" in loginfo2
assert "删除DHCP成功" in loginfo3
except Exception as err:
        # If any of the steps above failed, reload the device to restore its configuration
print(err)
rail_fail(test_run_id, test_id)
reload(hostip=dev1)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"15501866985@163.com"
] | 15501866985@163.com |
d89fb20582b7a2b2efe09169d26ca73fe3e9f35d | 31a0b0749c30ff37c3a72592387f9d8195de4bd6 | /python/ray/_private/runtime_env/_clonevirtualenv.py | d50d2d3c98989f2653fb5b966584708fb5ad1eae | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | longshotsyndicate/ray | 15100bad514b602a3fa39bfe205288e7bec75d90 | 3341fae573868338b665bcea8a1c4ee86b702751 | refs/heads/master | 2023-01-28T15:16:00.401509 | 2022-02-18T05:35:47 | 2022-02-18T05:35:47 | 163,961,795 | 1 | 1 | Apache-2.0 | 2023-01-14T08:01:02 | 2019-01-03T11:03:35 | Python | UTF-8 | Python | false | false | 10,825 | py | #!/usr/bin/env python
from __future__ import with_statement
import logging
import optparse
import os
import os.path
import re
import shutil
import subprocess
import sys
import itertools
__version__ = "0.5.7"
logger = logging.getLogger()
env_bin_dir = "bin"
if sys.platform == "win32":
env_bin_dir = "Scripts"
class UserError(Exception):
pass
def _dirmatch(path, matchwith):
"""Check if path is within matchwith's tree.
>>> _dirmatch('/home/foo/bar', '/home/foo/bar')
True
>>> _dirmatch('/home/foo/bar/', '/home/foo/bar')
True
>>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar')
True
>>> _dirmatch('/home/foo/bar2', '/home/foo/bar')
False
>>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar')
False
"""
matchlen = len(matchwith)
if path.startswith(matchwith) and path[matchlen : matchlen + 1] in [os.sep, ""]:
return True
return False
def _virtualenv_sys(venv_path):
"obtain version and path info from a virtualenv."
executable = os.path.join(venv_path, env_bin_dir, "python")
# Must use "executable" as the first argument rather than as the
# keyword argument "executable" to get correct value from sys.path
p = subprocess.Popen(
[
executable,
"-c",
"import sys;"
'print ("%d.%d" % (sys.version_info.major, sys.version_info.minor));'
'print ("\\n".join(sys.path));',
],
env={},
stdout=subprocess.PIPE,
)
stdout, err = p.communicate()
assert not p.returncode and stdout
lines = stdout.decode("utf-8").splitlines()
return lines[0], list(filter(bool, lines[1:]))
def clone_virtualenv(src_dir, dst_dir):
if not os.path.exists(src_dir):
raise UserError("src dir %r does not exist" % src_dir)
if os.path.exists(dst_dir):
raise UserError("dest dir %r exists" % dst_dir)
# sys_path = _virtualenv_syspath(src_dir)
logger.info("cloning virtualenv '%s' => '%s'..." % (src_dir, dst_dir))
shutil.copytree(
src_dir, dst_dir, symlinks=True, ignore=shutil.ignore_patterns("*.pyc")
)
version, sys_path = _virtualenv_sys(dst_dir)
logger.info("fixing scripts in bin...")
fixup_scripts(src_dir, dst_dir, version)
has_old = lambda s: any(i for i in s if _dirmatch(i, src_dir))
if has_old(sys_path):
# only need to fix stuff in sys.path if we have old
# paths in the sys.path of new python env. right?
logger.info("fixing paths in sys.path...")
fixup_syspath_items(sys_path, src_dir, dst_dir)
v_sys = _virtualenv_sys(dst_dir)
remaining = has_old(v_sys[1])
assert not remaining, v_sys
fix_symlink_if_necessary(src_dir, dst_dir)
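# A minimal programmatic-usage sketch (paths here are hypothetical):
#   clone_virtualenv("/tmp/base_env", "/tmp/cloned_env")
# copies the tree, rewrites shebangs and activate scripts for the new prefix,
# patches stale sys.path entries (.pth / .egg-link files), and finally fixes
# internal symlinks that still point back at the source environment.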
def fix_symlink_if_necessary(src_dir, dst_dir):
# sometimes the source virtual environment has symlinks that point to itself
# one example is $OLD_VIRTUAL_ENV/local/lib points to $OLD_VIRTUAL_ENV/lib
# this function makes sure
# $NEW_VIRTUAL_ENV/local/lib will point to $NEW_VIRTUAL_ENV/lib
# usually this goes unnoticed unless one tries to upgrade a package though pip, so this bug is hard to find.
logger.info("scanning for internal symlinks that point to the original virtual env")
for dirpath, dirnames, filenames in os.walk(dst_dir):
for a_file in itertools.chain(filenames, dirnames):
full_file_path = os.path.join(dirpath, a_file)
if os.path.islink(full_file_path):
target = os.path.realpath(full_file_path)
if target.startswith(src_dir):
new_target = target.replace(src_dir, dst_dir)
logger.debug("fixing symlink in %s" % (full_file_path,))
os.remove(full_file_path)
os.symlink(new_target, full_file_path)
def fixup_scripts(old_dir, new_dir, version, rewrite_env_python=False):
bin_dir = os.path.join(new_dir, env_bin_dir)
root, dirs, files = next(os.walk(bin_dir))
pybinre = re.compile(r"pythonw?([0-9]+(\.[0-9]+(\.[0-9]+)?)?)?$")
for file_ in files:
filename = os.path.join(root, file_)
if file_ in ["python", "python%s" % version, "activate_this.py"]:
continue
elif file_.startswith("python") and pybinre.match(file_):
# ignore other possible python binaries
continue
elif file_.endswith(".pyc"):
# ignore compiled files
continue
elif file_ == "activate" or file_.startswith("activate."):
fixup_activate(os.path.join(root, file_), old_dir, new_dir)
elif os.path.islink(filename):
fixup_link(filename, old_dir, new_dir)
elif os.path.isfile(filename):
fixup_script_(
root,
file_,
old_dir,
new_dir,
version,
rewrite_env_python=rewrite_env_python,
)
def fixup_script_(root, file_, old_dir, new_dir, version, rewrite_env_python=False):
old_shebang = "#!%s/bin/python" % os.path.normcase(os.path.abspath(old_dir))
new_shebang = "#!%s/bin/python" % os.path.normcase(os.path.abspath(new_dir))
env_shebang = "#!/usr/bin/env python"
filename = os.path.join(root, file_)
with open(filename, "rb") as f:
if f.read(2) != b"#!":
# no shebang
return
f.seek(0)
lines = f.readlines()
if not lines:
# warn: empty script
return
def rewrite_shebang(version=None):
logger.debug("fixing %s" % filename)
shebang = new_shebang
if version:
shebang = shebang + version
shebang = (shebang + "\n").encode("utf-8")
with open(filename, "wb") as f:
f.write(shebang)
f.writelines(lines[1:])
try:
bang = lines[0].decode("utf-8").strip()
except UnicodeDecodeError:
# binary file
return
# This takes care of the scheme in which shebang is of type
# '#!/venv/bin/python3' while the version of system python
# is of type 3.x e.g. 3.5.
short_version = bang[len(old_shebang) :]
if not bang.startswith("#!"):
return
elif bang == old_shebang:
rewrite_shebang()
elif bang.startswith(old_shebang) and bang[len(old_shebang) :] == version:
rewrite_shebang(version)
elif (
bang.startswith(old_shebang)
and short_version
and bang[len(old_shebang) :] == short_version
):
rewrite_shebang(short_version)
elif rewrite_env_python and bang.startswith(env_shebang):
if bang == env_shebang:
rewrite_shebang()
elif bang[len(env_shebang) :] == version:
rewrite_shebang(version)
else:
# can't do anything
return
def fixup_activate(filename, old_dir, new_dir):
logger.debug("fixing %s" % filename)
with open(filename, "rb") as f:
data = f.read().decode("utf-8")
data = data.replace(old_dir, new_dir)
with open(filename, "wb") as f:
f.write(data.encode("utf-8"))
def fixup_link(filename, old_dir, new_dir, target=None):
logger.debug("fixing %s" % filename)
if target is None:
target = os.readlink(filename)
origdir = os.path.dirname(os.path.abspath(filename)).replace(new_dir, old_dir)
if not os.path.isabs(target):
target = os.path.abspath(os.path.join(origdir, target))
rellink = True
else:
rellink = False
if _dirmatch(target, old_dir):
if rellink:
# keep relative links, but don't keep original in case it
# traversed up out of, then back into the venv.
# so, recreate a relative link from absolute.
target = target[len(origdir) :].lstrip(os.sep)
else:
target = target.replace(old_dir, new_dir, 1)
# else: links outside the venv, replaced with absolute path to target.
_replace_symlink(filename, target)
def _replace_symlink(filename, newtarget):
tmpfn = "%s.new" % filename
os.symlink(newtarget, tmpfn)
os.rename(tmpfn, filename)
def fixup_syspath_items(syspath, old_dir, new_dir):
for path in syspath:
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if _dirmatch(path, old_dir):
path = path.replace(old_dir, new_dir, 1)
if not os.path.exists(path):
continue
elif not _dirmatch(path, new_dir):
continue
root, dirs, files = next(os.walk(path))
for file_ in files:
filename = os.path.join(root, file_)
if filename.endswith(".pth"):
fixup_pth_file(filename, old_dir, new_dir)
elif filename.endswith(".egg-link"):
fixup_egglink_file(filename, old_dir, new_dir)
def fixup_pth_file(filename, old_dir, new_dir):
logger.debug("fixup_pth_file %s" % filename)
with open(filename, "r") as f:
lines = f.readlines()
has_change = False
for num, line in enumerate(lines):
line = (line.decode("utf-8") if hasattr(line, "decode") else line).strip()
if not line or line.startswith("#") or line.startswith("import "):
continue
elif _dirmatch(line, old_dir):
lines[num] = line.replace(old_dir, new_dir, 1)
has_change = True
if has_change:
with open(filename, "w") as f:
payload = os.linesep.join([l.strip() for l in lines]) + os.linesep
f.write(payload)
def fixup_egglink_file(filename, old_dir, new_dir):
logger.debug("fixing %s" % filename)
with open(filename, "rb") as f:
link = f.read().decode("utf-8").strip()
if _dirmatch(link, old_dir):
link = link.replace(old_dir, new_dir, 1)
with open(filename, "wb") as f:
link = (link + "\n").encode("utf-8")
f.write(link)
def main():
parser = optparse.OptionParser(
"usage: %prog [options]" " /path/to/existing/venv /path/to/cloned/venv"
)
parser.add_option(
"-v", action="count", dest="verbose", default=False, help="verbosity"
)
options, args = parser.parse_args()
try:
old_dir, new_dir = args
except ValueError:
print("virtualenv-clone %s" % (__version__,))
parser.error("not enough arguments given.")
old_dir = os.path.realpath(old_dir)
new_dir = os.path.realpath(new_dir)
loglevel = (logging.WARNING, logging.INFO, logging.DEBUG)[min(2, options.verbose)]
logging.basicConfig(level=loglevel, format="%(message)s")
try:
clone_virtualenv(old_dir, new_dir)
except UserError:
e = sys.exc_info()[1]
parser.error(str(e))
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | longshotsyndicate.noreply@github.com |
9d6bbb76470fb5d0e262f7b536a47205de61ce1e | e1abd868bfad11bf93c50eee1dc9976674de2358 | /setup.py | af6a5de4baca477cae84d74eba1926d6aac1f4f6 | [] | no_license | markmuetz/scaffold_analysis | 5c7e9d04b24abe3462c8946381f4cab264bf09e0 | c02d32536c801b23ac8a71e36d25fa922e7cfd94 | refs/heads/master | 2022-06-03T16:13:54.775718 | 2022-05-31T13:22:24 | 2022-05-31T13:22:24 | 92,677,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | #!/usr/bin/env python
import os
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
from scaffold.version import get_version
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='scaffold',
version=get_version(),
description='High-resolution analysis',
long_description=read('readme.rst'),
author='Mark Muetzelfeldt',
author_email='m.muetzelfeldt@pgr.reading.ac.uk',
maintainer='Mark Muetzelfeldt',
maintainer_email='m.muetzelfeldt@pgr.reading.ac.uk',
packages=[
'scaffold',
'scaffold.cycle',
'scaffold.expt',
'scaffold.suite',
'scaffold.tests'
],
scripts=[ ],
python_requires='>=3.6',
install_requires=[
'omnium>=0.10.2',
'cloud_tracking',
'f90nml',
# 'iris',
'matplotlib',
'numpy',
'scipy',
],
package_data={ },
url='https://github.com/markmuetz/scaffold_analysis',
classifiers=[
'Environment :: Console',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: C',
'Topic :: Scientific/Engineering :: Atmospheric Science',
],
keywords=[''],
)
| [
"markmuetz@gmail.com"
] | markmuetz@gmail.com |
c631ce14fa9545f28ff4141ddc75270876ae683c | da459298c4bdbb745f4ed80ce1c9da15dd8fbb34 | /demisto_sdk/commands/format/update_genericmodule.py | 6dae063b2baf56525d321cab77a0cde0727d3fff | [
"MIT"
] | permissive | demisto/demisto-sdk | af998a87523d03097f725ed8f31f6a44f4605ef2 | 3169757a2f98c8457e46572bf656ec6b69cc3a2e | refs/heads/master | 2023-08-22T03:44:31.654275 | 2023-08-21T14:45:22 | 2023-08-21T14:45:22 | 219,291,269 | 63 | 75 | MIT | 2023-09-14T14:41:12 | 2019-11-03T11:36:13 | Python | UTF-8 | Python | false | false | 2,162 | py | from typing import Tuple
from demisto_sdk.commands.common.constants import (
FILETYPE_TO_DEFAULT_FROMVERSION,
FileType,
)
from demisto_sdk.commands.common.logger import logger
from demisto_sdk.commands.format.format_constants import (
ERROR_RETURN_CODE,
SKIP_RETURN_CODE,
SUCCESS_RETURN_CODE,
)
from demisto_sdk.commands.format.update_generic_json import BaseUpdateJSON
class GenericModuleJSONFormat(BaseUpdateJSON):
"""GenericModuleJSONFormat class is designed to update generic module JSON file according to Demisto's convention.
Attributes:
input (str): the path to the file we are updating at the moment.
output (str): the desired file name to save the updated version of the JSON to.
"""
def __init__(
self,
input: str = "",
output: str = "",
path: str = "",
from_version: str = "",
no_validate: bool = False,
**kwargs,
):
super().__init__(
input=input,
output=output,
path=path,
from_version=from_version,
no_validate=no_validate,
**kwargs,
)
def run_format(self) -> int:
try:
logger.info(
f"\n[blue]================= Updating file {self.source_file} =================[/blue]"
)
super().update_json(
default_from_version=FILETYPE_TO_DEFAULT_FROMVERSION.get(
FileType.GENERIC_MODULE
)
)
self.set_default_values_as_needed()
self.save_json_to_destination_file()
return SUCCESS_RETURN_CODE
except Exception as err:
logger.debug(
f"\n[red]Failed to update file {self.source_file}. Error: {err}[/red]"
)
return ERROR_RETURN_CODE
def format_file(self) -> Tuple[int, int]:
"""Manager function for the generic module JSON updater."""
format_res = self.run_format()
if format_res:
return format_res, SKIP_RETURN_CODE
else:
return format_res, self.initiate_file_validator()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
70b47bc20fa0747f8ebdcddca39cee6d773a59b7 | b9b967c8154ffb3c3622c4b46065132a33e785f6 | /server/migrations/versions/25bdca95116e_backfill_usernames_history.py | 9d47ccf0e197217c90187c58165fcf84ba31396d | [
"Apache-2.0"
] | permissive | SURFscz/SBS | 5917561656caec042e5a6c966aeb54b82e96f51d | b159eeb7a5b8246aebd9849b4b3b61b9af1a8514 | refs/heads/main | 2023-08-31T12:42:52.473898 | 2023-08-31T11:58:51 | 2023-08-31T11:58:51 | 162,148,147 | 4 | 1 | Apache-2.0 | 2023-09-12T12:07:41 | 2018-12-17T15:05:54 | JavaScript | UTF-8 | Python | false | false | 736 | py | """Backfill usernames history
Revision ID: 25bdca95116e
Revises: 3cda08121a2f
Create Date: 2021-04-07 08:04:36.467191
"""
from alembic import op
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision = '25bdca95116e'
down_revision = '3cda08121a2f'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(text("ALTER TABLE user_names_history ADD UNIQUE INDEX user_names_history_username(username)"))
result = conn.execute(text("SELECT username FROM `users` WHERE `username` IS NOT NULL"))
for row in result:
username = row[0]
conn.execute(text(f"INSERT INTO `user_names_history` (`username`) VALUES ('{username}')"))
def downgrade():
pass
| [
"oharsta@zilverline.com"
] | oharsta@zilverline.com |
8f61f314bcc16cd58a2e225568d2e117e5a2418f | 40c796dc77d6b3ba85a8d5a48f047de1d3e0e4fb | /scripts/extrapolation_uncertainty.py | 457527bcaeeb1e59966de81725637d91a7af8b96 | [] | no_license | zhangzc11/WVZLooper | d55c24127a65a36bd4a0ac25a8c53c007b5a71a1 | e9d8c80ebb6bd5b54a00a39538c964588f4ce44b | refs/heads/master | 2020-05-20T16:28:18.838367 | 2019-10-14T20:02:11 | 2019-10-14T20:02:11 | 185,660,975 | 0 | 0 | null | 2019-05-08T18:39:19 | 2019-05-08T18:39:19 | null | UTF-8 | Python | false | false | 35,813 | py | #!/bin/env python
from plottery import plottery as ply
import plottery_wrapper as p
import ROOT as r
import glob
import sys
import read_table as rt
from errors import E
from array import array
import pyrootutil as pr
import math
Ntuple_Version = "v0.1.12.7"
Baseline_Version = "syst"
syst_list_all = ["Nominal", "ElLepSF", "MuLepSF", "JES", "Pileup", "BTagHF", "BTagLF", "MET", "PDF", "Qsq", "AlphaS", "METPileup"]
syst_list = ["Nominal", "JES", "JER", "Pileup", "MET", "METPileup"]
syst_list = syst_list_all
def get_alpha_uncertainty(ntuple_version, tag, numerator, denominator, num_proc, valopt):
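    # Computes, for the chosen process ("zz" or "ttz"), the simulated cut
    # efficiency of the `numerator` region relative to the `denominator`
    # region; `valopt` selects the returned quantity ("eff", "num" or "den").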
if "2016" in ntuple_version: lumi = 35.9
if "2017" in ntuple_version: lumi = 41.3
if "2018" in ntuple_version: lumi = 59.74
plots_basedir = "plots/{}/{}/exp/".format(ntuple_version, tag)
fname_sig = "outputs/{}/{}/sig.root".format(ntuple_version, tag)
# fname_sig = "outputs/{}/{}/wwz.root".format(ntuple_version, tag)
fname_ttz = "outputs/{}/{}/ttz.root".format(ntuple_version, tag)
fname_zz = "outputs/{}/{}/zz.root".format(ntuple_version, tag)
fname_wz = "outputs/{}/{}/wz.root".format(ntuple_version, tag)
fname_twz = "outputs/{}/{}/twz.root".format(ntuple_version, tag)
fname_rare = "outputs/{}/{}/rare.root".format(ntuple_version, tag)
# fname_rare = "outputs/{}/{}/rarevvv.root".format(ntuple_version, tag)
fname_dyttbar = "outputs/{}/{}/dyttbar.root".format(ntuple_version, tag)
fname_higgs = "outputs/{}/{}/higgs.root".format(ntuple_version, tag)
fname_data = "outputs/{}/{}/data.root".format(ntuple_version, tag)
year = "2" + ntuple_version.split("_")[0].split("2")[1]
prefix = "{}/{}".format(ntuple_version, tag)
procs = ["data_obs", "sig", "ttz", "zz", "wz", "twz", "rare", "dyttbar", "higgs"]
mcprocs = procs[1:]
bkgprocs = procs[2:]
fnames = [fname_data, fname_sig, fname_ttz, fname_zz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs]
nonzzbkg = [fname_sig, fname_ttz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs]
nonttzbkg = [fname_sig, fname_zz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs]
if num_proc == "zz":
h_denom_nonzzbkg = pr.get_summed_histogram(nonzzbkg, denominator)
E_denom_nonzzbkg = pr.get_integral_as_E(h_denom_nonzzbkg)
h_denom_data = pr.get_summed_histogram([fname_data], denominator)
E_denom_data = pr.get_integral_as_E(h_denom_data)
h_denom_zz = pr.get_summed_histogram([fname_zz], denominator)
E_denom_zz = pr.get_integral_as_E(h_denom_zz)
# print (E_denom_data - E_denom_nonzzbkg)
# print E_denom_zz
h_numer_nonzzbkg = pr.get_summed_histogram(nonzzbkg, numerator)
E_numer_nonzzbkg = pr.get_integral_as_E(h_numer_nonzzbkg)
h_numer_data = pr.get_summed_histogram([fname_data], numerator)
E_numer_data = pr.get_integral_as_E(h_numer_data)
h_numer_zz = pr.get_summed_histogram([fname_zz], numerator)
E_numer_zz = pr.get_integral_as_E(h_numer_zz)
# print (E_numer_data - E_numer_nonzzbkg)
# print E_numer_zz
data_eff = (E_numer_data - E_numer_nonzzbkg) / (E_denom_data - E_denom_nonzzbkg)
mc_eff = E_numer_zz / E_denom_zz
eff_ratio = data_eff / mc_eff
# print E_numer_data, E_numer_zz
# print "mc_eff:", mc_eff
# print "data_eff:", data_eff
if valopt == "eff":
return mc_eff
elif valopt == "den":
return E_denom_zz
elif valopt == "num":
return E_numer_zz
else:
h_denom_nonttzbkg = pr.get_summed_histogram(nonttzbkg, denominator)
E_denom_nonttzbkg = pr.get_integral_as_E(h_denom_nonttzbkg)
h_denom_data = pr.get_summed_histogram([fname_data], denominator)
E_denom_data = pr.get_integral_as_E(h_denom_data)
h_denom_ttz = pr.get_summed_histogram([fname_ttz], denominator)
E_denom_ttz = pr.get_integral_as_E(h_denom_ttz)
# print (E_denom_data - E_denom_nonttzbkg)
# print E_denom_ttz
h_numer_nonttzbkg = pr.get_summed_histogram(nonttzbkg, numerator)
E_numer_nonttzbkg = pr.get_integral_as_E(h_numer_nonttzbkg)
h_numer_data = pr.get_summed_histogram([fname_data], numerator)
E_numer_data = pr.get_integral_as_E(h_numer_data)
h_numer_ttz = pr.get_summed_histogram([fname_ttz], numerator)
E_numer_ttz = pr.get_integral_as_E(h_numer_ttz)
# print (E_numer_data - E_numer_nonttzbkg)
# print E_numer_ttz
data_eff = (E_numer_data - E_numer_nonttzbkg) / (E_denom_data - E_denom_nonttzbkg)
mc_eff = E_numer_ttz / E_denom_ttz
eff_ratio = data_eff / mc_eff
# print "mc_eff:", mc_eff
# print "data_eff:", data_eff
if valopt == "eff":
return mc_eff
elif valopt == "den":
return E_denom_ttz
elif valopt == "num":
return E_numer_ttz
def get_extrapolation_uncertainty(ntuple_version, tag, numerator, denominator, num_proc, valopt):
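    # Same inputs as get_alpha_uncertainty, but `valopt` can additionally
    # return the data-driven quantities ("data", "data_num", "data_den") and
    # the data/MC efficiency ratio ("ratio"); the data efficiency is computed
    # after subtracting all non-target backgrounds from the observed yields.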
if "2016" in ntuple_version: lumi = 35.9
if "2017" in ntuple_version: lumi = 41.3
if "2018" in ntuple_version: lumi = 59.74
plots_basedir = "plots/{}/{}/exp/".format(ntuple_version, tag)
fname_sig = "outputs/{}/{}/sig.root".format(ntuple_version, tag)
# fname_sig = "outputs/{}/{}/wwz.root".format(ntuple_version, tag)
fname_ttz = "outputs/{}/{}/ttz.root".format(ntuple_version, tag)
fname_zz = "outputs/{}/{}/zz.root".format(ntuple_version, tag)
fname_wz = "outputs/{}/{}/wz.root".format(ntuple_version, tag)
fname_twz = "outputs/{}/{}/twz.root".format(ntuple_version, tag)
fname_rare = "outputs/{}/{}/rare.root".format(ntuple_version, tag)
# fname_rare = "outputs/{}/{}/rarevvv.root".format(ntuple_version, tag)
fname_dyttbar = "outputs/{}/{}/dyttbar.root".format(ntuple_version, tag)
fname_higgs = "outputs/{}/{}/higgs.root".format(ntuple_version, tag)
fname_data = "outputs/{}/{}/data.root".format(ntuple_version, tag)
year = "2" + ntuple_version.split("_")[0].split("2")[1]
prefix = "{}/{}".format(ntuple_version, tag)
procs = ["data_obs", "sig", "ttz", "zz", "wz", "twz", "rare", "dyttbar", "higgs"]
mcprocs = procs[1:]
bkgprocs = procs[2:]
fnames = [fname_data, fname_sig, fname_ttz, fname_zz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs]
nonzzbkg = [fname_sig, fname_ttz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs]
nonttzbkg = [fname_sig, fname_zz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs]
if num_proc == "zz":
h_denom_nonzzbkg = pr.get_summed_histogram(nonzzbkg, denominator)
E_denom_nonzzbkg = pr.get_integral_as_E(h_denom_nonzzbkg)
h_denom_data = pr.get_summed_histogram([fname_data], denominator)
E_denom_data = pr.get_integral_as_E(h_denom_data)
h_denom_zz = pr.get_summed_histogram([fname_zz], denominator)
E_denom_zz = pr.get_integral_as_E(h_denom_zz)
# print (E_denom_data - E_denom_nonzzbkg)
# print E_denom_zz
h_numer_nonzzbkg = pr.get_summed_histogram(nonzzbkg, numerator)
E_numer_nonzzbkg = pr.get_integral_as_E(h_numer_nonzzbkg)
h_numer_data = pr.get_summed_histogram([fname_data], numerator)
E_numer_data = pr.get_integral_as_E(h_numer_data)
h_numer_zz = pr.get_summed_histogram([fname_zz], numerator)
E_numer_zz = pr.get_integral_as_E(h_numer_zz)
# print (E_numer_data - E_numer_nonzzbkg)
# print E_numer_zz
data_eff = (E_numer_data - E_numer_nonzzbkg) / (E_denom_data - E_denom_nonzzbkg)
mc_eff = E_numer_zz / E_denom_zz
eff_ratio = data_eff / mc_eff
# print E_numer_data, E_numer_zz
# print "data_eff:", data_eff
if valopt == "ratio":
return eff_ratio
elif valopt == "mc":
return mc_eff
elif valopt == "data":
return data_eff
elif valopt == "mc_num":
return E_numer_zz
elif valopt == "data_num":
return (E_numer_data - E_numer_nonzzbkg)
elif valopt == "mc_den":
return E_denom_zz
elif valopt == "data_den":
return (E_denom_data - E_denom_nonzzbkg)
else:
h_denom_nonttzbkg = pr.get_summed_histogram(nonttzbkg, denominator)
E_denom_nonttzbkg = pr.get_integral_as_E(h_denom_nonttzbkg)
h_denom_data = pr.get_summed_histogram([fname_data], denominator)
E_denom_data = pr.get_integral_as_E(h_denom_data)
h_denom_ttz = pr.get_summed_histogram([fname_ttz], denominator)
E_denom_ttz = pr.get_integral_as_E(h_denom_ttz)
# print (E_denom_data - E_denom_nonttzbkg)
# print E_denom_ttz
h_numer_nonttzbkg = pr.get_summed_histogram(nonttzbkg, numerator)
E_numer_nonttzbkg = pr.get_integral_as_E(h_numer_nonttzbkg)
h_numer_data = pr.get_summed_histogram([fname_data], numerator)
E_numer_data = pr.get_integral_as_E(h_numer_data)
h_numer_ttz = pr.get_summed_histogram([fname_ttz], numerator)
E_numer_ttz = pr.get_integral_as_E(h_numer_ttz)
# print (E_numer_data - E_numer_nonttzbkg)
# print E_numer_ttz
data_eff = (E_numer_data - E_numer_nonttzbkg) / (E_denom_data - E_denom_nonttzbkg)
mc_eff = E_numer_ttz / E_denom_ttz
eff_ratio = data_eff / mc_eff
# print "mc_eff:", mc_eff
# print "data_eff:", data_eff
if valopt == "ratio":
return eff_ratio
elif valopt == "mc":
return mc_eff
elif valopt == "data":
return data_eff
elif valopt == "mc_num":
return E_numer_ttz
elif valopt == "data_num":
return (E_numer_data - E_numer_nonttzbkg)
elif valopt == "mc_den":
return E_denom_ttz
elif valopt == "data_den":
return (E_denom_data - E_denom_nonttzbkg)
def run_for_variation(variation=""):
ntuple_version = "WVZ2016_{}_WVZ2017_{}_WVZ2018_{}".format(Ntuple_Version, Ntuple_Version, Ntuple_Version)
tag = "y2016_{}_y2017_{}_y2018_{}".format(Baseline_Version, Baseline_Version, Baseline_Version)
denominator = "ChannelBTagEMu{}__Yield".format(variation)
numerator = "ChannelBTagEMuHighMET{}__Yield".format(variation)
# print "Cut denominator:", denominator
print "Cut numerator:", numerator
print get_extrapolation_uncertainty(ntuple_version, tag, numerator, denominator, "ttz")
print ""
print ""
denominator = "ChannelBTagEMu{}__Yield".format(variation)
numerator = "ChannelBTagEMuHighMT{}__Yield".format(variation)
# print "Cut denominator:", denominator
print "Cut numerator:", numerator
print get_extrapolation_uncertainty(ntuple_version, tag, numerator, denominator, "ttz")
print ""
print ""
denominator = "ChannelOnZ{}__Yield".format(variation)
numerator = "ChannelOnZHighMET{}__Yield".format(variation)
# print "Cut denominator:", denominator
print "Cut numerator:", numerator
print get_extrapolation_uncertainty(ntuple_version, tag, numerator, denominator, "zz")
print ""
print ""
denominator = "ChannelOnZ{}__Yield".format(variation)
numerator = "ChannelOnZHighMT{}__Yield".format(variation)
# print "Cut denominator:", denominator
print "Cut numerator:", numerator
print get_extrapolation_uncertainty(ntuple_version, tag, numerator, denominator, "zz")
print ""
print ""
def run(process, region, variable, variation="", valopt="ratio"):
ntuple_version = "WVZ2016_{}_WVZ2017_{}_WVZ2018_{}".format(Ntuple_Version,Ntuple_Version,Ntuple_Version)
tag = "y2016_{}_y2017_{}_y2018_{}".format(Baseline_Version, Baseline_Version, Baseline_Version)
denominator = "Channel{}{}__Yield".format(region, variation)
numerator = "Channel{}High{}{}__Yield".format(region, variable, variation)
# print denominator, numerator
return get_extrapolation_uncertainty(ntuple_version, tag, numerator, denominator, process, valopt)
def run_alpha(process, numerator_region, denominator_region, variation="", valopt="eff"):
ntuple_version = "WVZ2016_{}_WVZ2017_{}_WVZ2018_{}".format(Ntuple_Version, Ntuple_Version, Ntuple_Version)
tag = "y2016_{}_y2017_{}_y2018_{}".format(Baseline_Version, Baseline_Version, Baseline_Version)
denominator = "{}{}__Yield".format(denominator_region, variation)
numerator = "{}{}__Yield".format(numerator_region, variation)
# print denominator, numerator
return get_alpha_uncertainty(ntuple_version, tag, numerator, denominator, process, valopt)
def get_eff_ratios(process, region, variable, valopt="ratio"):
systs = syst_list[1:]
nominal = run(process, region, variable, "", valopt)
rtn_val = {}
rtn_val["Nominal"] = nominal
for syst in systs:
var = E(nominal.val, 0)
varup = run(process, region, variable, syst+"Up", valopt)
vardn = run(process, region, variable, syst+"Down", valopt)
err = math.sqrt(abs(((varup - var) * (vardn - var)).val))
var.err = err
rtn_val[syst] = var
# print syst, varup, vardn, nominal
# Not entirely a correct treatment... but a work around
pufracerr = rtn_val["Pileup"].err / rtn_val["Pileup"].val
metpufracerr = rtn_val["METPileup"].err / rtn_val["METPileup"].val
rtn_val["Pileup"] = E(rtn_val["Pileup"].val, rtn_val["Pileup"].val * math.sqrt(pufracerr**2 + metpufracerr**2))
del rtn_val["METPileup"]
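    # The combined fractional uncertainty above is the quadrature sum
    #   sigma/val = sqrt((sigma_PU/val_PU)**2 + (sigma_METPU/val_METPU)**2),
    # i.e. the two pileup variations are treated as independent.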
# for key in syst_list:
# if key == "METPileup": continue
# print "{:<10s} {:.4f} {:.4f} {:.4f}".format(key, rtn_val[key].val, rtn_val[key].err, rtn_val[key].err / rtn_val[key].val)
hists = []
for index, key in enumerate(syst_list):
if key == "METPileup": continue
h = r.TH1F("{}".format(key), "", 1, 0, 1)
h.SetBinContent(1, rtn_val[key].val)
h.SetBinError(1, rtn_val[key].err)
hists.append(h)
return hists
def get_alpha(process, numerator_region, denominator_region, valopt="eff"):
systs = syst_list_all[1:]
nominal = run_alpha(process, numerator_region, denominator_region, "", valopt)
rtn_val = {}
rtn_val["Nominal"] = nominal
for syst in systs:
var = E(nominal.val, 0)
varup = run_alpha(process, numerator_region, denominator_region, syst+"Up", valopt)
vardn = run_alpha(process, numerator_region, denominator_region, syst+"Down", valopt)
err = math.sqrt(abs(((varup - var) * (vardn - var)).val))
var.err = err
rtn_val[syst] = var
# print syst, varup, vardn, nominal
# Not entirely a correct treatment... but a work around
pufracerr = rtn_val["Pileup"].err / rtn_val["Pileup"].val
metpufracerr = rtn_val["METPileup"].err / rtn_val["METPileup"].val
rtn_val["Pileup"] = E(rtn_val["Pileup"].val, rtn_val["Pileup"].val * math.sqrt(pufracerr**2 + metpufracerr**2))
del rtn_val["METPileup"]
# for key in syst_list_all:
# if key == "METPileup": continue
# print "{:<10s} {:.3f} {:.3f} {:.3f}".format(key, rtn_val[key].val, rtn_val[key].err, rtn_val[key].err / rtn_val[key].val)
hists = []
for index, key in enumerate(syst_list_all):
if key == "METPileup": continue
h = r.TH1F("{}".format(key), "", 1, 0, 1)
h.SetBinContent(1, rtn_val[key].val)
h.SetBinError(1, rtn_val[key].err)
hists.append(h)
return hists
def main_onz_ttz_only():
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "mc") , options={"output_name":"exp/mc_eff_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "data") , options={"output_name":"exp/data_eff_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "ratio") , options={"output_name":"exp/eff_ratio_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "mc_num") , options={"output_name":"exp/eff_mc_num_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "mc_den") , options={"output_name":"exp/eff_mc_den_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "data_num") , options={"output_name":"exp/eff_data_num_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "data_den") , options={"output_name":"exp/eff_data_den_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "mc") , options={"output_name":"exp/mc_eff_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "data") , options={"output_name":"exp/data_eff_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "ratio") , options={"output_name":"exp/eff_ratio_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "mc_num") , options={"output_name":"exp/eff_mc_num_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "mc_den") , options={"output_name":"exp/eff_mc_den_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "data_num") , options={"output_name":"exp/eff_data_num_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "data_den") , options={"output_name":"exp/eff_data_den_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "OffZ" , "MET" , "mc") , options={"output_name":"exp/mc_eff_ttz_sr_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "OffZ" , "MET" , "data") , options={"output_name":"exp/data_eff_ttz_sr_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "OffZ" , "MET" , "ratio") , options={"output_name":"exp/eff_ratio_ttz_sr_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "OffZ" , "MET" , "mc_num") , options={"output_name":"exp/eff_mc_num_ttz_sr_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "OffZ" , "MET" , "mc_den") , options={"output_name":"exp/eff_mc_den_ttz_sr_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "OffZ" , "MET" , "data_num") , options={"output_name":"exp/eff_data_num_ttz_sr_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "OffZ" , "MET" , "data_den") , options={"output_name":"exp/eff_data_den_ttz_sr_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "EMu" , "MT" , "mc") , options={"output_name":"exp/mc_eff_ttz_sr_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "EMu" , "MT" , "data") , options={"output_name":"exp/data_eff_ttz_sr_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "EMu" , "MT" , "ratio") , options={"output_name":"exp/eff_ratio_ttz_sr_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "EMu" , "MT" , "mc_num") , options={"output_name":"exp/eff_mc_num_ttz_sr_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "EMu" , "MT" , "mc_den") , options={"output_name":"exp/eff_mc_den_ttz_sr_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "EMu" , "MT" , "data_num") , options={"output_name":"exp/eff_data_num_ttz_sr_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "EMu" , "MT" , "data_den") , options={"output_name":"exp/eff_data_den_ttz_sr_mt.pdf" , "print_yield":True} )
def main_onz_zz_met_only():
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "mc") , options={"output_name":"exp/mc_eff_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "data") , options={"output_name":"exp/data_eff_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "ratio") , options={"output_name":"exp/eff_ratio_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "mc_num") , options={"output_name":"exp/eff_mc_num_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "mc_den") , options={"output_name":"exp/eff_mc_den_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "data_num") , options={"output_name":"exp/eff_data_num_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "data_den") , options={"output_name":"exp/eff_data_den_zz_met.pdf" , "print_yield":True} )
def main_old():
# Get TTZ MET Modeling Uncertainty
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "mc") , options={"output_name":"exp/mc_eff_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "data") , options={"output_name":"exp/data_eff_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MET" , "ratio") , options={"output_name":"exp/eff_ratio_ttz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "mc") , options={"output_name":"exp/mc_eff_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "data") , options={"output_name":"exp/data_eff_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("ttz" , "BTagEMu" , "MT" , "ratio") , options={"output_name":"exp/eff_ratio_ttz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "mc") , options={"output_name":"exp/mc_eff_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "data") , options={"output_name":"exp/data_eff_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "ratio") , options={"output_name":"exp/eff_ratio_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "mc_num") , options={"output_name":"exp/eff_mc_num_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "mc_den") , options={"output_name":"exp/eff_mc_den_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "data_num") , options={"output_name":"exp/eff_data_num_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MET" , "data_den") , options={"output_name":"exp/eff_data_den_zz_met.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MT" , "mc") , options={"output_name":"exp/mc_eff_zz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MT" , "data") , options={"output_name":"exp/data_eff_zz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_eff_ratios("zz" , "OnZ" , "MT" , "ratio") , options={"output_name":"exp/eff_ratio_zz_mt.pdf" , "print_yield":True} )
p.plot_hist(bgs=get_alpha("ttz", "ChannelEMu", "ChannelBTagEMu", "num"), options={"output_name":"exp/ttz_emu_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("ttz", "ChannelEMu", "ChannelBTagEMu", "den"), options={"output_name":"exp/ttz_emu_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("ttz", "ChannelEMu", "ChannelBTagEMu", "eff"), options={"output_name":"exp/ttz_emu_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("ttz", "ChannelOffZ", "ChannelBTagEMu", "num"), options={"output_name":"exp/ttz_offz_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("ttz", "ChannelOffZ", "ChannelBTagEMu", "den"), options={"output_name":"exp/ttz_offz_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("ttz", "ChannelOffZ", "ChannelBTagEMu", "eff"), options={"output_name":"exp/ttz_offz_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("zz", "ChannelEMu", "ChannelOnZ", "num"), options={"output_name":"exp/zz_emu_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("zz", "ChannelEMu", "ChannelOnZ", "den"), options={"output_name":"exp/zz_emu_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("zz", "ChannelEMu", "ChannelOnZ", "eff"), options={"output_name":"exp/zz_emu_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("zz", "ChannelOffZ", "ChannelOnZ", "num"), options={"output_name":"exp/zz_offz_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("zz", "ChannelOffZ", "ChannelOnZ", "den"), options={"output_name":"exp/zz_offz_alpha.pdf", "print_yield":True})
p.plot_hist(bgs=get_alpha("zz", "ChannelOffZ", "ChannelOnZ", "eff"), options={"output_name":"exp/zz_offz_alpha.pdf", "print_yield":True})
def get_alpha_hists(proc, num, den):
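    # Returns one 3-bin histogram per uncertainty source: bin 1 holds the
    # transfer factor (eff), bin 2 the numerator yield, bin 3 the denominator
    # yield. The nominal entry is split into "Ratio"/"Yield"/"Stat", every
    # systematic source stores its relative uncertainty in percent, and
    # "Total" is the quadrature sum over all sources.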
hists_num = get_alpha(proc, num, den, "num")
hists_den = get_alpha(proc, num, den, "den")
hists_eff = get_alpha(proc, num, den, "eff")
hists = []
totalerrors = [E(1,0), E(1,0), E(1,0)]
for hist_num, hist_den, hist_eff in zip(hists_num, hists_den, hists_eff):
syst = hist_num.GetName()
if syst == "Nominal":
h = r.TH1F("{}".format(hist_num.GetName()), "", 3, 0, 3)
h.SetBinContent(1, hist_eff.GetBinContent(1))
h.SetBinError (1, hist_eff.GetBinError (1))
h.SetBinContent(2, hist_num.GetBinContent(1))
h.SetBinError (2, hist_num.GetBinError (1))
h.SetBinContent(3, hist_den.GetBinContent(1))
h.SetBinError (3, hist_den.GetBinError (1))
h_ratio = h.Clone("Ratio")
h_ratio.SetBinContent(2,0)
h_ratio.SetBinContent(3,0)
h_ratio.SetBinError(2,0)
h_ratio.SetBinError(3,0)
h_yield = h.Clone("Yield")
h_yield.SetBinContent(1,0)
h_yield.SetBinError(1,0)
hists.append(h_ratio)
hists.append(h_yield)
h = r.TH1F("Stat", "", 3, 0, 3)
h.SetBinContent(1, hist_eff.GetBinError(1) / hist_eff.GetBinContent(1) * 100.)
h.SetBinContent(2, hist_num.GetBinError(1) / hist_num.GetBinContent(1) * 100.)
h.SetBinContent(3, hist_den.GetBinError(1) / hist_den.GetBinContent(1) * 100.)
hists.append(h)
totalerrors[0] *= E(1, hist_eff.GetBinError(1) / hist_eff.GetBinContent(1))
totalerrors[1] *= E(1, hist_num.GetBinError(1) / hist_num.GetBinContent(1))
totalerrors[2] *= E(1, hist_den.GetBinError(1) / hist_den.GetBinContent(1))
else:
h = r.TH1F("{}".format(hist_num.GetName()), "", 3, 0, 3)
h.SetBinContent(1, hist_eff.GetBinError(1) / hist_eff.GetBinContent(1) * 100.)
h.SetBinContent(2, hist_num.GetBinError(1) / hist_num.GetBinContent(1) * 100.)
h.SetBinContent(3, hist_den.GetBinError(1) / hist_den.GetBinContent(1) * 100.)
hists.append(h)
totalerrors[0] *= E(1, hist_eff.GetBinError(1) / hist_eff.GetBinContent(1))
totalerrors[1] *= E(1, hist_num.GetBinError(1) / hist_num.GetBinContent(1))
totalerrors[2] *= E(1, hist_den.GetBinError(1) / hist_den.GetBinContent(1))
h = r.TH1F("Total", "", 3, 0, 3)
h.SetBinContent(1, totalerrors[0].err * 100.)
h.SetBinContent(2, totalerrors[1].err * 100.)
h.SetBinContent(3, totalerrors[2].err * 100.)
hists.insert(2, h)
return hists
def main_old_v2():
# N btag extrapolation uncertainty from simulation
hists = get_alpha_hists("ttz", "ChannelEMu", "ChannelBTagEMu")
p.print_yield_table_from_list(hists, "exp/ttz_emu_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/ttz_emu_alpha.tex", prec=2, caption="Nb extrapolation", noerror=True)
# N btag and em to eemm extrapolation uncertainty from simulation
hists = get_alpha_hists("ttz", "ChannelOffZ", "ChannelBTagEMu")
p.print_yield_table_from_list(hists, "exp/ttz_offz_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/ttz_offz_alpha.tex", prec=2, caption="Nb plus emu eemm Extrapolation", noerror=True)
# MT extrapolation
hists = get_alpha_hists("ttz", "ChannelEMuHighMT", "ChannelEMu")
p.print_yield_table_from_list(hists, "exp/ttz_emu_mt_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/ttz_emu_mt_alpha.tex", prec=2, caption="emu MT extrapolation", noerror=True)
# MET extrapolation
hists = get_alpha_hists("ttz", "ChannelOffZHighMET", "ChannelOffZ")
p.print_yield_table_from_list(hists, "exp/ttz_eemm_met_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/ttz_eemm_met_alpha.tex", prec=2, caption="eemm MET extrapolation", noerror=True)
# Mll extrapolation
hists = get_alpha_hists("zz", "ChannelOffZ", "ChannelOnZ")
p.print_yield_table_from_list(hists, "exp/zz_eemm_mll_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/zz_eemm_mll_alpha.tex", prec=2, caption="eemm Mll extrapolation", noerror=True)
# MET extrapolation
hists = get_alpha_hists("zz", "ChannelOffZHighMET", "ChannelOffZ")
p.print_yield_table_from_list(hists, "exp/zz_eemm_met_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/zz_eemm_met_alpha.tex", prec=2, caption="eemm Mll extrapolation", noerror=True)
# MET extrapolation
hists = get_alpha_hists("zz", "ChannelEMu", "ChannelOnZ")
p.print_yield_table_from_list(hists, "exp/zz_emu_flav_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/zz_emu_flav_alpha.tex", prec=2, caption="emu flavor extrapolation", noerror=True)
# MT extrapolation
hists = get_alpha_hists("zz", "ChannelEMuHighMT", "ChannelEMu")
p.print_yield_table_from_list(hists, "exp/zz_emu_mt_alpha.txt", prec=2, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/zz_emu_mt_alpha.tex", prec=2, caption="emu mtor extrapolation", noerror=True)
def main():
# -- combined version where only one transfer factor is computed
# MET/Mll combined extrapolation
hists = get_alpha_hists("zz", "ChannelOffZHighMET", "ChannelOnZ")
p.print_yield_table_from_list(hists, "exp/zz_eemm_tf.txt", prec=4, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/zz_eemm_tf.tex", prec=4, caption="eemm zz transfer factor", noerror=True)
# flavor/Mll/MT combined extrapolation
hists = get_alpha_hists("zz", "ChannelEMuHighMT", "ChannelOnZ")
p.print_yield_table_from_list(hists, "exp/zz_emu_tf.txt", prec=4, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/zz_emu_tf.tex", prec=4, caption="emu zz transfer factor", noerror=True)
# nbjet/MT combined extrapolation
hists = get_alpha_hists("ttz", "ChannelEMuHighMT", "ChannelBTagEMu")
p.print_yield_table_from_list(hists, "exp/ttz_emu_tf.txt", prec=4, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/ttz_emu_tf.tex", prec=4, caption="emu ttz transfer factor", noerror=True)
# flavor/nbjet/MET combined extrapolation
hists = get_alpha_hists("ttz", "ChannelOffZHighMET", "ChannelBTagEMu")
p.print_yield_table_from_list(hists, "exp/ttz_eemm_tf.txt", prec=4, binrange=[1,2,3], noerror=True)
p.print_yield_tex_table_from_list(hists, "exp/ttz_eemm_tf.tex", prec=4, caption="eemm ttz transfer factor", noerror=True)
def main_add():
# hists = get_eff_ratios("ttz" , "BTagEMu" , "MET" , "mc")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_met.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_met.tex", prec=4, caption="ttz cut eff. comparison", noerror=False)
# hists = get_eff_ratios("ttz" , "BTagEMu" , "MET" , "data")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_met.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_met.tex", prec=4, caption="ttz cut eff. comparison", noerror=False)
hists = get_eff_ratios("ttz" , "BTagEMu" , "MET" , "ratio")
p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_met.txt", prec=4, binrange=[1], noerror=False)
p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_met.tex", prec=4, caption="ttz cut eff. comparison", noerror=False)
# hists = get_eff_ratios("ttz" , "BTagEMu" , "MT" , "mc")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_mt.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_mt.tex", prec=4, caption="ttz cut eff. comparison", noerror=False)
# hists = get_eff_ratios("ttz" , "BTagEMu" , "MT" , "data")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_mt.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_mt.tex", prec=4, caption="ttz cut eff. comparison", noerror=False)
hists = get_eff_ratios("ttz" , "BTagEMu" , "MT" , "ratio")
p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_mt.txt", prec=4, binrange=[1], noerror=False)
p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_mt.tex", prec=4, caption="ttz cut eff. comparison", noerror=False)
# hists = get_eff_ratios("ttz" , "BTagEMu" , "MET" , "mc")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_met.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_met.tex", prec=4, caption="zz cut eff. comparison", noerror=False)
# hists = get_eff_ratios("ttz" , "BTagEMu" , "MET" , "data")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_ttz_met.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_ttz_met.tex", prec=4, caption="zz cut eff. comparison", noerror=False)
hists = get_eff_ratios("zz" , "OnZ" , "MET" , "ratio")
p.print_yield_table_from_list(hists, "exp/eff_ratio_zz_met.txt", prec=4, binrange=[1], noerror=False)
p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_zz_met.tex", prec=4, caption="zz cut eff. comparison", noerror=False)
# hists = get_eff_ratios("zz" , "OnZ" , "MT" , "mc")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_zz_mt.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_zz_mt.tex", prec=4, caption="zz cut eff. comparison", noerror=False)
# hists = get_eff_ratios("zz" , "OnZ" , "MT" , "data")
# p.print_yield_table_from_list(hists, "exp/eff_ratio_zz_mt.txt", prec=4, binrange=[1], noerror=False)
# p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_zz_mt.tex", prec=4, caption="zz cut eff. comparison", noerror=False)
hists = get_eff_ratios("zz" , "OnZ" , "MT" , "ratio")
p.print_yield_table_from_list(hists, "exp/eff_ratio_zz_mt.txt", prec=4, binrange=[1], noerror=False)
p.print_yield_tex_table_from_list(hists, "exp/eff_ratio_zz_mt.tex", prec=4, caption="zz cut eff. comparison", noerror=False)
if __name__ == "__main__":
main_add()
| [
"sgnoohc@gmail.com"
] | sgnoohc@gmail.com |
528a8ce895effa2425984fb1b4778eb503f97668 | 66c7b0da6ee27ddce0943945503cdecf199f77a2 | /rllib/dataset/transforms/clipper.py | ae5dc975babb030a2182a4fe53278f08e935ae14 | [
"MIT"
] | permissive | tzahishimkin/extended-hucrl | 07609f9e9f9436121bcc64ff3190c966183a2cd9 | c144aeecba5f35ccfb4ec943d29d7092c0fa20e3 | refs/heads/master | 2023-07-09T22:57:28.682494 | 2021-08-24T08:50:16 | 2021-08-24T08:50:16 | 383,819,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,708 | py | """Implementation of a Transformation that clips attributes."""
import numpy as np
import torch
import torch.jit
import torch.nn as nn
from .abstract_transform import AbstractTransform
class Clipper(nn.Module):
"""Clipper Class."""
def __init__(self, min_val, max_val):
super().__init__()
self._min = min_val
self._max = max_val
def forward(self, array):
"""See `AbstractTransform.__call__'."""
if isinstance(array, torch.Tensor):
return torch.clamp(array, self._min, self._max)
else:
return np.clip(array, self._min, self._max)
@torch.jit.export
def inverse(self, array):
"""See `AbstractTransform.inverse'."""
return array
class RewardClipper(AbstractTransform):
"""Implementation of a Reward Clipper.
Given a reward, it will clip it between min_reward and max_reward.
Parameters
----------
min_reward: float, optional (default=0.)
minimum bound for rewards.
max_reward: float, optional (default=1.)
maximum bound for rewards.
Notes
-----
    This transformation does not have an inverse, so the same observation is returned.
"""
def __init__(self, min_reward=0.0, max_reward=1.0):
super().__init__()
self._clipper = Clipper(min_reward, max_reward)
def forward(self, observation):
"""See `AbstractTransform.__call__'."""
observation.reward = self._clipper(observation.reward)
return observation
@torch.jit.export
def inverse(self, observation):
"""See `AbstractTransform.inverse'."""
observation.reward = self._clipper.inverse(observation.reward)
return observation
class ActionClipper(AbstractTransform):
"""Implementation of a Action Clipper.
Given an action, it will clip it between min_action and max_action.
Parameters
----------
    min_action: float, optional (default=-1.)
        minimum bound for actions.
    max_action: float, optional (default=1.)
        maximum bound for actions.
Notes
-----
    This transformation does not have an inverse, so the same observation is returned.
"""
def __init__(self, min_action=-1.0, max_action=1.0):
super().__init__()
self._clipper = Clipper(min_action, max_action)
def forward(self, observation):
"""See `AbstractTransform.__call__'."""
observation.action = self._clipper(observation.action)
return observation
@torch.jit.export
def inverse(self, observation):
"""See `AbstractTransform.inverse'."""
observation.action = self._clipper.inverse(observation.action)
return observation
| [
"shi.tzahi@gmail.com"
] | shi.tzahi@gmail.com |
25ba0535997991ef82b8d6d879ed5f81d8b47140 | 88ae8695987ada722184307301e221e1ba3cc2fa | /tools/metrics/histograms/merge_xml_test.py | 3dab9df3b524089bca4f33a13110782a250d6cbe | [
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 7,500 | py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import xml.dom.minidom
import expand_owners
import histogram_paths
import merge_xml
class MergeXmlTest(unittest.TestCase):
def testMergeFiles(self):
"""Checks that enums.xml and histograms.xml can merge successfully."""
merged = merge_xml.PrettyPrintMergedFiles([
histogram_paths.TEST_ENUMS_XML, histogram_paths.TEST_HISTOGRAMS_XML,
histogram_paths.TEST_SUFFIXES_XML
])
# If ukm.xml is not provided, there is no need to populate the
# UkmEventNameHash enum.
expected_merged_xml = """
<histogram-configuration>
<enums>
<enum name="Enum1">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="TestEnum">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="UkmEventNameHash">
<summary>
Placeholder enum. The values are UKM event name hashes truncated to 31 bits.
This gets populated by the GetEnumsNodes function in merge_xml.py when
producing the merged XML file.
</summary>
</enum>
</enums>
<histograms>
<variants name="TestToken">
<variant name="Variant1" summary="Label1"/>
<variant name="Variant2" summary="Label2"/>
</variants>
<histogram name="Foo.Bar" units="xxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyzzzz"
expires_after="M85">
<owner>person@chromium.org</owner>
<component>Component</component>
<summary>Foo</summary>
</histogram>
<histogram name="Test.EnumHistogram" enum="TestEnum" expires_after="M81">
<obsolete>
Obsolete message
</obsolete>
<owner>uma@chromium.org</owner>
<summary>A enum histogram.</summary>
</histogram>
<histogram name="Test.Histogram" units="microseconds" expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
</histogram>
<histogram name="Test.TokenHistogram{TestToken}" units="microseconds"
expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
<token key="TestToken" variants="TestToken"/>
</histogram>
</histograms>
<histogram_suffixes_list>
<histogram_suffixes name="Test.EnumHistogramSuffixes" separator="."
ordering="prefix,2">
<suffix name="TestEnumSuffix" label="The enum histogram_suffixes"/>
<affected-histogram name="Test.EnumHistogram"/>
</histogram_suffixes>
<histogram_suffixes name="Test.HistogramSuffixes" separator=".">
<suffix name="TestSuffix" label="A histogram_suffixes"/>
<affected-histogram name="Test.Histogram"/>
</histogram_suffixes>
</histogram_suffixes_list>
</histogram-configuration>
"""
self.maxDiff = None
self.assertMultiLineEqual(expected_merged_xml.strip(), merged.strip())
def testMergeFiles_WithXmlEvents(self):
"""Checks that the UkmEventNameHash enum is populated correctly.
If ukm.xml is provided, populate a list of ints to the UkmEventNameHash enum
where each value is a truncated hash of the event name and each label is the
corresponding event name, with obsolete label when applicable.
"""
merged = merge_xml.PrettyPrintMergedFiles(histogram_paths.ALL_TEST_XMLS)
expected_merged_xml = """
<histogram-configuration>
<enums>
<enum name="Enum1">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="TestEnum">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="UkmEventNameHash">
<summary>
Placeholder enum. The values are UKM event name hashes truncated to 31 bits.
This gets populated by the GetEnumsNodes function in merge_xml.py when
producing the merged XML file.
</summary>
<int value="151676257" label="AbusiveExperienceHeuristic.TestEvent1"/>
<int value="898353372"
label="AbusiveExperienceHeuristic.TestEvent2 (Obsolete)"/>
<int value="1052089961" label="Autofill.TestEvent3"/>
<int value="1758741469" label="FullyObsolete.TestEvent4 (Obsolete)"/>
</enum>
</enums>
<histograms>
<variants name="TestToken">
<variant name="Variant1" summary="Label1"/>
<variant name="Variant2" summary="Label2"/>
</variants>
<histogram name="Foo.Bar" units="xxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyzzzz"
expires_after="M85">
<owner>person@chromium.org</owner>
<component>Component</component>
<summary>Foo</summary>
</histogram>
<histogram name="Test.EnumHistogram" enum="TestEnum" expires_after="M81">
<obsolete>
Obsolete message
</obsolete>
<owner>uma@chromium.org</owner>
<summary>A enum histogram.</summary>
</histogram>
<histogram name="Test.Histogram" units="microseconds" expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
</histogram>
<histogram name="Test.TokenHistogram{TestToken}" units="microseconds"
expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
<token key="TestToken" variants="TestToken"/>
</histogram>
</histograms>
<histogram_suffixes_list>
<histogram_suffixes name="Test.EnumHistogramSuffixes" separator="."
ordering="prefix,2">
<suffix name="TestEnumSuffix" label="The enum histogram_suffixes"/>
<affected-histogram name="Test.EnumHistogram"/>
</histogram_suffixes>
<histogram_suffixes name="Test.HistogramSuffixes" separator=".">
<suffix name="TestSuffix" label="A histogram_suffixes"/>
<affected-histogram name="Test.Histogram"/>
</histogram_suffixes>
</histogram_suffixes_list>
</histogram-configuration>
"""
self.maxDiff = None
self.assertMultiLineEqual(expected_merged_xml.strip(), merged.strip())
def testMergeFiles_InvalidPrimaryOwner(self):
histograms_without_valid_first_owner = xml.dom.minidom.parseString("""
<histogram-configuration>
<histograms>
<histogram name="Caffeination" units="mg">
<owner>culprit@evil.com</owner>
<summary>I like coffee.</summary>
</histogram>
</histograms>
</histogram-configuration>
""")
with self.assertRaisesRegex(
expand_owners.Error,
'The histogram Caffeination must have a valid primary owner, i.e. a '
'Googler with an @google.com or @chromium.org email address. Please '
'manually update the histogram with a valid primary owner.'):
merge_xml.MergeTrees([histograms_without_valid_first_owner],
should_expand_owners=True)
def testMergeFiles_WithComponentMetadata(self):
merged = merge_xml.PrettyPrintMergedFiles(
[histogram_paths.TEST_XML_WITH_COMPONENTS_RELATIVE])
expected_merged_xml = """
<histogram-configuration>
<histograms>
<histogram name="Test.Histogram" units="seconds" expires_after="M104">
<owner>person@chromium.org</owner>
<owner>team-alias@chromium.org</owner>
<component>Test>Component</component>
<summary>Summary 2</summary>
</histogram>
<histogram name="Test.Histogram.WithComponent" enum="TestEnum"
expires_after="M104">
<owner>uma@chromium.org</owner>
<owner>team-alias@chromium.org</owner>
<component>First>Component</component>
<component>Test>Component</component>
<summary>A enum histogram.</summary>
</histogram>
</histograms>
<histogram_suffixes_list/>
</histogram-configuration>
"""
self.assertMultiLineEqual(expected_merged_xml.strip(), merged.strip())
if __name__ == '__main__':
unittest.main()
| [
"jengelh@inai.de"
] | jengelh@inai.de |
6976ae2c9bf1b5aab381348924e7be61e3d86826 | aa28417be8935d6fa369fcb526174f9e1e30479a | /playstation/practice/字符串变形.py | 5e62f23e5ed85d838abb9634296be22909aa5c37 | [] | no_license | YYN117/Demo | d6fca95ed8a1a433ef06f1f3fc2e768414e863cb | 40690040a7422fd5d8f03a0d68f20f1be5d4a836 | refs/heads/master | 2020-04-14T12:29:30.129709 | 2019-01-02T13:31:10 | 2019-01-02T13:31:10 | 163,841,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # -*- coding:utf-8 -*-
def trans(s, n):
# write code here
a = s.split(' ')
a.reverse()
b = []
for i in a:
if len(i)==1:
if 65 <= ord(i) <= 90:
b.append(chr(ord(i) + 32))
elif 97 <= ord(i) <= 122:
b.append(chr(ord(i) - 32))
else:
c = []
for j in i:
if 65 <= ord(j) <= 90:
c.append(chr(ord(j) + 32))
elif 97 <= ord(j) <= 122:
c.append(chr(ord(j) - 32))
b.append(''.join(c))
d = ' '.join(b)
print(d)
trans("This is a sample",16)
| [
"41251061+YYN117@users.noreply.github.com"
] | 41251061+YYN117@users.noreply.github.com |
e286a3cb60b4dd2d5a5af2a6bb82d585ff0217de | 2d9a17e2b896d2f6a90913a4ba02d41f0ede5dd0 | /_court/_court_gx/_court_gx.py | a0dd7108f66078af69f115728efe44969c0cd40a | [] | no_license | wolfwhoami/xxxxx | 1cf2ed2c8ed78048d87cccf2953ca86c0871a783 | 670787ec71127bc05c1645cc3d8ef7c3a91fe84b | refs/heads/master | 2020-03-30T00:44:55.864817 | 2016-12-16T01:45:03 | 2016-12-16T01:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,608 | py | #!/usr/bin/env python
# -*- coding:utf8 -*-
import datetime
import json
import os
import re
import time
import sys
from court.save import CourtStore
from court.util import remove_file
from spider.savebin import FileSaver
from spider.spider import Spider
class CData():
@staticmethod
def split_param(url):
if not re.search(r'\?', url):
url += '?'
url = re.sub(r'page=[0-9]+', 'page=1', url)
urls = []
if not re.search(r'jbfyId=[0-9]+', url):
for fy in [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 22, 23, 29, 30]:
urls.append(url + ('&jbfyId=%d' % fy))
elif not re.search(r'ajlb=[0-9]+', url):
for ajlb in range(1, 6):
urls.append(url + ('&ajlb=%d' % ajlb))
elif not re.search(r'sxnflx=[0-9]+', url):
urls.append(url + '&sxnflx=1')
urls.append(url + '&sxnflx=2')
elif not re.search(r'startCprq=([0-9-]+)', url) and not re.search(r'endCprq=([0-9-]+)', url):
return CData.split_time(url)
else:
print 'Cannot spilt url any more:' + url
return None
return urls
@staticmethod
def split_time(url):
ft = re.search(r'startCprq=([0-9-]+)', url)
tt = re.search(r'endCprq=([0-9-]+)', url)
if ft or tt:
print 'Cannot split any more:', url
return None
url = re.sub(r'startCprq=[0-9-]*', '', url)
url = re.sub(r'endCprq=[0-9-]*', '', url)
oldtime = time.strptime('2012-01-01', "%Y-%m-%d")
time2012 = datetime.datetime(*oldtime[:3])
oldtime = time.strptime('2015-10-01', "%Y-%m-%d")
time2015 = datetime.datetime(*oldtime[:3])
today = datetime.datetime.today()
timearr = ['']
arr = CData.gen_date_arr(time2012, time2015, datetime.timedelta(days=30))
for t in arr:
timearr.append(t.strftime('%Y-%m-%d'))
arr = CData.gen_date_arr(time2015, today, datetime.timedelta(days=10))
for t in arr:
timearr.append(t.strftime('%Y-%m-%d'))
timearr.append('')
l = len(timearr) - 1
i = 0
urls = []
while i < l:
urls.append(url + ('&startCprq=%s&endCprq=%s' % (timearr[i], timearr[i + 1])))
i += 1
return urls
@staticmethod
def gen_date_arr(f, t, delta):
if not isinstance(f, datetime.datetime) or not isinstance(t, datetime.datetime):
return []
else:
tt = f
arr = []
while tt < t:
arr.append(tt)
tt += delta
arr.append(t)
return arr
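# Illustration (derived from split_param/split_time above): the crawl window is
# cut into 30-day slices between 2012-01-01 and 2015-10-01 and 10-day slices
# from 2015-10-01 until today, with open-ended '' boundaries at both ends, e.g.
#   CData.gen_date_arr(datetime.datetime(2012, 1, 1),
#                      datetime.datetime(2012, 3, 1),
#                      datetime.timedelta(days=30))
#   -> [2012-01-01, 2012-01-31, 2012-03-01]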
class GXCourtStore(CourtStore):
def __init__(self):
CourtStore.__init__(self, 'gx_court')
def page_time(self):
js = json.loads(self.get_cur_doc().cur_content)
time_str = js['AddTime']
return int(time.mktime(list(time.strptime(time_str[:10], '%Y-%m-%d'))) * 1000)
class GXCourtSpider(Spider):
"Spider which crawl legal instrument from http://www.bjcourt.gov.cn"
def __init__(self, thcnt):
Spider.__init__(self, thcnt)
self._name = "GuangxiCourtSpider"
self.test_mode = False
self.enable_mainjob_timedlock = False
self.prlist = []
self.pagestore = GXCourtStore()
self._paper_url_format = 'http://ws.gxcourt.gov.cn:23001/WDocManage.asmx/GetDocFileInfo?param={"Param":"{\'DocID\':\'%s\'}"}'
self.case_types = [
{'key': '案件种类', 'value': 1, 'info': '案.案件种类', 'count': 67381},
{'key': '案件种类', 'value': 2, 'info': '案.案件种类', 'count': 178674},
{'key': '案件种类', 'value': 3, 'info': '案.案件种类', 'count': 6839},
{'key': '案件种类', 'value': 4, 'info': '案.案件种类', 'count': 46387},
{'key': '案件涉及', 'value': 12, 'info': '案.J案件特征.J民事案件特征.J案件涉及.案件涉及', 'count': 1618},
{'key': '案件类型', 'value': 16, 'info': '案.CLS', 'count': 40}
]
self.pagesize = 20
self.job_file = 'queries'
self.param_format = "{'Param':{'Dic':[{'@Key':'%s','@Value':'%d','@SearchType':'eq'},{'@Key':'searchType','@Value':'高级检索'}]}}"
def dispatch(self):
if not os.path.exists(self.job_file):
self.update_paper_count()
self.gen_queries()
with open(self.job_file, 'r') as f:
for l in f:
p = l.split('|', 4)
if len(p) < 4:
sys.stderr.write('invalid line:' + l)
continue
self.add_main_job(
{'type': 'main', 'param': self.param_format % (p[0], int(p[1])), 'page': p[2], 'pagesize': p[3]})
time.sleep(3)
self.wait_q()
self.add_job(None, True)
def update_paper_count(self):
print 'updating page count'
param_format = "{'Param':\"{'%s':'%d'}\",'TableName':'CaseInfo'}"
for ct in self.case_types:
url = 'http://ws.gxcourt.gov.cn:23001/WDocManage.asmx/GetDataCountByParam?param=' + (
param_format % (ct['info'], ct['value']))
con = self.request_url(url)
if con and con.text:
res = eval(con.text)
msg = eval(res['msg'])
ct['count'] = int(msg['count'])
for ct in self.case_types:
print ct['value'], '==>', ct['count']
def gen_queries(self):
remove_file(self.job_file)
fs = FileSaver(self.job_file)
for ct in self.case_types:
pcnt = ct['count'] / self.pagesize + 1
for page in range(1, pcnt + 1):
fs.append(ct['key'] + '|' + str(ct['value']) + '|' + str(page) + '|' + str(self.pagesize))
def run_job(self, jobid):
if not isinstance(jobid, dict):
return
jt = jobid['type']
if 'main' == jt:
self.do_main_job(jobid)
else:
url = self._paper_url_format % jobid['id']
content = self.post_for_case(url)
if content:
self.pagestore.save(int(time.time()), jobid['id'], url, content)
print jobid['id'], '==>', len(content)
else:
print 'Cannot find document', jobid['id']
def do_main_job(self, jid):
data = {
"param": jid['param'],
"sort": "案.J流程.标准裁判日期",
"direction": "1",
"pageNo": jid['page'],
"pageSize": jid['pagesize'],
"searchType": "高级检索"
}
cs = self.post_for_list('http://ws.gxcourt.gov.cn:22001/Service/SearchDocument.asmx/SearchDocumentJson',
data)
if len(cs) == 0:
return
for c in cs:
self.add_job(
{'type': 'paper', 'id': c['CaseID']})
def split_url(self, url):
return False
def post_for_list(self, url, data):
con = self.request_url(url, data=data)
if con:
jstr = re.findall(r'>(\{[^<]*)<', con.text)
js = json.loads(jstr[0])
return js['rows']
def post_for_case(self, url):
print url
con = self.request_url(url)
if con:
js = json.loads(con.text[1:-1])
if js['stuts'] == 'true':
return js['msg']
else:
return None
if '__main__' == __name__:
job = GXCourtSpider(1)
job.load_proxy('proxy')
job.run()
# param = '%7B%27Param%27:%7B%27Dic%27:[%7B%27@Key%27:%27案件种类%27,%27@Value%27:%272%27,%27@SearchType%27:%27eq%27%7D,%7B%27@Key%27:%27searchType%27,%27@Value%27:%27高级检索%27%7D]%7D%7D'
# print unquote(param)
# sort = "案.J流程.标准裁判日期"
# data = {
# "param": unquote(param),
# "sort": sort,
# "direction": "1",
# "pageNo": 100000,
# "pageSize": 20 + 1,
# "searchType": "高级检索"
# }
#
# cases = job.post_for_list('http://ws.gxcourt.gov.cn:22001/Service/SearchDocument.asmx/SearchDocumentJson', data)
# for case in cases:
# print '{'
# for k, v in case.items():
# print k, ':', v
# print '}'
# url2 = 'http://ws.gxcourt.gov.cn:23001/WDocManage.asmx/GetDocFileInfo?' + 'param={"Param":"{\'DocID\':\'9db7a0e8-27e7-471d-8c04-dec200caccd4\'}"}'
# content = job.post_for_case(url2)
# print content
| [
"jianghao@ipin.com"
] | jianghao@ipin.com |
c3e2b659072f60e2a9c3b9710ef26d0bc548581f | 81a069a740a557e7b89ad03a33ec306f5ea5b293 | /cristianoronaldoyopmailcom_223/settings.py | 9d9ce5cab0ee854b478d3b331a3f7610bdfc262f | [] | no_license | payush/cristianoronaldoyopmailcom-223 | 0c1113b5417ab0f51c9796c7f158a7d3c38827be | f6b26672613a880e9638070cf616c7a40fc803ad | refs/heads/master | 2020-03-23T12:27:37.229193 | 2018-07-19T09:57:58 | 2018-07-19T09:57:58 | 141,560,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | """
Django settings for cristianoronaldoyopmailcom_223 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '97rbb@^rz7pd#xa_je*qqytx55e=eg$2$ev1zf8ihak4s797-9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cristianoronaldoyopmailcom_223.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cristianoronaldoyopmailcom_223.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
DATABASES = {
'default': env.db()
}
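# With no arguments, env.db() reads the connection string from the DATABASE_URL
# environment variable (e.g. DATABASE_URL=postgres://user:pass@host:5432/dbname)
# and overrides the sqlite default defined earlier in this module.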
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
5bd988b720a123e3d2023d60f781fc45f6f0bd9e | d5a786c47e171b8e0ce1634d28b4f13be5bedb32 | /blog/views.py | 987eb902f61f82096ae2a964ad9e8e65773a7f55 | [] | no_license | RaphaelfsOliveira/djeven | 9b48728e026572a74273c32b7b6cb09821b3e6fb | 689b3c91617bbbe147122d029ec0906b99da1e66 | refs/heads/master | 2021-01-09T06:17:21.335289 | 2017-04-26T20:09:51 | 2017-04-26T20:09:51 | 80,952,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
# Create your views here.
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def home(request):
return render(request, 'blog/home.html')
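# A minimal URL wiring for these views might look like the sketch below
# (illustrative only -- the actual urls.py is not part of this file, and the
# pattern names are assumptions):
# from django.urls import path
# from . import views
# urlpatterns = [
#     path('', views.post_list, name='post_list'),
#     path('post/<int:pk>/', views.post_detail, name='post_detail'),
#     path('home/', views.home, name='home'),
# ]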
| [
"raphaelbrf@gmail.com"
] | raphaelbrf@gmail.com |
cf0ab34e186cb551aefd62312852dd5ccd9505fc | 73f7cc0e71bfd38d3bfe97367324f1e7a5d8b451 | /engine_code/gapi/modules/auth/text_xml.py | 1434494619269625c21fba9be8e04088f6e542ee | [] | no_license | cash2one/my-test | ccc0ae860f936262a601c1b579d3c85196b562f9 | 8bd23f5963f4dc7398b7670e28768a3533bd5d14 | refs/heads/master | 2021-01-18T03:20:30.889045 | 2017-01-19T02:52:02 | 2017-01-19T02:52:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,456 | py | #!/usr/bin/python
# -*- coding=utf-8 -*-
# author : wklken@yeah.net
# date: 2012-05-25
# version: 0.1
import sys
import os
from xml.etree.ElementTree import ElementTree,Element
def read_xml(in_path):
    '''Read and parse an XML file.
    in_path: path of the XML file
    return: ElementTree'''
    tree = ElementTree()
    tree.parse(in_path)
    return tree
def write_xml(tree, out_path):
    '''Write the XML tree to a file.
    tree: the XML tree
    out_path: output path'''
    tree.write(out_path, encoding="utf-8")#,xml_declaration=True)
def if_match(node, kv_map):
    '''Check whether a node carries all of the given attribute/value pairs.
    node: the node to test
    kv_map: map of attribute names to expected values'''
    for key in kv_map:
        if node.get(key) != kv_map.get(key):
            return False
    return True
#---------------search -----
def find_nodes(tree, path):
    '''Find all nodes matching a path.
    tree: the XML tree
    path: node path'''
    return tree.findall(path)
def get_node_by_keyvalue(nodelist, kv_map):
    '''Filter nodes by attribute/value pairs and return the matching nodes.
    nodelist: list of candidate nodes
    kv_map: map of attribute names to values to match'''
    result_nodes = []
    for node in nodelist:
        if if_match(node, kv_map):
            result_nodes.append(node)
    return result_nodes
#---------------change -----
def change_node_properties(nodelist, kv_map, is_delete=False):
    '''Modify, add or delete node attributes and values.
    nodelist: list of nodes
    kv_map: map of attribute names to values'''
    for node in nodelist:
        for key in kv_map:
            if is_delete:
                if key in node.attrib:
                    del node.attrib[key]
            else:
                node.set(key, kv_map.get(key))
def change_node_text(nodelist, text, is_add=False, is_delete=False):
    '''Change, append to or clear a node's text.
    nodelist: list of nodes
    text: the new text'''
    for node in nodelist:
        if is_add:
            node.text += text
        elif is_delete:
            node.text = ""
        else:
            node.text = text
def create_node(tag, property_map, content,tailnum=None):
    '''Create a new node.
    tag: node tag
    property_map: map of attribute names to values
    content: text inside the element
    return: the new node'''
    element = Element(tag, property_map)
    element.text = content
    element.tail = tailnum
    return element
def add_child_node(nodelist, element):
    '''Append a child node to every node in the list.
    nodelist: list of parent nodes
    element: the child node'''
    for node in nodelist:
        node.append(element)
def del_node_by_tagkeyvalue(nodelist, tag, kv_map):
    '''Locate a child node by tag and attribute/value pairs and delete it.
    nodelist: list of parent nodes
    tag: child node tag
    kv_map: map of attribute names to values'''
    for parent_node in nodelist:
        children = parent_node.getchildren()
        for child in children:
            if child.tag == tag and if_match(child, kv_map):
                parent_node.remove(child)
def change_dict(str_argv,dst_dict,str_len):
    for i in range(1,str_len,2):
        dst_dict[str_argv[i]] = sys.argv[i+1]
def change_str(src_data,dst_dict,str_len):
    tmp1=src_data
    tmp3=[]
    str2=' '
    flag=True
    while flag:
        tmp=tmp1
        tmp2=tmp1.find(str2)
        tmp1=tmp1[tmp1.find(str2)+1:]
        if tmp2 == -1:
            flag=False
            tmp2=None
        tmp3.append(tmp[:tmp2])
    for i in range(0,str_len,2):
        dst_dict[tmp3[i]]=tmp3[i+1]
def xml_return(ret,buf):
    tree = read_xml("/gms/conf/return_val.xml")
    root = tree.getroot()
    nod = find_nodes(tree, "network")
    if nod == []:
        b=create_node("network", {}, ret)
        root.append(b)
    else:
        change_node_text(nod, ret)
    nod2 = find_nodes(tree, "network")
    nod_infor = find_nodes(tree, "network/information")
    if nod_infor == []:
        tion=create_node("information", {}, buf)
        add_child_node(nod2,tion)
    else:
        change_node_text(nod_infor, buf)
    write_xml(tree, "./out3.xml")
#if __name__ == "__main__":
#tmp_dict={}
#if len(sys.argv) > 2 :
# change_dict(sys.argv,tmp_dict,len(sys.argv))
#else:
# change_str(sys.argv[1],tmp_dict,len(sys.argv[1]))
#cmd_ip="ifconfig eth0"+tmp_dict['ip']+" netmask "+tmp_dict['netmask']+" gateway "+tmp_dict['gateway']
#cmd_dns="nameserver "+tmp_dict["dns"]+">"+"/etc/resolv.conf"
#cmd_dns1="nameserver "+tmp_dict["dns1"]+">>"+"/etc/resolv.conf"
#print cmd_ip
#if os.system(cmd_ip) != 0:
# return -1
#if os.system(cmd_dns) != 0:
# return -2
#if os.system(cmd_dns1) != 0:
# return -3
#1. Read the xml file
#tree = read_xml("/gms/conf/test.xml")
#2. Modify attributes
#A. Find the parent nodes
#nodes = find_nodes(tree, "network")
#nod = find_nodes(tree, "network/ip")
#if nod == []:
#    b=create_node("ip", {}, "192.168.0.2")
#    add_child_node(nodes,b)
#else:
#    change_node_text(nod, "1.1.1.1")
#B. Locate child nodes precisely by attribute
#result_nodes = get_node_by_keyvalue(nodes, )
#C. Modify node attributes
#change_node_properties(result_nodes, {"age": "1"})
#D. Delete node attributes
#change_node_properties(result_nodes, {"value":""}, True)
#3. Modify nodes
#A. Create a new node
#a = create_node("person", {"age":"15","money":"200000"}, "this is the firest content")
#B. Insert it under the parent nodes
#add_child_node(result_nodes, a)
#4. Delete nodes
#Locate the parent nodes
#del_parent_nodes = find_nodes(tree, "processers/services/service")
#Locate the child node precisely and delete it
#target_del_node = del_node_by_tagkeyvalue(del_parent_nodes, "chain", {"sequency" : "chain1"})
#5. Modify node text
#Locate the nodes
#text_nodes = get_node_by_keyvalue(find_nodes(tree, "processers/services/service/chain"), {"sequency":"chain3"})
#change_node_text(text_nodes, "new text")
#6. Write out to the result file
#write_xml(tree, "./out1.xml")
| [
"zhizhi1908@yeahh.net"
] | zhizhi1908@yeahh.net |
a177b3a2aa4e806f6e522fe1e7879d42657393ec | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/goodhumor.py | ca0bbac3068e7296e4baaf277cb4e54b22e989e5 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 44 | py | ii = [('BentJDO2.py', 1), ('CoopJBT.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
1fd3a2b4c611dfa98d2db9ba170171832ca778b9 | 7355c7a5fb5f636b07598d4b4018491b435b553c | /tfx/types/standard_artifacts.py | 3dc2bf6db3dda9323c32b5efe52d9cef9a70bd89 | [
"Apache-2.0"
] | permissive | DevenLu/tfx | 4a3ce025594ad006d37f9c4c69f08d8d49f09e8f | 1b99e7f33017bcd0e49a5a4ae1dc13440da35d3e | refs/heads/master | 2020-07-08T13:23:54.033534 | 2019-08-21T22:07:54 | 2019-08-21T22:08:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of standard TFX Artifact types."""
from tfx.types import artifact
class Examples(artifact.Artifact):
TYPE_NAME = 'ExamplesPath'
class ExternalArtifact(artifact.Artifact):
TYPE_NAME = 'ExternalPath'
class ExampleStatistics(artifact.Artifact):
TYPE_NAME = 'ExampleStatisticsPath'
class ExampleAnomalies(artifact.Artifact):
TYPE_NAME = 'ExampleValidationPath'
class Model(artifact.Artifact):
TYPE_NAME = 'ModelExportPath'
class ModelBlessing(artifact.Artifact):
TYPE_NAME = 'ModelBlessingPath'
class ModelEvaluation(artifact.Artifact):
TYPE_NAME = 'ModelEvalPath'
class PushedModel(artifact.Artifact):
TYPE_NAME = 'ModelPushPath'
class Schema(artifact.Artifact):
TYPE_NAME = 'SchemaPath'
class TransformGraph(artifact.Artifact):
TYPE_NAME = 'TransformPath'
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
8a7e8108b39245ac5e4056d21b8905f678e233e7 | f40086079bdcb465da32bfc4c244d0a699a735e3 | /informatics/previous informatics/Informatics-2/series 8/smoke_s.py | fe64e9c0b638bb058af2b71ee21b2d04345759c0 | [] | no_license | isk02206/python | e6dfc1e219ae3a51bde80fed75412bed98b3defe | b2fc6d1aa1155c0758883677eb2e37d9f92a4382 | refs/heads/master | 2022-12-06T15:14:55.264792 | 2020-09-02T01:02:11 | 2020-09-02T01:02:11 | 292,142,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | '''
Created on 2015. 12. 2.
@author: User
'''
def observed(int1,int2):
| [
"67949037+isk02206@users.noreply.github.com"
] | 67949037+isk02206@users.noreply.github.com |
353bf95f32bbe15d426b90b4624987d5ebe0dcab | fa38f67a6f5296ba64de8d771492f9db230cf5ed | /beatspread.py | 6f758e79570b8c1438f107d9eb05417058cedc2a | [] | no_license | traffaillac/traf-kattis | 5ebc2b0411c9f27da5d9080c269ad0add227a79c | 1b6bfdad48f3fab31902d85ed48b1bd0a8f44d0f | refs/heads/master | 2023-08-16T21:44:56.885553 | 2023-08-12T15:04:43 | 2023-08-12T15:04:43 | 236,475,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | for _ in range(int(input())):
s,d = (int(i) for i in input().split())
a,b = (s+d)//2, (s-d)//2
print(f'{a} {b}' if b>=0 and s&1==d&1 else 'impossible')
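# From a+b=s and a-b=d: a=(s+d)//2, b=(s-d)//2; a valid pair exists only when s
# and d share the same parity and b >= 0, e.g. s=40, d=20 -> "30 10".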
| [
"traf@kth.se"
] | traf@kth.se |
5e81735f195751bfe4e5f1a58955022619eb4706 | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /src/sentry/south_migrations/0278_auto__add_releaseproject__add_unique_releaseproject_project_release__a.py | 87c1274037842200012d598a03852a460ba09ad7 | [
"BSD-2-Clause"
] | permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 63,344 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ReleaseProject'
db.create_table('sentry_release_project', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Project'])),
('release', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Release'])),
))
db.send_create_signal('sentry', ['ReleaseProject'])
# Adding unique constraint on 'ReleaseProject', fields ['project', 'release']
db.create_unique('sentry_release_project', ['project_id', 'release_id'])
# Adding field 'Release.organization'
db.add_column('sentry_release', 'organization',
self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Organization'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'ReleaseProject', fields ['project', 'release']
db.delete_unique('sentry_release_project', ['project_id', 'release_id'])
# Deleting model 'ReleaseProject'
db.delete_table('sentry_release_project')
# Deleting field 'Release.organization'
db.delete_column('sentry_release', 'organization_id')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 12, 10, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'2HaTThkNq5Fug2pUI2QbOW67eeueDrkv'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry'] | [
"jeyce@github.com"
] | jeyce@github.com |
6829b76481bf86e1a23ec83e3e05484de249d009 | bb150497a05203a718fb3630941231be9e3b6a32 | /models/PaddleScience/tests/test_api/test_FCNet.py | 76b10aae7b71bcaadf357e383b365d8317bbf919 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 7,160 | py | """
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import numpy as np
import paddlescience as psci
import pytest
import paddle
from apibase import APIBase
from apibase import randtool
np.random.seed(22)
paddle.seed(22)
paddle.disable_static()
psci.config.set_dtype('float64')
def cal_FCNet(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
activation='tanh'):
"""
calculate FCNet api
"""
net = psci.network.FCNet(
num_ins=num_ins,
num_outs=num_outs,
num_layers=num_layers,
hidden_size=hidden_size,
activation=activation)
for i in range(num_layers):
net._weights[i] = paddle.ones_like(net._weights[i])
res = net.nn_func(ins)
return res
def cal_with_np(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
activation='tanh'):
"""
calculate with numpy
"""
w = []
for i in range(num_layers):
if i == 0:
lsize = num_ins
rsize = hidden_size
elif i == (num_layers - 1):
lsize = hidden_size
rsize = num_outs
else:
lsize = hidden_size
rsize = hidden_size
w.append(np.ones((lsize, rsize)))
u = ins
for i in range(num_layers - 1):
u = np.matmul(u, w[i])
if activation == 'tanh':
u = np.tanh(u)
elif activation == 'sigmoid':
u = 1 / (1 + np.exp(-u))
u = np.matmul(u, w[-1])
return u
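# A minimal sketch of how the two helpers relate (hypothetical call, not used by the
# tests below): with every weight matrix forced to ones, cal_with_np reproduces the
# forward pass of FCNet.nn_func, e.g.
#   ref = cal_with_np(np.array([[0.1, 0.5]]), 2, 1, 2, 1)   # numpy reference
#   out = cal_FCNet(np.array([[0.1, 0.5]]), 2, 1, 2, 1)     # network under test
# and the test cases compare the two results through APIBase within its tolerance.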
class TestFCNet(APIBase):
"""
    test FCNet
"""
def hook(self):
"""
implement
"""
self.types = [np.float64]
# self.debug = True
# enable check grad
self.static = False
obj = TestFCNet(cal_FCNet)
@pytest.mark.api_network_FCNet
def test_FCNet0():
"""
default
"""
xy_data = np.array([[0.1, 0.5]])
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet1():
"""
xy shape (9, 2)
"""
xy_data = randtool("float", 0, 10, (9, 2))
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet2():
"""
xy shape (9, 3)
"""
xy_data = randtool("float", 0, 1, (9, 3))
u = cal_with_np(xy_data, 3, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=3,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet3():
"""
xy shape (9, 4)
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet4():
"""
xy shape (9, 4)
num_outs: 2
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 2, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=2,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet5():
"""
xy shape (9, 4)
num_outs: 3
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet6():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 2, 20)
obj.delta = 1e-5
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=2,
hidden_size=20)
@pytest.mark.api_network_FCNet
def test_FCNet7():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 5, 20)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20)
@pytest.mark.api_network_FCNet
def test_FCNet8():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
activation='sigmoid'
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 5, 20, activation='sigmoid')
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20)
paddle.enable_static()
def static_fcnet(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
activation='tanh'):
net = psci.network.FCNet(
num_ins, num_outs, num_layers, hidden_size, activation=activation)
net.make_network()
for i in range(num_layers):
net._weights[i] = paddle.ones_like(net._weights[i])
return net.nn_func(ins)
class TestFCNet(APIBase):
"""
    test FCNet in static graph mode
"""
def hook(self):
"""
implement
"""
self.types = [np.float64]
# self.debug = True
# enable check grad
self.dygraph = False
self.static = True
self.enable_backward = False
obj1 = TestFCNet(static_fcnet)
@pytest.mark.api_network_FCNet
def test_FCNet9():
"""
static
default
"""
xy_data = np.array([[0.1, 0.5]])
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj1.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet10():
"""
static
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
activation='sigmoid'
"""
# xy_data = randtool("float", 0, 1, (9, 4))
xy_data = np.array([[0.1, 0.5, 0.2, 0.4]])
u = cal_with_np(xy_data, 4, 3, 5, 20, activation='sigmoid')
obj1.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20,
activation='sigmoid')
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
05d3daed5c13842cea79650ff0c744df26f7e996 | 3adbf4c196ce225f6bbf41d77d17fe312e5d4620 | /flexx/__main__.py | ccff891543f31cf807265ab42e5a2635d70df8cb | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | drorhilman/flexx | 9c818576aec1888092ab9819e2269d18b26f326b | 8de99132d0fa25b0fea81ed8ac7ff8f8f9e95661 | refs/heads/master | 2020-03-24T10:15:44.048180 | 2018-07-05T13:26:31 | 2018-07-05T13:26:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,326 | py | """
Flexx has a command line interface to perform some simple tasks.
Invoke it via ``python -m flexx``. Additional command line arguments
can be provided to configure Flexx, see
:func:`configuring flexx <flexx.config>`.
.. code-block:: none
"""
import sys
ALIASES = {'-h': 'help', '--help': 'help',
'--version': 'version',
}
class CLI:
""" Command line interface class. Commands are simply defined as methods.
"""
def __init__(self, args=None):
if args is None:
return
command = args[0] if args else 'help'
command = ALIASES.get(command, command)
if command not in self.get_command_names():
raise RuntimeError('Invalid command %r' % command)
func = getattr(self, 'cmd_' + command)
func(*args[1:])
def get_command_names(self):
commands = [d[4:] for d in dir(self) if d.startswith('cmd_')]
commands.sort()
return commands
def get_global_help(self):
lines = []
lines.append('Flexx command line interface')
lines.append(' python -m flexx <command> [args]')
lines.append('')
for command in self.get_command_names():
doc = getattr(self, 'cmd_' + command).__doc__
if doc:
summary = doc.strip().splitlines()[0]
lines.append('%s %s' % (command.ljust(15), summary))
return '\n'.join(lines)
def cmd_help(self, command=None):
""" show information on how to use this command.
"""
if command:
if command not in self.get_command_names():
raise RuntimeError('Invalid command %r' % command)
doc = getattr(self, 'cmd_' + command).__doc__
if doc:
lines = doc.strip().splitlines()
doc = '\n'.join([lines[0]] + [line[8:] for line in lines[1:]])
print('%s - %s' % (command, doc))
else:
print('%s - no docs' % command)
else:
print(self.get_global_help())
def cmd_version(self):
""" print the version number
"""
import sys
try:
import flexx
except ImportError:
sys.path.insert(0, '.')
import flexx
print(flexx.__version__)
def cmd_info(self, port=None):
""" show info on flexx server process corresponding to given port,
e.g. flexx info 8080
The kind of info that is provided is not standardized/documented yet.
"""
if port is None:
return self.cmd_help('info')
port = int(port)
try:
print(http_fetch('http://localhost:%i/flexx/cmd/info' % port))
except FetchError:
print('There appears to be no local server at port %i' % port)
def cmd_stop(self, port=None):
""" stop the flexx server process corresponding to the given port.
"""
if port is None:
return self.cmd_help('stop')
port = int(port)
try:
print(http_fetch('http://localhost:%i/flexx/cmd/stop' % port))
print('stopped server at %i' % port)
except FetchError:
print('There appears to be no local server at port %i' % port)
def cmd_log(self, port=None, level='info'):
""" Start listening to log messages from a server process - STUB
flexx log port level
"""
if port is None:
return self.cmd_help('log')
print('not yet implemented')
#print(http_fetch('http://localhost:%i/flexx/cmd/log' % int(port)))
class FetchError(Exception):
pass
def http_fetch(url):
""" Perform an HTTP request.
"""
from tornado.httpclient import HTTPClient
http_client = HTTPClient()
try:
response = http_client.fetch(url)
except Exception as err:
raise FetchError('http fetch failed: %s' % str(err))
finally:
http_client.close()
return response.body.decode()
# Prepare docs
_cli_docs = CLI().get_global_help().splitlines()
__doc__ += '\n'.join([' ' + line for line in _cli_docs])
def main():
# Main entry point (see setup.py)
CLI(sys.argv[1:])
if __name__ == '__main__':
main()
| [
"almar.klein@gmail.com"
] | almar.klein@gmail.com |
f1c1843044b9c187c5c7ffae3a14625d3b7e6f86 | 796613525c40a241b0f88ceb761838a5bca311e1 | /biasTF/BIAS_V2/src/MoreTransferFunctions.py | a243b13f8131edb9f58a22317611f42685ef40cc | [] | no_license | UAEDF/vbfHbb | 377e956a2d002eacd2090a4abbaa6bffb141454e | ecd5bfefa3db8d2c8283e306d68da42de44f7e39 | refs/heads/master | 2020-04-22T16:54:48.622168 | 2015-12-26T16:07:44 | 2015-12-26T16:07:44 | 12,751,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,460 | py | #!/usr/bin/env python
import ROOT
from ROOT import *
import sys,re,os
from optparse import OptionParser
####################################################################################################
def parser():
mp = OptionParser()
return mp
####################################################################################################
def printWToText(w):
old = os.dup( sys.stdout.fileno() )
out = file('stdouterr.txt','w')
os.dup2( out.fileno(), sys.stdout.fileno() )
w.Print()
os.dup2( old, sys.stdout.fileno() )
out.close()
#
out = file('stdouterr.txt','r')
text = out.read()
out.close()
#
os.remove('stdouterr.txt')
return text
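# Presumably w.Print() writes via ROOT's C++ output stream rather than Python's
# sys.stdout, so the os.dup/os.dup2 calls above redirect the process-level stdout file
# descriptor to a scratch file, read the captured text back, and restore the original.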
####################################################################################################
def getObject(w,nam):
obj = w.obj(nam)
return obj
####################################################################################################
def line(nam,fun,x1,x2):
lin = TF1(nam,fun,x1,x2)
lin.SetLineColor(kViolet+3)
lin.SetLineStyle(kDashed)
return lin
####################################################################################################
def legend(a,b,c,d):
leg = TLegend(a,b,c,d)
leg.SetFillColor(0)
leg.SetFillStyle(0)
leg.SetTextFont(62)
leg.SetTextColor(kBlack)
leg.SetTextSize(0.045)
leg.SetBorderSize(0)
return leg
####################################################################################################
def pave(a,b,c,d):
pav = TPaveText(a,b,c,d,"NDC")
pav.SetFillColor(0)
pav.SetFillStyle(0)
pav.SetTextFont(62)
pav.SetTextColor(kViolet+3)
pav.SetTextSize(0.045)
pav.SetBorderSize(0)
pav.SetTextAlign(11)
return pav
####################################################################################################
def main():
mp = parser()
opts,args = mp.parse_args()
gROOT.SetBatch(1)
gROOT.ProcessLineSync(".x ../../common/styleCMSSara.C")
archive = {}
cplain = TCanvas("cplain","cplain",3600,1500)
cplain.Divide(4,2)
cratio = TCanvas("cratio","cratio",3600,1500)
cratio.Divide(4,2)
cplains = TCanvas("cplains","cplains",2400,1000)
cplains.Divide(4,2)
cratios = TCanvas("cratios","cratios",2400,1000)
cratios.Divide(4,2)
ftransfer = TFile.Open('transferFunctions.root','read')
tran = {}
for i in range(7):
if not (i==0 or i==4): tran[i] = [ftransfer.Get("fitRatio_sel%s_CAT%d_POL1"%('NOM' if i<4 else 'PRK',i)).Clone("trans_CAT%d"%i),ftransfer.Get("gUnc_sel%s_CAT%d_POL1"%('NOM' if i<4 else 'PRK',i)).Clone("trans_CAT%d"%i)]
else: tran[i] = [ftransfer.Get("fitRatio_sel%s_CAT%d_POL1"%('NOM' if i<4 else 'PRK',i)).Clone("trans_CAT%d"%i),None]
tran[i][0].SetLineColor(kGreen+3)
tran[i][0].SetLineStyle(kSolid)
if not tran[i][1]==None:
tran[i][1].SetFillColor(kGray+1)
tran[i][1].SetFillStyle(3454)
for fname in args:
fopen = TFile.Open(fname,'read')
w = fopen.Get("w")
print fname
alt = re.search('.*Alt([A-Za-z0-9_]*).root',fname).group(1)
text = printWToText(w)
for Line in text.split('\n'):
if '::qcd_model' in Line:
typ = re.search('(.*)::.*',Line).group(1)
nam = re.search('.*::(.*)\[.*',Line).group(1)
cat = re.search('.*CAT([0-9]*).*',nam).group(1)
obj = getObject(w,nam)
th1 = obj.createHistogram("mbbReg_CAT%d"%int(cat),240)
th1.SetName("h"+nam)
#print alt, cat, nam, '(%s)'%typ, obj, th1
archive[(alt,cat)] = {}
archive[(alt,cat)]['alt'] = alt
archive[(alt,cat)]['cat'] = cat
archive[(alt,cat)]['typ'] = typ
archive[(alt,cat)]['nam'] = nam
archive[(alt,cat)]['obj'] = obj
archive[(alt,cat)]['th1'] = th1
rat = th1.Clone("r"+nam)
rat.Divide(archive[(alt,cat)]['th1'],archive[(alt,'0' if int(cat)<4 else '4')]['th1'])
rat.GetYaxis().SetRangeUser(0.92,1.08)
pav = pave(0.6,0.7,0.9,0.9)
pav.AddText('Function: %s'%alt)
lin = line("lin","1.",th1.GetXaxis().GetXmin(),th1.GetXaxis().GetXmax())
archive[(alt,cat)]['rat'] = rat
cplain.cd(int(cat)+1)
th1.Draw()
#for ibin in range(th1.GetNbinsX()):
# print th1.GetBinContent(ibin), th1.GetBinError(ibin)
#print
pav.Draw()
cratio.cd(int(cat)+1)
archive[(alt,cat)]['pav'] = pav
archive[(alt,cat)]['lin'] = lin
rat.Draw("axis")
if not (int(cat)==0 or int(cat)==4): tran[int(cat)][1].Draw("E3")
tran[int(cat)][0].Draw("same")
rat.Draw("same")
pav.Draw("same")
lin.Draw("same")
gPad.Update()
pav.SetY1NDC(pav.GetY2NDC()-len(pav.GetListOfLines())*0.055)
leg = legend(0.6,0.5,0.9,pav.GetY1NDC()-0.02)
leg.AddEntry(rat,"CAT%d / CAT%d"%(int(cat),0 if int(cat)<4 else 4),"L")
leg.AddEntry(tran[int(cat)][0],"TF POL1","L")
leg.Draw()
gPad.Update()
leg.SetY1NDC(leg.GetY2NDC()-leg.GetNRows()*0.055)
archive[(alt,cat)]['leg'] = leg
cplains.cd(int(cat)+1)
th1.Draw()
pav.Draw()
cratios.cd(int(cat)+1)
rat.Draw()
tran[int(cat)][0].Draw("same")
if not (int(cat)==0 or int(cat)==4): tran[int(cat)][1].Draw("sameE3")
pav.Draw()
lin.Draw("same")
leg.Draw()
if not os.path.exists('plots'): os.makedirs('plots')
cplain.SaveAs("plots/c_%s_plain.pdf"%alt)
cratio.SaveAs("plots/c_%s_ratio.pdf"%alt)
cplains.SaveAs("plots/c_%s_plain.png"%alt)
cratios.SaveAs("plots/c_%s_ratio.png"%alt)
fopen.Close()
ftransfer.Close()
cplain.Close()
cratio.Close()
####################################################################################################
if __name__=='__main__':
main()
| [
"sara.alderweireldt@cern.ch"
] | sara.alderweireldt@cern.ch |
7c9e1c0a5c012818be68148a3a2adfb9fe3cdd8f | 43a1e9c15132398433ef1bd941e49eb0372136e6 | /day21/class_test.py | 1ef6ff0a6a57edd645b641af0ca7dd32e4a6df21 | [] | no_license | dlatnrud/pyworks | 3eaf253f7e9cf74e6504770885e4a63fd1c4e293 | 745ae5c6a85015800d049176b7d5aeb0df0f000a | refs/heads/master | 2023-08-12T16:14:50.936403 | 2021-10-15T00:48:04 | 2021-10-15T00:48:04 | 402,286,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py |
from libs.myclass import Car, Student
s1 = Student("콩쥐", 3)
print(s1)
s1.learn()
s2 = Student("팥쥐", 2)
print(s2)
car1 = Car("소나타", "흰색", 2500)
car2 = Car("BMW", "black", 3000)
print("\t 모델명 \t색상 \t배기량")
print("차량1 " + car1.model + '\t' + car1.color + '\t' + str(car1.cc))
print("차량2 " + car2.model + '\t ' + car2.color + '\t' + str(car2.cc))
| [
"dlatnrud2268@naver.com"
] | dlatnrud2268@naver.com |
aa2bde45f02c21dde8c35da4febe185068b1d850 | 172189e030da9b1cd55877ba8e76ed3ad7ab8e2a | /venv/Scripts/pip3-script.py | b8d0f92f006d806cd6fd661c6200993d17351521 | [] | no_license | class-yoo/practice02 | 8f3d44de85d2d39d5979840f0a86029bb925c995 | cc6ee1f472de7f0e84e17566ab629e6ea2871b39 | refs/heads/master | 2022-01-31T05:11:18.175308 | 2019-06-13T10:31:12 | 2019-06-13T10:31:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | #!D:\cafe24\dowork\pycharmProjects\practice02\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"mynameisyjh@gmail.com"
] | mynameisyjh@gmail.com |
dcc20f5683f3d92aa30cd10bbd9d1b271ee391ce | c380659f6a79eee18c2ea41ec2cff8b55d725243 | /src/pyAHP/where.py | 77a23578a1feab2cb7fb007809940bc0c440ad11 | [] | no_license | ai-se/softgoals | 49b0c7f8fa010697c339831bf0561f54f0e10910 | 41e9b467811c7a491aeedcc88d76910a83fe5c50 | refs/heads/master | 2021-01-17T00:11:04.123534 | 2017-06-04T02:56:11 | 2017-06-04T02:56:11 | 41,162,015 | 1 | 4 | null | 2015-12-01T03:45:40 | 2015-08-21T15:03:25 | Python | UTF-8 | Python | false | false | 5,731 | py | from __future__ import print_function, division
import sys,os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from utilities.lib import *
__author__ = 'panzer'
def default_settings():
return O(
min_size = 8,
max_depth = 10,
prefix = "|.. "
)
class Row(O):
"""
Row Of a Binary Tree Node
"""
def __init__(self, decisions):
O.__init__(self)
self.decisions = decisions
self.meta = None
self.normalized = None
class TreeNode(O):
"""
Node of a binary Tree
"""
id_counter = 0
def __init__(self, rows, parent, level):
"""
:param parent: Node's parent
:param level: Level of a node. Starts from 0
:return:
"""
O.__init__(self)
self.id = TreeNode.id_counter
self._parent = parent
self.level = level
self.kids = None
self._rows = rows
TreeNode.id_counter += 1
def add_kid(self, kid):
"""
Add a child to the node
:param kid:
:return:
"""
if self.kids is None:
self.kids = []
self.kids.append(kid)
def get_rows(self):
return self._rows
class Where(O):
"""
Fastmap based clusterer
"""
def __init__(self, rows, **settings):
"""
:param rows: Rows to be clustered
:param settings:
:return:
"""
O.__init__(self)
self.rows = rows
self.limits = self.set_limits()
self.settings = default_settings().update(**settings)
def set_limits(self):
"""
Assign max and min values based on all the data
:return:
"""
maxs = [-sys.maxint]*len(self.rows[0].decisions)
mins = [sys.maxint]*len(self.rows[0].decisions)
for row in self.rows:
for i, decision in enumerate(row.decisions):
if decision > maxs[i]: maxs[i] = decision
if decision < mins[i]: mins[i] = decision
return O(maxs = maxs, mins = mins)
def too_deep(self, level):
"""
Check if the tree is too deep
:param level:
:return:
"""
return level > self.settings.max_depth
def too_few(self, rows):
"""
Check if a cluster contains the minimal rows
:param rows:
:return:
"""
return len(rows) < self.settings.min_size
def get_furthest(self, row, rows):
"""
Get furthest row from a set of rows wrt a current row
:param row:
:param rows:
:return:
"""
furthest, dist = None, 0
for one in rows:
if row.id == one.id: continue
tmp = self.euclidean(row, one)
if tmp > dist:
        furthest, dist = one, tmp
return furthest
def euclidean(self, one, two):
"""
    Compute the squared Euclidean distance between the normalized rows
:param one:
:param two:
:return:
"""
one_normalized = self.normalize(one)
two_normalized = self.normalize(two)
dist = 0
for one_i, two_i in zip(one_normalized, two_normalized):
dist += (one_i - two_i) ** 2
return dist
def normalize(self, one):
"""
Normalize row
:param one:
:return:
"""
if one.normalized is None:
normalized = []
for i, decision in enumerate(one.decisions):
if self.limits.mins[i] == self.limits.maxs[i]:
value = 0
else:
value = (decision - self.limits.mins[i]) / (self.limits.maxs[i] - self.limits.mins[i])
normalized.append(value)
one.normalized = normalized
return one.normalized
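  # normalize() is plain min-max scaling against the global limits from set_limits();
  # the result is cached on the row (one.normalized) and constant columns map to 0.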
def get_furthest2(self, rows):
"""
Get furthest extreme rows from a list of rows
:param rows:
:return:
"""
east, west, dist = None, None, -1
for i in range(len(rows)-1):
for j in range(i+1, len(rows)):
temp_dist = self.euclidean(rows[i], rows[j])
if temp_dist > dist:
east, west, dist = rows[i], rows[j], temp_dist
return east, west
def fastmap(self, node):
"""
Fastmap projection
:param node:
:return:
"""
def second(iterable): return iterable[1]
rows = shuffle(node.get_rows())
east, west = self.get_furthest2(rows)
c = self.euclidean(east, west)
lst = []
for one in rows:
a = self.euclidean(one, west)
b = self.euclidean(one, east)
if c == 0:
x = 0
else:
x = (a**2 + c**2 - b**2)/(2*c)
lst += [(x, one)]
lst = sorted(lst)
mid = len(lst)//2
wests = map(second, lst[:mid])
easts = map(second, lst[mid:])
west = wests[0]
east = easts[-1]
return wests, west, easts, east
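  # The projection above is the standard FastMap step: for each row, a = euclidean(row,
  # west), b = euclidean(row, east) and c = euclidean(east, west) give the coordinate
  # x = (a**2 + c**2 - b**2) / (2*c) along the east-west axis (law of cosines), and the
  # rows are split at the median x into a "west" half and an "east" half.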
def show(self, rows, node, level, has_kids = True):
"""
Print Node
:param rows:
:param node:
:param level:
:param has_kids:
:return:
"""
if not has_kids:
print(self.settings.prefix*level, len(rows), ' ; ', node.id)
else:
print(self.settings.prefix*level, len(rows))
def cluster(self, rows = None, level = 0, parent = None, verbose = False):
"""
Cluster rows
:param rows:
:param level:
:param parent:
:param verbose:
:return:
"""
if rows is None:
rows = self.rows
node = TreeNode(rows, parent, level)
if not self.too_deep(level) and not self.too_few(rows):
if verbose: self.show(rows, node, level, has_kids=True)
wests, west, easts, east = self.fastmap(node)
node.west, node.east = west, east
node.add_kid(self.cluster(wests, level=level+1, parent=node, verbose=verbose))
node.add_kid(self.cluster(easts, level=level+1, parent=node, verbose=verbose))
else:
if verbose: self.show(rows, node, level, has_kids=False)
east, west = self.get_furthest2(rows)
node.west, node.east = west, east
return node
def get_leaves(self, node):
leaves = []
if node.kids:
for kid in node.kids:
leaves += self.get_leaves(kid)
else:
leaves = [node]
return leaves
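# Hypothetical usage sketch (not part of this module): cluster a list of Row objects
# and collect the leaf clusters.
#   clusterer = Where(rows, min_size=8, max_depth=10)
#   root = clusterer.cluster(verbose=False)
#   leaves = clusterer.get_leaves(root)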
| [
"george.meg91@gmail.com"
] | george.meg91@gmail.com |
7669a41a8804ee7b4055f88380962bbbc771ea49 | 2daa10000d265cd039ee4489d5ade35837e48bb0 | /log/tasks/post_schedule.py | 1061173a98933287b3c1d908047a2a98453d201c | [] | no_license | mohsenamoon1160417237/invites | ef2d23e6e21965b99f0861efa9f2c36a5ead131e | eacef16787f8bfecfe10e5ab9500116419aa4643 | refs/heads/master | 2023-08-22T11:35:09.071118 | 2021-10-24T12:15:03 | 2021-10-24T12:15:03 | 420,020,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from celery import shared_task
from celery.utils.log import get_task_logger
from log.models.PostLog import PostLog
from django.shortcuts import get_object_or_404
logger = get_task_logger(__name__)
@shared_task
def post_schedule(post_id):
post = get_object_or_404(PostLog , id=post_id)
post.status = PostLog.PUBLISH
post.save()
logger.info("the post saved as publish!") | [
"dramatic225@gmail.com"
] | dramatic225@gmail.com |
0c14fbbc574d2ff198fe9688adc63b8361eee419 | 908ad8a65600996b263bb53dd3054e742c533dab | /akshare/stock/stock_info.py | 5999c616aa544102b154cd70c65aef64b35bc27c | [
"MIT"
] | permissive | pangyouzhen/akshare | 47c7d9e944ac197d3df5cce81eb33da5feccd518 | 5050cda92624c642d70a196d93a343e53a12fe17 | refs/heads/master | 2023-05-09T00:27:26.011181 | 2021-05-30T07:58:59 | 2021-05-30T07:58:59 | 371,892,903 | 0 | 0 | MIT | 2021-05-29T10:07:23 | 2021-05-29T05:57:53 | null | UTF-8 | Python | false | false | 8,743 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/12/28 16:31
Desc: basic stock information
"""
import json
from io import BytesIO
import pandas as pd
import requests
def stock_info_sz_name_code(indicator: str = "B股列表") -> pd.DataFrame:
"""
    Shenzhen Stock Exchange - stock list
http://www.szse.cn/market/product/stock/list/index.html
:param indicator: choice of {"A股列表", "B股列表", "CDR列表", "AB股列表"}
:type indicator: str
    :return: data for the specified indicator
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
indicator_map = {"A股列表": "tab1", "B股列表": "tab2", "CDR列表": "tab3", "AB股列表": "tab4"}
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1110",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
if len(temp_df) > 10:
temp_df["A股代码"] = temp_df["A股代码"].astype(str).str.split('.', expand=True).iloc[:, 0].str.zfill(6).str.replace("000nan", "")
return temp_df
else:
return temp_df
def stock_info_sh_name_code(indicator: str = "主板A股") -> pd.DataFrame:
"""
    Shanghai Stock Exchange - stock list
http://www.sse.com.cn/assortment/stock/list/share/
:param indicator: choice of {"主板A股": "1", "主板B股": "2", "科创板": "8"}
:type indicator: str
    :return: data for the specified indicator
:rtype: pandas.DataFrame
"""
indicator_map = {"主板A股": "1", "主板B股": "2", "科创板": "8"}
url = "http://query.sse.com.cn/security/stock/getStockListData.do"
headers = {
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/assortment/stock/list/share/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"jsonCallBack": "jsonpCallback66942",
"isPagination": "true",
"stockCode": "",
"csrcCode": "",
"areaName": "",
"stockType": indicator_map[indicator],
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "2000",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "11",
"_": "1589881387934",
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{"):-1])
temp_df = pd.DataFrame(json_data["result"])
return temp_df
def stock_info_sh_delist(indicator: str = "暂停上市公司"):
"""
    Shanghai Stock Exchange - suspended and delisted companies
http://www.sse.com.cn/assortment/stock/list/firstissue/
:param indicator: choice of {"终止上市公司": "5", "暂停上市公司": "4"}
:type indicator: str
    :return: data for suspended or delisted companies
:rtype: pandas.DataFrame
"""
indicator_map = {"终止上市公司": "5", "暂停上市公司": "4"}
url = "http://query.sse.com.cn/security/stock/getStockListData2.do"
headers = {
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/assortment/stock/list/share/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"jsonCallBack": "jsonpCallback66942",
"isPagination": "true",
"stockCode": "",
"csrcCode": "",
"areaName": "",
"stockType": indicator_map[indicator],
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "2000",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "11",
"_": "1589881387934",
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{"):-1])
temp_df = pd.DataFrame(json_data["result"])
return temp_df
def stock_info_sz_delist(indicator: str = "暂停上市公司") -> pd.DataFrame:
"""
    Shenzhen Stock Exchange - suspended and delisted companies
http://www.szse.cn/market/stock/suspend/index.html
:param indicator: choice of {"暂停上市公司", "终止上市公司"}
:type indicator: str
    :return: data for suspended or delisted companies
:rtype: pandas.DataFrame
"""
indicator_map = {"暂停上市公司": "tab1", "终止上市公司": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1793_ssgs",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
def stock_info_sz_change_name(indicator: str = "全称变更") -> pd.DataFrame:
"""
    Shenzhen Stock Exchange - companies that changed their names
http://www.szse.cn/market/companys/changename/index.html
:param indicator: choice of {"全称变更": "tab1", "简称变更": "tab2"}
:type indicator: str
    :return: data for full-name changes or short-name changes
:rtype: pandas.DataFrame
"""
indicator_map = {"全称变更": "tab1", "简称变更": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "SSGSGMXX",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
def stock_info_change_name(stock: str = "688588") -> pd.DataFrame:
"""
    Sina Finance - former stock names
http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/300378.phtml
    :param stock: stock code
:type stock: str
    :return: list of the stock's former names
:rtype: list
"""
url = f"http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/{stock}.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[3].iloc[:, :2]
temp_df.dropna(inplace=True)
temp_df.columns = ["item", "value"]
temp_df["item"] = temp_df["item"].str.split(":", expand=True)[0]
try:
name_list = temp_df[temp_df["item"] == "证券简称更名历史"].value.tolist()[0].split(" ")
return name_list
except:
return None
def stock_info_a_code_name() -> pd.DataFrame:
"""
    Shanghai and Shenzhen A-share list
    :return: Shanghai and Shenzhen A-share data
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
stock_sh = stock_info_sh_name_code(indicator="主板A股")
stock_sh = stock_sh[["SECURITY_CODE_A", "SECURITY_ABBR_A"]]
stock_sh.columns = ["公司代码", "公司简称"]
stock_sz = stock_info_sz_name_code(indicator="A股列表")
stock_sz["A股代码"] = stock_sz["A股代码"].astype(str).str.zfill(6)
big_df = big_df.append(stock_sz[["A股代码", "A股简称"]], ignore_index=True)
big_df.columns = ["公司代码", "公司简称"]
stock_kcb = stock_info_sh_name_code(indicator="科创板")
stock_kcb = stock_kcb[["SECURITY_CODE_A", "SECURITY_ABBR_A"]]
stock_kcb.columns = ["公司代码", "公司简称"]
big_df = big_df.append(stock_sh, ignore_index=True)
big_df = big_df.append(stock_kcb, ignore_index=True)
big_df.columns = ["code", "name"]
return big_df
if __name__ == '__main__':
stock_info_sz_df = stock_info_sz_name_code(indicator="A股列表")
print(stock_info_sz_df)
stock_info_sz_df = stock_info_sz_name_code(indicator="B股列表")
print(stock_info_sz_df)
stock_info_sz_df = stock_info_sz_name_code(indicator="AB股列表")
print(stock_info_sz_df)
stock_info_sz_df = stock_info_sz_name_code(indicator="CDR列表")
print(stock_info_sz_df)
stock_info_sh_delist_df = stock_info_sh_delist(indicator="终止上市公司")
print(stock_info_sh_delist_df)
stock_info_sz_delist_df = stock_info_sz_delist(indicator="终止上市公司")
print(stock_info_sz_delist_df)
stock_info_sz_change_name_df = stock_info_sz_change_name(indicator="全称变更")
print(stock_info_sz_change_name_df)
stock_info_change_name_list = stock_info_change_name(stock="000503")
print(stock_info_change_name_list)
stock_info_a_code_name_df = stock_info_a_code_name()
print(stock_info_a_code_name_df)
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
b3889823658d4ea8723d6e2206876dad2817f7e7 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/CommonScripts/Scripts/DomainReputation/DomainReputation.py | 63cc309711c1842d76d42b7efd6895d62bd3645f | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 975 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def domain_reputation():
results = demisto.executeCommand('domain', {'domain': demisto.get(demisto.args(), 'domain')})
for item in results:
if isError(item):
if is_offset_error(item): # call to is_offset_error is a temporary fix to ignore offset 1 error
results.remove(item)
else:
item['Contents'] = item['Brand'] + ' returned an error.\n' + str(item['Contents'])
demisto.results(results)
def is_offset_error(item) -> bool:
'''error msg: 'Offset: 1' will not be displayed to Users
This method is temporary and will be removed
once XSUP-18208 issue is fixed.'''
if item['Contents'] and 'Offset' in item['Contents']:
return True
return False
def main():
domain_reputation()
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
main()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
a45f0830634e474ce6dad420e01827de758b3d2b | af10bcb9ee5b61f82081c63dd275134c62baa21e | /dagsearch/env.py | b4e47c0653f6426db23f51b3a387a8033075fc41 | [] | no_license | zbyte64/pytorch-dagsearch | 2ed7ed648d7e361744f129ace10a2c65d9f3036d | c52fe84f52b76bdc6c9d2e74f140bdf3fd119244 | refs/heads/master | 2020-04-14T05:24:31.565200 | 2019-01-23T03:27:06 | 2019-01-23T03:27:06 | 163,659,872 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu' | [
"zbyte64@gmail.com"
] | zbyte64@gmail.com |
f21bc36aca61bad4889e5e3463d4efea8fa18d04 | 23fb7793e9d94e56714b618faacc4e85db8d74f9 | /explore/transform_angles.py | 7c1065a9ff9c1163f802df1697413f0c57cb4d89 | [
"BSD-3-Clause"
] | permissive | SasView/sasmodels | b4b6432c083deeaf77a96d352afbf10c696f4527 | 00fd0242007be2023cf7b4887b33da6247a6adcc | refs/heads/master | 2023-08-30T23:58:16.030202 | 2023-08-15T13:47:45 | 2023-08-15T13:47:45 | 30,761,174 | 17 | 31 | BSD-3-Clause | 2023-09-12T13:27:39 | 2015-02-13T15:04:20 | Python | UTF-8 | Python | false | false | 2,048 | py | #!/usr/bin/env python
"""
Small application to change theta, phi and psi from SasView 3.x models to the
new angle definition in SasView 4.x and above.
Usage: python explore/transform_angles.py theta phi psi
"""
from __future__ import print_function, division
import sys
import numpy as np
from numpy import pi, cos, sin, sqrt, exp, degrees, radians
from scipy.optimize import fmin
# Definition of rotation matrices comes from wikipedia:
# https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
def Rx(angle):
"""Construct a matrix to rotate points about *x* by *angle* degrees."""
a = radians(angle)
R = [[1, 0, 0],
[0, +cos(a), -sin(a)],
[0, +sin(a), +cos(a)]]
return np.array(R)
def Ry(angle):
"""Construct a matrix to rotate points about *y* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), 0, +sin(a)],
[0, 1, 0],
[-sin(a), 0, +cos(a)]]
return np.array(R)
def Rz(angle):
"""Construct a matrix to rotate points about *z* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), -sin(a), 0],
[+sin(a), +cos(a), 0],
[0, 0, 1]]
return np.array(R)
def transform_angles(theta, phi, psi, qx=0.1, qy=0.1):
Rold = Rz(-psi)@Rx(theta)@Ry(-(90 - phi))
cost = lambda p: np.linalg.norm(Rz(-p[2])@Ry(-p[0])@Rz(-p[1]) - Rold)
result = fmin(cost, (theta, phi, psi))
theta_p, phi_p, psi_p = result
Rnew = Rz(-psi_p)@Ry(-theta_p)@Rz(-phi_p)
print("old: theta, phi, psi =", ", ".join(str(v) for v in (theta, phi, psi)))
print("new: theta, phi, psi =", ", ".join(str(v) for v in result))
try:
point = np.array([qx, qy, [0]*len(qx)])
except TypeError:
point = np.array([[qx],[qy],[0]])
for p in point.T:
print("q abc old for", p, (Rold@p.T).T)
print("q abc new for", p, (Rnew@p.T).T)
if __name__ == "__main__":
theta, phi, psi = (float(v) for v in sys.argv[1:])
#transform_angles(theta, phi, psi)
transform_angles(theta, phi, psi, qx=-0.017, qy=0.035)
| [
"pkienzle@nist.gov"
] | pkienzle@nist.gov |
3d44f956b37985fb6c1fff55f9c60a82d9c0bde3 | ed1e81a2325d310de7961274a06bfe6cdb7993d0 | /basic-python/xmlcreation.py | f448a461e86acb8db27c96bd5449e8453674cf27 | [] | no_license | fahimkhan/python | ce573298adf30ca8426b74f3ab275ab7f8047a91 | 1733ad39cf214362c8a76f8996740715888d2101 | refs/heads/master | 2021-01-15T15:50:27.323739 | 2016-08-24T11:02:56 | 2016-08-24T11:02:56 | 20,254,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/python
import xml.etree.cElementTree as ET
root = ET.Element("root")
doc = ET.SubElement(root, "doc")
field1 = ET.SubElement(doc, "field1")
field1.set("name", "blah")
field1.text = "some value1"
field2 = ET.SubElement(doc, "field2")
field2.set("name", "asdfasd")
field2.text = "some vlaue2"
tree = ET.ElementTree(root)
tree.write("filename.xml")
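# For reference, the written filename.xml comes out as a single unindented line,
# roughly:
#   <root><doc><field1 name="blah">some value1</field1>
#   <field2 name="asdfasd">some vlaue2</field2></doc></root>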
| [
"fahim.elex@gmail.com"
] | fahim.elex@gmail.com |
a7a9b9af5f5f6b54cbafe242f0bc5ab853a93079 | 1ebba24841912613f9c70dffee05270c4f1f4adb | /willie/willie/modules/github.py | 5f3e46c6ca0161e15e3072ecb311d7baab7b42af | [
"MIT",
"EFL-2.0"
] | permissive | freifunk-darmstadt/ffda-jarvis | 4953af0cd8629c9b9632806eb0a7440fcf94da57 | 127f3333c837c592177f84b361e3c050e00f2d3f | refs/heads/master | 2020-04-06T06:56:21.472931 | 2017-10-23T23:00:57 | 2017-10-23T23:10:03 | 32,585,430 | 0 | 8 | MIT | 2017-12-20T00:46:26 | 2015-03-20T13:32:00 | Python | UTF-8 | Python | false | false | 8,251 | py | # coding=utf8
"""
github.py - Willie Github Module
Copyright 2012, Dimitri Molenaars http://tyrope.nl/
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net/
"""
from __future__ import unicode_literals
from datetime import datetime
import sys
if sys.version_info.major < 3:
from urllib2 import HTTPError
else:
from urllib.error import HTTPError
import json
from willie import web, tools
from willie.module import commands, rule, NOLIMIT
import os
import re
from willie.logger import get_logger
LOGGER = get_logger(__name__)
issueURL = (r'https?://(?:www\.)?github.com/'
'([A-z0-9\-]+/[A-z0-9\-]+)/'
'(?:issues|pull)/'
'([\d]+)')
regex = re.compile(issueURL)
def checkConfig(bot):
if not bot.config.has_option('github', 'oauth_token') or not bot.config.has_option('github', 'repo'):
return False
else:
return [bot.config.github.oauth_token, bot.config.github.repo]
def configure(config):
"""
| [github] | example | purpose |
| -------- | ------- | ------- |
| oauth_token | 5868e7af57496cc3ae255868e7af57496cc3ae25 | The OAuth token to connect to your github repo |
| repo | embolalia/willie | The GitHub repo you're working from. |
"""
chunk = ''
if config.option('Configuring github issue reporting and searching module', False):
config.interactive_add('github', 'oauth_token', 'Github API Oauth2 token', '')
config.interactive_add('github', 'repo', 'Github repository', 'embolalia/willie')
return chunk
def setup(bot):
if not bot.memory.contains('url_callbacks'):
bot.memory['url_callbacks'] = tools.WillieMemory()
bot.memory['url_callbacks'][regex] = issue_info
def shutdown(bot):
del bot.memory['url_callbacks'][regex]
@commands('makeissue', 'makebug')
def issue(bot, trigger):
"""Create a GitHub issue, also known as a bug report. Syntax: .makeissue Title of the bug report"""
# check input
if not trigger.group(2):
return bot.say('Please title the issue')
# Is the Oauth token and repo available?
gitAPI = checkConfig(bot)
if not gitAPI:
return bot.say('Git module not configured, make sure github.oauth_token and github.repo are defined')
# parse input
now = ' '.join(str(datetime.utcnow()).split(' ')).split('.')[0] + ' UTC'
body = 'Submitted by: %s\nFrom channel: %s\nAt %s' % (trigger.nick, trigger.sender, now)
data = {"title": trigger.group(2), "body": body}
# submit
try:
raw = web.post('https://api.github.com/repos/' + gitAPI[1] + '/issues?access_token=' + gitAPI[0], json.dumps(data))
except HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
data = json.loads(raw)
bot.say('Issue #%s posted. %s' % (data['number'], data['html_url']))
LOGGER.warning('Issue #%s created in %s', data['number'], trigger.sender)
@commands('addtrace', 'addtraceback')
def add_traceback(bot, trigger):
"""Add a traceback to a GitHub issue.
This pulls the traceback from the exceptions log file. To use, put .addtrace
followed by the issue number to add the comment to, then the signature of
    the error (the message shown to the channel when the error occurred). This
command will only work for errors from unhandled exceptions."""
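    # Hypothetical example: ".addtrace 42 AttributeError: 'NoneType' object has no
    # attribute 'group'" looks up that signature in exceptions.log and posts the
    # recorded traceback as a comment on GitHub issue #42.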
# Make sure the API is set up
gitAPI = checkConfig(bot)
if not gitAPI:
return bot.say('GitHub module not configured, make sure github.oauth_token and github.repo are defined')
if not trigger.group(2):
bot.say('Please give both the issue number and the error message.')
return
# Make sure the input is valid
args = trigger.group(2).split(None, 1)
if len(args) != 2:
bot.say('Please give both the issue number and the error message.')
return
number, trace = args
# Make sure the given issue number exists
issue_data = web.get('https://api.github.com/repos/%s/issues/%s' % (gitAPI[1], number))
issue_data = json.loads(issue_data)
if 'message' in issue_data and issue_data['message'] == 'Not Found':
return bot.say("That issue doesn't exist.")
# Find the relevant lines from the log file
post = ''
logfile = os.path.join(bot.config.logdir, 'exceptions.log')
with open(logfile) as log:
in_trace = False
for data in log:
if data == 'Signature: ' + trace + '\n':
post = data
in_trace = True
elif data == '----------------------------------------\n':
in_trace = False
elif in_trace:
post += data
# Give an error if we didn't find the traceback
if not post:
return bot.say("I don't remember getting that error. Please post it "
"yourself at https://github.com/%s/issues/%s"
% (gitAPI[1], number))
# Make the comment
try:
raw = web.post('https://api.github.com/repos/' + gitAPI[1] + '/issues/'
+ number + '/comments?access_token=' + gitAPI[0],
json.dumps({'body': '``\n' + post + '``'}))
except OSError: # HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
data = json.loads(raw)
bot.say('Added traceback to issue #%s. %s' % (number, data['html_url']))
LOGGER.warning('Traceback added to #%s in %s.', number, trigger.sender)
@commands('findissue', 'findbug')
def findIssue(bot, trigger):
"""Search for a GitHub issue by keyword or ID. usage: .findissue search keywords/ID (optional) You can specify the first keyword as "CLOSED" to search closed issues."""
if not trigger.group(2):
return bot.reply('What are you searching for?')
# Is the Oauth token and repo available?
gitAPI = checkConfig(bot)
if not gitAPI:
return bot.say('Git module not configured, make sure github.oauth_token and github.repo are defined')
firstParam = trigger.group(2).split(' ')[0]
if firstParam.isdigit():
URL = 'https://api.github.com/repos/%s/issues/%s' % (gitAPI[1], firstParam)
elif firstParam == 'CLOSED':
if '%20'.join(trigger.group(2).split(' ')[1:]) not in ('', '\x02', '\x03'):
URL = 'https://api.github.com/legacy/issues/search/' + gitAPI[1] + '/closed/' + '%20'.join(trigger.group(2).split(' ')[1:])
else:
return bot.reply('What are you searching for?')
else:
URL = 'https://api.github.com/legacy/issues/search/%s/open/%s' % (gitAPI[1], web.quote(trigger.group(2)))
try:
raw = web.get(URL)
except HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
try:
if firstParam.isdigit():
data = json.loads(raw)
else:
data = json.loads(raw)['issues'][-1]
except (KeyError, IndexError):
return bot.say('No search results.')
try:
if len(data['body'].split('\n')) > 1:
body = data['body'].split('\n')[0] + '...'
else:
body = data['body'].split('\n')[0]
except (KeyError):
LOGGER.exception('API returned an invalid result on query request %s',
trigger.group(2))
bot.say('Invalid result, please try again later.')
return NOLIMIT
bot.reply('[#%s]\x02title:\x02 %s \x02|\x02 %s' % (data['number'], data['title'], body))
bot.say(data['html_url'])
@rule('.*%s.*' % issueURL)
def issue_info(bot, trigger, match=None):
match = match or trigger
URL = 'https://api.github.com/repos/%s/issues/%s' % (match.group(1), match.group(2))
try:
raw = web.get(URL)
except HTTPError:
bot.say('The GitHub API returned an error.')
return NOLIMIT
data = json.loads(raw)
try:
if len(data['body'].split('\n')) > 1:
body = data['body'].split('\n')[0] + '...'
else:
body = data['body'].split('\n')[0]
except (KeyError):
bot.say('The API says this is an invalid issue. Please report this if you know it\'s a correct link!')
return NOLIMIT
bot.say('[#%s]\x02title:\x02 %s \x02|\x02 %s' % (data['number'], data['title'], body))
| [
"mweinelt@gmail.com"
] | mweinelt@gmail.com |
2d11dd2f6d92cd3a3ddcd2c943eb1e114be0e46e | 99cba296261b41781f642e98e4427853bf15a6ea | /modules/s3/s3anonymize.py | fc9a4ca0ea58462b93c7577a0c5d7f3b7234fb62 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sahana/SAMBRO | 6f2b06060d5abc05b260337a45a63cca6d7439c9 | afeef1ebb38e9e893a79c5e6824860a6e7ba09d5 | refs/heads/master | 2020-05-23T02:53:42.579167 | 2019-06-12T22:37:05 | 2019-06-12T22:37:05 | 191,917,305 | 1 | 1 | NOASSERTION | 2019-10-20T23:45:43 | 2019-06-14T09:40:54 | Python | UTF-8 | Python | false | false | 19,447 | py | # -*- coding: utf-8 -*-
""" S3 Person Record Anonymizing
@copyright: 2018-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import uuid
from gluon import current, A, BUTTON, DIV, FORM, INPUT, LABEL, P
from s3dal import original_tablename
from .s3rest import S3Method
from .s3query import FS, S3Joins
from .s3validators import JSONERRORS
from .s3utils import s3_str
__all__ = ("S3Anonymize",
"S3AnonymizeWidget",
)
# =============================================================================
class S3Anonymize(S3Method):
""" REST Method to Anonymize Person Records """
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request instance
@param attr: controller parameters
@return: output data (JSON)
"""
output = {}
table, record_id = self.get_target_id()
if not table:
r.error(405, "Anonymizing not configured for resource")
if not record_id:
r.error(400, "No target record specified")
if not self.permitted(table, record_id):
r.unauthorized()
if r.representation == "json":
if r.http == "POST":
output = self.anonymize(r, table, record_id)
else:
r.error(405, current.ERROR.BAD_METHOD)
else:
r.error(415, current.ERROR.BAD_FORMAT)
# Set Content Type
current.response.headers["Content-Type"] = "application/json"
return output
# -------------------------------------------------------------------------
@classmethod
def anonymize(cls, r, table, record_id):
"""
Handle POST (anonymize-request), i.e. anonymize the target record
@param r: the S3Request
@param table: the target Table
@param record_id: the target record ID
@returns: JSON message
"""
# Read+parse body JSON
s = r.body
s.seek(0)
try:
options = json.load(s)
except JSONERRORS:
options = None
if not isinstance(options, dict):
r.error(400, "Invalid request options")
# Verify submitted action key against session (CSRF protection)
widget_id = "%s-%s-anonymize" % (table, record_id)
session_s3 = current.session.s3
keys = session_s3.anonymize
if keys is None or \
widget_id not in keys or \
options.get("key") != keys[widget_id]:
r.error(400, "Invalid action key (form reopened in another tab?)")
# Get the available rules from settings
rules = current.s3db.get_config(table, "anonymize")
if isinstance(rules, (tuple, list)):
names = set(rule.get("name") for rule in rules)
names.discard(None)
else:
# Single rule
rules["name"] = "default"
names = (rules["name"],)
rules = [rules]
# Get selected rules from options
selected = options.get("apply")
if not isinstance(selected, list):
r.error(400, "Invalid request options")
# Validate selected rules
for name in selected:
if name not in names:
r.error(400, "Invalid rule: %s" % name)
# Merge selected rules
cleanup = {}
cascade = []
for rule in rules:
name = rule.get("name")
if not name or name not in selected:
continue
field_rules = rule.get("fields")
if field_rules:
cleanup.update(field_rules)
cascade_rules = rule.get("cascade")
if cascade_rules:
cascade.extend(cascade_rules)
# Apply selected rules
if cleanup or cascade:
rules = {"fields": cleanup, "cascade": cascade}
# NB will raise (+roll back) if configuration is invalid
cls.cascade(table, (record_id,), rules)
# Audit anonymize
prefix, name = original_tablename(table).split("_", 1)
current.audit("anonymize", prefix, name,
record = record_id,
representation = "html",
)
output = current.xml.json_message(updated=record_id)
else:
output = current.xml.json_message(msg="No applicable rules found")
return output
# -------------------------------------------------------------------------
def get_target_id(self):
"""
Determine the target table and record ID
@return: tuple (table, record_id)
"""
resource = self.resource
rules = resource.get_config("anonymize")
if not rules:
return None, None
return resource.table, self.record_id
# -------------------------------------------------------------------------
@staticmethod
def permitted(table, record_id):
"""
Check permissions to anonymize the target record
@param table: the target Table
@param record_id: the target record ID
@return: True|False
"""
has_permission = current.auth.s3_has_permission
return has_permission("update", table, record_id=record_id) and \
has_permission("delete", table, record_id=record_id)
# -------------------------------------------------------------------------
@classmethod
def cascade(cls, table, record_ids, rules):
"""
Apply cascade of rules to anonymize records
@param table: the Table
@param record_ids: a set of record IDs
@param rules: the rules for this Table
@raises Exception: if the cascade failed due to DB constraints
or invalid rules; callers should roll back
the transaction if an exception is raised
"""
s3db = current.s3db
pkey = table._id.name
cascade = rules.get("cascade")
if cascade:
fieldnames = set(rule.get("match", pkey) for _, rule in cascade)
if pkey not in fieldnames:
fieldnames.add(pkey)
fields = [table[fn] for fn in fieldnames]
db = current.db
rows = db(table._id.belongs(record_ids)).select(*fields)
for tablename, rule in cascade:
lookup = rule.get("lookup")
if lookup:
# Explicit look-up function, call with master table+rows,
# as well as the name of the related table; should return
# a set/tuple/list of record ids in the related table
ids = lookup(table, rows, tablename)
else:
key = rule.get("key")
if not key:
continue
field = rule.get("match", pkey)
match = set(row[field] for row in rows)
# Resolve key and construct query
resource = s3db.resource(tablename, components=[])
rq = FS(key).belongs(match)
query = rq.query(resource)
# Construct necessary joins
joins = S3Joins(tablename)
joins.extend(rq._joins(resource)[0])
joins = joins.as_list()
# Extract the target table IDs
target_rows = db(query).select(resource._id,
join = joins,
)
ids = set(row[resource._id.name] for row in target_rows)
# Recurse into related table
if ids:
cls.cascade(resource.table, ids, rule)
# Apply field rules
field_rules = rules.get("fields")
if field_rules:
cls.apply_field_rules(table, record_ids, field_rules)
# Apply deletion rules
if rules.get("delete"):
resource = s3db.resource(table, id=list(record_ids))
resource.delete(cascade=True)
# -------------------------------------------------------------------------
@staticmethod
def apply_field_rules(table, record_ids, rules):
"""
Apply field rules on a set of records in a table
@param table: the Table
@param record_ids: the record IDs
@param rules: the rules
@raises Exception: if the field rules could not be applied
due to DB constraints or invalid rules;
callers should roll back the transaction
if an exception is raised
"""
fields = [table[fn] for fn in rules if fn in table.fields]
if table._id.name not in rules:
fields.insert(0, table._id)
# Select the records
query = table._id.belongs(record_ids)
rows = current.db(query).select(*fields)
pkey = table._id.name
s3db = current.s3db
update_super = s3db.update_super
onaccept = s3db.onaccept
for row in rows:
data = {}
for fieldname, rule in rules.items():
if fieldname in table.fields:
field = table[fieldname]
else:
continue
if rule == "remove":
# Set to None
if field.notnull:
raise ValueError("Cannot remove %s - must not be NULL" % field)
else:
data[fieldname] = None
elif rule == "reset":
# Reset to the field's default value
default = field.default
if default is None and field.notnull:
raise ValueError("Cannot reset %s - default value None violates notnull-constraint")
data[fieldname] = default
elif callable(rule):
                    # Callable rule to produce a new value
data[fieldname] = rule(row[pkey], field, row[field])
elif type(rule) is tuple:
method, value = rule
if method == "set":
# Set a fixed value
data[fieldname] = value
if data:
success = row.update_record(**data)
if not success:
raise ValueError("Could not clean %s record" % table)
update_super(table, row)
data[pkey] = row[pkey]
onaccept(table, data, method="update")
# =============================================================================
class S3AnonymizeWidget(object):
""" GUI widget for S3Anonymize """
# -------------------------------------------------------------------------
@classmethod
def widget(cls, r, _class="action-lnk"):
"""
Render an action item (link or button) to anonymize the
target record of an S3Request, which can be embedded in
the record view
@param r: the S3Request
@param _class: HTML class for the action item
            @returns: the action item (an HTML helper instance), or an empty
string if no anonymize-rules are configured for the
target table, no target record was specified or the
user is not permitted to anonymize it
"""
T = current.T
default = ""
# Determine target table
if r.component:
resource = r.component
if resource.link and not r.actuate_link():
resource = resource.link
else:
resource = r.resource
table = resource.table
# Determine target record
record_id = S3Anonymize._record_id(r)
if not record_id:
return default
# Check if target is configured for anonymize
rules = resource.get_config("anonymize")
if not rules:
return default
if not isinstance(rules, (tuple, list)):
# Single rule
rules["name"] = "default"
rules = [rules]
# Check permissions to anonymize
if not S3Anonymize.permitted(table, record_id):
return default
# Determine widget ID
widget_id = "%s-%s-anonymize" % (table, record_id)
# Inject script
script_options = {"ajaxURL": r.url(method = "anonymize",
representation = "json",
),
}
cls.inject_script(widget_id, script_options)
# Action button
action_button = A(T("Anonymize"), _class="anonymize-btn")
if _class:
action_button.add_class(_class)
# Dialog and Form
INFO = T("The following information will be deleted from the record")
CONFIRM = T("Are you sure you want to delete the selected details?")
SUCCESS = T("Action successful - please wait...")
form = FORM(P("%s:" % INFO),
cls.selector(rules),
P(CONFIRM),
DIV(INPUT(value = "anonymize_confirm",
_name = "anonymize_confirm",
_type = "checkbox",
),
LABEL(T("Yes, delete the selected details")),
_class = "anonymize-confirm",
),
cls.buttons(),
_class = "anonymize-form",
# Store action key in form
hidden = {"action-key": cls.action_key(widget_id)},
)
dialog = DIV(form,
DIV(P(SUCCESS),
_class = "hide anonymize-success",
),
_class = "anonymize-dialog hide",
_title = T("Anonymize"),
)
# Assemble widget
widget = DIV(action_button,
dialog,
_class="s3-anonymize",
_id = widget_id,
)
return widget
# -------------------------------------------------------------------------
@staticmethod
def action_key(widget_id):
"""
Generate a unique STP token for the widget (CSRF protection) and
store it in session
@param widget_id: the widget ID (which includes the target
table name and record ID)
@return: a unique identifier (as string)
"""
session_s3 = current.session.s3
keys = session_s3.anonymize
if keys is None:
session_s3.anonymize = keys = {}
key = keys[widget_id] = str(uuid.uuid4())
return key
# -------------------------------------------------------------------------
@staticmethod
def selector(rules):
"""
Generate the rule selector for anonymize-form
@param rules: the list of configured rules
@return: the selector (DIV)
"""
T = current.T
selector = DIV(_class="anonymize-select")
for rule in rules:
name = rule.get("name")
if not name:
continue
title = T(rule.get("title", name))
selector.append(DIV(INPUT(value = "on",
_name = s3_str(name),
_type = "checkbox",
_class = "anonymize-rule",
),
LABEL(title),
_class = "anonymize-option",
))
return selector
# -------------------------------------------------------------------------
@staticmethod
def buttons():
"""
Generate the submit/cancel buttons for the anonymize-form
@return: the buttons row (DIV)
"""
T = current.T
return DIV(BUTTON(T("Submit"),
_class = "small alert button anonymize-submit",
_disabled = "disabled",
_type = "button",
),
A(T("Cancel"),
_class = "cancel-form-btn action-lnk anonymize-cancel",
_href = "javascript:void(0)",
),
_class = "anonymize-buttons",
)
# -------------------------------------------------------------------------
@staticmethod
def inject_script(widget_id, options):
"""
Inject the necessary JavaScript for the UI dialog
@param widget_id: the widget ID
@param options: JSON-serializable dict of widget options
"""
request = current.request
s3 = current.response.s3
# Static script
if s3.debug:
script = "/%s/static/scripts/S3/s3.ui.anonymize.js" % \
request.application
else:
script = "/%s/static/scripts/S3/s3.ui.anonymize.min.js" % \
request.application
scripts = s3.scripts
if script not in scripts:
scripts.append(script)
# Widget options
opts = {}
if options:
opts.update(options)
# Widget instantiation
script = '''$('#%(widget_id)s').anonymize(%(options)s)''' % \
{"widget_id": widget_id,
"options": json.dumps(opts),
}
jquery_ready = s3.jquery_ready
if script not in jquery_ready:
jquery_ready.append(script)
# END =========================================================================
| [
"dominic@nursix.org"
] | dominic@nursix.org |
ef82b63e0e7dbbd1085825847ec183ac0f11b914 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano2615.py | b9cfb7d4b3e6f3a41db23aa2782f28b088a22c4b | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/E2E949DF-C719-1B48-80C3-156011763C93.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest2615.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
bfae3709907e8c2bfdecf0b16044bc79c317d929 | abad82a1f487c5ff2fb6a84059a665aa178275cb | /Codewars/8kyu/multiply-the-number/Python/test.py | 65310c35d9c4b0fb2c4cc05b73d6733837d2db71 | [
"MIT"
] | permissive | RevansChen/online-judge | 8ae55f136739a54f9c9640a967ec931425379507 | ad1b07fee7bd3c49418becccda904e17505f3018 | refs/heads/master | 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # Python - 2.7.6
Test.describe('Basic Tests')
Test.assert_equals(multiply(10), 250)
Test.assert_equals(multiply(5), 25)
Test.assert_equals(multiply(200), 25000)
Test.assert_equals(multiply(0), 0)
Test.assert_equals(multiply(-2), -10)
| [
"d79523@hotmail.com"
] | d79523@hotmail.com |
dbc68db512ec1e4767fb5aa260cf368e4c11642e | 29d09c634ffdd8cab13631d62bc6e3ad00df49bf | /Algorithm/swexpert/1216_회문2.py | f7bb26f33e8ca6571a0314bcc3dffae1b790dea9 | [] | no_license | kim-taewoo/TIL_PUBLIC | f1d32c3b4f46344c1c99f02e95cc6d2a888a0374 | ae86b542f8b1805b5dd103576d6538e3b1f5b9f4 | refs/heads/master | 2021-09-12T04:22:52.219301 | 2021-08-28T16:14:11 | 2021-08-28T16:14:11 | 237,408,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | T = 10
def chk_palindrome(list_to_chk, length):
for i in range(length//2):
if list_to_chk[i] != list_to_chk[-1-i]:
return False
return True
for _ in range(1, T+1):
t = int(input())
a = [list(input()) for _ in range(100)]
found = False
    for l in range(100, 0, -1): # check lengths from the longest (100) down to 1
for r in range(100):
if found: break
for s in range(100-l+1):
if found: break
                chk_list = a[r][s:s+l] # horizontal check (each row)
                chk_list2 = [a[x][r] for x in range(s,s+l)] # vertical check (each column)
if chk_palindrome(chk_list, l) or chk_palindrome(chk_list2, l):
found = True
if found: break
print("#{} {}".format(t, l)) | [
"acoustic0419@gmail.com"
] | acoustic0419@gmail.com |
88147435253293e973a357dbd1eb71c9efe2242f | ec95cf11db88a7aad1ae6bf4daf321fc38a3ca43 | /evennia/evennia/evennia/commands/command.py | 17902b36026fe73f8637abf0af816f2171423fb3 | [
"BSD-3-Clause"
] | permissive | castlelorestudios/EvenniaPluginSampleProject | 25d68adf7fe2b716a7e1d317d1aeca568f67fcdd | 095ad7a2fe583033fb7e2070f3f8920a6e88b323 | refs/heads/master | 2022-12-14T08:19:07.572206 | 2019-09-19T01:11:16 | 2019-09-19T01:11:16 | 195,511,481 | 3 | 3 | BSD-3-Clause | 2022-11-22T03:00:00 | 2019-07-06T07:32:23 | Python | UTF-8 | Python | false | false | 17,147 | py | """
The base Command class.
All commands in Evennia inherit from the 'Command' class in this module.
"""
from builtins import range
import re
from django.conf import settings
from evennia.locks.lockhandler import LockHandler
from evennia.utils.utils import is_iter, fill, lazy_property, make_iter
from future.utils import with_metaclass
def _init_command(cls, **kwargs):
"""
Helper command.
Makes sure all data are stored as lowercase and
do checking on all properties that should be in list form.
Sets up locks to be more forgiving. This is used both by the metaclass
and (optionally) at instantiation time.
If kwargs are given, these are set as instance-specific properties
on the command.
"""
for i in range(len(kwargs)):
# used for dynamic creation of commands
key, value = kwargs.popitem()
setattr(cls, key, value)
cls.key = cls.key.lower()
if cls.aliases and not is_iter(cls.aliases):
try:
cls.aliases = [str(alias).strip().lower()
for alias in cls.aliases.split(',')]
except Exception:
cls.aliases = []
cls.aliases = list(set(alias for alias in cls.aliases
if alias and alias != cls.key))
# optimization - a set is much faster to match against than a list
cls._matchset = set([cls.key] + cls.aliases)
# optimization for looping over keys+aliases
cls._keyaliases = tuple(cls._matchset)
# by default we don't save the command between runs
if not hasattr(cls, "save_for_next"):
cls.save_for_next = False
# pre-process locks as defined in class definition
temp = []
if hasattr(cls, 'permissions'):
cls.locks = cls.permissions
if not hasattr(cls, 'locks'):
# default if one forgets to define completely
cls.locks = "cmd:all()"
if "cmd:" not in cls.locks:
cls.locks = "cmd:all();" + cls.locks
for lockstring in cls.locks.split(';'):
if lockstring and ':' not in lockstring:
lockstring = "cmd:%s" % lockstring
temp.append(lockstring)
cls.lock_storage = ";".join(temp)
if hasattr(cls, 'arg_regex') and isinstance(cls.arg_regex, basestring):
cls.arg_regex = re.compile(r"%s" % cls.arg_regex, re.I + re.UNICODE)
if not hasattr(cls, "auto_help"):
cls.auto_help = True
if not hasattr(cls, 'is_exit'):
cls.is_exit = False
if not hasattr(cls, "help_category"):
cls.help_category = "general"
cls.help_category = cls.help_category.lower()
class CommandMeta(type):
"""
The metaclass cleans up all properties on the class
"""
def __init__(cls, *args, **kwargs):
_init_command(cls, **kwargs)
super(CommandMeta, cls).__init__(*args, **kwargs)
# The Command class is the basic unit of an Evennia command; when
# defining new commands, the admin subclasses this class and
# defines their own parser method to handle the input. The
# advantage of this is inheritance; commands that have similar
# structure can parse the input string the same way, minimizing
# parsing errors.
class Command(with_metaclass(CommandMeta, object)):
"""
Base command
Usage:
command [args]
This is the base command class. Inherit from this
to create new commands.
The cmdhandler makes the following variables available to the
command methods (so you can always assume them to be there):
self.caller - the game object calling the command
self.cmdstring - the command name used to trigger this command (allows
you to know which alias was used, for example)
cmd.args - everything supplied to the command following the cmdstring
(this is usually what is parsed in self.parse())
cmd.cmdset - the cmdset from which this command was matched (useful only
seldomly, notably for help-type commands, to create dynamic
help entries and lists)
cmd.obj - the object on which this command is defined. If a default command,
this is usually the same as caller.
cmd.rawstring - the full raw string input, including any args and no parsing.
The following class properties can/should be defined on your child class:
key - identifier for command (e.g. "look")
aliases - (optional) list of aliases (e.g. ["l", "loo"])
locks - lock string (default is "cmd:all()")
help_category - how to organize this help entry in help system
(default is "General")
auto_help - defaults to True. Allows for turning off auto-help generation
arg_regex - (optional) raw string regex defining how the argument part of
the command should look in order to match for this command
(e.g. must it be a space between cmdname and arg?)
(Note that if auto_help is on, this initial string is also used by the
system to create the help entry for the command, so it's a good idea to
    format it similarly to this one). This behavior can be changed by
overriding the method 'get_help' of a command: by default, this
method returns cmd.__doc__ (that is, this very docstring, or
the docstring of your command). You can, however, extend or
replace this without disabling auto_help.
"""
# the main way to call this command (e.g. 'look')
key = "command"
# alternative ways to call the command (e.g. 'l', 'glance', 'examine')
aliases = []
# a list of lock definitions on the form
# cmd:[NOT] func(args) [ AND|OR][ NOT] func2(args)
locks = settings.COMMAND_DEFAULT_LOCKS
# used by the help system to group commands in lists.
help_category = settings.COMMAND_DEFAULT_HELP_CATEGORY
# This allows to turn off auto-help entry creation for individual commands.
auto_help = True
# optimization for quickly separating exit-commands from normal commands
is_exit = False
# define the command not only by key but by the regex form of its arguments
arg_regex = settings.COMMAND_DEFAULT_ARG_REGEX
# whether self.msg sends to all sessions of a related account/object (default
# is to only send to the session sending the command).
msg_all_sessions = settings.COMMAND_DEFAULT_MSG_ALL_SESSIONS
# auto-set (by Evennia on command instantiation) are:
# obj - which object this command is defined on
# session - which session is responsible for triggering this command. Only set
# if triggered by an account.
def __init__(self, **kwargs):
"""
The lockhandler works the same as for objects.
        Optional kwargs will be set as properties on the Command at runtime,
        overloading any same-named class properties.
"""
if kwargs:
_init_command(self, **kwargs)
@lazy_property
def lockhandler(self):
return LockHandler(self)
def __str__(self):
"""
Print the command key
"""
return self.key
def __eq__(self, cmd):
"""
Compare two command instances to each other by matching their
key and aliases.
Args:
cmd (Command or str): Allows for equating both Command
objects and their keys.
Returns:
equal (bool): If the commands are equal or not.
"""
try:
# first assume input is a command (the most common case)
return self._matchset.intersection(cmd._matchset)
except AttributeError:
# probably got a string
return cmd in self._matchset
def __ne__(self, cmd):
"""
The logical negation of __eq__. Since this is one of the most
called methods in Evennia (along with __eq__) we do some
code-duplication here rather than issuing a method-lookup to
__eq__.
"""
try:
return self._matchset.isdisjoint(cmd._matchset)
except AttributeError:
return cmd not in self._matchset
def __contains__(self, query):
"""
This implements searches like 'if query in cmd'. It's a fuzzy
matching used by the help system, returning True if query can
be found as a substring of the commands key or its aliases.
Args:
query (str): query to match against. Should be lower case.
Returns:
result (bool): Fuzzy matching result.
"""
return any(query in keyalias for keyalias in self._keyaliases)
def _optimize(self):
"""
Optimize the key and aliases for lookups.
"""
# optimization - a set is much faster to match against than a list
self._matchset = set([self.key] + self.aliases)
# optimization for looping over keys+aliases
self._keyaliases = tuple(self._matchset)
def set_key(self, new_key):
"""
Update key.
Args:
new_key (str): The new key.
Notes:
            Use this method rather than assigning self.key directly,
            so that the optimization caches are properly updated as well.
"""
self.key = new_key.lower()
self._optimize()
def set_aliases(self, new_aliases):
"""
Replace aliases with new ones.
Args:
new_aliases (str or list): Either a ;-separated string
or a list of aliases. These aliases will replace the
existing ones, if any.
Notes:
            Use this method rather than assigning self.aliases directly,
            so that the optimization caches are properly updated as well.
"""
if isinstance(new_aliases, basestring):
new_aliases = new_aliases.split(';')
aliases = (str(alias).strip().lower() for alias in make_iter(new_aliases))
self.aliases = list(set(alias for alias in aliases if alias != self.key))
self._optimize()
def match(self, cmdname):
"""
This is called by the system when searching the available commands,
in order to determine if this is the one we wanted. cmdname was
previously extracted from the raw string by the system.
Args:
cmdname (str): Always lowercase when reaching this point.
Returns:
result (bool): Match result.
"""
return cmdname in self._matchset
def access(self, srcobj, access_type="cmd", default=False):
"""
This hook is called by the cmdhandler to determine if srcobj
is allowed to execute this command. It should return a boolean
        value and is not normally something that needs to be changed since
it's using the Evennia permission system directly.
Args:
srcobj (Object): Object trying to gain permission
access_type (str, optional): The lock type to check.
default (bool, optional): The fallback result if no lock
of matching `access_type` is found on this Command.
"""
return self.lockhandler.check(srcobj, access_type, default=default)
def msg(self, text=None, to_obj=None, from_obj=None,
session=None, **kwargs):
"""
This is a shortcut instead of calling msg() directly on an
object - it will detect if caller is an Object or an Account and
also appends self.session automatically if self.msg_all_sessions is False.
Args:
text (str, optional): Text string of message to send.
to_obj (Object, optional): Target object of message. Defaults to self.caller.
from_obj (Object, optional): Source of message. Defaults to to_obj.
session (Session, optional): Supply data only to a unique
session (ignores the value of `self.msg_all_sessions`).
Kwargs:
options (dict): Options to the protocol.
            any (any): All other keywords are interpreted as the
name of send-instructions.
"""
from_obj = from_obj or self.caller
to_obj = to_obj or from_obj
if not session and not self.msg_all_sessions:
if to_obj == self.caller:
session = self.session
else:
session = to_obj.sessions.get()
to_obj.msg(text=text, from_obj=from_obj, session=session, **kwargs)
def execute_cmd(self, raw_string, session=None, obj=None, **kwargs):
"""
A shortcut of execute_cmd on the caller. It appends the
session automatically.
Args:
raw_string (str): Execute this string as a command input.
session (Session, optional): If not given, the current command's Session will be used.
obj (Object or Account, optional): Object or Account on which to call the execute_cmd.
If not given, self.caller will be used.
Kwargs:
Other keyword arguments will be added to the found command
            object instance as variables before it executes. This is
            unused by default Evennia but may be used to set flags and
            change operating parameters for commands at run-time.
"""
obj = self.caller if obj is None else obj
session = self.session if session is None else session
obj.execute_cmd(raw_string, session=session, **kwargs)
# Common Command hooks
def at_pre_cmd(self):
"""
This hook is called before self.parse() on all commands. If
this hook returns anything but False/None, the command
sequence is aborted.
"""
pass
def at_post_cmd(self):
"""
This hook is called after the command has finished executing
(after self.func()).
"""
pass
def parse(self):
"""
Once the cmdhandler has identified this as the command we
want, this function is run. If many of your commands have a
similar syntax (for example 'cmd arg1 = arg2') you should
simply define this once and just let other commands of the
same form inherit from this. See the docstring of this module
for which object properties are available to use (notably
self.args).
"""
pass
def func(self):
"""
This is the actual executing part of the command. It is
called directly after self.parse(). See the docstring of this
module for which object properties are available (beyond those
set in self.parse())
"""
# a simple test command to show the available properties
string = "-" * 50
string += "\n|w%s|n - Command variables from evennia:\n" % self.key
string += "-" * 50
string += "\nname of cmd (self.key): |w%s|n\n" % self.key
string += "cmd aliases (self.aliases): |w%s|n\n" % self.aliases
string += "cmd locks (self.locks): |w%s|n\n" % self.locks
string += "help category (self.help_category): |w%s|n\n" % self.help_category.capitalize()
string += "object calling (self.caller): |w%s|n\n" % self.caller
string += "object storing cmdset (self.obj): |w%s|n\n" % self.obj
string += "command string given (self.cmdstring): |w%s|n\n" % self.cmdstring
# show cmdset.key instead of cmdset to shorten output
string += fill("current cmdset (self.cmdset): |w%s|n\n" %
(self.cmdset.key if self.cmdset.key else self.cmdset.__class__))
self.caller.msg(string)
def get_extra_info(self, caller, **kwargs):
"""
Display some extra information that may help distinguish this
command from others, for instance, in a disambiguity prompt.
If this command is a potential match in an ambiguous
situation, one distinguishing feature may be its attachment to
a nearby object, so we include this if available.
Args:
caller (TypedObject): The caller who typed an ambiguous
term handed to the search function.
Returns:
A string with identifying information to disambiguate the
object, conventionally with a preceding space.
"""
if hasattr(self, 'obj') and self.obj and self.obj != caller:
return " (%s)" % self.obj.get_display_name(caller).strip()
return ""
def get_help(self, caller, cmdset):
"""
Return the help message for this command and this caller.
By default, return self.__doc__ (the docstring just under
the class definition). You can override this behavior,
though, and even customize it depending on the caller, or other
commands the caller can use.
Args:
caller (Object or Account): the caller asking for help on the command.
cmdset (CmdSet): the command set (if you need additional commands).
Returns:
docstring (str): the help text to provide the caller for this command.
"""
return self.__doc__
class InterruptCommand(Exception):
"""Cleanly interrupt a command."""
pass
| [
"corneelbooysen@hotmail.com"
] | corneelbooysen@hotmail.com |
08506cafbe766926973725265dc18f740b64100d | a9ca402cc2a0757831d355781f388443067bae76 | /swagger_server/controllers/bsdf_material_controller.py | d4c0836bc326159fa12808677a6507173f728866 | [] | no_license | AntoineDao/LadyBugToolsAPIServer | 23a21fbc0a492df35923e33d096be5151adb7f52 | 2c13c96ed3e7c9e44f22875f80c2dd7c10cb2727 | refs/heads/master | 2020-03-26T18:23:51.461620 | 2018-09-10T22:40:47 | 2018-09-10T22:40:47 | 145,210,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import connexion
import six
from swagger_server.models.bsdf_material_schema import BSDFMaterialSchema # noqa: E501
from swagger_server.models.error_model_schema import ErrorModelSchema # noqa: E501
from swagger_server.models.succesfully_created_schema import SuccesfullyCreatedSchema # noqa: E501
from swagger_server import util
def material_bsdf_post(bsdf_material): # noqa: E501
"""Create a new bsdf material object
Adds a new bsdf material object to the database # noqa: E501
:param bsdf_material: a bsdf material object
:type bsdf_material: dict | bytes
:rtype: SuccesfullyCreatedSchema
"""
if connexion.request.is_json:
bsdf_material = BSDFMaterialSchema.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def material_bsdf_uuid_put(uuid, bsdf_material): # noqa: E501
"""Modify an existing bsdf material file
    Modifies any parameter (except uuid) of a material file by completely replacing the definition file. A finer-grained method can be set up later. # noqa: E501
:param uuid: The unique identifier of the material.
:type uuid: str
:param bsdf_material: a bsdf material object
:type bsdf_material: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
bsdf_material = BSDFMaterialSchema.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
| [
"antoinedao1@gmail.com"
] | antoinedao1@gmail.com |
351fe2da8b0c829785ddfedfdeb245bb586d9f7b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/invariance-equivariance_ID2466_for_PyTorch/eval_fewshot.py | 9c4cbe8bfed5b4736a93c1af7c925be9f0e067f0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,704 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
from __future__ import print_function
import argparse
import socket
import time
import os
import mkl
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from models import model_pool
from models.util import create_model
from dataset.mini_imagenet import MetaImageNet
from dataset.tiered_imagenet import MetaTieredImageNet
from dataset.cifar import MetaCIFAR100
from dataset.transform_cfg import transforms_test_options, transforms_list
from eval.meta_eval import meta_test, meta_test_tune
from eval.cls_eval import validate, embedding
from dataloader import get_dataloaders
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
mkl.set_num_threads(2)
def parse_option():
parser = argparse.ArgumentParser('argument for training')
# load pretrained model
parser.add_argument('--model', type=str, default='resnet12', choices=model_pool)
parser.add_argument('--model_path', type=str, default="", help='absolute path to .pth model')
# parser.add_argument('--model_path', type=str, default="/raid/data/IncrementLearn/imagenet/neurips20/model/maml_miniimagenet_test_5shot_step_5_5ways_5shots/pretrain_maml_miniimagenet_test_5shot_step_5_5ways_5shots.pt", help='absolute path to .pth model')
# dataset
parser.add_argument('--dataset', type=str, default='miniImageNet', choices=['miniImageNet', 'tieredImageNet',
'CIFAR-FS', 'FC100', "toy"])
parser.add_argument('--transform', type=str, default='A', choices=transforms_list)
# specify data_root
parser.add_argument('--data_root', type=str, default='/raid/data/IncrementLearn/imagenet/Datasets/MiniImagenet/', help='path to data root')
parser.add_argument('--simclr', type=bool, default=False, help='use simple contrastive learning representation')
# meta setting
parser.add_argument('--n_test_runs', type=int, default=600, metavar='N',
help='Number of test runs')
parser.add_argument('--n_ways', type=int, default=5, metavar='N',
help='Number of classes for doing each classification run')
parser.add_argument('--n_shots', type=int, default=1, metavar='N',
help='Number of shots in test')
parser.add_argument('--n_queries', type=int, default=15, metavar='N',
help='Number of query in test')
parser.add_argument('--n_aug_support_samples', default=5, type=int,
help='The number of augmented samples for each meta test sample')
parser.add_argument('--num_workers', type=int, default=3, metavar='N',
help='Number of workers for dataloader')
parser.add_argument('--test_batch_size', type=int, default=1, metavar='test_batch_size',
                        help='Size of test batch')
parser.add_argument('--batch_size', type=int, default=64, help='batch_size')
opt = parser.parse_args()
if opt.dataset == 'CIFAR-FS' or opt.dataset == 'FC100':
opt.transform = 'D'
if 'trainval' in opt.model_path:
opt.use_trainval = True
else:
opt.use_trainval = False
# set the path according to the environment
if not opt.data_root:
opt.data_root = './data/{}'.format(opt.dataset)
else:
if(opt.dataset=="toy"):
opt.data_root = '{}/{}'.format(opt.data_root, "CIFAR-FS")
else:
opt.data_root = '{}/{}'.format(opt.data_root, opt.dataset)
opt.data_aug = True
return opt
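# Typical invocation (hypothetical paths):
#   python eval_fewshot.py --model_path /path/to/model.pth \
#       --data_root /path/to/datasets --dataset miniImageNet --n_shots 1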
def main():
opt = parse_option()
opt.n_test_runs = 600
train_loader, val_loader, meta_testloader, meta_valloader, n_cls, _ = get_dataloaders(opt)
# load model
model = create_model(opt.model, n_cls, opt.dataset)
ckpt = torch.load(opt.model_path)["model"]
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in ckpt.items():
name = k.replace("module.","")
new_state_dict[name]=v
model.load_state_dict(new_state_dict)
# model.load_state_dict(ckpt["model"])
if torch.npu.is_available():
model = model.npu()
cudnn.benchmark = True
start = time.time()
test_acc, test_std = meta_test(model, meta_testloader)
test_time = time.time() - start
print('test_acc: {:.4f}, test_std: {:.4f}, time: {:.1f}'.format(test_acc, test_std, test_time))
start = time.time()
test_acc_feat, test_std_feat = meta_test(model, meta_testloader, use_logit=False)
test_time = time.time() - start
print('test_acc_feat: {:.4f}, test_std: {:.4f}, time: {:.1f}'.format(test_acc_feat, test_std_feat, test_time))
if __name__ == '__main__':
main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
309fc39980c3f32d8daa830f99857d0155d49716 | c6b9b9f2fbc6c62e7a86b02718954661af3c564f | /configs/_base_/schedules/schedule_s_short.py | dea71cb530411533af0eec5170b5d1105c0c0d92 | [
"Apache-2.0"
] | permissive | open-mmlab/mmflow | a90ff072805ac79cbc0b277baded1e74d25cccf0 | 9fb1d2f1bb3de641ddcba0dd355064b6ed9419f4 | refs/heads/master | 2023-05-22T05:19:48.986601 | 2023-01-10T16:05:18 | 2023-01-10T16:05:18 | 428,493,460 | 808 | 110 | Apache-2.0 | 2023-09-05T13:19:38 | 2021-11-16T02:42:41 | Python | UTF-8 | Python | false | false | 413 | py | # optimizer
optimizer = dict(
type='Adam', lr=0.0001, weight_decay=0.0004, betas=(0.9, 0.999))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step', by_epoch=False, gamma=0.5, step=[300000, 400000, 500000])
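# With gamma=0.5 the base lr of 1e-4 is halved at iterations 300k, 400k and 500k
# (i.e. 5e-5, 2.5e-5, 1.25e-5) for the remainder of the 600k-iteration run.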
runner = dict(type='IterBasedRunner', max_iters=600000)
checkpoint_config = dict(by_epoch=False, interval=50000)
evaluation = dict(interval=50000, metric='EPE')
| [
"meowzheng@outlook.com"
] | meowzheng@outlook.com |