content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
def binPack(i, array, target, dp):
if (i == len(array)):
return(0)
if (i not in dp):
dp[i] = {}
if (target not in dp[i]):
best = binPack(i + 1, array, target, dp)
if (target - array[i] >= 0):
aux = binPack(i + 1, array, target - array[i], dp) + array[i]
if (aux > best):
best = aux
dp[i][target] = best
return(dp[i][target])
items = list(map(int, input().split()))
target = int(input())
positive, negative = [], []
for i in items:
if (i < 0):
negative += [-i]
else:
positive += [i]
if (target == 0):
if (target in items):
print("Yes")
else:
sumMin, Yes = min( sum(positive), sum(negative) ), False
for i in range(1, sumMin + 1):
if (Yes):
break
dp = {}
posAnswer = binPack(0, positive, i, dp)
dp = {}
negAnswer = binPack(0, negative, i, dp)
if (posAnswer == negAnswer):
Yes = True
if (Yes):
print("Yes")
else:
print("No")
elif (target > 0):
sumMin, Yes = sum(positive), False
for i in range(target, sumMin + 1):
if (Yes):
break
dp = {}
posAnswer = binPack(0, positive, i, dp)
dp = {}
negAnswer = binPack(0, negative, i - target, dp)
#print(negAnswer, posAnswer)
total = sum(positive)
if (abs(posAnswer - (total - posAnswer)) == target):
Yes = True
if (Yes):
print("Yes")
else:
print("No")
else:
sumMin, Yes = sum(negative), False
for i in range(-target, sumMin + 1):
if (Yes):
break
dp = {}
negAnswer = binPack(0, negative, i, dp)
dp = {}
posAnswer = binPack(0, positive, i + target, dp)
#print(i, negAnswer, posAnswer)
if (posAnswer - negAnswer == target):
Yes = True
if (Yes):
print("Yes")
else:
print("No")
| def bin_pack(i, array, target, dp):
if i == len(array):
return 0
if i not in dp:
dp[i] = {}
if target not in dp[i]:
best = bin_pack(i + 1, array, target, dp)
if target - array[i] >= 0:
aux = bin_pack(i + 1, array, target - array[i], dp) + array[i]
if aux > best:
best = aux
dp[i][target] = best
return dp[i][target]
items = list(map(int, input().split()))
target = int(input())
(positive, negative) = ([], [])
for i in items:
if i < 0:
negative += [-i]
else:
positive += [i]
if target == 0:
if target in items:
print('Yes')
else:
(sum_min, yes) = (min(sum(positive), sum(negative)), False)
for i in range(1, sumMin + 1):
if Yes:
break
dp = {}
pos_answer = bin_pack(0, positive, i, dp)
dp = {}
neg_answer = bin_pack(0, negative, i, dp)
if posAnswer == negAnswer:
yes = True
if Yes:
print('Yes')
else:
print('No')
elif target > 0:
(sum_min, yes) = (sum(positive), False)
for i in range(target, sumMin + 1):
if Yes:
break
dp = {}
pos_answer = bin_pack(0, positive, i, dp)
dp = {}
neg_answer = bin_pack(0, negative, i - target, dp)
total = sum(positive)
if abs(posAnswer - (total - posAnswer)) == target:
yes = True
if Yes:
print('Yes')
else:
print('No')
else:
(sum_min, yes) = (sum(negative), False)
for i in range(-target, sumMin + 1):
if Yes:
break
dp = {}
neg_answer = bin_pack(0, negative, i, dp)
dp = {}
pos_answer = bin_pack(0, positive, i + target, dp)
if posAnswer - negAnswer == target:
yes = True
if Yes:
print('Yes')
else:
print('No') |
"""
Datos de entrada
capital-->c-->int
Datos de salida
ganancia-->g-->float
"""
#Entradas
c=int(input("Ingrese la cantidad de dinero invertida: "))
#Caja negra
g=(c*0.2)/100
#Salidas
print("Su ganancia mensual es de: ", g) | """
Datos de entrada
capital-->c-->int
Datos de salida
ganancia-->g-->float
"""
c = int(input('Ingrese la cantidad de dinero invertida: '))
g = c * 0.2 / 100
print('Su ganancia mensual es de: ', g) |
"""
0923. 3Sum With Multiplicity
Medium
Given an integer array arr, and an integer target, return the number of tuples i, j, k such that i < j < k and arr[i] + arr[j] + arr[k] == target.
As the answer can be very large, return it modulo 109 + 7.
Example 1:
Input: arr = [1,1,2,2,3,3,4,4,5,5], target = 8
Output: 20
Explanation:
Enumerating by the values (arr[i], arr[j], arr[k]):
(1, 2, 5) occurs 8 times;
(1, 3, 4) occurs 8 times;
(2, 2, 4) occurs 2 times;
(2, 3, 3) occurs 2 times.
Example 2:
Input: arr = [1,1,2,2,2,2], target = 5
Output: 12
Explanation:
arr[i] = 1, arr[j] = arr[k] = 2 occurs 12 times:
We choose one 1 from [1,1] in 2 ways,
and two 2s from [2,2,2,2] in 6 ways.
Constraints:
3 <= arr.length <= 3000
0 <= arr[i] <= 100
0 <= target <= 300
"""
class Solution:
def threeSumMulti(self, arr: List[int], target: int) -> int:
c = collections.Counter(arr)
res = 0
for i, j in itertools.combinations_with_replacement(c, 2):
k = target - i - j
if i == j == k:
res += c[i] * (c[i] - 1) * (c[i] - 2) // 6
elif i == j != k:
res += c[i] * (c[i] - 1) // 2 * c[k]
elif k > i and k > j:
res += c[i] * c[j] * c[k]
return res % (10**9 + 7)
| """
0923. 3Sum With Multiplicity
Medium
Given an integer array arr, and an integer target, return the number of tuples i, j, k such that i < j < k and arr[i] + arr[j] + arr[k] == target.
As the answer can be very large, return it modulo 109 + 7.
Example 1:
Input: arr = [1,1,2,2,3,3,4,4,5,5], target = 8
Output: 20
Explanation:
Enumerating by the values (arr[i], arr[j], arr[k]):
(1, 2, 5) occurs 8 times;
(1, 3, 4) occurs 8 times;
(2, 2, 4) occurs 2 times;
(2, 3, 3) occurs 2 times.
Example 2:
Input: arr = [1,1,2,2,2,2], target = 5
Output: 12
Explanation:
arr[i] = 1, arr[j] = arr[k] = 2 occurs 12 times:
We choose one 1 from [1,1] in 2 ways,
and two 2s from [2,2,2,2] in 6 ways.
Constraints:
3 <= arr.length <= 3000
0 <= arr[i] <= 100
0 <= target <= 300
"""
class Solution:
def three_sum_multi(self, arr: List[int], target: int) -> int:
c = collections.Counter(arr)
res = 0
for (i, j) in itertools.combinations_with_replacement(c, 2):
k = target - i - j
if i == j == k:
res += c[i] * (c[i] - 1) * (c[i] - 2) // 6
elif i == j != k:
res += c[i] * (c[i] - 1) // 2 * c[k]
elif k > i and k > j:
res += c[i] * c[j] * c[k]
return res % (10 ** 9 + 7) |
if __name__ == '__main__':
Str1 = '14:59~15:20'
StrList = Str1.split('~')
print(StrList)
print(StrList[0])
print(StrList[1])
| if __name__ == '__main__':
str1 = '14:59~15:20'
str_list = Str1.split('~')
print(StrList)
print(StrList[0])
print(StrList[1]) |
# Copyright (C) 2011 MetaBrainz Foundation
# Distributed under the MIT license, see the LICENSE file for details.
__version__ = "0.1.0"
| __version__ = '0.1.0' |
#App
HOST = '0.0.0.0'
PORT = 6000
DEBUG = False
#AWS
BUCKET = 'fastermlpipeline'
ACCESS_KEY = 'sorry_itsasecret'
SECRET_KEY = 'sure_itsasecret'
#Extract Data
URL_DATA = 'http://api:5000/credits'
#Preprocessors
NUMERICAL_FEATURES = ['age', 'job', 'credit_amount' ,'duration']
CATEGORICAL_FEATURES = ['sex', 'housing', 'saving_accounts', 'checking_account', 'purpose']
DROP_FEATURES = ['id'] | host = '0.0.0.0'
port = 6000
debug = False
bucket = 'fastermlpipeline'
access_key = 'sorry_itsasecret'
secret_key = 'sure_itsasecret'
url_data = 'http://api:5000/credits'
numerical_features = ['age', 'job', 'credit_amount', 'duration']
categorical_features = ['sex', 'housing', 'saving_accounts', 'checking_account', 'purpose']
drop_features = ['id'] |
""" Return nth element from last node
input: A -> B -> C -> D
output: B
"""
class Node:
""" Node class contains everything related to Linked List node """
def __init__(self, data):
""" initializing single node with data """
self.data = data
self.next = None
class LinkedList:
""" Singly Linked List is a linear data structure """
def __init__(self):
""" initializing singly linked list with zero node """
self.head = None
def insert_head(self, data):
""" inserts node at the start of linked list """
node = Node(data)
node.next = self.head
self.head = node
def print(self):
""" prints entire linked list without changing underlying data """
current = self.head
while current is not None:
print(" ->", current.data, end="")
current = current.next
print()
def nth_to_last_node(self, position):
""" returns nth to last node """
fast_ptr = self.head
slow_ptr = self.head
while position > 0:
slow_ptr = slow_ptr.next
position -= 1
while slow_ptr is not None:
slow_ptr = slow_ptr.next
fast_ptr = fast_ptr.next
return fast_ptr
def main():
""" operational function """
linkedlist = LinkedList()
linkedlist.insert_head('D')
linkedlist.insert_head('C')
linkedlist.insert_head('B')
linkedlist.insert_head('A')
linkedlist.print()
result = linkedlist.nth_to_last_node(3)
print(result.data)
if __name__ == '__main__':
main()
| """ Return nth element from last node
input: A -> B -> C -> D
output: B
"""
class Node:
""" Node class contains everything related to Linked List node """
def __init__(self, data):
""" initializing single node with data """
self.data = data
self.next = None
class Linkedlist:
""" Singly Linked List is a linear data structure """
def __init__(self):
""" initializing singly linked list with zero node """
self.head = None
def insert_head(self, data):
""" inserts node at the start of linked list """
node = node(data)
node.next = self.head
self.head = node
def print(self):
""" prints entire linked list without changing underlying data """
current = self.head
while current is not None:
print(' ->', current.data, end='')
current = current.next
print()
def nth_to_last_node(self, position):
""" returns nth to last node """
fast_ptr = self.head
slow_ptr = self.head
while position > 0:
slow_ptr = slow_ptr.next
position -= 1
while slow_ptr is not None:
slow_ptr = slow_ptr.next
fast_ptr = fast_ptr.next
return fast_ptr
def main():
""" operational function """
linkedlist = linked_list()
linkedlist.insert_head('D')
linkedlist.insert_head('C')
linkedlist.insert_head('B')
linkedlist.insert_head('A')
linkedlist.print()
result = linkedlist.nth_to_last_node(3)
print(result.data)
if __name__ == '__main__':
main() |
lookup_table = {
"TRI_FACILITY_NPDES": {
"ASGN_NPDES_IND": "Indicates that the associated NPDES_NUM represents the principal NPDES permit number as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"NPDES_NUM": "The permit number of a specific discharge to a water body under the National Pollutant Discharge Elimination System (NPDES) of the Clean Water Act (CWA). Not all facilities will have a NPDES permit number. A facility may have multiple NPDES permit numbers. The NPDES permit number may not pertain to the toxic chemical reported to TRI."
},
"TRI_OFF_SITE_TRANSFER_LOCATION": {
"PROVINCE": "The province of the location to which the toxic chemical in wastes is transferred. A facility may transfer toxic chemicals in waste to off-site locations that are outside of the United States. The province field gives a facility the flexibility needed to enter a correct off-site location address that is outside the United States.",
"TRANSFER_LOC_NUM": "The sequence in which an off-site transfer is reported on a Form R submission.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"CONTROLLED_LOC": "Indicator that shows whether the off-site location to which toxic chemicals are transferred in wastes is owned or controlled by the facility or the parent company. Values: 1 = 'Yes', 0 = 'No', 2 = blank or not entered.",
"OFF_SITE_STREET_ADDRESS": "The street address for the physical location of the entity receiving the toxic chemical.",
"COUNTRY_CODE": "The country code where the entity receiving the toxic chemical is located.",
"COUNTY_NAME": "The standardized name of the county where the facility is located.",
"CITY_NAME": "The city where the facility or establishment is physically located.",
"OFF_SITE_NAME": "The name of the entity receiving the toxic chemical.",
"RCRA_NUM": "The number assigned to the facility by EPA for purposes of the Resource Conservation and Recovery Act (RCRA). Not all facilities will have a RCRA Identification Number. A facility will only have a RCRA Identification Number if it manages RCRA regulated hazardous waste. Some facilities may have more than one RCRA Identification Number.",
"STATE_ABBR": "The state abbreviation where the facility or establishment is physically located.",
"ZIP_CODE": "The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility."
},
"TRI_TRANSFER_QTY": {
"TRANSFER_LOC_NUM": "The sequence in which an off-site transfer is reported on a Form R submission.",
"TRANSFER_BASIS_EST_CODE": "The code representing the technique used to develop the estimate of the release amount reported in the 'Total Transfers' box (TOTAL_TRANSFER). The values are as follows:",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"TOTAL_TRANSFER": "The total amount (in pounds) of the toxic chemical transferred from the facility to Publicly Owned Treatment Works (POTW) or to an off-site location (non-POTW) during the calendar year (January 1 - December 31). POTW refers to a municipal sewage treatment plant. The most common transfers will be conveyances of the toxic chemical in facility wastewater through underground sewage pipes, however, trucked or other direct shipments to a POTW are also included in this estimate.",
"TRANSFER_RANGE_CODE": "Code that corresponds to the amount of toxic chemical released annually by the reporting facility, reported as a range for releases less than 1,000 pounds. When a facility uses a range code, the amount reported to TRI is the midpoint of the range. On Form R, letter codes are used to represent ranges: A = 1-10 pounds, B = 11-499 pounds, and C = 500-999 pounds. The letters are converted to numbers for storage in the TRIS database where '1' represents range 'A', '3' represents range 'B', and'4' represents range 'C'. The historical value '2' = 1-499 pounds.",
"OFF_SITE_AMOUNT_SEQUENCE": "Sequence in which an off-site transfer amount is reported on a submission.",
"TRANSFER_EST_NA": "Indicates that 'NA' (Not Applicable) was entered on Form R when a facility does not discharge wastewater containing the toxic chemical to Publicly Owned Treatment Works (Section 6.1.B_) or in wastes to other off-site facilities (section 6.2_). Values: 1 = 'Yes', 0 = 'No'.",
"TYPE_OF_WASTE_MANAGEMENT": "The type of waste treatment, disposal, recycling, or energy recovery methods the off-site location uses to manage the toxic chemical. A two-digit code is used to indicate the type of waste management activity employed. This refers to the ultimate disposition of the toxic chemical, not the intermediate activities used for the waste stream. (In Envirofacts, the code 'P91' indicates a transfer to a POTW. All other codes refer to off-site transfers.)"
},
"TRI_SOURCE_REDUCT_METHOD": {
"SOURCE_REDUCT_METHOD_1": "Indicates the method or methods used at the facility to identify the possibility for a source reduction activity implementation at the facility. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. An example of a method used to identify source reduction opportunities would be an internal pollution prevention audit.",
"SOURCE_REDUCT_METHOD_2": "Indicates the method or methods used at the facility to identify the possibility for a source reduction activity implementation at the facility. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. An example of a method used to identify source reduction opportunities would be an internal pollution prevention audit.",
"SOURCE_REDUCT_METHOD_3": "Indicates the method or methods used at the facility to identify the possibility for a source reduction activity implementation at the facility. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. An example of a method used to identify source reduction opportunities would be an internal pollution prevention audit.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"SOURCE_REDUCT_ACTIVITY": "Indicates the type of source reduction activity implemented at the facility during the reporting year. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. An example of a source reduction activity would include a spill and leak prevention program such as the installation of a vapor recovery system.",
"REDUCTION_SEQUENCE_NUM": "Sequence in which a source reduction method is reported on a submission."
},
"TRI_POTW_LOCATION": {
"POTW_NAME": "The name of the publicly owned treatment works (POTW) receiving the toxic chemical.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"POTW_STREET_ADDRESS": "The street address for the physical location of the publicly owned treatment works (POTW) receiving the toxic chemical.",
"STATE_ABBR": "The state abbreviation where the facility or establishment is physically located.",
"COUNTY_NAME": "The standardized name of the county where the facility is located.",
"CITY_NAME": "The city where the facility or establishment is physically located.",
"POTW_LOC_NUM": "The sequence in which an POTW transfer is reported on a Form R submission.",
"ZIP_CODE": "The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility."
},
"TRI_TABLE_ID_NAME": {
"TABLE_ID": "A designation for a related group of permissible values. The name that identifies this group is located in TRI_TABLE_ID_NAME.",
"TABLE_NAME": "The table description for the TRI_CODE_DESC.TABLE_ID ."
},
"TRI_CODE_DESC": {
"DESCRIPT": "The text description of a permissible value contained in CODE.",
"CODE": "The permissible values for a column.",
"TABLE_ID": "A designation for a related group of permissible values. The name that identifies this group is located in TRI_TABLE_ID_NAME."
},
"TRI_FACILITY_NPDES_HISTORY": {
"ASGN_NPDES_IND": "Indicates that the associated NPDES_NUM represents the principal NPDES permit number as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"NPDES_NUM": "The permit number of a specific discharge to a water body under the National Pollutant Discharge Elimination System (NPDES) of the Clean Water Act (CWA). Not all facilities will have a NPDES permit number. A facility may have multiple NPDES permit numbers. The NPDES permit number may not pertain to the toxic chemical reported to TRI.",
"REPORTING_YEAR": "The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste."
},
"TRI_ZIP_CODE": {
"TRI_CENTROID_LAT": "The assigned centroid latitude based on zip code.",
"REGION": "The EPA region in which the facility is located.",
"CITY_NAME": "The city where the facility or establishment is physically located.",
"STATE_ABBR": "The state abbreviation where the facility or establishment is physically located.",
"TRI_CENTROID_LONG": "The assigned centroid longitude based on zip code.",
"COUNTRY_NAME": "The country where the facility is located, if outside the United States.",
"ZIP_CODE": "The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility."
},
"TRI_FACILITY_RCRA_HISTORY": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"ASGN_RCRA_IND": "Indicates that the associated RCRA_NUM represents the principal RCRA Identification Number as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"REPORTING_YEAR": "The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.",
"RCRA_NUM": "The number assigned to the facility by EPA for purposes of the Resource Conservation and Recovery Act (RCRA). Not all facilities will have a RCRA Identification Number. A facility will only have a RCRA Identification Number if it manages RCRA regulated hazardous waste. Some facilities may have more than one RCRA Identification Number."
},
"TRI_REPORTING_FORM": {
"PRODUCTION_RATIO_NA": "Indicator that shows whether 'NA' was entered in Section 8.9, Production Ratio or Activity Index (PRODUCTION_RATIO). Values: 1 = 'Yes', 0 = 'No'.",
"PUBLIC_CONTACT_PHONE": "The phone number to reach the person identified in the Public Contact Name box (PUBLIC_CONTACT_PERSON).",
"FEDERAL_FAC_IND": "Indicates whether the 'Federal' box was checked on the submission. A Federal facility is a facility owned or operated by the Federal government. This includes facilities that are operated by contractors to the Federal government (i.e., a facility where the land is owned by the Federal government but a private company is under contract to run the facility's operations). The types of Federal facilities that report to TRI are broader than the types of private sector facilities that report to TRI (e.g., DOD military bases). Values: 1 = box checked, 0 = box not checked.",
"TRADE_SECRET_IND": "Indicator that shows whether the identity of the toxic chemical has been claimed a trade secret. If the facility has indicated that the chemical name is a trade secret, the chemical name will not be released to the public. Values: 1 = 'Trade Secret' box checked, 0 = 'Trade Secret' box not checked.",
"MAX_AMOUNT_OF_CHEM": "The two digit code indicating a range for the maximum amount of the chemical present at the facility at any one time during the calendar year (January 1 - December 31) for which the report was submitted.",
"CERTIF_NAME": "The name of the owner, operator, or senior management official who is certifying that the information provided is true and complete and that the values reported are accurate based on reasonable estimates. This individual has management responsibility for the person or persons completing the report.",
"CERTIF_OFFICIAL_TITLE": "The title of the owner, operator, or senior management official who is certifying that the information provided is true and complete and that the values reported are accurate based on reasonable estimates. This individual has management responsibility for the person or persons completing the report.",
"ONE_TIME_RELEASE_QTY": "The total amount (in pounds) of the toxic chemical released directly to the environment or sent offsite for recycling, energy recovery, treatment, or disposal during the reporting year due to remedial actions, catastrophic events such as earthquakes or floods, and one-time events not associated with normal or routine production processes. These amounts are not included in the amounts reported in sections 8.1-8.7 (TRI_SOURCE_REDUCTION_QTY).",
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"ORIG_RECEIVED": "The original received date for a submission for this chemical from this facility and this reporting year.",
"REVISION_NA": "Indicator that shows whether the submission 'Revision' box on form R was checked by the submitter. Values: 1 = box checked, 0 = box not checked.",
"PUBLIC_CONTACT_PERSON": "The name of the individual who may be contacted by the general public with questions regarding the information reported to TRI on this chemical. This person may or may not be familiar with the information provided in the form but has been designated by the facility or establishment to handle public inquiries.",
"ENTIRE_FAC": "Indicates that only one Form R was filed for this chemical for the entire facility. Values: 1 = Form R 'Entire' box check, 0 = box not checked.",
"ACTIVE_STATUS": "Indicates the status of the submitted Form R. Value: 1 = 'Active submission'.",
"POSTMARK_DATE": "The most recent postmark date for a submission for this chemical from this facility and this reporting year . The date may represent a revised submission or be the same as the ORIG_POSTMARK.",
"DIOXIN_DISTRIBUTION_2": "Indicates the distribution (percentage) of 1,2,3,4,7,8,9-Heptachlorodibenzofuran (CAS Number: 55673-89-7) in the reported dioxin or dioxin-like compounds.",
"REPORTING_YEAR": "The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.",
"RECEIVED_DATE": "The date the submission was received at the EPCRA Reporting Center.",
"DIOXIN_DISTRIBUTION_14": "Indicates the distribution (percentage) of 2,3,4,7,8-Pentachlorodibenzofuran (CAS Number: 57117-31-4) in the reported dioxin or dioxin-like compounds.",
"DIOXIN_DISTRIBUTION_15": "Indicates the distribution (percentage) of 1,2,3,7,8-Pentachlorodibenzo- p-dioxin (CAS Number: 40321-76-4) in the reported dioxin or dioxin-like compounds.",
"ORIG_POSTMARK": "The original postmark date for a submission for this chemical from this facility and this reporting year.",
"DIOXIN_DISTRIBUTION_17": "Indicates the distribution (percentage) of 2,3,7,8-Tetrachlorodibenzo- p-dioxin (CAS Number: 01746-01-6) in the reported dioxin or dioxin-like compounds.",
"CERTIF_SIGNATURE": "Indicator for the signature of the individual who is certifying that the information being provided in the form is true and complete and that the values reported are accurate based on reasonable estimates.",
"DIOXIN_DISTRIBUTION_11": "Indicates the distribution (percentage) of 1,2,3,4,6,7,8,9-Octachlorodibenzofuran (CAS Number: 39001-02-0) in the reported dioxin or dioxin-like compounds.",
"DIOXIN_DISTRIBUTION_12": "Indicates the distribution (percentage) of 1,2,3,4,6,7,8,9-Octachlorodibenzo- p-dioxin (CAS Number: 03268-87-9) in the reported dioxin or dioxin-like compounds.",
"ADDITIONAL_DATA_IND": "For reporting years beginning in 1991, the indicator that shows whether additional optional information on source reduction, pollution control, or recycling activities implemented during the reporting year or prior years has been attached to the submission. For reporting years 1987 through 1990, the indicator shows whether waste minimization data was reported on Form R and has since been archived. Values: 1 = 'Yes', 0 = 'No'', 2 = blank or not entered.",
"CAS_CHEM_NAME": "The official name of the toxic chemical, toxic chemical mixture, (e.g., xylene mixed isomers), or chemical category as it appears on the EPCRA Section 313 list. ) or 2.1 . This space will be empty if a trade secret was claimed for the toxic chemical and information is provided in Section 1.3 (MIXTURE_NAME) or 2.1 (GENERIC_CHEM_NAME).",
"DIOXIN_DISTRIBUTION_8": "Indicates the distribution (percentage) of 1,2,3,6,7,8-Hexachlorodibenzo- p-dioxin (CAS Number: 57653-85-7) in the reported dioxin or dioxin-like compounds.",
"DIOXIN_DISTRIBUTION_9": "Indicates the distribution (percentage) of 1,2,3,7,8,9-Hexachlorodibenzo- p-dioxin (CAS Number: 19408-74-3) in the reported dioxin or dioxin-like compounds.",
"GENERIC_CHEM_NAME": "The generic, structurally descriptive term used in place of the toxic chemical name when a trade secret was claimed for the toxic chemical. The name must appear on both sanitized and unsanitized Form Rs and be the same as that used on the substantiation form. Section 1.3 will be 'NA' or blank if information is provided in Sections 1.1 (TRI_CHEM_ID) and 1.2 (CAS_CHEM_NAME), or 2.1 (MIXTURE_NAME). Note: Only Sanitized Trade Secret submissions are stored in the TRIS database.",
"DIOXIN_DISTRIBUTION_3": "Indicates the distribution (percentage) of 1,2,3,4,7,8-Hexachlorodibenzofuran (CAS Number: 70648-26-9) in the reported dioxin or dioxin-like compounds.",
"MIXTURE_NAME": "The generic term used in place of the toxic chemical name when a trade secret was claimed for the toxic chemical by the supplier of the toxic chemical. This is generally used when the supplier of a chemical formulation wishes to keep the identity of a particular ingredient in the formulation a secret. It is only used when the supplier, not the reporting facility, is claiming the trade secret. If the reporting facility is claiming a trade secret for the toxic chemical, the generic name is provided in Section 1.3 (GENERIC_CHEM_NAME) and this section (MIXTURE_NAME) is left blank. This space will also be left blank if a trade secret is not being claimed for the toxic chemical.",
"DIOXIN_DISTRIBUTION_1": "Indicates the distribution (percentage) of 1,2,3,4,6,7,8-Heptachlorodibenzofuran (CAS Number: 67562-39-4) in the reported dioxin or dioxin-like compounds.",
"DIOXIN_DISTRIBUTION_6": "Indicates the distribution (percentage) of 2,3,4,6,7,8-Hexachlorodibenzofuran (CAS Number: 60851-34-5) in the reported dioxin or dioxin-like compounds.",
"DIOXIN_DISTRIBUTION_7": "Indicates the distribution (percentage) of 1,2,3,4,7,8-Hexachlorodibenzo- p-dioxin (CAS Number: 39227-28-6) in the reported dioxin or dioxin-like compounds.",
"DIOXIN_DISTRIBUTION_4": "Indicates the distribution (percentage) of 1,2,3,6,7,8-Hextachlorodibenzofuran (CAS Number: 57117-44-9) in the reported dioxin or dioxin-like compounds.",
"DIOXIN_DISTRIBUTION_5": "Indicates the distribution (percentage) of 1,2,3,7,8,9-Hexachlorodibenzofuran (CAS Number: 72918-21-9) in the reported dioxin or dioxin-like compounds.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"PUBLIC_CONTACT_EMAIL": "The Email address of the PUBLIC_CONTACT_PERSON.",
"PARTIAL_FAC": "Indicates that the facility has chosen to report by establishment or groups of establishments. Therefore, there may be other reports filed for this chemical by other establishments of the facility. Values: 1 = Form R 'Partial' box checked, 0 = box not checked.",
"REVISION_CODE": "Facilities that filed a Form R and/or a Form A Certification Statement under EPCRA section 313 may submit a request to the revise the data. The REVISION_CODE is a code indicating the current form is a revision of a previous form and the reason it was revised. Added in reporting year 2007, the data element can have the following values:",
"DIOXIN_DISTRIBUTION_16": "Indicates the distribution (percentage) of 2,3,7,8-Tetrachlorodibenzofuran (CAS Number: 51207-31-9) in the reported dioxin or dioxin-like compounds.",
"ONE_TIME_RELEASE_QTY_NA": "Indicator that shows whether 'NA' was entered in Section 8.8, Quantity Released to the Environment as Result of Remedial Actions, Catastrophic Events, or One-Time Events Not Associated with Production Process (ONE_TIME_RELEASE_QTY). Values: 1 = 'Yes', 0 = 'No'.",
"CERTIF_DATE_SIGNED": "The date that the senior management official signed the certification statement.",
"DIOXIN_DISTRIBUTION_NA": "Indicates whether 'NA' (Not Applicable) was entered on the Form R for the Distribution of Each Member of the Dioxin and Dioxin-like Compounds Category. The Form R asks facilities to report a distribution of chemicals included in the Dioxin and Dioxin-like compounds category. There are 17 individual chemicals listed in the Dioxin and Dioxin-like compounds category. A value of '1' for this variable indicates that the facility did not have the speciation (distribution) information available.",
"DIOXIN_DISTRIBUTION_10": "Indicates the distribution (percentage) of 1,2,3,4,6,7,8-Hexachlorodibenzo- p-dioxin (CAS Number: 35822-46-9) in the reported dioxin or dioxin-like compounds.",
"FORM_TYPE_IND": "Indicates the type of form received. Values: L = Form R, S = Form A.",
"GOCO_FLAG": "Indicates whether the 'GOCO' box was checked on the submission. A GOCO facility is a Government-Owned, Contractor-Operated facility. Values: 1= box checked, 0= box not checked.",
"TRI_CHEM_ID": "The number assigned to chemicals regulated under Section 313 of the Emergency Planning and Community Right-to-Know Act (EPCRA). For most toxic chemicals or mixture of chemicals (e.g., xylene mixed isomers), the TRI_CHEM_ID is the Chemical Abstract Service Registry (CAS) number. A given listed toxic chemical or mixture may be known by many names but it will have only one CAS number. For example, methyl ethyl ketone and 2-butanone are synonyms for the same toxic chemical and thus have only one CAS number (78-93-3). For categories of chemicals for which CAS Registry numbers have not been assigned, a four-character category code, asssigned by TRI, is included in TRI_CHEM_ID. Form R section 1.1 will be empty if a trade secret was claimed for the toxic chemical and information is provided in Section 1.3 or 2.1.",
"SANITIZED_IND": "Indicator that shows whether the submission 'Sanitized Trade Secret' box was checked by the submitter. Note: Only Sanitized Trade Secret submissions are stored in the TRIS database. Values: 1 = box checked, 0 = box not checked.",
"DIOXIN_DISTRIBUTION_13": "Indicates the distribution (percentage) of 1,2,3,7,8-Pentachlorodibenzofuran (CAS Number: 57117-41-6) in the reported dioxin or dioxin-like compounds.",
"PRODUCTION_RATIO": "Indicates the level of increase or decrease from the previous year, of the production process or other activity in which the toxic chemical is used. This number is usually around 1.0. For example, a production ratio or activity index of 1.5 would indicate that production associated with the use of the toxic chemical has increased by about 50 percent. Conversely, a production ratio or activity index of 0.3 would indicate that production associated with the use of the toxic chemical has decreased by about 70 percent."
},
"TRI_FACILITY_UIC_HISTORY": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"ASGN_UIC_IND": "Indicates that the associated UIC_NUM represents the principal underground injection code identification number (UIC ID) as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"UIC_NUM": "The unique number assigned to a specific underground injection well under the Safe Drinking Water Act (SDWA). A facility with multiple injection wells will have multiple underground injection code identification number (UIC ID) Numbers. If the facility does not have an underground injection well regulated by the SDWA, it will not have a UIC ID number.",
"REPORTING_YEAR": "The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste."
},
"TRI_CHEM_INFO": {
"CAAC_IND": "Indicates whether the chemical is reportable under the Clean Air Act. Values: 1 = 'Yes', 0 = 'No'.",
"CARC_IND": "Indicates whether the chemical is reportable as a carcinogen under the CARC. Values: 1 = 'Yes', 0 = 'No'.",
"UNIT_OF_MEASURE": "Indicates the unit of measure used to quantify the chemical. Values: {Pounds, Grams}",
"CLASSIFICATION": "Indicates the classification of the chemical. Chemicals can be classified as either a Dioxin or Dioxin-like compounds, a PBT (Persistent, Bioaccumulative and Toxic) chemical or a general EPCRA Section 313 chemical. Values: 0=TRI, 1=PBT, 2=Dioxin",
"FEDS_IND": "Indicates whether the chemical is a non-Section 313 chemical submitted by a federal facility under Executive Order 12856. Values: 1 = 'Yes', 0 = 'No'.",
"METAL_IND": "Indicates whether the chemical is a metal or metal compound. Values: 1 = 'Yes', 0 = 'No'.",
"NO_DECIMALS": "Indicates the maximum number of decimals that can be used to quantify a chemical. This measurement applies to release, transfer and source reduction quantities. PBT (Persistent, Bioaccumulative and Toxic) chemicals, including Dioxins and Dioxin-like Compounds, can be quantified using numbers to the right of the decimal point. The measurement expresses the maximum number of positions to the right of the decimal point that a PBT chemical can be expressed in. All other Non-PBT chemicals are reported as whole numbers.",
"R3350_IND": "Indicates whether the chemical is reportable under Regulation 3350. Values: 1 = 'Yes', 0 = 'No'.",
"TRI_CHEM_ID": "The number assigned to chemicals regulated under Section 313 of the Emergency Planning and Community Right-to-Know Act (EPCRA). For most toxic chemicals or mixture of chemicals (e.g., xylene mixed isomers), the TRI_CHEM_ID is the Chemical Abstract Service Registry (CAS) number. A given listed toxic chemical or mixture may be known by many names but it will have only one CAS number. For example, methyl ethyl ketone and 2-butanone are synonyms for the same toxic chemical and thus have only one CAS number (78-93-3). For categories of chemicals for which CAS Registry numbers have not been assigned, a four-character category code, asssigned by TRI, is included in TRI_CHEM_ID. Form R section 1.1 will be empty if a trade secret was claimed for the toxic chemical and information is provided in Section 1.3 or 2.1.",
"ACTIVE_DATE": "First year that this chemical must be reported to TRI.",
"INACTIVE_DATE": "Final year that this chemical must be reported to TRI.",
"PBT_END_YEAR": "Indicates the year that a PBT (Persistent, Bioaccumulative and Toxic) chemical was dropped as an EPCRA Section 313 PBT Chemical, Toxics Release Inventory.",
"PBT_START_YEAR": "Indicates the year that a PBT (Persistent, Bioaccumulative and Toxic) chemical was designated as an EPCRA Section 313 PBT Chemical, Toxics Release Inventory.",
"CHEM_NAME": "The official name of the toxic chemical, toxic chemical mixture, (e.g., xylene mixed isomers), or chemical category as it appears on the EPCRA Section 313 list."
},
"TRI_FACILITY_UIC": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"ASGN_UIC_IND": "Indicates that the associated UIC_NUM represents the principal underground injection code identification number (UIC ID) as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"UIC_NUM": "The unique number assigned to a specific underground injection well under the Safe Drinking Water Act (SDWA). A facility with multiple injection wells will have multiple underground injection code identification number (UIC ID) Numbers. If the facility does not have an underground injection well regulated by the SDWA, it will not have a UIC ID number."
},
"TRI_FACILITY": {
"PREF_DESC_CATEGORY": "The EPA's preferred geographic coordinate description category. Describes the category of feature referenced by the latitude and longitude.",
"ASGN_PARTIAL_IND": "Indicates that the facility reports by establishment or groups of establishments as assigned by TRI from Form R submisions. Partial facilities may have more than one submission for the same chemical in one reporting year. Values: 0 = 'Entire facility', 1 = 'Partial facility'.",
"FACILITY_NAME": "The name of the facility or establishment for which the form was submitted. For purposes of TRI a \"facility\" is generally considered to be all buildings and equipment owned or operated by a company on a single piece of property. The facility may be only one building in an industrial park or it may be a large complex covering many acres. At some larger facilities there may be several different businesses that are all run by the same company. These different businesses are referred to as \"establishments.\" Generally, a company will submit one Form R for the entire facility. A facility may choose, however, to submit a Form R for each establishment separately. The name in this section will either be the name used for the entire facility or the name of the specific establishment, depending on how the facility chooses to report.",
"STATE_COUNTY_FIPS_CODE": "Combination of the two-letter state abbreviation and the county code.",
"MAIL_STATE_ABBR": "The state abbreviation the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the State box.",
"MAIL_ZIP_CODE": "The zip code the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the Zip Code box.",
"CITY_NAME": "The city where the facility or establishment is physically located.",
"MAIL_COUNTRY": "The country the facility or establishment uses to receive mail.",
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"PREF_HORIZONTAL_DATUM": "The EPA's preferred geographic coordinate horizontal datum. Reference datum of the latitude and longitude.",
"FAC_CLOSED_IND": "A flag that indicates whether a facility is open (value =' 0'), closed (value = '1'), or inactive for TRI (value = '2').",
"FAC_LONGITUDE": "The series of numbers which identifies the exact physical location of the facility as a measure of the arc or portion of the earth's equator between the meridian of the center of the facility and the prime meridian. The right-justified value is stored as degrees, minutes and seconds (0DDDMMSS). Tenths of seconds are not stored. The value is negative for locations in the Western hemisphere.",
"MAIL_STREET_ADDRESS": "The address the facility or establishment uses for receiving mail. Form R instructs the submitter to enter the address used for mail only if different than in the Street box. The TRIS database stores the address from the Street box (STREET_ADDRESS) in MAILING_STREET_ADDRESS even when the facility Mailing address is not different.",
"STATE_ABBR": "The state abbreviation where the facility or establishment is physically located.",
"COUNTY_NAME": "The standardized name of the county where the facility is located.",
"FAC_LATITUDE": "The series of numbers that identifies the exact physical location of the facility as a measure of the angular distance north form the earth's equator to the center of the facility. The value is stored as degrees, minutes and seconds (0DDMMSS), and the first position is zero-filled. The value is positive for locations north of the equator.",
"PREF_LATITUDE": "The EPA's preferred geographic latitude estimation of the reporting facility. Value for latitude is in decimal degrees. This is a signed field.",
"PREF_COLLECT_METH": "The EPA's preferred geographic coordinate collection method code for the reporting facility. Method used to determine the latitude and longitude.",
"ASGN_PUBLIC_PHONE": "The phone number to reach the person identified in the Public Contact Name box (PUBLIC_CONTACT_PERSON), as assigned by TRI from Form R submissions.",
"PREF_ACCURACY": "The EPA's preferred geographic coordinate accuracy estimation for the reporting facility. Describes the accuracy value as a range (+/) in meters of the latitude and longitude.",
"ASGN_FEDERAL_IND": "An identifier that indicates the ownership status of a facility. A Federal facility is a facility owned or operated by the Federal government. This includes facilities that are operated by contractors to the Federal government (i.e., a facility where the land is owned by the Federal government but a private company is under contract to run the facility's operations). The types of Federal facilities that report to TRI are broader than the types of private sector facilities that report to TRI (e.g., DOD military bases). Values: C = 'Commercial', F = 'Federal facility', and G = 'Government owned/contractor operated' (GOCO).",
"ASGN_AGENCY": "An abbreviation for the name of the agency supported by a federal or Government Owned/Contractor Operated (GOCO) reporting site.",
"MAIL_PROVINCE": "The province the facility or establishment uses to receive mail. A facility may receive mail at an address outside of the United States. The province field gives a facility the flexibility needed to enter a correct mailing address outside the United States.",
"PREF_LONGITUDE": "The EPA's preferred geographic longitude estimation of the reporting facility. Value for longitude is in decimal degrees. This is a signed field.",
"STREET_ADDRESS": "The street address for the physical location of the facility or establishment.",
"ZIP_CODE": "The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.",
"MAIL_NAME": "The name which the facility or establishment uses for receiving mail if the address used for mail is different than in the Street box. This may or may not be the same as the name listed in the Facility or Establishment Name box.",
"PREF_SOURCE_SCALE": "The EPA's preferred geographic coordinate source map scale code. This is the scale of the source used to determine the latitude and longitude.",
"MAIL_CITY": "The city the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the City box.",
"PARENT_CO_NAME": "Name of the corporation or other business company that is the ultimate parent company, located in the United States, of the facility or establishment submitting the data. The parent company is the company that directly owns at least 50 percent of the voting stock of the reporting company. This does not include foreign parent companies. 'NA' indicates that the facility does not have a parent company.",
"PREF_QA_CODE": "Contains the results of four quality assurance tests (Test 1 through Test 4 below) used to determine facility location. \"ZIP Code Bounding Box\" is a rectangle generated from the ZIP Code boundaries, which is defined by the extreme north-south latitude and east-west longitudes, plus 1 kilometer (km) in each direction. The quality assurance tests are:",
"FRS_ID": "A unique code used to identify the facility in the Facility Registry System (FRS). Note: The column will be populated in the future when values have been established.",
"PARENT_CO_DB_NUM": "The number which has been assigned to the parent company by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all parent companies will have a Dun & Bradstreet number. 'NA' indicates that the facility or establishment's parent company does not have a Dun & Bradstreet number.",
"ASGN_PUBLIC_CONTACT": "The name of the individual who may be contacted by the general public with questions regarding the company and the information reported to TRI as assigned by TRI from Form R submissions.. This person may or may not be familiar with the information provided in the form but has been designated by the facility or establishment to handle public inquiries.",
"REGION": "The EPA region in which the facility is located."
},
"TRI_SUBMISSION_SIC": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"SIC_SEQUENCE_NUM": "The sequence of the facility's Standard Industrial Classification (SIC) code as entered on Form R or Form A.",
"SIC_CODE": "The Standard Industrial Classification (SIC) code or codes which best describes the activities conducted at the facility. SIC codes are 4 digit numbers used by the Bureau of Census as part of a system to categorize and track the types of business activities conducted in the United States. The first two digits of the code represent the major industry group (e.g., SIC code 25XX indicates Furniture and Fixtures) and the second two digits represent the specific subset of that group (e.g., 2511 indicates wood household furniture). EPA instructs facilities to enter their primary SIC code first. Many facilities do not report their primary SIC code first.",
"PRIMARY_IND": "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."
},
"TRI_RECYCLING_PROCESS": {
"ONSITE_RECYCLING_PROC_CODE": "Indicates the specific on-site recycling method or methods applied to the toxic chemical. Similar to section 7B and unlike section 7A, on-site recycling under section 7C refers only to recycling activities directed at the specific toxic chemical being reported, not all recycling methods applied to the waste stream. Section 7C is not completed unless the specific toxic chemical being reported is recovered from the waste stream for reuse.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit."
},
"TRI_ENERGY_RECOVERY": {
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"ONSITE_ENERGY_PROC_CODE": "Code for the specific energy recovery method applied to the toxic chemical. Unlike section 7A which includes all treatment methods applied to the waste stream, the energy recovery must be directed at the specific toxic chemical being reported. This means that the toxic chemical must have significant heating value. Section 7B should not be used for chemicals that do not have significant heating values such as metals. Values: U01 = Industrial Kiln, U02 = Industrial Furnace, U03 = Industrial Boiler, U09 = Other Energy Recovery Methods, NA = not applicable, no on-site energy recovery applied to the toxic chemical."
},
"TRI_FACILITY_DB": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"ASGN_DB_IND": "Indicates that the associated DB_NUM represents the principal Dun & Bradstreet number assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"DB_NUM": "The number or numbers which have been assigned to the facility by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all facilities will have Dun & Bradstreet numbers."
},
"TRI_CHEM_ACTIVITY": {
"REACTANT": "Indicates the toxic chemical is used in chemical reactions to create another chemical substance or product that is then sold or otherwise distributed to other facilities. Some examples of reactants include feedstocks, raw materials, intermediates, and initiators. Values: 1 = 'Yes', 0 = 'No'.",
"MANUFACTURE_AID": "Indicates the toxic chemical is used to aid in the manufacturing process but does not come into contact with the product during manufacture. Some examples include valve lubricants, refrigerants, metalworking fluids, coolants, and hydraulic fluids. Values: 1 = 'Yes', 0 = 'No'.",
"IMPORTED": "Indicates the toxic chemical was imported into the Customs Territory of the United States by the facility. This includes the facility directly importing the toxic chemical or specifically requesting a broker or other party to obtain the toxic chemical from a foreign source. The Customs Territory of the United States includes the 50 States, Guam, Puerto Rico, American Samoa, and the U.S. Virgin Islands. Values: 1 = 'Yes', 0 = 'No'.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"USED_PROCESSED": "Indicates the toxic chemical was produced or imported by the facility and then further processed or otherwise used at the same facility. If this box is checked, at least one box in section 3.2 or section 3.3 will be checked. Values: 1 = 'Yes', 0 = 'No'.",
"PRODUCE": "Indicates the toxic chemical was created by the facility. A toxic chemical is considered manufactured even if the toxic chemical is created unintentionally or exists only for a short period of time. Values: 1 = 'Yes', 0 = 'No'.",
"FORMULATION_COMPONENT": "Indicates the toxic chemical is used as an ingredient in a product mixture to enhance performance of the product during its use, such as dyes in ink, solvents in paint, additions, reaction diluents, initiators, inhibitors, emulsifiers, surfactants, lubricants, flame retardants, and rheological modifiers. Values: 1 = 'Yes', 0 = 'No'.",
"MANUFACTURE_IMPURITY": "Indicator that shows whether the facility produces the reported chemical as a result of the manufacture, processing, or otherwise use of another chemical, but does not separate the chemical and it remains primarily in the mixture or product with that other chemical. Values: 1 = 'Yes', 0 = 'No'.",
"CHEM_PROCESSING_AID": "Indicates the toxic chemical is used to aid in the manufacture or synthesis of another chemical substance such that it comes into contact with the product during manufacture, but is not intended to remain with or become part of the final product or mixture. Some examples of chemical processing aids are process solvents, catalysts, solution buffers, inhibitors, and reaction terminators. Values: 1 = 'Yes', 0 = 'No'.",
"BYPRODUCT": "Indicates the toxic chemical is produced coincidentally during the manufacture, process, or otherwise use of another chemical substance or mixture and, following its production, is separated from that other chemical substance or mixture. This includes toxic chemicals that may be created as the result of waste management. Values: 1 = 'Yes', 0 = 'No'.",
"ANCILLARY": "Indicates the toxic chemical is used at the facility for purposes other than as a manufacturing aid or chemical processing aid, such as cleaners, degreasers, lubricants, fuels, toxic chemicals used for treating wastes, and toxic chemicals used to treat water at the facility. Values: 1 = 'Yes', 0 = 'No'.",
"REPACKAGING": "Indicates the toxic chemical has been received by the facility and subsequently prepared for distribution into commerce in a different form, state, or quantity than it was received, such as petroleum being transferred from a storage tank to tanker trucks. Values: 1 = 'Yes', 0 = 'No'.",
"ARTICLE_COMPONENT": "Indicates the toxic chemical becomes an integral part of an article distributed into commerce, such as copper in wire or resins in a plastic pen, or the pigment components of paint applied to a chair that is sold. Values: 1 = 'Yes', 0 = 'No'.",
"PROCESS_IMPURITY": "Indicator that shows whether the facility processed the reported chemical but did not separate it and it remains as an impurity in the primary the mixture or trade name product. Values: 1 = 'Yes', 0 = 'No'.",
"SALE_DISTRIBUTION": "Indicates the toxic chemical was produced or imported by the facility specifically to be sold or distributed to other outside facilities. Values: 1 = 'Yes', 0 = 'No'."
},
"TRI_COUNTY": {
"COUNTY_NAME": "The standardized name of the county where the facility is located.",
"ZIP_CODE": "The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility."
},
"TRI_FACILITY_HISTORY": {
"PREF_DESC_CATEGORY": "The EPA's preferred geographic coordinate description category. Describes the category of feature referenced by the latitude and longitude.",
"ASGN_PARTIAL_IND": "Indicates that the facility reports by establishment or groups of establishments as assigned by TRI from Form R submisions. Partial facilities may have more than one submission for the same chemical in one reporting year. Values: 0 = 'Entire facility', 1 = 'Partial facility'.",
"FACILITY_NAME": "The name of the facility or establishment for which the form was submitted. For purposes of TRI a \"facility\" is generally considered to be all buildings and equipment owned or operated by a company on a single piece of property. The facility may be only one building in an industrial park or it may be a large complex covering many acres. At some larger facilities there may be several different businesses that are all run by the same company. These different businesses are referred to as \"establishments.\" Generally, a company will submit one Form R for the entire facility. A facility may choose, however, to submit a Form R for each establishment separately. The name in this section will either be the name used for the entire facility or the name of the specific establishment, depending on how the facility chooses to report.",
"STATE_COUNTY_FIPS_CODE": "Combination of the two-letter state abbreviation and the county code.",
"MAIL_STATE_ABBR": "The state abbreviation the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the State box.",
"MAIL_ZIP_CODE": "The zip code the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the Zip Code box.",
"CITY_NAME": "The city where the facility or establishment is physically located.",
"MAIL_COUNTRY": "The country the facility or establishment uses to receive mail.",
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"PREF_HORIZONTAL_DATUM": "The EPA's preferred geographic coordinate horizontal datum. Reference datum of the latitude and longitude.",
"FAC_LONGITUDE": "The series of numbers which identifies the exact physical location of the facility as a measure of the arc or portion of the earth's equator between the meridian of the center of the facility and the prime meridian. The right-justified value is stored as degrees, minutes and seconds (0DDDMMSS). Tenths of seconds are not stored. The value is negative for locations in the Western hemisphere.",
"MAIL_STREET_ADDRESS": "The address the facility or establishment uses for receiving mail. Form R instructs the submitter to enter the address used for mail only if different than in the Street box. The TRIS database stores the address from the Street box (STREET_ADDRESS) in MAILING_STREET_ADDRESS even when the facility Mailing address is not different.",
"REPORTING_YEAR": "The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.",
"STATE_ABBR": "The state abbreviation where the facility or establishment is physically located.",
"COUNTY_NAME": "The standardized name of the county where the facility is located.",
"FAC_LATITUDE": "The series of numbers that identifies the exact physical location of the facility as a measure of the angular distance north from the earth's equator to the center of the facility. The value is stored as degrees, minutes and seconds (0DDMMSS), and the first position is zero-filled. The value is positive for locations north of the equator.",
"PREF_LATITUDE": "The EPA's preferred geographic latitude estimation of the reporting facility. Value for latitude is in decimal degrees. This is a signed field.",
"PREF_COLLECT_METH": "The EPA's preferred geographic coordinate collection method code for the reporting facility. Method used to determine the latitude and longitude.",
"ASGN_PUBLIC_PHONE": "The phone number to reach the person identified in the Public Contact Name box (PUBLIC_CONTACT_PERSON), as assigned by TRI from Form R submissions.",
"PREF_ACCURACY": "The EPA's preferred geographic coordinate accuracy estimation for the reporting facility. Describes the accuracy value as a range (+/-) in meters of the latitude and longitude.",
"ASGN_FEDERAL_IND": "An identifier that indicates the ownership status of a facility. A Federal facility is a facility owned or operated by the Federal government. This includes facilities that are operated by contractors to the Federal government (i.e., a facility where the land is owned by the Federal government but a private company is under contract to run the facility's operations). The types of Federal facilities that report to TRI are broader than the types of private sector facilities that report to TRI (e.g., DOD military bases). Values: C = 'Commercial', F = 'Federal facility', and G = 'Government owned/contractor operated' (GOCO).",
"ASGN_AGENCY": "An abbreviation for the name of the agency supported by a federal or Government Owned/Contractor Operated (GOCO) reporting site.",
"MAIL_PROVINCE": "The province the facility or establishment uses to receive mail. A facility may receive mail at an address outside of the United States. The province field gives a facility the flexibility needed to enter a correct mailing address outside the United States.",
"PREF_LONGITUDE": "The EPA's preferred geographic longitude estimation of the reporting facility. Value for longitude is in decimal degrees. This is a signed field.",
"STREET_ADDRESS": "The street address for the physical location of the facility or establishment.",
"ZIP_CODE": "The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.",
"MAIL_NAME": "The name which the facility or establishment uses for receiving mail if the address used for mail is different than in the Street box. This may or may not be the same as the name listed in the Facility or Establishment Name box.",
"PREF_SOURCE_SCALE": "The EPA's preferred geographic coordinate source map scale code. This is the scale of the source used to determine the latitude and longitude.",
"MAIL_CITY": "The city the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the City box.",
"PARENT_CO_NAME": "Name of the corporation or other business company that is the ultimate parent company, located in the United States, of the facility or establishment submitting the data. The parent company is the company that directly owns at least 50 percent of the voting stock of the reporting company. This does not include foreign parent companies. 'NA' indicates that the facility does not have a parent company.",
"PREF_QA_CODE": "Contains the results of four quality assurance tests (Test 1 through Test 4 below) used to determine facility location. \"ZIP Code Bounding Box\" is a rectangle generated from the ZIP Code boundaries, which is defined by the extreme north-south latitude and east-west longitudes, plus 1 kilometer (km) in each direction. The quality assurance tests are:",
"PARENT_CO_DB_NUM": "The number which has been assigned to the parent company by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all parent companies will have a Dun & Bradstreet number. 'NA' indicates that the facility or establishment's parent company does not have a Dun & Bradstreet number.",
"ASGN_PUBLIC_CONTACT": "The name of the individual who may be contacted by the general public with questions regarding the company and the information reported to TRI as assigned by TRI from Form R submissions. This person may or may not be familiar with the information provided in the form but has been designated by the facility or establishment to handle public inquiries.",
"REGION": "The EPA region in which the facility is located."
},
"TRI_SUBMISSION_NAICS": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"NAICS_CODE": "The North American Industry Classification System (NAICS) Code(s) that best describe the business activities conducted at a facility or establishment. NAICS codes are 6 digit numbers used by the Bureau of Census as part of a system to categorize and track the types of business activities conducted in the United States.",
"NAICS_SEQUENCE_NUM": "The sequence of the facility's North American Industry Classification System (NAICS) code as entered in section 4.5 of part I of the Form R or Form A.",
"PRIMARY_IND": "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."
},
"TRI_WATER_STREAM": {
"WATER_SEQUENCE_NUM": "Sequence in which a release to water is reported on a Form R submission.",
"STREAM_NAME": "The name of the stream, river, lake, or other water body to which the chemical is discharged. The name is listed as it appears on the NPDES permit, or, if the facility does not have a NPDES permit, as the water body is publicly known. This is not a list of all streams through which the toxic chemical flows but is a list of direct discharges. If more than one name is listed on form R, the facility has a separate discharge to each water body listed.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"STORM_WATER_PERCENT": "The amount of the release, by weight percent, to water bodies, that came from stormwater runoff. This figure is only required when data are available.",
"STORM_WATER_NA": "Indicates that 'NA' (Not Applicable) was entered on Form R for the percent of a release that came from stormwater runoff. Values: 1 = 'Yes', 0 = 'No'."
},
"TRI_SOURCE_REDUCT_QTY": {
"ENERGY_OFFSITE_CURR_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) for which the report was submitted. This includes all amounts of the toxic chemical that were intended to be recovered for energy and were sent offsite for that purpose. This figure includes all transfers offsite reported in section 6.2 which are classified with an energy recovery code. This does not include quantities of the toxic chemical that are combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81A_CURR_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_PREV_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_ONSITE_FOLL_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste expected to be burned for energy recovery onsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This should not include quantities of the toxic chemical that will be combusted for energy recovery onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"RECYC_OFFSITE_FOLL_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be sent offsite for recycling during the calendar year (January 1 - December 31) following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"RECYC_ONSITE_PREV_YR_QTY": "The total amount (in pounds) of the toxic chemical recycled onsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes only the amount of the toxic chemical actually recovered for reuse, not the total amount of the toxic chemical in the wastestream entering recycling units onsite. This amount does not include quantities of the toxic chemical that were recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"RECYC_ONSITE_SECD_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be recycled onsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81A_PREV_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, prior year on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_FOLL_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be released by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) following the year for which the report was submitted. This includes air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs (metals reported in 6.1).",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"ENERGY_OFFSITE_CURR_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"RECYC_OFFSITE_CURR_YR_QTY": "The total amount (in pounds) of the toxic chemical sent offsite for recycling during the calendar year (January 1 - December 31) for which the report was submitted. This includes all amounts of the toxic chemical intended to be recycled, not just the amount of the toxic chemical actually recovered. This figure includes all transfers offsite reported in section 6.2 which are classified with a recycling code. This amount does not include quantities of the toxic chemical that were transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"RECYC_ONSITE_CURR_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"TREATED_ONSITE_CURR_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_CURR_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"RECYC_ONSITE_CURR_YR_QTY": "The total amount (in pounds) of the toxic chemical recycled onsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes only the amount of the toxic chemical actually recovered, not the total amount of the toxic chemical in the wastestream sent for recycling activities. This amount does not include quantities of the toxic chemical that were recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81C_CURR_YR_QTY": "The total amount of the toxic chemical released off-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). This total does not include off-site releases or disposal due to catastrophic events.",
"REL_81C_SECD_YR_QTY": "The total amount of the toxic chemical expected to be released off-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). This total does not include off-site releases or disposal due to catastrophic events.",
"REL_81B_SECD_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, second following year on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_81D_FOLL_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, following year off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_81C_FOLL_YR_QTY": "The total amount of the toxic chemical expected to be released off-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). This total does not include off-site releases or disposal due to catastrophic events.",
"REL_81B_CURR_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"TREATED_OFFSITE_CURR_YR_QTY": "The total amount (in pounds) of the toxic chemical sent for treatment offsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes the total amount of the toxic chemical intended to be treated (destroyed) and sent offsite for that purpose, not the amount of the toxic chemical actually treated (destroyed) by offsite processes. This figure includes all transfers offsite reported in section 6.2 which are classified with treatment waste management codes and most transfers to POTWs reported in section 6.1, except for metals. This does not include transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that were transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81C_PREV_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, prior year off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_PREV_YR_QTY": "The total amount (in pounds) of the toxic chemical released due to production related events by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs (metals reported in 6.1).",
"TREATED_OFFSITE_CURR_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_OFFSITE_FOLL_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste expected to be sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) following the year for which the report was submitted. This does not include quantities of the toxic chemical that will be combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"RECYC_ONSITE_FOLL_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be recycled onsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81B_SECD_YR_QTY": "The total amount of the toxic chemical expected to be released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.",
"REL_81B_FOLL_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, following year on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_81D_CURR_YR_QTY": "The total amount of the toxic chemical released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.",
"REL_81C_SECD_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, second following year off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_81C_CURR_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"ENERGY_OFFSITE_PREV_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes all amounts of the toxic chemical that were intended to be recovered for energy and were sent offsite for that purpose. This figure includes all transfers offsite reported in section 6.2 which are classified with an energy recovery code. This does not include quantities of the toxic chemical that are combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"TREATED_ONSITE_FOLL_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81B_PREV_YR_QTY": "The total amount of the toxic chemical released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.",
"RECYC_ONSITE_PREV_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"RECYC_OFFSITE_PREV_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_SECD_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81D_CURR_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"ENERGY_ONSITE_CURR_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81A_CURR_YR_QTY": "The total amount of the toxic chemical released on-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). This total does not include on-site releases or disposal due to catastrophic events.",
"RECYC_OFFSITE_SECD_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be sent offsite for recycling during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"TREATED_OFFSITE_SECD_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be sent for treatment offsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This does not include expected transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Expected transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that will be transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81D_SECD_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, second following year off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"RECYC_OFFSITE_CURR_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"TREATED_OFFSITE_PREV_YR_QTY": "The total amount (in pounds) of the toxic chemical sent for treatment offsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes the total amount of the toxic chemical intended to be treated (destroyed) and sent offsite for that purpose, not the amount of the toxic chemical actually treated (destroyed) by offsite processes. This figure includes all transfers offsite reported in section 6.2 which are classified with treatment waste management codes and most transfers to POTWs reported in section 6.1, except for metals. This does not include transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that were transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"TREATED_OFFSITE_FOLL_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_OFFSITE_SECD_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_CURR_YR_QTY": "The total amount (in pounds) of the toxic chemical released due to production related events by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) for which the report was submitted. This includes both fugitive and stack air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs, because metals cannot be treated (destroyed) and will ultimately be disposed (metals reported in 6.1).",
"TREATED_ONSITE_CURR_YR_QTY": "The total amount (in pounds) of the toxic chemical treated onsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes only the amount of the toxic chemical actually treated (destroyed) by processes at the facility, not the total amount of the toxic chemical present in wastestreams sent to those processes. This amount does not include quantities of the toxic chemical that were treated for destruction onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"TREATED_ONSITE_SECD_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81B_FOLL_YR_QTY": "The total amount of the toxic chemical expected to be released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.",
"ENERGY_OFFSITE_PREV_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81A_FOLL_YR_QTY": "The total amount of the toxic chemical expected to be released on-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). This total does not include on-site releases or disposal due to catastrophic events.",
"RECYC_OFFSITE_SECD_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81D_PREV_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, prior year off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"RECYC_OFFSITE_PREV_YR_QTY": "The total amount (in pounds) of the toxic chemical sent offsite for recycling during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes all amounts of the toxic chemical intended to be recycled and sent offsite for that purpose, not just the amount of the toxic chemical actually recovered. This figure includes all transfers offsite reported in section 6.2 which are classified with a recycling code. This amount does not include quantities of the toxic chemical that were transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production",
"RECYC_OFFSITE_FOLL_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81D_SECD_YR_QTY": "The total amount of the toxic chemical expected to be released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.",
"REL_SECD_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be released by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This includes air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs (metals reported in 6.1).",
"REL_81D_PREV_YR_QTY": "The total amount of the toxic chemical released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.",
"TREATED_OFFSITE_FOLL_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be sent for treatment offsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This does not include expected transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Expected transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that will be transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81B_CURR_YR_QTY": "The total amount of the toxic chemical released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.",
"TREATED_OFFSITE_SECD_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81C_FOLL_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, following year off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_81B_PREV_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, prior year on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"RECYC_ONSITE_SECD_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_OFFSITE_FOLL_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_ONSITE_SECD_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_ONSITE_FOLL_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81A_SECD_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, second following year on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"REL_81D_FOLL_YR_QTY": "The total amount of the toxic chemical expected to be released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.",
"ENERGY_ONSITE_SECD_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste expected to be burned for energy recovery onsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This should not include quantities of the toxic chemical that will be combusted for energy recovery onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81A_SECD_YR_QTY": "The total amount of the toxic chemical expected to be released on-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). This total does not include on-site releases or disposal due to catastrophic events.",
"REL_FOLL_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"REL_81A_FOLL_YR_NA": "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, following year on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.",
"ENERGY_ONSITE_PREV_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste burned for energy recovery onsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes only the amount of the toxic chemical actually combusted in the unit, not the total amount of the toxic chemical in the wastestream sent for energy recovery. This also does not include quantities of the toxic chemical that are combusted for energy recovery onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"ENERGY_ONSITE_CURR_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste burned for energy recovery onsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes only the amount of the toxic chemical actually combusted in the unit, not the total amount of the toxic chemical in the wastestream sent for energy recovery. This also does not include quantities of the toxic chemical that are combusted for energy recovery onsite as the result of a catastrophic event,remedial action or other, one-time event not associated with production.",
"TREATED_ONSITE_PREV_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"TREATED_ONSITE_PREV_YR_QTY": "The total amount (in pounds) of the toxic chemical treated onsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes only the amount of the toxic chemical actually treated (destroyed) by processes at the facility, not the total amount of the toxic chemical present in wastestreams sent to those processes. This amount does not include quantities of the toxic chemical that were treated for destruction onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81A_PREV_YR_QTY": "The total amount of the toxic chemical released on-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). This total does not include on-site releases or disposal due to catastrophic events.",
"TREATED_ONSITE_FOLL_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be treated onsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be treated for destruction onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"REL_81C_PREV_YR_QTY": "The total amount of the toxic chemical released off-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). This total does not include off-site releases or disposal due to catastrophic events.",
"RECYC_ONSITE_FOLL_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"TREATED_ONSITE_SECD_YR_QTY": "The total amount (in pounds) of the toxic chemical expected to be treated onsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be treated for destruction onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.",
"TREATED_OFFSITE_PREV_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_ONSITE_PREV_YR_NA": "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.",
"ENERGY_OFFSITE_SECD_YR_QTY": "The total amount (in pounds) of the toxic chemical in waste expected to be sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This does not include quantities of the toxic chemical that will be combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production."
},
"TRI_RELEASE_QTY": {
"RELEASE_BASIS_EST_CODE": "The code representing the technique used to develop theestimate of releases reported in the 'Total Release' box (TOTAL_RELEASE). Thevalues are as follows:",
"WATER_SEQUENCE_NUM": "Sequence in which a release to water is reported on a Form R submission.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"TOTAL_RELEASE": "The total amount (in pounds) of the toxic chemical released to air, water, land, and underground injection wells during the calendar year (January 1 - December 31). Release amounts may be reported as specific numbers or as ranges (RELEASE_RANGE_CODE). Descriptions by Form R Section number for each environmental medium follow.",
"RELEASE_NA": "Indicates whether 'NA' (Not Applicable) was entered on Form R for the release estimate. Values: 1 = 'Yes', 0 = 'No'. Descriptions by Form R Section number for each environmental medium follow.",
"RELEASE_RANGE_CODE": "The code that corresponds to the amount of toxic chemical released annually by the reporting facility, reported as a range for releases less than 1,000 pounds. When a facility uses a range code, the amount reported to TRI is the midpoint of the range. On Form R, letter codes are used to represent ranges: A = 1-10 pounds, B = 11-499 pounds, and C = 500-999 pounds. The letters are converted to numbers for storage in the TRIS database where '1' represents range 'A', '3' represents range 'B', and '4' represents range 'C'. The historical value '2' = 1-499 pounds.",
"ENVIRONMENTAL_MEDIUM": "Code indicating the environmental medium to which the toxic chemical is released from the facility."
},
"TRI_ONSITE_WASTESTREAM": {
"OPERATING_DATA_IND": "Indicates if the waste treatment efficiency estimate (TREATMENT_EFFCIENCY_EST) is based on actual operating data, such as monitoring influent and effluent toxic chemical levels in the waste stream; or, indicates if TREATMENT_EFFCIENCY_EST is not based on actual operating or monitoring data, but rather some other technique, such as published data for similar processes or the equipment supplier's literature. Values: 1 = 'Yes', 0 = 'No'', 2 = blank or not entered.",
"WASTESTREAM_CODE": "Indicates the general waste stream type containing the toxic chemical. The four codes used to indicate the general waste stream types are: A = Gaseous (gases, vapors, airborne particles), W = Wastewater (aqueous waste), L = Liquid (non-aqueous, liquid waste), and S = Solid (including sludges and slurries).",
"WASTESTREAM_SEQ_NUM": "Sequence in which an on-site waste treatment process is reported on a Form R submission.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"INFLUENT_CONC_RANGE": "Indicates the range of concentration of the toxic chemical in the waste stream as it typically enters the waste treatment step or sequence. The concentration is based on the amount or mass of the toxic chemical in the waste stream as compared to the total amount or mass of the waste stream and is determined prior to the application of any waste management methods. Facilities report using one of the following five codes:",
"TREATMENT_EFFICIENCY_EST_NA": "Indicates whether 'NA' (Not Applicable) was entered on Form R for the waste treatment efficiency estimate. Values: 1 = 'Yes', 0 = 'No'.",
"SEQUENTIAL_TREAT_87_90": "Indicator that shows whether treatment steps were used in sequence, for Reporting Years 1987 through 1990, to estimate treatment efficiency of the overall treatment process.",
"TREATMENT_EFFICIENCY_EST": "The percentage of the toxic chemical removed from the waste stream through destruction, biological degradation, chemical conversion, or physical removal. This estimate represents the overall percentage of the toxic chemical destroyed or removed (based on amount or mass) throughout all waste management methods, not merely changes in volume or concentration and not merely the efficiency of one method in a sequence of activities. This also does not represent the waste treatment efficiency for the entire waste stream but only the removal or destruction of this specific toxic chemical in that waste stream. This does not include energy recovery or recycling activities. Energy recovery and recycling activities are reported in sections 7B and 7C, respectively. The value is calculated as follows: ((I - E)/1) * 100, where I equals the amount of toxic chemical in the influent waste stream, and E equals the amount of the toxic chemical in the effluent waste stream.",
"EFFICIENCY_RANGE_CODE": "The range code representing the percentage of the toxic chemical removed from the waste stream through destruction, biological degradation, chemical conversion, or physical removal. This range code represents the overall percentage of the toxic chemical destroyed or removed (based on amount or mass) throughout all waste management methods, not merely changes in volume or concentration and not merely the efficiency of one method in a sequence of activities. This also does not represent the waste treatment efficiency for the entire waste stream but only the removal or destruction of this specific toxic chemical in that waste stream. This does not include energy recovery or recycling activities. Energy recovery and recycling activities are reported in sections 7B and 7C, respectively. "
},
"TRI_FACILITY_RCRA": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"ASGN_RCRA_IND": "Indicates that the associated RCRA_NUM represents the principal RCRA Identification Number as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"RCRA_NUM": "The number assigned to the facility by EPA for purposes of the Resource Conservation and Recovery Act (RCRA). Not all facilities will have a RCRA Identification Number. A facility will only have a RCRA Identification Number if it manages RCRA regulated hazardous waste. Some facilities may have more than one RCRA Identification Number."
},
"TRI_FACILITY_DB_HISTORY": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"ASGN_DB_IND": "Indicates that the associated DB_NUM represents the principal Dun & Bradstreet number assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.",
"REPORTING_YEAR": "The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.",
"DB_NUM": "The number or numbers which have been assigned to the facility by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all facilities will have Dun & Bradstreet numbers."
},
"TRI_FACILITY_SIC": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"SIC_CODE": "The Standard Industrial Classification (SIC) code or codes which best describes the activities conducted at the facility. SIC codes are 4 digit numbers used by the Bureau of Census as part of a system to categorize and track the types of business activities conducted in the United States. The first two digits of the code represent the major industry group (e.g., SIC code 25XX indicates Furniture and Fixtures) and the second two digits represent the specific subset of that group (e.g., 2511 indicates wood household furniture). EPA instructs facilities to enter their primary SIC code first. Many facilities do not report their primary SIC code first.",
"PRIMARY_IND": "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."
},
"TRI_ONSITE_WASTE_TREATMENT_MET": {
"WASTESTREAM_SEQ_NUM": "Sequence in which an on-site waste treatment process is reported on a Form R submission.",
"DOC_CTRL_NUM": "DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.",
"TREATMENT_METHOD_CODE": "The on-site waste treatment activity that is applied to the waste stream containing the toxic chemical. This includes all waste treatment methods through which the toxic chemical passes as part of that waste stream, regardless of whether or not the method has, or is intended to have, any effect on the toxic chemical. If the waste stream moves through a series of waste treatment activities, each method will be listed sequentially.",
"TREATMENT_SEQUENCE": "Sequence in which a TREATMENT_METHOD_CODE is entered on a Form R submission, and indicates the on-site order of treatment."
},
"TRI_FACILITY_SIC_HISTORY": {
"TRI_FACILITY_ID": "The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.",
"REPORTING_YEAR": "The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.",
"SIC_CODE": "The Standard Industrial Classification (SIC) code or codes which best describes the activities conducted at the facility. SIC codes are 4 digit numbers used by the Bureau of Census as part of a system to categorize and track the types of business activities conducted in the United States. The first two digits of the code represent the major industry group (e.g., SIC code 25XX indicates Furniture and Fixtures) and the second two digits represent the specific subset of that group (e.g., 2511 indicates wood household furniture). EPA instructs facilities to enter their primary SIC code first. Many facilities do not report their primary SIC code first.",
"PRIMARY_IND": "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."
}
}
| lookup_table = {'TRI_FACILITY_NPDES': {'ASGN_NPDES_IND': "Indicates that the associated NPDES_NUM represents the principal NPDES permit number as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.", 'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'NPDES_NUM': 'The permit number of a specific discharge to a water body under the National Pollutant Discharge Elimination System (NPDES) of the Clean Water Act (CWA). Not all facilities will have a NPDES permit number. A facility may have multiple NPDES permit numbers. The NPDES permit number may not pertain to the toxic chemical reported to TRI.'}, 'TRI_OFF_SITE_TRANSFER_LOCATION': {'PROVINCE': 'The province of the location to which the toxic chemical in wastes is transferred. A facility may transfer toxic chemicals in waste to off-site locations that are outside of the United States. The province field gives a facility the flexibility needed to enter a correct off-site location address that is outside the United States.', 'TRANSFER_LOC_NUM': 'The sequence in which an off-site transfer is reported on a Form R submission.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. 
The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'CONTROLLED_LOC': "Indicator that shows whether the off-site location to which toxic chemicals are transferred in wastes is owned or controlled by the facility or the parent company. Values: 1 = 'Yes', 0 = 'No', 2 = blank or not entered.", 'OFF_SITE_STREET_ADDRESS': 'The street address for the physical location of the entity receiving the toxic chemical.', 'COUNTRY_CODE': 'The country code where the entity receiving the toxic chemical is located.', 'COUNTY_NAME': 'The standardized name of the county where the facility is located.', 'CITY_NAME': 'The city where the facility or establishment is physically located.', 'OFF_SITE_NAME': 'The name of the entity receiving the toxic chemical.', 'RCRA_NUM': 'The number assigned to the facility by EPA for purposes of the Resource Conservation and Recovery Act (RCRA). Not all facilities will have a RCRA Identification Number. A facility will only have a RCRA Identification Number if it manages RCRA regulated hazardous waste. Some facilities may have more than one RCRA Identification Number.', 'STATE_ABBR': 'The state abbreviation where the facility or establishment is physically located.', 'ZIP_CODE': 'The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.'}, 'TRI_TRANSFER_QTY': {'TRANSFER_LOC_NUM': 'The sequence in which an off-site transfer is reported on a Form R submission.', 'TRANSFER_BASIS_EST_CODE': "The code representing the technique used to develop the estimate of the release amount reported in the 'Total Transfers' box (TOTAL_TRANSFER). The values are as follows:", 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. 
The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'TOTAL_TRANSFER': 'The total amount (in pounds) of the toxic chemical transferred from the facility to Publicly Owned Treatment Works (POTW) or to an off-site location (non-POTW) during the calendar year (January 1 - December 31). POTW refers to a municipal sewage treatment plant. The most common transfers will be conveyances of the toxic chemical in facility wastewater through underground sewage pipes, however, trucked or other direct shipments to a POTW are also included in this estimate.', 'TRANSFER_RANGE_CODE': "Code that corresponds to the amount of toxic chemical released annually by the reporting facility, reported as a range for releases less than 1,000 pounds. When a facility uses a range code, the amount reported to TRI is the midpoint of the range. On Form R, letter codes are used to represent ranges: A = 1-10 pounds, B = 11-499 pounds, and C = 500-999 pounds. The letters are converted to numbers for storage in the TRIS database where '1' represents range 'A', '3' represents range 'B', and'4' represents range 'C'. The historical value '2' = 1-499 pounds.", 'OFF_SITE_AMOUNT_SEQUENCE': 'Sequence in which an off-site transfer amount is reported on a submission.', 'TRANSFER_EST_NA': "Indicates that 'NA' (Not Applicable) was entered on Form R when a facility does not discharge wastewater containing the toxic chemical to Publicly Owned Treatment Works (Section 6.1.B_) or in wastes to other off-site facilities (section 6.2_). Values: 1 = 'Yes', 0 = 'No'.", 'TYPE_OF_WASTE_MANAGEMENT': "The type of waste treatment, disposal, recycling, or energy recovery methods the off-site location uses to manage the toxic chemical. A two-digit code is used to indicate the type of waste management activity employed. This refers to the ultimate disposition of the toxic chemical, not the intermediate activities used for the waste stream. 
(In Envirofacts, the code 'P91' indicates a transfer to a POTW. All other codes refer to off-site transfers.)"}, 'TRI_SOURCE_REDUCT_METHOD': {'SOURCE_REDUCT_METHOD_1': 'Indicates the method or methods used at the facility to identify the possibility for a source reduction activity implementation at the facility. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. An example of a method used to identify source reduction opportunities would be an internal pollution prevention audit.', 'SOURCE_REDUCT_METHOD_2': 'Indicates the method or methods used at the facility to identify the possibility for a source reduction activity implementation at the facility. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. An example of a method used to identify source reduction opportunities would be an internal pollution prevention audit.', 'SOURCE_REDUCT_METHOD_3': 'Indicates the method or methods used at the facility to identify the possibility for a source reduction activity implementation at the facility. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. An example of a method used to identify source reduction opportunities would be an internal pollution prevention audit.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'SOURCE_REDUCT_ACTIVITY': 'Indicates the type of source reduction activity implemented at the facility during the reporting year. This does not include all source reduction activities ongoing at the facility but only those activities related to the reported toxic chemical. 
An example of a source reduction activity would include a spill and leak prevention program such as the installation of a vapor recovery system.', 'REDUCTION_SEQUENCE_NUM': 'Sequence in which a source reduction method is reported on a submission.'}, 'TRI_POTW_LOCATION': {'POTW_NAME': 'The name of the publicly owned treatment works (POTW) receiving the toxic chemical.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'POTW_STREET_ADDRESS': 'The street address for the physical location of the publicly owned treatment works (POTW) receiving the toxic chemical.', 'STATE_ABBR': 'The state abbreviation where the facility or establishment is physically located.', 'COUNTY_NAME': 'The standardized name of the county where the facility is located.', 'CITY_NAME': 'The city where the facility or establishment is physically located.', 'POTW_LOC_NUM': 'The sequence in which an POTW transfer is reported on a Form R submission.', 'ZIP_CODE': 'The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.'}, 'TRI_TABLE_ID_NAME': {'TABLE_ID': 'A designation for a related group of permissible values. The name that identifies this group is located in TRI_TABLE_ID_NAME.', 'TABLE_NAME': 'The table description for the TRI_CODE_DESC.TABLE_ID .'}, 'TRI_CODE_DESC': {'DESCRIPT': 'The text description of a permissible value contained in CODE.', 'CODE': 'The permissible values for a column.', 'TABLE_ID': 'A designation for a related group of permissible values. The name that identifies this group is located in TRI_TABLE_ID_NAME.'}, 'TRI_FACILITY_NPDES_HISTORY': {'ASGN_NPDES_IND': "Indicates that the associated NPDES_NUM represents the principal NPDES permit number as assigned to the facility by TRI from Form R or Form A submissions. 
Values: 1 = 'Yes', 0 = 'No'.", 'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'NPDES_NUM': 'The permit number of a specific discharge to a water body under the National Pollutant Discharge Elimination System (NPDES) of the Clean Water Act (CWA). Not all facilities will have a NPDES permit number. A facility may have multiple NPDES permit numbers. The NPDES permit number may not pertain to the toxic chemical reported to TRI.', 'REPORTING_YEAR': 'The year for which the form was submitted. 
This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.'}, 'TRI_ZIP_CODE': {'TRI_CENTROID_LAT': 'The assigned centroid latitude based on zip code.', 'REGION': 'The EPA region in which the facility is located.', 'CITY_NAME': 'The city where the facility or establishment is physically located.', 'STATE_ABBR': 'The state abbreviation where the facility or establishment is physically located.', 'TRI_CENTROID_LONG': 'The assigned centroid longitude based on zip code.', 'COUNTRY_NAME': 'The country where the facility is located, if outside the United States.', 'ZIP_CODE': 'The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.'}, 'TRI_FACILITY_RCRA_HISTORY': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'ASGN_RCRA_IND': "Indicates that the associated RCRA_NUM represents the principal RCRA Identification Number as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.", 'REPORTING_YEAR': 'The year for which the form was submitted. 
This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.', 'RCRA_NUM': 'The number assigned to the facility by EPA for purposes of the Resource Conservation and Recovery Act (RCRA). Not all facilities will have a RCRA Identification Number. A facility will only have a RCRA Identification Number if it manages RCRA regulated hazardous waste. Some facilities may have more than one RCRA Identification Number.'}, 'TRI_REPORTING_FORM': {'PRODUCTION_RATIO_NA': "Indicator that shows whether 'NA' was entered in Section 8.9, Production Ratio or Activity Index (PRODUCTION_RATIO). Values: 1 = 'Yes', 0 = 'No'.", 'PUBLIC_CONTACT_PHONE': 'The phone number to reach the person identified in the Public Contact Name box (PUBLIC_CONTACT_PERSON).', 'FEDERAL_FAC_IND': "Indicates whether the 'Federal' box was checked on the submission. A Federal facility is a facility owned or operated by the Federal government. This includes facilities that are operated by contractors to the Federal government (i.e., a facility where the land is owned by the Federal government but a private company is under contract to run the facility's operations). The types of Federal facilities that report to TRI are broader than the types of private sector facilities that report to TRI (e.g., DOD military bases). Values: 1 = box checked, 0 = box not checked.", 'TRADE_SECRET_IND': "Indicator that shows whether the identity of the toxic chemical has been claimed a trade secret. If the facility has indicated that the chemical name is a trade secret, the chemical name will not be released to the public. 
Values: 1 = 'Trade Secret' box checked, 0 = 'Trade Secret' box not checked.", 'MAX_AMOUNT_OF_CHEM': 'The two digit code indicating a range for the maximum amount of the chemical present at the facility at any one time during the calendar year (January 1 - December 31) for which the report was submitted.', 'CERTIF_NAME': 'The name of the owner, operator, or senior management official who is certifying that the information provided is true and complete and that the values reported are accurate based on reasonable estimates. This individual has management responsibility for the person or persons completing the report.', 'CERTIF_OFFICIAL_TITLE': 'The title of the owner, operator, or senior management official who is certifying that the information provided is true and complete and that the values reported are accurate based on reasonable estimates. This individual has management responsibility for the person or persons completing the report.', 'ONE_TIME_RELEASE_QTY': 'The total amount (in pounds) of the toxic chemical released directly to the environment or sent offsite for recycling, energy recovery, treatment, or disposal during the reporting year due to remedial actions, catastrophic events such as earthquakes or floods, and one-time events not associated with normal or routine production processes. These amounts are not included in the amounts reported in sections 8.1-8.7 (TRI_SOURCE_REDUCTION_QTY).', 'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. 
In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'ORIG_RECEIVED': 'The original received date for a submission for this chemical from this facility and this reporting year.', 'REVISION_NA': "Indicator that shows whether the submission 'Revision' box on form R was checked by the submitter. Values: 1 = box checked, 0 = box not checked.", 'PUBLIC_CONTACT_PERSON': 'The name of the individual who may be contacted by the general public with questions regarding the information reported to TRI on this chemical. This person may or may not be familiar with the information provided in the form but has been designated by the facility or establishment to handle public inquiries.', 'ENTIRE_FAC': "Indicates that only one Form R was filed for this chemical for the entire facility. Values: 1 = Form R 'Entire' box check, 0 = box not checked.", 'ACTIVE_STATUS': "Indicates the status of the submitted Form R. Value: 1 = 'Active submission'.", 'POSTMARK_DATE': 'The most recent postmark date for a submission for this chemical from this facility and this reporting year . The date may represent a revised submission or be the same as the ORIG_POSTMARK.', 'DIOXIN_DISTRIBUTION_2': 'Indicates the distribution (percentage) of 1,2,3,4,7,8,9-Heptachlorodibenzofuran (CAS Number: 55673-89-7) in the reported dioxin or dioxin-like compounds.', 'REPORTING_YEAR': 'The year for which the form was submitted. 
This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.', 'RECEIVED_DATE': 'The date the submission was received at the EPCRA Reporting Center.', 'DIOXIN_DISTRIBUTION_14': 'Indicates the distribution (percentage) of 2,3,4,7,8-Pentachlorodibenzofuran (CAS Number: 57117-31-4) in the reported dioxin or dioxin-like compounds.', 'DIOXIN_DISTRIBUTION_15': 'Indicates the distribution (percentage) of 1,2,3,7,8-Pentachlorodibenzo- p-dioxin (CAS Number: 40321-76-4) in the reported dioxin or dioxin-like compounds.', 'ORIG_POSTMARK': 'The original postmark date for a submission for this chemical from this facility and this reporting year.', 'DIOXIN_DISTRIBUTION_17': 'Indicates the distribution (percentage) of 2,3,7,8-Tetrachlorodibenzo- p-dioxin (CAS Number: 01746-01-6) in the reported dioxin or dioxin-like compounds.', 'CERTIF_SIGNATURE': 'Indicator for the signature of the individual who is certifying that the information being provided in the form is true and complete and that the values reported are accurate based on reasonable estimates.', 'DIOXIN_DISTRIBUTION_11': 'Indicates the distribution (percentage) of 1,2,3,4,6,7,8,9-Octachlorodibenzofuran (CAS Number: 39001-02-0) in the reported dioxin or dioxin-like compounds.', 'DIOXIN_DISTRIBUTION_12': 'Indicates the distribution (percentage) of 1,2,3,4,6,7,8,9-Octachlorodibenzo- p-dioxin (CAS Number: 03268-87-9) in the reported dioxin or dioxin-like compounds.', 'ADDITIONAL_DATA_IND': "For reporting years beginning in 1991, the indicator that shows whether additional optional information on source reduction, pollution control, or recycling activities implemented during the reporting year or prior years has been attached to the submission. 
For reporting years 1987 through 1990, the indicator shows whether waste minimization data was reported on Form R and has since been archived. Values: 1 = 'Yes', 0 = 'No'', 2 = blank or not entered.", 'CAS_CHEM_NAME': 'The official name of the toxic chemical, toxic chemical mixture, (e.g., xylene mixed isomers), or chemical category as it appears on the EPCRA Section 313 list. ) or 2.1 . This space will be empty if a trade secret was claimed for the toxic chemical and information is provided in Section 1.3 (MIXTURE_NAME) or 2.1 (GENERIC_CHEM_NAME).', 'DIOXIN_DISTRIBUTION_8': 'Indicates the distribution (percentage) of 1,2,3,6,7,8-Hexachlorodibenzo- p-dioxin (CAS Number: 57653-85-7) in the reported dioxin or dioxin-like compounds.', 'DIOXIN_DISTRIBUTION_9': 'Indicates the distribution (percentage) of 1,2,3,7,8,9-Hexachlorodibenzo- p-dioxin (CAS Number: 19408-74-3) in the reported dioxin or dioxin-like compounds.', 'GENERIC_CHEM_NAME': "The generic, structurally descriptive term used in place of the toxic chemical name when a trade secret was claimed for the toxic chemical. The name must appear on both sanitized and unsanitized Form Rs and be the same as that used on the substantiation form. Section 1.3 will be 'NA' or blank if information is provided in Sections 1.1 (TRI_CHEM_ID) and 1.2 (CAS_CHEM_NAME), or 2.1 (MIXTURE_NAME). Note: Only Sanitized Trade Secret submissions are stored in the TRIS database.", 'DIOXIN_DISTRIBUTION_3': 'Indicates the distribution (percentage) of 1,2,3,4,7,8-Hexachlorodibenzofuran (CAS Number: 70648-26-9) in the reported dioxin or dioxin-like compounds.', 'MIXTURE_NAME': 'The generic term used in place of the toxic chemical name when a trade secret was claimed for the toxic chemical by the supplier of the toxic chemical. This is generally used when the supplier of a chemical formulation wishes to keep the identity of a particular ingredient in the formulation a secret. 
It is only used when the supplier, not the reporting facility, is claiming the trade secret. If the reporting facility is claiming a trade secret for the toxic chemical, the generic name is provided in Section 1.3 (GENERIC_CHEM_NAME) and this section (MIXTURE_NAME) is left blank. This space will also be left blank if a trade secret is not being claimed for the toxic chemical.', 'DIOXIN_DISTRIBUTION_1': 'Indicates the distribution (percentage) of 1,2,3,4,6,7,8-Heptachlorodibenzofuran (CAS Number: 67562-39-4) in the reported dioxin or dioxin-like compounds.', 'DIOXIN_DISTRIBUTION_6': 'Indicates the distribution (percentage) of 2,3,4,6,7,8-Hexachlorodibenzofuran (CAS Number: 60851-34-5) in the reported dioxin or dioxin-like compounds.', 'DIOXIN_DISTRIBUTION_7': 'Indicates the distribution (percentage) of 1,2,3,4,7,8-Hexachlorodibenzo- p-dioxin (CAS Number: 39227-28-6) in the reported dioxin or dioxin-like compounds.', 'DIOXIN_DISTRIBUTION_4': 'Indicates the distribution (percentage) of 1,2,3,6,7,8-Hextachlorodibenzofuran (CAS Number: 57117-44-9) in the reported dioxin or dioxin-like compounds.', 'DIOXIN_DISTRIBUTION_5': 'Indicates the distribution (percentage) of 1,2,3,7,8,9-Hexachlorodibenzofuran (CAS Number: 72918-21-9) in the reported dioxin or dioxin-like compounds.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'PUBLIC_CONTACT_EMAIL': 'The Email address of the PUBLIC_CONTACT_PERSON.', 'PARTIAL_FAC': "Indicates that the facility has chosen to report by establishment or groups of establishments. Therefore, there may be other reports filed for this chemical by other establishments of the facility. 
Values: 1 = Form R 'Partial' box checked, 0 = box not checked.", 'REVISION_CODE': 'Facilities that filed a Form R and/or a Form A Certification Statement under EPCRA section 313 may submit a request to the revise the data. The REVISION_CODE is a code indicating the current form is a revision of a previous form and the reason it was revised. Added in reporting year 2007, the data element can have the following values:', 'DIOXIN_DISTRIBUTION_16': 'Indicates the distribution (percentage) of 2,3,7,8-Tetrachlorodibenzofuran (CAS Number: 51207-31-9) in the reported dioxin or dioxin-like compounds.', 'ONE_TIME_RELEASE_QTY_NA': "Indicator that shows whether 'NA' was entered in Section 8.8, Quantity Released to the Environment as Result of Remedial Actions, Catastrophic Events, or One-Time Events Not Associated with Production Process (ONE_TIME_RELEASE_QTY). Values: 1 = 'Yes', 0 = 'No'.", 'CERTIF_DATE_SIGNED': 'The date that the senior management official signed the certification statement.', 'DIOXIN_DISTRIBUTION_NA': "Indicates whether 'NA' (Not Applicable) was entered on the Form R for the Distribution of Each Member of the Dioxin and Dioxin-like Compounds Category. The Form R asks facilities to report a distribution of chemicals included in the Dioxin and Dioxin-like compounds category. There are 17 individual chemicals listed in the Dioxin and Dioxin-like compounds category. A value of '1' for this variable indicates that the facility did not have the speciation (distribution) information available.", 'DIOXIN_DISTRIBUTION_10': 'Indicates the distribution (percentage) of 1,2,3,4,6,7,8-Hexachlorodibenzo- p-dioxin (CAS Number: 35822-46-9) in the reported dioxin or dioxin-like compounds.', 'FORM_TYPE_IND': 'Indicates the type of form received. Values: L = Form R, S = Form A.', 'GOCO_FLAG': "Indicates whether the 'GOCO' box was checked on the submission. A GOCO facility is a Government-Owned, Contractor-Operated facility. 
Values: 1= box checked, 0= box not checked.", 'TRI_CHEM_ID': 'The number assigned to chemicals regulated under Section 313 of the Emergency Planning and Community Right-to-Know Act (EPCRA). For most toxic chemicals or mixture of chemicals (e.g., xylene mixed isomers), the TRI_CHEM_ID is the Chemical Abstract Service Registry (CAS) number. A given listed toxic chemical or mixture may be known by many names but it will have only one CAS number. For example, methyl ethyl ketone and 2-butanone are synonyms for the same toxic chemical and thus have only one CAS number (78-93-3). For categories of chemicals for which CAS Registry numbers have not been assigned, a four-character category code, asssigned by TRI, is included in TRI_CHEM_ID. Form R section 1.1 will be empty if a trade secret was claimed for the toxic chemical and information is provided in Section 1.3 or 2.1.', 'SANITIZED_IND': "Indicator that shows whether the submission 'Sanitized Trade Secret' box was checked by the submitter. Note: Only Sanitized Trade Secret submissions are stored in the TRIS database. Values: 1 = box checked, 0 = box not checked.", 'DIOXIN_DISTRIBUTION_13': 'Indicates the distribution (percentage) of 1,2,3,7,8-Pentachlorodibenzofuran (CAS Number: 57117-41-6) in the reported dioxin or dioxin-like compounds.', 'PRODUCTION_RATIO': 'Indicates the level of increase or decrease from the previous year, of the production process or other activity in which the toxic chemical is used. This number is usually around 1.0. For example, a production ratio or activity index of 1.5 would indicate that production associated with the use of the toxic chemical has increased by about 50 percent. Conversely, a production ratio or activity index of 0.3 would indicate that production associated with the use of the toxic chemical has decreased by about 70 percent.'}, 'TRI_FACILITY_UIC_HISTORY': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. 
Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'ASGN_UIC_IND': "Indicates that the associated UIC_NUM represents the principal underground injection code identification number (UIC ID) as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.", 'UIC_NUM': 'The unique number assigned to a specific underground injection well under the Safe Drinking Water Act (SDWA). A facility with multiple injection wells will have multiple underground injection code identification number (UIC ID) Numbers. If the facility does not have an underground injection well regulated by the SDWA, it will not have a UIC ID number.', 'REPORTING_YEAR': 'The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.'}, 'TRI_CHEM_INFO': {'CAAC_IND': "Indicates whether the chemical is reportable under the Clean Air Act. Values: 1 = 'Yes', 0 = 'No'.", 'CARC_IND': "Indicates whether the chemical is reportable as a carcinogen under the CARC. Values: 1 = 'Yes', 0 = 'No'.", 'UNIT_OF_MEASURE': 'Indicates the unit of measure used to quantify the chemical. 
Values: {Pounds, Grams}', 'CLASSIFICATION': 'Indicates the classification of the chemical. Chemicals can be classified as either a Dioxin or Dioxin-like compounds, a PBT (Persistent, Bioaccumulative and Toxic) chemical or a general EPCRA Section 313 chemical. Values: 0=TRI, 1=PBT, 2=Dioxin', 'FEDS_IND': "Indicates whether the chemical is a non-Section 313 chemical submitted by a federal facility under Executive Order 12856. Values: 1 = 'Yes', 0 = 'No'.", 'METAL_IND': "Indicates whether the chemical is a metal or metal compound. Values: 1 = 'Yes', 0 = 'No'.", 'NO_DECIMALS': 'Indicates the maximum number of decimals that can be used to quantify a chemical. This measurement applies to release, transfer and source reduction quantities. PBT (Persistent, Bioaccumulative and Toxic) chemicals, including Dioxins and Dioxin-like Compounds, can be quantified using numbers to the right of the decimal point. The measurement expresses the maximum number of positions to the right of the decimal point that a PBT chemical can be expressed in. All other Non-PBT chemicals are reported as whole numbers.', 'R3350_IND': "Indicates whether the chemical is reportable under Regulation 3350. Values: 1 = 'Yes', 0 = 'No'.", 'TRI_CHEM_ID': 'The number assigned to chemicals regulated under Section 313 of the Emergency Planning and Community Right-to-Know Act (EPCRA). For most toxic chemicals or mixture of chemicals (e.g., xylene mixed isomers), the TRI_CHEM_ID is the Chemical Abstract Service Registry (CAS) number. A given listed toxic chemical or mixture may be known by many names but it will have only one CAS number. For example, methyl ethyl ketone and 2-butanone are synonyms for the same toxic chemical and thus have only one CAS number (78-93-3). For categories of chemicals for which CAS Registry numbers have not been assigned, a four-character category code, asssigned by TRI, is included in TRI_CHEM_ID. 
Form R section 1.1 will be empty if a trade secret was claimed for the toxic chemical and information is provided in Section 1.3 or 2.1.', 'ACTIVE_DATE': 'First year that this chemical must be reported to TRI.', 'INACTIVE_DATE': 'Final year that this chemical must be reported to TRI.', 'PBT_END_YEAR': 'Indicates the year that a PBT (Persistent, Bioaccumulative and Toxic) chemical was dropped as an EPCRA Section 313 PBT Chemical, Toxics Release Inventory.', 'PBT_START_YEAR': 'Indicates the year that a PBT (Persistent, Bioaccumulative and Toxic) chemical was designated as an EPCRA Section 313 PBT Chemical, Toxics Release Inventory.', 'CHEM_NAME': 'The official name of the toxic chemical, toxic chemical mixture, (e.g., xylene mixed isomers), or chemical category as it appears on the EPCRA Section 313 list.'}, 'TRI_FACILITY_UIC': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'ASGN_UIC_IND': "Indicates that the associated UIC_NUM represents the principal underground injection code identification number (UIC ID) as assigned to the facility by TRI from Form R or Form A submissions. 
Values: 1 = 'Yes', 0 = 'No'.", 'UIC_NUM': 'The unique number assigned to a specific underground injection well under the Safe Drinking Water Act (SDWA). A facility with multiple injection wells will have multiple underground injection code identification number (UIC ID) Numbers. If the facility does not have an underground injection well regulated by the SDWA, it will not have a UIC ID number.'}, 'TRI_FACILITY': {'PREF_DESC_CATEGORY': "The EPA's preferred geographic coordinate description category. Describes the category of feature referenced by the latitude and longitude.", 'ASGN_PARTIAL_IND': "Indicates that the facility reports by establishment or groups of establishments as assigned by TRI from Form R submisions. Partial facilities may have more than one submission for the same chemical in one reporting year. Values: 0 = 'Entire facility', 1 = 'Partial facility'.", 'FACILITY_NAME': 'The name of the facility or establishment for which the form was submitted. For purposes of TRI a "facility" is generally considered to be all buildings and equipment owned or operated by a company on a single piece of property. The facility may be only one building in an industrial park or it may be a large complex covering many acres. At some larger facilities there may be several different businesses that are all run by the same company. These different businesses are referred to as "establishments." Generally, a company will submit one Form R for the entire facility. A facility may choose, however, to submit a Form R for each establishment separately. The name in this section will either be the name used for the entire facility or the name of the specific establishment, depending on how the facility chooses to report.', 'STATE_COUNTY_FIPS_CODE': 'Combination of the two-letter state abbreviation and the county code.', 'MAIL_STATE_ABBR': 'The state abbreviation the facility or establishment uses to receive mail. 
This may or may not be the same as the information reported in the State box.', 'MAIL_ZIP_CODE': 'The zip code the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the Zip Code box.', 'CITY_NAME': 'The city where the facility or establishment is physically located.', 'MAIL_COUNTRY': 'The country the facility or establishment uses to receive mail.', 'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'PREF_HORIZONTAL_DATUM': "The EPA's preferred geographic coordinate horizontal datum. Reference datum of the latitude and longitude.", 'FAC_CLOSED_IND': "A flag that indicates whether a facility is open (value =' 0'), closed (value = '1'), or inactive for TRI (value = '2').", 'FAC_LONGITUDE': "The series of numbers which identifies the exact physical location of the facility as a measure of the arc or portion of the earth's equator between the meridian of the center of the facility and the prime meridian. The right-justified value is stored as degrees, minutes and seconds (0DDDMMSS). Tenths of seconds are not stored. 
The value is negative for locations in the Western hemisphere.", 'MAIL_STREET_ADDRESS': 'The address the facility or establishment uses for receiving mail. Form R instructs the submitter to enter the address used for mail only if different than in the Street box. The TRIS database stores the address from the Street box (STREET_ADDRESS) in MAILING_STREET_ADDRESS even when the facility Mailing address is not different.', 'STATE_ABBR': 'The state abbreviation where the facility or establishment is physically located.', 'COUNTY_NAME': 'The standardized name of the county where the facility is located.', 'FAC_LATITUDE': "The series of numbers that identifies the exact physical location of the facility as a measure of the angular distance north form the earth's equator to the center of the facility. The value is stored as degrees, minutes and seconds (0DDMMSS), and the first position is zero-filled. The value is positive for locations north of the equator.", 'PREF_LATITUDE': "The EPA's preferred geographic latitude estimation of the reporting facility. Value for latitude is in decimal degrees. This is a signed field.", 'PREF_COLLECT_METH': "The EPA's preferred geographic coordinate collection method code for the reporting facility. Method used to determine the latitude and longitude.", 'ASGN_PUBLIC_PHONE': 'The phone number to reach the person identified in the Public Contact Name box (PUBLIC_CONTACT_PERSON), as assigned by TRI from Form R submissions.', 'PREF_ACCURACY': "The EPA's preferred geographic coordinate accuracy estimation for the reporting facility. Describes the accuracy value as a range (+/) in meters of the latitude and longitude.", 'ASGN_FEDERAL_IND': "An identifier that indicates the ownership status of a facility. A Federal facility is a facility owned or operated by the Federal government. 
This includes facilities that are operated by contractors to the Federal government (i.e., a facility where the land is owned by the Federal government but a private company is under contract to run the facility's operations). The types of Federal facilities that report to TRI are broader than the types of private sector facilities that report to TRI (e.g., DOD military bases). Values: C = 'Commercial', F = 'Federal facility', and G = 'Government owned/contractor operated' (GOCO).", 'ASGN_AGENCY': 'An abbreviation for the name of the agency supported by a federal or Government Owned/Contractor Operated (GOCO) reporting site.', 'MAIL_PROVINCE': 'The province the facility or establishment uses to receive mail. A facility may receive mail at an address outside of the United States. The province field gives a facility the flexibility needed to enter a correct mailing address outside the United States.', 'PREF_LONGITUDE': "The EPA's preferred geographic longitude estimation of the reporting facility. Value for longitude is in decimal degrees. This is a signed field.", 'STREET_ADDRESS': 'The street address for the physical location of the facility or establishment.', 'ZIP_CODE': 'The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.', 'MAIL_NAME': 'The name which the facility or establishment uses for receiving mail if the address used for mail is different than in the Street box. This may or may not be the same as the name listed in the Facility or Establishment Name box.', 'PREF_SOURCE_SCALE': "The EPA's preferred geographic coordinate source map scale code. This is the scale of the source used to determine the latitude and longitude.", 'MAIL_CITY': 'The city the facility or establishment uses to receive mail. 
This may or may not be the same as the information reported in the City box.', 'PARENT_CO_NAME': "Name of the corporation or other business company that is the ultimate parent company, located in the United States, of the facility or establishment submitting the data. The parent company is the company that directly owns at least 50 percent of the voting stock of the reporting company. This does not include foreign parent companies. 'NA' indicates that the facility does not have a parent company.", 'PREF_QA_CODE': 'Contains the results of four quality assurance tests (Test 1 through Test 4 below) used to determine facility location. "ZIP Code Bounding Box" is a rectangle generated from the ZIP Code boundaries, which is defined by the extreme north-south latitude and east-west longitudes, plus 1 kilometer (km) in each direction. The quality assurance tests are:', 'FRS_ID': 'A unique code used to identify the facility in the Facility Registry System (FRS). Note: The column will be populated in the future when values have been established.', 'PARENT_CO_DB_NUM': "The number which has been assigned to the parent company by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all parent companies will have a Dun & Bradstreet number. 'NA' indicates that the facility or establishment's parent company does not have a Dun & Bradstreet number.", 'ASGN_PUBLIC_CONTACT': 'The name of the individual who may be contacted by the general public with questions regarding the company and the information reported to TRI as assigned by TRI from Form R submissions.. This person may or may not be familiar with the information provided in the form but has been designated by the facility or establishment to handle public inquiries.', 'REGION': 'The EPA region in which the facility is located.'}, 'TRI_SUBMISSION_SIC': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. 
Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'SIC_SEQUENCE_NUM': "The sequence of the facility's Standard Industrial Classification (SIC) code as entered on Form R or Form A.", 'SIC_CODE': 'The Standard Industrial Classification (SIC) code or codes which best describes the activities conducted at the facility. SIC codes are 4 digit numbers used by the Bureau of Census as part of a system to categorize and track the types of business activities conducted in the United States. The first two digits of the code represent the major industry group (e.g., SIC code 25XX indicates Furniture and Fixtures) and the second two digits represent the specific subset of that group (e.g., 2511 indicates wood household furniture). EPA instructs facilities to enter their primary SIC code first. Many facilities do not report their primary SIC code first.', 'PRIMARY_IND': "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. 
EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."}, 'TRI_RECYCLING_PROCESS': {'ONSITE_RECYCLING_PROC_CODE': 'Indicates the specific on-site recycling method or methods applied to the toxic chemical. Similar to section 7B and unlike section 7A, on-site recycling under section 7C refers only to recycling activities directed at the specific toxic chemical being reported, not all recycling methods applied to the waste stream. Section 7C is not completed unless the specific toxic chemical being reported is recovered from the waste stream for reuse.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.'}, 'TRI_ENERGY_RECOVERY': {'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'ONSITE_ENERGY_PROC_CODE': 'Code for the specific energy recovery method applied to the toxic chemical. Unlike section 7A which includes all treatment methods applied to the waste stream, the energy recovery must be directed at the specific toxic chemical being reported. This means that the toxic chemical must have significant heating value. Section 7B should not be used for chemicals that do not have significant heating values such as metals. Values: U01 = Industrial Kiln, U02 = Industrial Furnace, U03 = Industrial Boiler, U09 = Other Energy Recovery Methods, NA = not applicable, no on-site energy recovery applied to the toxic chemical.'}, 'TRI_FACILITY_DB': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. 
One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'ASGN_DB_IND': "Indicates that the associated DB_NUM represents the principal Dun & Bradstreet number assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.", 'DB_NUM': 'The number or numbers which have been assigned to the facility by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all facilities will have Dun & Bradstreet numbers.'}, 'TRI_CHEM_ACTIVITY': {'REACTANT': "Indicates the toxic chemical is used in chemical reactions to create another chemical substance or product that is then sold or otherwise distributed to other facilities. Some examples of reactants include feedstocks, raw materials, intermediates, and initiators. Values: 1 = 'Yes', 0 = 'No'.", 'MANUFACTURE_AID': "Indicates the toxic chemical is used to aid in the manufacturing process but does not come into contact with the product during manufacture. Some examples include valve lubricants, refrigerants, metalworking fluids, coolants, and hydraulic fluids. Values: 1 = 'Yes', 0 = 'No'.", 'IMPORTED': "Indicates the toxic chemical was imported into the Customs Territory of the United States by the facility. This includes the facility directly importing the toxic chemical or specifically requesting a broker or other party to obtain the toxic chemical from a foreign source. 
The Customs Territory of the United States includes the 50 States, Guam, Puerto Rico, American Samoa, and the U.S. Virgin Islands. Values: 1 = 'Yes', 0 = 'No'.", 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'USED_PROCESSED': "Indicates the toxic chemical was produced or imported by the facility and then further processed or otherwise used at the same facility. If this box is checked, at least one box in section 3.2 or section 3.3 will be checked. Values: 1 = 'Yes', 0 = 'No'.", 'PRODUCE': "Indicates the toxic chemical was created by the facility. A toxic chemical is considered manufactured even if the toxic chemical is created unintentionally or exists only for a short period of time. Values: 1 = 'Yes', 0 = 'No'.", 'FORMULATION_COMPONENT': "Indicates the toxic chemical is used as an ingredient in a product mixture to enhance performance of the product during its use, such as dyes in ink, solvents in paint, additions, reaction diluents, initiators, inhibitors, emulsifiers, surfactants, lubricants, flame retardants, and rheological modifiers. Values: 1 = 'Yes', 0 = 'No'.", 'MANUFACTURE_IMPURITY': "Indicator that shows whether the facility produces the reported chemical as a result of the manufacture, processing, or otherwise use of another chemical, but does not separate the chemical and it remains primarily in the mixture or product with that other chemical. Values: 1 = 'Yes', 0 = 'No'.", 'CHEM_PROCESSING_AID': "Indicates the toxic chemical is used to aid in the manufacture or synthesis of another chemical substance such that it comes into contact with the product during manufacture, but is not intended to remain with or become part of the final product or mixture. 
Some examples of chemical processing aids are process solvents, catalysts, solution buffers, inhibitors, and reaction terminators. Values: 1 = 'Yes', 0 = 'No'.", 'BYPRODUCT': "Indicates the toxic chemical is produced coincidentally during the manufacture, process, or otherwise use of another chemical substance or mixture and, following its production, is separated from that other chemical substance or mixture. This includes toxic chemicals that may be created as the result of waste management. Values: 1 = 'Yes', 0 = 'No'.", 'ANCILLARY': "Indicates the toxic chemical is used at the facility for purposes other than as a manufacturing aid or chemical processing aid, such as cleaners, degreasers, lubricants, fuels, toxic chemicals used for treating wastes, and toxic chemicals used to treat water at the facility. Values: 1 = 'Yes', 0 = 'No'.", 'REPACKAGING': "Indicates the toxic chemical has been received by the facility and subsequently prepared for distribution into commerce in a different form, state, or quantity than it was received, such as petroleum being transferred from a storage tank to tanker trucks. Values: 1 = 'Yes', 0 = 'No'.", 'ARTICLE_COMPONENT': "Indicates the toxic chemical becomes an integral part of an article distributed into commerce, such as copper in wire or resins in a plastic pen, or the pigment components of paint applied to a chair that is sold. Values: 1 = 'Yes', 0 = 'No'.", 'PROCESS_IMPURITY': "Indicator that shows whether the facility processed the reported chemical but did not separate it and it remains as an impurity in the primary the mixture or trade name product. Values: 1 = 'Yes', 0 = 'No'.", 'SALE_DISTRIBUTION': "Indicates the toxic chemical was produced or imported by the facility specifically to be sold or distributed to other outside facilities. 
Values: 1 = 'Yes', 0 = 'No'."}, 'TRI_COUNTY': {'COUNTY_NAME': 'The standardized name of the county where the facility is located.', 'ZIP_CODE': 'The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.'}, 'TRI_FACILITY_HISTORY': {'PREF_DESC_CATEGORY': "The EPA's preferred geographic coordinate description category. Describes the category of feature referenced by the latitude and longitude.", 'ASGN_PARTIAL_IND': "Indicates that the facility reports by establishment or groups of establishments as assigned by TRI from Form R submisions. Partial facilities may have more than one submission for the same chemical in one reporting year. Values: 0 = 'Entire facility', 1 = 'Partial facility'.", 'FACILITY_NAME': 'The name of the facility or establishment for which the form was submitted. For purposes of TRI a "facility" is generally considered to be all buildings and equipment owned or operated by a company on a single piece of property. The facility may be only one building in an industrial park or it may be a large complex covering many acres. At some larger facilities there may be several different businesses that are all run by the same company. These different businesses are referred to as "establishments." Generally, a company will submit one Form R for the entire facility. A facility may choose, however, to submit a Form R for each establishment separately. The name in this section will either be the name used for the entire facility or the name of the specific establishment, depending on how the facility chooses to report.', 'STATE_COUNTY_FIPS_CODE': 'Combination of the two-letter state abbreviation and the county code.', 'MAIL_STATE_ABBR': 'The state abbreviation the facility or establishment uses to receive mail. This may or may not be the same as the information reported in the State box.', 'MAIL_ZIP_CODE': 'The zip code the facility or establishment uses to receive mail. 
This may or may not be the same as the information reported in the Zip Code box.', 'CITY_NAME': 'The city where the facility or establishment is physically located.', 'MAIL_COUNTRY': 'The country the facility or establishment uses to receive mail.', 'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'PREF_HORIZONTAL_DATUM': "The EPA's preferred geographic coordinate horizontal datum. Reference datum of the latitude and longitude.", 'FAC_LONGITUDE': "The series of numbers which identifies the exact physical location of the facility as a measure of the arc or portion of the earth's equator between the meridian of the center of the facility and the prime meridian. The right-justified value is stored as degrees, minutes and seconds (0DDDMMSS). Tenths of seconds are not stored. The value is negative for locations in the Western hemisphere.", 'MAIL_STREET_ADDRESS': 'The address the facility or establishment uses for receiving mail. Form R instructs the submitter to enter the address used for mail only if different than in the Street box. 
The TRIS database stores the address from the Street box (STREET_ADDRESS) in MAILING_STREET_ADDRESS even when the facility Mailing address is not different.', 'REPORTING_YEAR': 'The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.', 'STATE_ABBR': 'The state abbreviation where the facility or establishment is physically located.', 'COUNTY_NAME': 'The standardized name of the county where the facility is located.', 'FAC_LATITUDE': "The series of numbers that identifies the exact physical location of the facility as a measure of the angular distance north form the earth's equator to the center of the facility. The value is stored as degrees, minutes and seconds (0DDMMSS), and the first position is zero-filled. The value is positive for locations north of the equator.", 'PREF_LATITUDE': "The EPA's preferred geographic latitude estimation of the reporting facility. Value for latitude is in decimal degrees. This is a signed field.", 'PREF_COLLECT_METH': "The EPA's preferred geographic coordinate collection method code for the reporting facility. Method used to determine the latitude and longitude.", 'ASGN_PUBLIC_PHONE': 'The phone number to reach the person identified in the Public Contact Name box (PUBLIC_CONTACT_PERSON), as assigned by TRI from Form R submissions.', 'PREF_ACCURACY': "The EPA's preferred geographic coordinate accuracy estimation for the reporting facility. Describes the accuracy value as a range (+/) in meters of the latitude and longitude.", 'ASGN_FEDERAL_IND': "An identifier that indicates the ownership status of a facility. A Federal facility is a facility owned or operated by the Federal government. 
This includes facilities that are operated by contractors to the Federal government (i.e., a facility where the land is owned by the Federal government but a private company is under contract to run the facility's operations). The types of Federal facilities that report to TRI are broader than the types of private sector facilities that report to TRI (e.g., DOD military bases). Values: C = 'Commercial', F = 'Federal facility', and G = 'Government owned/contractor operated' (GOCO).", 'ASGN_AGENCY': 'An abbreviation for the name of the agency supported by a federal or Government Owned/Contractor Operated (GOCO) reporting site.', 'MAIL_PROVINCE': 'The province the facility or establishment uses to receive mail. A facility may receive mail at an address outside of the United States. The province field gives a facility the flexibility needed to enter a correct mailing address outside the United States.', 'PREF_LONGITUDE': "The EPA's preferred geographic longitude estimation of the reporting facility. Value for longitude is in decimal degrees. This is a signed field.", 'STREET_ADDRESS': 'The street address for the physical location of the facility or establishment.', 'ZIP_CODE': 'The Zone Improvement Plan (ZIP) code assigned by the U.S. Postal Service as part of the address of a facility.', 'MAIL_NAME': 'The name which the facility or establishment uses for receiving mail if the address used for mail is different than in the Street box. This may or may not be the same as the name listed in the Facility or Establishment Name box.', 'PREF_SOURCE_SCALE': "The EPA's preferred geographic coordinate source map scale code. This is the scale of the source used to determine the latitude and longitude.", 'MAIL_CITY': 'The city the facility or establishment uses to receive mail. 
This may or may not be the same as the information reported in the City box.', 'PARENT_CO_NAME': "Name of the corporation or other business company that is the ultimate parent company, located in the United States, of the facility or establishment submitting the data. The parent company is the company that directly owns at least 50 percent of the voting stock of the reporting company. This does not include foreign parent companies. 'NA' indicates that the facility does not have a parent company.", 'PREF_QA_CODE': 'Contains the results of four quality assurance tests (Test 1 through Test 4 below) used to determine facility location. "ZIP Code Bounding Box" is a rectangle generated from the ZIP Code boundaries, which is defined by the extreme north-south latitude and east-west longitudes, plus 1 kilometer (km) in each direction. The quality assurance tests are:', 'PARENT_CO_DB_NUM': "The number which has been assigned to the parent company by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all parent companies will have a Dun & Bradstreet number. 'NA' indicates that the facility or establishment's parent company does not have a Dun & Bradstreet number.", 'ASGN_PUBLIC_CONTACT': 'The name of the individual who may be contacted by the general public with questions regarding the company and the information reported to TRI as assigned by TRI from Form R submissions.. This person may or may not be familiar with the information provided in the form but has been designated by the facility or establishment to handle public inquiries.', 'REGION': 'The EPA region in which the facility is located.'}, 'TRI_SUBMISSION_NAICS': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. 
One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'NAICS_CODE': 'The North American Industry Classification System (NAICS) Codes(s) that best describe the business activities conducted ata facility or establishment. NAICS codes are 6 digit numbers used by the Bureau of Census as part of a system to categorizeand track the types of business activities conducted in the United States. ', 'NAICS_SEQUENCE_NUM': "The sequence of the facility's North American Industry Classification System (NAICS) code as entered in section 4.5 of part I of the Form R or Form A.", 'PRIMARY_IND': "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."}, 'TRI_WATER_STREAM': {'WATER_SEQUENCE_NUM': 'Sequence in which a release to water is reported on a Form R submission.', 'STREAM_NAME': 'The name of the stream, river, lake, or other water body to which the chemical is discharged. The name is listed as it appears on the NPDES permit, or, if the facility does not have a NPDES permit, as the water body is publicly known. 
This is not a list of all streams through which the toxic chemical flows but is a list of direct discharges. If more than one name is listed on form R, the facility has a separate discharge to each water body listed.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'STORM_WATER_PERCENT': 'The amount of the release, by weight percent, to water bodies, that came from stormwater runoff. This figure is only required when data are available.', 'STORM_WATER_NA': "Indicates that 'NA' (Not Applicable) was entered on Form R for the percent of a release that came from stormwater runoff. Values: 1 = 'Yes', 0 = 'No'."}, 'TRI_SOURCE_REDUCT_QTY': {'ENERGY_OFFSITE_CURR_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) for which the report was submitted. This includes all amounts of the toxic chemical that were intended to be recovered for energy and were sent offsite for that purpose. This figure includes all transfers offsite reported in section 6.2 which are classified with an energy recovery code. This does not include quantities of the toxic chemical that are combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81A_CURR_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_PREV_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released previous year quantity. 
Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_ONSITE_FOLL_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste expected to be burned for energy recovery onsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This should not include quantities of the toxic chemical that will be combusted for energy recovery onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'RECYC_OFFSITE_FOLL_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be sent offsite for recycling during the calendar year (January 1 - December 31) following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'RECYC_ONSITE_PREV_YR_QTY': 'The total amount (in pounds) of the toxic chemical recycled onsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes only the amount of the toxic chemical actually recovered for reuse, not the total amount of the toxic chemical in the wastestream entering recycling units onsite. This amount does not include quantities of the toxic chemical that were recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'RECYC_ONSITE_SECD_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be recycled onsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. 
This amount does not include quantities of the toxic chemical that will be recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81A_PREV_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, prior year on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_FOLL_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be released by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) following the year for which the report was submitted. This includes air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs (metals reported in 6.1).', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'ENERGY_OFFSITE_CURR_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'RECYC_OFFSITE_CURR_YR_QTY': 'The total amount (in pounds) of the toxic chemical sent offsite for recycling during the calendar year (January 1 - December 31) for which the report was submitted. This includes all amounts of the toxic chemical intended to be recycled, not just the amount of the toxic chemical actually recovered. This figure includes all transfers offsite reported in section 6.2 which are classified with an recycling code. 
This amount does not include quantities of the toxic chemical that were transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'RECYC_ONSITE_CURR_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'TREATED_ONSITE_CURR_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_CURR_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'RECYC_ONSITE_CURR_YR_QTY': 'The total amount (in pounds) of the toxic chemical recycled onsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes only the amount of the toxic chemical actually recovered, not the total amount of the toxic chemical in the wastestream sent for recycling activities. This amount does not include quantities of the toxic chemical that were recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81C_CURR_YR_QTY': 'The total amount of the toxic chemical released off-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). This total does not include off-site releases or disposal due to catastrophic events.', 'REL_81C_SECD_YR_QTY': 'The total amount of the toxic chemical expected to be released off-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). 
This total does not include off-site releases or disposal due to catastrophic events.', 'REL_81B_SECD_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, second following year on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_81D_FOLL_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, following year off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_81C_FOLL_YR_QTY': 'The total amount of the toxic chemical expected to be released off-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). This total does not include off-site releases or disposal due to catastrophic events.', 'REL_81B_CURR_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'TREATED_OFFSITE_CURR_YR_QTY': 'The total amount (in pounds) of the toxic chemical sent for treatment offsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes the total amount of the toxic chemical intended to be treated (destroyed) and sent offsite for that purpose, not the amount of the toxic chemical actually treated (destroyed) by offsite processes. This figure includes all transfers offsite reported in section 6.2 which are classified with treatment waste management codes and most transfers to POTWs reported in section 6.1, except for metals. 
This does not include transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that were transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81C_PREV_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, prior year off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_PREV_YR_QTY': 'The total amount (in pounds) of the toxic chemical released due to production related events by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs (metals reported in 6.1).', 'TREATED_OFFSITE_CURR_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_OFFSITE_FOLL_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste expected to be sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) following the year for which the report was submitted. 
This does not include quantities of the toxic chemical that will be combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'RECYC_ONSITE_FOLL_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be recycled onsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be recycled onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81B_SECD_YR_QTY': 'The total amount of the toxic chemical expected to be released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.', 'REL_81B_FOLL_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, following year on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_81D_CURR_YR_QTY': 'The total amount of the toxic chemical released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). 
These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.', 'REL_81C_SECD_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, second following year off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_81C_CURR_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'ENERGY_OFFSITE_PREV_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes all amounts of the toxic chemical that were intended to be recovered for energy and were sent offsite for that purpose. This figure includes all transfers offsite reported in section 6.2 which are classified with an energy recovery code. This does not include quantities of the toxic chemical that are combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'TREATED_ONSITE_FOLL_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite following year quantity. 
Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81B_PREV_YR_QTY': 'The total amount of the toxic chemical released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.', 'RECYC_ONSITE_PREV_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'RECYC_OFFSITE_PREV_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_SECD_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81D_CURR_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'ENERGY_ONSITE_CURR_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81A_CURR_YR_QTY': 'The total amount of the toxic chemical released on-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). 
This total does not include on-site releases or disposal due to catastrophic events.', 'RECYC_OFFSITE_SECD_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be sent offsite for recycling during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'TREATED_OFFSITE_SECD_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be sent for treatment offsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This does not include expected transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Expected transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that will be transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81D_SECD_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, second following year off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'RECYC_OFFSITE_CURR_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site current year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'TREATED_OFFSITE_PREV_YR_QTY': 'The total amount (in pounds) of the toxic chemical sent for treatment offsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. 
This includes the total amount of the toxic chemical intended to be treated (destroyed) and sent offsite for that purpose, not the amount of the toxic chemical actually treated (destroyed) by offsite processes. This figure includes all transfers offsite reported in section 6.2 which are classified with treatment waste management codes and most transfers to POTWs reported in section 6.1, except for metals. This does not include transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that were transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'TREATED_OFFSITE_FOLL_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_OFFSITE_SECD_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_CURR_YR_QTY': 'The total amount (in pounds) of the toxic chemical released due to production related events by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) for which the report was submitted. This includes both fugitive and stack air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). 
It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs, because metals cannot be treated (destroyed) and will ultimately be disposed (metals reported in 6.1).', 'TREATED_ONSITE_CURR_YR_QTY': 'The total amount (in pounds) of the toxic chemical treated onsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes only the amount of the toxic chemical actually treated (destroyed) by processes at the facility, not the total amount of the toxic chemical present in wastestreams sent to those processes. This amount does not include quantities of the toxic chemical that were treated for destruction onsite as the result of a catastrophic event,remedial action or other, one-time event not associated with production.', 'TREATED_ONSITE_SECD_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81B_FOLL_YR_QTY': 'The total amount of the toxic chemical expected to be released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.', 'ENERGY_OFFSITE_PREV_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite previous year quantity. 
Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81A_FOLL_YR_QTY': 'The total amount of the toxic chemical expected to be released on-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). This total does not include on-site releases or disposal due to catastrophic events.', 'RECYC_OFFSITE_SECD_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81D_PREV_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.D, prior year off-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'RECYC_OFFSITE_PREV_YR_QTY': 'The total amount (in pounds) of the toxic chemical sent offsite for recycling during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes all amounts of the toxic chemical intended to be recycled and sent offsite for that purpose, not just the amount of the toxic chemical actually recovered. This figure includes all transfers offsite reported in section 6.2 which are classified with a recycling code. This amount does not include quantities of the toxic chemical that were transferred offsite for recycling as the result of a catastrophic event, remedial action or other, one-time event not associated with production', 'RECYC_OFFSITE_FOLL_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled off-site following year quantity. 
Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81D_SECD_YR_QTY': 'The total amount of the toxic chemical expected to be released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.', 'REL_SECD_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be released by the facility to all environmental media both on and off site during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This includes air emissions, discharges to water bodies, underground injection, and land disposal on site (all releases reported in section 5). It also includes transfers of the toxic chemical offsite for disposal (transfers reported in section 6.2 which are classified with a disposal waste management code) and amounts of metals transferred to POTWs (metals reported in 6.1).', 'REL_81D_PREV_YR_QTY': 'The total amount of the toxic chemical released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). 
These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.', 'TREATED_OFFSITE_FOLL_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be sent for treatment offsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This does not include expected transfers of metals to publicly owned treatment works (POTWs) because metals cannot be treated (destroyed) and will ultimately be disposed. Expected transfers of metals to POTWs are included in section 8.1. This amount also does not include quantities of the toxic chemical that will be transferred off-site for treatment as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81B_CURR_YR_QTY': 'The total amount of the toxic chemical released on-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the calendar year (January 1 - December 31). These mediums include fugitive and stack air emissions, discharges to water bodies, underground injection to class II-V wells, land treatment/application farming, RCRA subtitle C surface impoundments, Other surface Impoundments and Other disposals. This total does not include on-site releases or disposal due to catastrophic events.', 'TREATED_OFFSITE_SECD_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite second following year quantity. 
Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81C_FOLL_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.C, following year off-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_81B_PREV_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.B, prior year on-site releases to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'RECYC_ONSITE_SECD_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_OFFSITE_FOLL_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery offsite following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_ONSITE_SECD_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite second following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_ONSITE_FOLL_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81A_SECD_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, second following year on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'REL_81D_FOLL_YR_QTY': 'The total amount of the toxic chemical expected to be released off-site due to production related events by the facility to mediums other than Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the following calendar year (January 1 - December 31). 
These off-site mediums include Storage Only, Solidification/Stabilization (for metals only), Wastewater Treatment (Excluding POTWs) (for metals only), Subtitle C Surface Impoundment, Other Surface Impoundment, Land Treatment, Other Land Disposal, Underground Injection to Class II-V Wells, Other off-site Management, Transfers to Waste brokers for Disposal and Unknown. This total does not include off-site releases or disposal due to catastrophic events.', 'ENERGY_ONSITE_SECD_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste expected to be burned for energy recovery onsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This should not include quantities of the toxic chemical that will be combusted for energy recovery onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81A_SECD_YR_QTY': 'The total amount of the toxic chemical expected to be released on-site by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the second following calendar year (January 1 - December 31). This total does not include on-site releases or disposal due to catastrophic events.', 'REL_FOLL_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the released following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'REL_81A_FOLL_YR_NA': "Indicates if 'NA' ('not applicable') was entered for Section 8.1.A, following year on-site releases to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills. Values: 1 = 'NA'; 0 = '0' (zero) or not 'NA'.", 'ENERGY_ONSITE_PREV_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste burned for energy recovery onsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. 
This includes only the amount of the toxic chemical actually combusted in the unit, not the total amount of the toxic chemical in the wastestream sent for energy recovery. This also does not include quantities of the toxic chemical that are combusted for energy recovery onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'ENERGY_ONSITE_CURR_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste burned for energy recovery onsite during the calendar year (January 1 - December 31) for which the report was submitted. This includes only the amount of the toxic chemical actually combusted in the unit, not the total amount of the toxic chemical in the wastestream sent for energy recovery. This also does not include quantities of the toxic chemical that are combusted for energy recovery onsite as the result of a catastrophic event,remedial action or other, one-time event not associated with production.', 'TREATED_ONSITE_PREV_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated onsite previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'TREATED_ONSITE_PREV_YR_QTY': 'The total amount (in pounds) of the toxic chemical treated onsite during the calendar year (January 1 - December 31) prior to the year for which the report was submitted. This includes only the amount of the toxic chemical actually treated (destroyed) by processes at the facility, not the total amount of the toxic chemical present in wastestreams sent to those processes. 
This amount does not include quantities of the toxic chemical that were treated for destruction onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81A_PREV_YR_QTY': 'The total amount of the toxic chemical released on-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). This total does not include on-site releases or disposal due to catastrophic events.', 'TREATED_ONSITE_FOLL_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be treated onsite during the calendar year (January 1 - December 31) following the year for which the report was submitted. This amount does not include quantities of the toxic chemical that will be treated for destruction onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'REL_81C_PREV_YR_QTY': 'The total amount of the toxic chemical released off-site due to production related events by the facility to Class I Underground Injection Wells, RCRA Subtitle C landfills, and other landfills during the prior calendar year (January 1 - December 31). This total does not include off-site releases or disposal due to catastrophic events.', 'RECYC_ONSITE_FOLL_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the recycled on-site following year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'TREATED_ONSITE_SECD_YR_QTY': 'The total amount (in pounds) of the toxic chemical expected to be treated onsite during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. 
This amount does not include quantities of the toxic chemical that will be treated for destruction onsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.', 'TREATED_OFFSITE_PREV_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the treated offsite previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_ONSITE_PREV_YR_NA': "Indicates if '0' (zero) or 'NA' ('not applicable') was entered for the energy recovery onsite previous year quantity. Values: 1 = 'NA', 0 = '0' (zero) or not 'NA'.", 'ENERGY_OFFSITE_SECD_YR_QTY': 'The total amount (in pounds) of the toxic chemical in waste expected to be sent offsite to be burned for energy recovery during the calendar year (January 1 - December 31) two years following the year for which the report was submitted. This does not include quantities of the toxic chemical that will be combusted for energy recovery offsite as the result of a catastrophic event, remedial action or other, one-time event not associated with production.'}, 'TRI_RELEASE_QTY': {'RELEASE_BASIS_EST_CODE': "The code representing the technique used to develop theestimate of releases reported in the 'Total Release' box (TOTAL_RELEASE). Thevalues are as follows:", 'WATER_SEQUENCE_NUM': 'Sequence in which a release to water is reported on a Form R submission.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'TOTAL_RELEASE': 'The total amount (in pounds) of the toxic chemical released to air, water, land, and underground injection wells during the calendar year (January 1 - December 31). Release amounts may be reported as specific numbers or as ranges (RELEASE_RANGE_CODE). 
Descriptions by Form R Section number for each environmental medium follow.', 'RELEASE_NA': "Indicates whether 'NA' (Not Applicable) was entered on Form R for the release estimate. Values: 1 = 'Yes', 0 = 'No'. Descriptions by Form R Section number for each environmental medium follow.", 'RELEASE_RANGE_CODE': "The code that corresponds to the amount of toxic chemical released annually by the reporting facility, reported as a range for releases less than 1,000 pounds. When a facility uses a range code, the amount reported to TRI is the midpoint of the range. On Form R, letter codes are used to represent ranges: A = 1-10 pounds, B = 11-499 pounds, and C = 500-999 pounds. The letters are converted to numbers for storage in the TRIS database where '1' represents range 'A', '3' represents range 'B', and '4' represents range 'C'. The historical value '2' = 1-499 pounds.", 'ENVIRONMENTAL_MEDIUM': 'Code indicating the environmental medium to which the toxic chemical is released from the facility.'}, 'TRI_ONSITE_WASTESTREAM': {'OPERATING_DATA_IND': "Indicates if the waste treatment efficiency estimate (TREATMENT_EFFCIENCY_EST) is based on actual operating data, such as monitoring influent and effluent toxic chemical levels in the waste stream; or, indicates if TREATMENT_EFFCIENCY_EST is not based on actual operating or monitoring data, but rather some other technique, such as published data for similar processes or the equipment supplier's literature. Values: 1 = 'Yes', 0 = 'No'', 2 = blank or not entered.", 'WASTESTREAM_CODE': 'Indicates the general waste stream type containing the toxic chemical. 
The four codes used to indicate the general waste stream types are: A = Gaseous (gases, vapors, airborne particles), W = Wastewater (aqueous waste), L = Liquid (non-aqueous, liquid waste), and S = Solid (including sludges and slurries).', 'WASTESTREAM_SEQ_NUM': 'Sequence in which an on-site waste treatment process is reported on a Form R submission.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'INFLUENT_CONC_RANGE': 'Indicates the range of concentration of the toxic chemical in the waste stream as it typically enters the waste treatment step or sequence. The concentration is based on the amount or mass of the toxic chemical in the waste stream as compared to the total amount or mass of the waste stream and is determined prior to the application of any waste management methods. Facilities report using one of the following five codes:', 'TREATMENT_EFFICIENCY_EST_NA': "Indicates whether 'NA' (Not Applicable) was entered on Form R for the waste treatment efficiency estimate. Values: 1 = 'Yes', 0 = 'No'.", 'SEQUENTIAL_TREAT_87_90': 'Indicator that shows whether treatment steps were used in sequence, for Reporting Years 1987 through 1990, to estimate treatment efficiency of the overall treatment process.', 'TREATMENT_EFFICIENCY_EST': 'The percentage of the toxic chemical removed from the waste stream through destruction, biological degradation, chemical conversion, or physical removal. This estimate represents the overall percentage of the toxic chemical destroyed or removed (based on amount or mass) throughout all waste management methods, not merely changes in volume or concentration and not merely the efficiency of one method in a sequence of activities. 
This also does not represent the waste treatment efficiency for the entire waste stream but only the removal or destruction of this specific toxic chemical in that waste stream. This does not include energy recovery or recycling activities. Energy recovery and recycling activities are reported in sections 7B and 7C, respectively. The value is calculated as follows: ((I - E)/1) * 100, where I equals the amount of toxic chemical in the influent waste stream, and E equals the amount of the toxic chemical in the effluent waste stream.', 'EFFICIENCY_RANGE_CODE': 'The range code representing the percentage of the toxic chemical removed from the waste stream through destruction, biological degradation, chemical conversion, or physical removal. This range code represents the overall percentage of the toxic chemical destroyed or removed (based on amount or mass) throughout all waste management methods, not merely changes in volume or concentration and not merely the efficiency of one method in a sequence of activities. This also does not represent the waste treatment efficiency for the entire waste stream but only the removal or destruction of this specific toxic chemical in that waste stream. This does not include energy recovery or recycling activities. Energy recovery and recycling activities are reported in sections 7B and 7C, respectively. '}, 'TRI_FACILITY_RCRA': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. 
In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'ASGN_RCRA_IND': "Indicates that the associated RCRA_NUM represents the principal RCRA Identification Number as assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.", 'RCRA_NUM': 'The number assigned to the facility by EPA for purposes of the Resource Conservation and Recovery Act (RCRA). Not all facilities will have a RCRA Identification Number. A facility will only have a RCRA Identification Number if it manages RCRA regulated hazardous waste. Some facilities may have more than one RCRA Identification Number.'}, 'TRI_FACILITY_DB_HISTORY': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'ASGN_DB_IND': "Indicates that the associated DB_NUM represents the principal Dun & Bradstreet number assigned to the facility by TRI from Form R or Form A submissions. Values: 1 = 'Yes', 0 = 'No'.", 'REPORTING_YEAR': 'The year for which the form was submitted. 
This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.', 'DB_NUM': 'The number or numbers which have been assigned to the facility by Dun & Bradstreet. Dun & Bradstreet is a private financial tracking and accounting firm. Not all facilities will have Dun & Bradstreet numbers.'}, 'TRI_FACILITY_SIC': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'SIC_CODE': 'The Standard Industrial Classification (SIC) code or codes which best describes the activities conducted at the facility. SIC codes are 4 digit numbers used by the Bureau of Census as part of a system to categorize and track the types of business activities conducted in the United States. The first two digits of the code represent the major industry group (e.g., SIC code 25XX indicates Furniture and Fixtures) and the second two digits represent the specific subset of that group (e.g., 2511 indicates wood household furniture). EPA instructs facilities to enter their primary SIC code first. 
Many facilities do not report their primary SIC code first.', 'PRIMARY_IND': "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."}, 'TRI_ONSITE_WASTE_TREATMENT_MET': {'WASTESTREAM_SEQ_NUM': 'Sequence in which an on-site waste treatment process is reported on a Form R submission.', 'DOC_CTRL_NUM': 'DOC_CTRL_NUM is a unique identification number assigned to each submission. The format is TTYYNNNNNNNNN, where TT = document type, YY = reporting year, and NNNNNNNNN = assigned number with a check digit.', 'TREATMENT_METHOD_CODE': 'The on-site waste treatment activity that is applied to the waste stream containing the toxic chemical. This includes all waste treatment methods through which the toxic chemical passes as part of that waste stream, regardless of whether or not the method has, or is intended to have, any effect on the toxic chemical. If the waste stream moves through a series of waste treatment activities, each method will be listed sequentially.', 'TREATMENT_SEQUENCE': 'Sequence in which a TREATMENT_METHOD_CODE is entered on a Form R submission, and indicates the on-site order of treatment.'}, 'TRI_FACILITY_SIC_HISTORY': {'TRI_FACILITY_ID': 'The unique number assigned to each facility for purposes of the TRI program. Usually, only one number is assigned to each facility and the number is for the entire facility. One company may have multiple TRI Facility Identification (ID) numbers if they have multiple facilities. One facility with many establishments will usually have only one TRI Facility ID number. They will then use this number for all of their Form Rs even if they are submitting a Form R for different establishments with different names. 
In a few instances different establishments of the same facility will have different TRI Facility ID numbers. The format is ZZZZZNNNNNSSSSS, where ZZZZZ = ZIP code, NNNNN = the first 5 consonants of the name, and SSSSS = the first 5 non-blank non-special characters in the street address.', 'REPORTING_YEAR': 'The year for which the form was submitted. This is not the year in which the form was filed but rather it is the calendar year (January 1 - December 31) during which the toxic chemical was, manufactured, processed and/or otherwise used and released or otherwise managed as a waste.', 'SIC_CODE': 'The Standard Industrial Classification (SIC) code or codes which best describes the activities conducted at the facility. SIC codes are 4 digit numbers used by the Bureau of Census as part of a system to categorize and track the types of business activities conducted in the United States. The first two digits of the code represent the major industry group (e.g., SIC code 25XX indicates Furniture and Fixtures) and the second two digits represent the specific subset of that group (e.g., 2511 indicates wood household furniture). EPA instructs facilities to enter their primary SIC code first. Many facilities do not report their primary SIC code first.', 'PRIMARY_IND': "Indicates whether the associated SIC_CODE/NAICS_CODE represents the facility's primary business activity as entered by the submitter. EPA instructs facilities to enter their primary SIC/NAICS on the Form R or Form A in part I, section 4.5, box a. Values: 1 = 'Yes', 0 = 'No'."}} |
#Write a function that takes a two-dimensional list (list of lists) of numbers as argument and returns a list
#which includes the sum of each row. You can assume that the number of columns in each row is the same.
def sum_of_two_lists_row(list2d):
    """Return a list with the sum of each row of the 2-D list `list2d`.

    Assumes every row is an iterable of numbers.
    """
    return [sum(row) for row in list2d]
print(sum_of_two_lists([[1,2],[3,4]])) | def sum_of_two_lists_row(list2d):
final_list = []
for list_numbers in list2d:
sum_list = 0
for number in list_numbers:
sum_list += number
final_list.append(sum_list)
return final_list
print(sum_of_two_lists([[1, 2], [3, 4]])) |
"""
Given two lists A and B, where B is an anagram of A. B is an anagram of A means B is made by randomizing the order of the elements in A.
We want to find an index mapping P, from A to B. A mapping P[i] = j means the ith element in A appears in B at index j.
These lists A and B may contain duplicates. If there are multiple answers, output any of them.
For example, given
A = [12, 28, 46, 32, 50]
B = [50, 12, 32, 46, 28]
We should return
[1, 4, 3, 2, 0]
Solution:
Create a hash table based on B: {value_in_b: index}
"""
# time-O(n) n is the length of list A
# space-O(n)
class Solution:
    """Index mapping between a list and one of its anagrams."""

    def anagramMappings(self, A: list[int], B: list[int]) -> list[int]:
        """Return P where P[i] is an index j with B[j] == A[i].

        Fix: the original annotated with `List[int]` but never imported
        `typing.List`, raising NameError at definition time; builtin
        generics (PEP 585) need no import.

        For duplicate values the mapping keeps the LAST index at which the
        value occurs in B (same as the original dict-overwrite behavior).
        Raises KeyError if A contains a value absent from B.
        """
        position = {value: index for index, value in enumerate(B)}
        return [position[value] for value in A]
| """
Given two lists Aand B, and B is an anagram of A. B is an anagram of A means B is made by randomizing the order of the elements in A.
We want to find an index mapping P, from A to B. A mapping P[i] = j means the ith element in A appears in B at index j.
These lists A and B may contain duplicates. If there are multiple answers, output any of them.
For example, given
A = [12, 28, 46, 32, 50]
B = [50, 12, 32, 46, 28]
We should return
[1, 4, 3, 2, 0]
Solution:
Create a hasb table based on B, {value_in_b, index}
"""
class Solution:
def anagram_mappings(self, A: List[int], B: List[int]) -> List[int]:
d = dict()
for i in range(len(B)):
d[B[i]] = i
return [d[A[i]] for i in range(len(A))] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gives the interface of the rans coder."""
class Bitstream():
    """Abstract bitstream that manages encoding/decoding for the rans coder.

    Subclasses must implement encode_cat, decode_cat and __len__.
    """

    def __init__(self, scale_bits):
        # Precision of the probability model; stored for subclasses to use.
        self.scale_bits = scale_bits

    def encode_cat(self, x, probs):
        """Encode symbol `x` under the categorical distribution `probs`."""
        raise NotImplementedError

    def decode_cat(self, probs):
        """Decode one symbol under the categorical distribution `probs`."""
        raise NotImplementedError

    def __len__(self):
        """Length of the bitstream in bits."""
        raise NotImplementedError
| """Gives the interface of the rans coder."""
class Bitstream:
"""Bitstream object that manages encoding/decoding."""
def __init__(self, scale_bits):
self.scale_bits = scale_bits
def encode_cat(self, x, probs):
raise NotImplementedError
def decode_cat(self, probs):
raise NotImplementedError
def __len__(self):
"""Should return the length of the bitstream (in bits)."""
raise NotImplementedError |
def count_words(input_str):
    """Return the number of whitespace-separated tokens in `input_str`."""
    tokens = input_str.split()
    return len(tokens)
# Demo: count words in a literal string, then in a variable.
print(count_words('this is a string'))
# NOTE: 'hellow world' is a runtime string (typo kept as-is).
demo_str = 'hellow world'
print(count_words(demo_str))
def find_min(num_list):
    """Return the smallest non-string element of `num_list`.

    Fixes two defects of the original:
    * the running minimum was seeded with num_list[0] even when that
      element was a str, which either returned the str or raised
      TypeError on the first numeric comparison;
    * `type(num) is not str` is replaced by the idiomatic isinstance check.

    Raises ValueError if the list contains no non-string elements
    (the original raised IndexError only on a fully empty list and
    silently returned a str otherwise).
    """
    values = [item for item in num_list if not isinstance(item, str)]
    return min(values)
# print(find_min([1,2,3,]))
# Demo: find_min over an all-numeric list.
demo_list=[1,2,3,4,5,6]
print(find_min(demo_list))
# Mixed list: str entries are skipped by find_min's type check.
mix_list=[1,2,3,'a',5,6]
print(find_min(mix_list)) | def count_words(input_str):
return len(input_str.split())
print(count_words('this is a string'))
demo_str = 'hellow world'
print(count_words(demo_str))
def find_min(num_list):
min_item = num_list[0]
for num in num_list:
if type(num) is not str:
if min_item >= num:
min_item = num
return min_item
demo_list = [1, 2, 3, 4, 5, 6]
print(find_min(demo_list))
mix_list = [1, 2, 3, 'a', 5, 6]
print(find_min(mix_list)) |
class Solution(object):
def pourWater(self, heights, V, K):
"""
:type heights: List[int]
:type V: int
:type K: int
:rtype: List[int]
"""
for i in range(V):
t = K
#left peak [0: k]
for left in range(K - 1, -1, -1):
if heights[left] < heights[t]:
t = left
if heights[left] > heights[t]:
break
if t == K:
# right peak [k + 1: ]
for right in range(K + 1, len(heights)):
if heights[right] < heights[t]:
t = right
if heights[right] > heights[t]:
break
heights[t] += 1
return heights | class Solution(object):
def pour_water(self, heights, V, K):
"""
:type heights: List[int]
:type V: int
:type K: int
:rtype: List[int]
"""
for i in range(V):
t = K
for left in range(K - 1, -1, -1):
if heights[left] < heights[t]:
t = left
if heights[left] > heights[t]:
break
if t == K:
for right in range(K + 1, len(heights)):
if heights[right] < heights[t]:
t = right
if heights[right] > heights[t]:
break
heights[t] += 1
return heights |
class InvalidInputError(Exception):
"""
This will be raised when one tries to input a type thats not in its
list of types that can be used.
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message | class Invalidinputerror(Exception):
"""
This will be raised when one tries to input a type thats not in its
list of types that can be used.
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message |
"""
nydus.db.backends.base
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
__all__ = ('BaseConnection',)
class BasePipeline(object):
    """
    Base Pipeline class.

    A minimal sample API for batching commands: queue them with add(),
    then run them all with execute().
    """

    def __init__(self, connection):
        self.connection = connection
        self.pending = []

    def add(self, command):
        """Queue a command for later execution."""
        self.pending.append(command)

    def execute(self):
        """Run every queued command; return {command._ident: result}."""
        return {
            command._ident: command(*command._args, **command._kwargs)
            for command in self.pending
        }
class BaseConnection(object):
    """
    Base connection class.

    Child classes should implement at least the connect() and
    disconnect() methods; the underlying connection is created lazily
    on first access of the `connection` property.
    """

    retryable_exceptions = ()
    supports_pipelines = False

    def __init__(self, num, **options):
        self._connection = None
        self.num = num

    @property
    def identifier(self):
        return repr(self)

    @property
    def connection(self):
        # Lazily establish and cache the underlying connection.
        if self._connection is None:
            self._connection = self.connect()
        return self._connection

    def close(self):
        # NOTE(review): indentation of the original was ambiguous in the
        # source dump — assumed both statements sit under the guard
        # (disconnect then drop the cached handle); confirm upstream.
        if self._connection:
            self.disconnect()
            self._connection = None

    def connect(self):
        """Must return a connection object."""
        raise NotImplementedError

    def disconnect(self):
        raise NotImplementedError

    def get_pipeline(self):
        return BasePipeline(self)

    def __getattr__(self, name):
        # Delegate unknown attribute access to the live connection.
        return getattr(self.connection, name)
| """
nydus.db.backends.base
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
__all__ = ('BaseConnection',)
class Basepipeline(object):
"""
Base Pipeline class.
This basically is absolutely useless, and just provides a sample
API for dealing with pipelined commands.
"""
def __init__(self, connection):
self.pending = []
self.connection = connection
def add(self, command):
self.pending.append(command)
def execute(self):
results = {}
for command in self.pending:
results[command._ident] = command(*command._args, **command._kwargs)
return results
class Baseconnection(object):
"""
Base connection class.
Child classes should implement at least
connect() and disconnect() methods.
"""
retryable_exceptions = ()
supports_pipelines = False
def __init__(self, num, **options):
self._connection = None
self.num = num
@property
def identifier(self):
return repr(self)
@property
def connection(self):
if self._connection is None:
self._connection = self.connect()
return self._connection
def close(self):
if self._connection:
self.disconnect()
self._connection = None
def connect(self):
"""
connect() must return a connection object
"""
raise NotImplementedError
def disconnect(self):
raise NotImplementedError
def get_pipeline(self):
return base_pipeline(self)
def __getattr__(self, name):
return getattr(self.connection, name) |
def find_minimum_number_of_moves(rows, cols, start_row, start_col, end_row, end_col):
    """Return the minimum number of knight moves from start to end on a
    rows x cols board, or -1 if the end square is unreachable.

    Fixes over the original:
    * bounds check used end_row/end_col as the board size (the rows/cols
      parameters were ignored entirely);
    * no visited set, so BFS revisited squares and never terminated when
      the target was unreachable;
    * list.pop(0) is O(n) per dequeue — replaced with collections.deque.
    """
    from collections import deque

    # All eight knight move offsets as (row delta, col delta).
    deltas = [(-2, -1), (-2, +1), (+2, -1), (+2, +1),
              (-1, -2), (-1, +2), (+1, -2), (+1, +2)]
    queue = deque([(start_row, start_col, 0)])
    seen = {(start_row, start_col)}
    while queue:
        row, col, dist = queue.popleft()
        if row == end_row and col == end_col:
            return dist
        for d_row, d_col in deltas:
            nxt_row, nxt_col = row + d_row, col + d_col
            if (0 <= nxt_row < rows and 0 <= nxt_col < cols
                    and (nxt_row, nxt_col) not in seen):
                seen.add((nxt_row, nxt_col))
                queue.append((nxt_row, nxt_col, dist + 1))
    return -1
| def find_minimum_number_of_moves(rows, cols, start_row, start_col, end_row, end_col):
deltas = [(-2, -1), (-2, +1), (+2, -1), (+2, +1), (-1, -2), (-1, +2), (+1, -2), (+1, +2)]
def get_all_valid_moves(y0, x0):
valid_positions = []
for (x, y) in deltas:
x_candidate = x0 + x
y_candidate = y0 + y
if 0 <= xCandidate < end_col and 0 <= yCandidate < end_row:
validPositions.append([yCandidate, xCandidate])
return validPositions
q = [(start_row, start_col, 0)]
while q:
(row, col, level) = q.pop(0)
if row == end_row and col == end_col:
return level
for move in get_all_valid_moves(row, col):
q.append((move[0], move[1], level + 1))
return -1 |
#
# subtag.py
# =========
#
# Python-3 module for loading and parsing the Language Subtag Registry
# from IANA.
#
# A current copy of the registry can be downloaded from IANA at the
# following address:
#
# https://www.iana.org/assignments/
# language-subtag-registry/language-subtag-registry
#
# The format of this registry file is defined in RFC 5646 "Tags for
# Identifying Languages"
#
# To use this module, import subtag and then call subtag.parse() with
# the path to the IANA data file. If this is successful, the result
# will be placed in the subtag.rec variable.
#
#
# Exceptions
# ----------
#
# Each exception overloads the __str__ operator so that it can be
# printed as a user-friendly error message. The error message includes
# line number information if relevant. It has punctuation at the end,
# but it does NOT have a line break at the end.
#
# All exceptions defined by this module are subclasses of SubtagError.
#
class SubtagError(Exception):
    """Root of every exception raised by the subtag module."""
    def __str__(self):
        return 'Unknown subtag parsing error!'
class BadContinueLine(SubtagError):
    """Raised when a continuation line appears where none is allowed."""
    def __init__(self, line=None):
        # line: data-file line number, or None if unknown.
        self.m_line = line
    def __str__(self):
        msg = 'Invalid location for continuation line!'
        if self.m_line is not None:
            msg = 'Subtag data line ' + str(self.m_line) + ': ' + msg
        return msg
class BadDataFile(SubtagError):
    """Raised when the registry data file is not valid UTF-8."""
    def __str__(self):
        return 'Subtag data file has invalid UTF-8 encoding!'
class BadExtlangRemap(SubtagError):
    """Raised when an extlang record carries an improper remap."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'extlang record has improper remap!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadExtlangSubtag(SubtagError):
    """Raised when a record's extlang subtag is invalid."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'extlang subtag ' + self.m_tname + ' is invalid!'
        else:
            msg = 'Record has invalid extlang subtag!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadLanguageSubtag(SubtagError):
    """Raised when a record's language subtag is invalid."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Language subtag ' + self.m_tname + ' is invalid!'
        else:
            msg = 'Record has invalid language subtag!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadPrefix(SubtagError):
    """Raised when a record's Prefix field value is invalid."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Prefix ' + self.m_tname + ' is invalid!'
        else:
            msg = 'Record has invalid prefix!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadRecordType(SubtagError):
    """Raised when a record's Type value is not recognized."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Record type ' + self.m_tname + ' is unrecognized!'
        else:
            msg = 'Record has unrecognized type!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadRegionSubtag(SubtagError):
    """Raised when a record's region subtag is invalid."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Region subtag ' + self.m_tname + ' is invalid!'
        else:
            msg = 'Record has invalid region subtag!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadScriptSubtag(SubtagError):
    """Raised when a record's script subtag is invalid."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Script subtag ' + self.m_tname + ' is invalid!'
        else:
            msg = 'Record has invalid script subtag!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadScriptSuppress(SubtagError):
    """Raised when a record's script suppression is invalid."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Script suppression ' + self.m_tname + ' is invalid!'
        else:
            msg = 'Record has invalid script suppression!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadTagFormat(SubtagError):
    """Raised when a tag value has an invalid format."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Tag ' + self.m_tname + ' has invalid format!'
        else:
            msg = 'Record has invalid tag format!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class BadVariantSubtag(SubtagError):
    """Raised when a record's variant subtag is invalid."""
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if (self.m_line is not None) and (self.m_tname is not None):
            msg = 'Variant subtag ' + self.m_tname + ' is invalid!'
        else:
            msg = 'Record has invalid variant subtag!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class EmptyFieldName(SubtagError):
    """Raised when a record contains a field with an empty name."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'One of the record field names is empty!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class InvalidFieldName(SubtagError):
    """Raised when a record field's name is invalid."""
    def __init__(self, line=None, fname=None):
        self.m_line = line
        self.m_fname = fname
    def __str__(self):
        if (self.m_line is not None) and (self.m_fname is not None):
            msg = 'Record field ' + self.m_fname + ' has invalid field name!'
        else:
            msg = 'Record has invalid field name!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class LogicError(SubtagError):
    """Raised on an internal inconsistency within the subtag module."""
    def __str__(self):
        return 'Internal logic error within subtag module!'
class MissingKeyError(SubtagError):
    """Raised when a record references a key that does not exist."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Record has broken foreign key!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class MissingTypeError(SubtagError):
    """Raised when a record lacks the required Type field."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Record is missing a Type field!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class MultiFieldError(SubtagError):
    """Raised when a record defines the same field more than once."""
    def __init__(self, line=None, fname=None):
        self.m_line = line
        self.m_fname = fname
    def __str__(self):
        if (self.m_line is not None) and (self.m_fname is not None):
            msg = 'Record field ' + self.m_fname + ' is defined more than once!'
        else:
            msg = 'Record has redefined field!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class NoColonError(SubtagError):
    """Raised when a record line lacks the name/value colon separator."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Record has line without colon!'
        if self.m_line is not None:
            msg = 'Subtag record at line ' + str(self.m_line) + ': ' + msg
        return msg
class NoDataFileError(SubtagError):
    """Raised when the registry data file cannot be found."""
    def __init__(self, fpath=None):
        # fpath: path that was tried, or None if unknown.
        self.m_fpath = fpath
    def __str__(self):
        if self.m_fpath is None:
            return 'Can\'t find subtag data file!'
        return 'Can\'t find subtag data file ' + self.m_fpath
class PrefixContextError(SubtagError):
    """Raised when a Prefix field appears on a record type that forbids it."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Prefix field can\'t be used with this kind of record!'
        if self.m_line is not None:
            msg = 'Subtag data line ' + str(self.m_line) + ': ' + msg
        return msg
class PrefixMultiError(SubtagError):
    """Raised when multiple prefixes appear on a record that allows one."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Multiple prefixes can\'t be used on this kind of record!'
        if self.m_line is not None:
            msg = 'Subtag data line ' + str(self.m_line) + ': ' + msg
        return msg
class RecursiveMappingError(SubtagError):
    """Raised when a record's remapping refers back to itself."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Record has recursive remapping!'
        if self.m_line is not None:
            msg = 'Subtag data line ' + str(self.m_line) + ': ' + msg
        return msg
class RedefinitionError(SubtagError):
    """Raised when a record redefines a key from an earlier record."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Record redefines key from previous record!'
        if self.m_line is not None:
            msg = 'Subtag data line ' + str(self.m_line) + ': ' + msg
        return msg
class ScriptContextError(SubtagError):
    """Raised when script suppression appears on a record that forbids it."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Script suppression can\'t be used on this kind of record!'
        if self.m_line is not None:
            msg = 'Subtag data line ' + str(self.m_line) + ': ' + msg
        return msg
class WrongTagTypeError(SubtagError):
    """Raised when a record carries the wrong kind of tag data."""
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        msg = 'Record has wrong type of tag data!'
        if self.m_line is not None:
            msg = 'Subtag data line ' + str(self.m_line) + ': ' + msg
        return msg
#
# Module-level variables
# ----------------------
#
# The module-level variable that stores the result of parsing the data
# file, or None if the file hasn't been parsed yet.
#
# Use the parse() function to set this variable. Once it is set
# successfully, it will be a list of zero or more records, stored in the
# order they appear in the data file. However, the first record in the
# file will NOT be included in this list, so the first element in this
# list is actually the second record in the file. This is because the
# first record in the file is an exceptional record that simply provides
# a timestamp for when the data file was generated.
#
# Each record is a tuple with two elements. The first element is an
# integer that stores the line number of the first line of the record in
# the data file. The second element is a dictionary that maps field
# names to field values.
#
# The field names are the same as the field names given in the data
# file, except that they are converted to lowercase so that they are
# case insensitive. Field names do NOT include the colon.
#
# The field values are the same as the field values given in the data
# file. Continuation lines have been assembled so that a field value
# that appears across multiple lines in the input data file will be a
# single line in the value here, with each line break replaced by a
# single space character. The value will be trimmed of leading and
# trailing spaces, tabs, and line breaks.
#
# The fields named 'description' 'comments' and 'prefix' are special
# because multiple instances of the field is allowed in a single record.
# To handle this, the parsed field values of these fields will be a list
# of strings. If there is just one instance of the field, there will be
# a one-element list. This only applies to these three special field
# values. All other field values will be strings.
#
rec = None
#
# Local functions
# ---------------
#
# Check whether the given parameter is a string that contains a single
# lowercase ASCII letter.
#
# Parameters:
#
# c : str | mixed - the value to check
#
# Return:
#
# True if c is a lowercase ASCII letter, False otherwise
#
def is_lower_letter(c):
    """Return True if c is a one-character string holding an ASCII lowercase letter.

    Any non-string, empty string, multi-character string, or character
    outside a-z yields False.
    """
    if not isinstance(c, str):
        return False
    if len(c) != 1:
        return False
    # Lexicographic range test is equivalent to comparing code points
    return 'a' <= c <= 'z'
# Check whether the given parameter is a string that contains a single
# uppercase ASCII letter.
#
# Parameters:
#
# c : str | mixed - the value to check
#
# Return:
#
# True if c is a uppercase ASCII letter, False otherwise
#
def is_upper_letter(c):
    """Return True if c is a one-character string holding an ASCII uppercase letter.

    Any non-string, empty string, multi-character string, or character
    outside A-Z yields False.
    """
    if not isinstance(c, str):
        return False
    if len(c) != 1:
        return False
    # Lexicographic range test is equivalent to comparing code points
    return 'A' <= c <= 'Z'
# Check whether the given parameter is a string that contains a single
# ASCII decimal digit.
#
# Parameters:
#
# c : str | mixed - the value to check
#
# Return:
#
# True if c is an ASCII decimal digit, False otherwise
#
def is_digit(c):
    """Return True if c is a one-character string holding an ASCII decimal digit.

    Any non-string, empty string, multi-character string, or character
    outside 0-9 yields False.  (Deliberately narrower than str.isdigit(),
    which also accepts non-ASCII digit characters.)
    """
    if not isinstance(c, str):
        return False
    if len(c) != 1:
        return False
    # Lexicographic range test is equivalent to comparing code points
    return '0' <= c <= '9'
# Check whether the given parameter is a string that contains a validly
# formatted tag.
#
# Parameters:
#
# t : str | mixed - the value to check
#
# Return:
#
# True if t is a validly formatted tag, False otherwise
#
def is_format_tag(t):
    """Return True when t is a validly formatted language tag string.

    Enforced rules:
      * only ASCII letters, digits, and hyphens; hyphens may not be first,
        last, or doubled
      * the first subtag must contain no uppercase letters; a one-character
        first subtag counts as a singleton
      * every subtag after a singleton (one-character subtag) must contain
        no uppercase letters
      * otherwise: two-character subtags may contain no lowercase letters,
        four-character subtags must be capitalized (first character not
        lowercase, remaining three not uppercase), and subtags of any other
        length may contain no uppercase letters
    """
    if not isinstance(t, str):
        return False
    if len(t) < 1:
        return False
    # Character-level checks: alphabet restriction and hyphen placement
    last = len(t) - 1
    for pos, ch in enumerate(t):
        if not (is_upper_letter(ch) or is_lower_letter(ch) or
                is_digit(ch) or (ch == '-')):
            return False
        if ch == '-':
            if (pos < 1) or (pos >= last):
                return False
            if t[pos - 1] == '-':
                return False
    # Subtag-level checks
    seen_singleton = False
    for pos, sub in enumerate(t.split('-')):
        has_upper = any(is_upper_letter(ch) for ch in sub)
        has_lower = any(is_lower_letter(ch) for ch in sub)
        if pos == 0:
            # Primary subtag: lowercase only; length <= 1 marks a singleton
            if has_upper:
                return False
            if len(sub) <= 1:
                seen_singleton = True
        elif seen_singleton:
            # Everything after a singleton must avoid uppercase letters
            if has_upper:
                return False
        elif len(sub) < 2:
            # A singleton subtag itself must not be an uppercase letter
            if is_upper_letter(sub):
                return False
            seen_singleton = True
        elif len(sub) == 2:
            # Two-character subtag: no lowercase letters allowed
            if has_lower:
                return False
        elif len(sub) == 4:
            # Four-character subtag: capitalized form required
            if is_lower_letter(sub[0]) or \
                    is_upper_letter(sub[1]) or \
                    is_upper_letter(sub[2]) or \
                    is_upper_letter(sub[3]):
                return False
        else:
            # All other lengths: no uppercase letters allowed
            if has_upper:
                return False
    # All checks passed
    return True
# Check whether the given parameter is a string that contains a validly
# formatted tag, without extensions or private-use or grandfathered
# formats.
#
# Parameters:
#
# t : str | mixed - the value to check
#
# Return:
#
# True if t is a validly formatted core tag, False otherwise
#
def is_core_tag(t):
    """Return True when t is a validly formatted tag with no singleton subtags.

    A "core" tag excludes extension, private-use, and grandfathered forms,
    all of which are signalled by a one-character subtag somewhere in the
    tag.
    """
    if not is_format_tag(t):
        return False
    # Any one-character subtag would mark an extension or private-use
    # section, which a core tag must not contain
    return all(len(part) >= 2 for part in t.split('-'))
# Check whether the given parameter is a string that is a case-sensitive
# match for one of the valid category names.
#
# Parameters:
#
# cname : str | mixed - the value to check
#
# Return:
#
# True if value is a string that is recognized, False otherwise
#
def valid_category(cname):
    """Return True if cname is a case-sensitive match for a known record type.

    The recognized categories are the seven record types used by the
    subtag registry.  Non-string values yield False.
    """
    if not isinstance(cname, str):
        return False
    return cname in ('language', 'extlang', 'script', 'region',
                     'variant', 'grandfathered', 'redundant')
# Check whether the given string value has leading or trailing padding.
#
# This returns True if the first or last character is a space, tab, or
# line break. Otherwise, it returns False. Empty strings return False.
# Non-strings cause an exception.
#
# Parameters:
#
# s : str - the string value to check
#
# Return:
#
# True if value is padded, False if not
#
def has_padding(s):
    """Return True if string s starts or ends with a space, tab, or line break.

    Empty strings return False.  Non-string input raises LogicError, since
    callers are expected to pass only strings.
    """
    if not isinstance(s, str):
        raise LogicError()
    # An empty string has no first or last character to inspect
    if not s:
        return False
    pad = ' \t\n'
    return (s[0] in pad) or (s[-1] in pad)
# Check that a parsed record conforms to various expectations.
#
# Exceptions are thrown if there are problems with the record. The
# LogicError exception is used for situations that should never be
# possible from any input data.
#
# Parameters:
#
# lnum : int - a line number, greater than zero, at which the record
# starts, which is used for error reporting
#
# f : dict - maps lowercased record field names to their values
#
def check_record(lnum, f):
    """Validate a single parsed record against the registry's structural rules.

    Checks, in order: that every field name and value is well-formed; that
    the record carries a recognized 'type'; that tag/subtag fields match the
    record type; that the subtag value follows its category's syntax; and
    that 'prefix' and 'suppress-script' only appear in allowed contexts.

    Parameters:

      lnum : int - line number, greater than zero, at which the record
      starts, used for error reporting

      f : dict - maps lowercased record field names to their values

    Raises specific SubtagError subclasses for data problems, and
    LogicError for situations that should never be possible from any input
    data.
    """
    # Check parameters
    if not isinstance(lnum, int):
        raise LogicError()
    if lnum < 1:
        raise LogicError()
    if not isinstance(f, dict):
        raise LogicError()
    # Main check of all keys and values in dictionary
    for k in list(f):
        # Each key must be a string
        if not isinstance(k, str):
            raise LogicError()
        # Each key should be non-empty
        if len(k) < 1:
            raise EmptyFieldName(lnum)
        # Each key must be non-padded
        if has_padding(k):
            raise LogicError()
        # Each key must be only in lowercase and have at least one lowercase
        # letter
        if not k.islower():
            raise InvalidFieldName(lnum, k)
        # Each value must be a string without padding, except that
        # "description" "comments" and "prefix" field values must be
        # non-empty lists of strings without padding
        val = f[k]
        if (k == 'description') or (k == 'comments') or (k == 'prefix'):
            # Value must be non-empty list of strings without padding
            if not isinstance(val, list):
                raise LogicError()
            if len(val) < 1:
                raise LogicError()
            for e in val:
                if not isinstance(e, str):
                    raise LogicError()
                if has_padding(e):
                    raise LogicError()
        else:
            # Value must be string without padding
            if not isinstance(val, str):
                raise LogicError()
            if has_padding(val):
                raise LogicError()
    # All records must have a "type" field that is one of the recognized
    # categories
    if 'type' not in f:
        raise MissingTypeError(lnum)
    if not valid_category(f['type']):
        raise BadRecordType(lnum, f['type'])
    # Grandfathered or redundant records must have a "tag" field but not a
    # "subtag" field, while all other records must have a "subtag" field
    # but not a "tag" field
    if (f['type'] == 'grandfathered') or (f['type'] == 'redundant'):
        # Must have tag field but not subtag
        if ('tag' not in f) or ('subtag' in f):
            raise WrongTagTypeError(lnum)
    else:
        # Must have subtag field but not tag
        if ('subtag' not in f) or ('tag' in f):
            raise WrongTagTypeError(lnum)
    # If this is a subtag record, check the subtag value format
    if 'subtag' in f:
        ft = f['type']
        sv = f['subtag']
        if ft == 'language':
            # Languages must be two or three lowercase ASCII letters (language
            # tags that are longer are not used in practice); the only
            # exception is 8-character language ranges where the first three
            # chars are lowercase letters, the last three chars are lowercase
            # letters, and the middle two chars are ".."
            if ((len(sv) < 2) or (len(sv) > 3)) and (len(sv) != 8):
                raise BadLanguageSubtag(lnum, sv)
            if len(sv) == 8:
                if (not is_lower_letter(sv[0])) or \
                        (not is_lower_letter(sv[1])) or \
                        (not is_lower_letter(sv[2])) or \
                        (sv[3] != '.') or (sv[4] != '.') or \
                        (not is_lower_letter(sv[5])) or \
                        (not is_lower_letter(sv[6])) or \
                        (not is_lower_letter(sv[7])):
                    raise BadLanguageSubtag(lnum, sv)
            else:
                for c in sv:
                    if not is_lower_letter(c):
                        raise BadLanguageSubtag(lnum, sv)
        elif ft == 'extlang':
            # extlang subtags must be three lowercase ASCII letters
            if len(sv) != 3:
                raise BadExtlangSubtag(lnum, sv)
            for c in sv:
                if not is_lower_letter(c):
                    raise BadExtlangSubtag(lnum, sv)
        elif ft == 'script':
            # Script subtags must be four ASCII letters, the first of which is
            # uppercase and the rest of which are lowercase; the only
            # exception is 10-character script subtag ranges, where the first
            # four letters are a valid script tag, the last four letters are a
            # valid script subtag, and the middle two characters are ".."
            if len(sv) == 4:
                if (not is_upper_letter(sv[0])) or \
                        (not is_lower_letter(sv[1])) or \
                        (not is_lower_letter(sv[2])) or \
                        (not is_lower_letter(sv[3])):
                    raise BadScriptSubtag(lnum, sv)
            elif len(sv) == 10:
                if (not is_upper_letter(sv[0])) or \
                        (not is_lower_letter(sv[1])) or \
                        (not is_lower_letter(sv[2])) or \
                        (not is_lower_letter(sv[3])) or \
                        (sv[4] != '.') or (sv[5] != '.') or \
                        (not is_upper_letter(sv[6])) or \
                        (not is_lower_letter(sv[7])) or \
                        (not is_lower_letter(sv[8])) or \
                        (not is_lower_letter(sv[9])):
                    raise BadScriptSubtag(lnum, sv)
            else:
                raise BadScriptSubtag(lnum, sv)
        elif ft == 'region':
            # Region subtags must be two uppercase ASCII letters or three
            # ASCII digits or they must be a range
            if len(sv) == 2:
                if (not is_upper_letter(sv[0])) or (not is_upper_letter(sv[1])):
                    raise BadRegionSubtag(lnum, sv)
            elif len(sv) == 3:
                for c in sv:
                    if not is_digit(c):
                        raise BadRegionSubtag(lnum, sv)
            elif len(sv) == 6:
                if (not is_upper_letter(sv[0])) or \
                        (not is_upper_letter(sv[1])) or \
                        (sv[2] != '.') or (sv[3] != '.') or \
                        (not is_upper_letter(sv[4])) or \
                        (not is_upper_letter(sv[5])):
                    raise BadRegionSubtag(lnum, sv)
            else:
                raise BadRegionSubtag(lnum, sv)
        elif ft == 'variant':
            # Variants must either be four lowercase ASCII alphanumerics and
            # begin with a digit, or 5-8 lowercase ASCII alphanumerics
            if (len(sv) < 4) or (len(sv) > 8):
                raise BadVariantSubtag(lnum, sv)
            if len(sv) == 4:
                if not is_digit(sv[0]):
                    raise BadVariantSubtag(lnum, sv)
            for c in sv:
                if (not is_lower_letter(c)) and (not is_digit(c)):
                    raise BadVariantSubtag(lnum, sv)
        else:
            raise LogicError() # shouldn't happen
    # If this is a tag record, check tag format
    if 'tag' in f:
        if not is_format_tag(f['tag']):
            raise BadTagFormat(lnum, f['tag'])
    # If this record has prefixes, additional checks
    if 'prefix' in f:
        # Prefixes only possible on extlang and variant records
        if (f['type'] != 'extlang') and (f['type'] != 'variant'):
            raise PrefixContextError(lnum)
        # If this is an extlang record, no more than one prefix allowed
        if f['type'] == 'extlang':
            if len(f['prefix']) > 1:
                raise PrefixMultiError(lnum)
        # All prefix values must be two or three lowercase letters for
        # extlang prefixes
        if f['type'] == 'extlang':
            for p in f['prefix']:
                if (len(p) < 2) or (len(p) > 3):
                    raise BadPrefix(lnum, p)
                for c in p:
                    if not is_lower_letter(c):
                        raise BadPrefix(lnum, p)
        # All prefix values must be core tags for variant records
        if f['type'] == 'variant':
            for p in f['prefix']:
                if not is_core_tag(p):
                    raise BadPrefix(lnum, p)
    # If this record has a suppress-script, additional checks
    if 'suppress-script' in f:
        # Script suppression only possible on language and extlang records
        if (f['type'] != 'language') and (f['type'] != 'extlang'):
            raise ScriptContextError(lnum)
        # Script name must be four characters, first uppercase letter and
        # the rest lowercase letters
        sn = f['suppress-script']
        if len(sn) != 4:
            raise BadScriptSuppress(lnum, sn)
        if (not is_upper_letter(sn[0])) or \
                (not is_lower_letter(sn[1])) or \
                (not is_lower_letter(sn[2])) or \
                (not is_lower_letter(sn[3])):
            raise BadScriptSuppress(lnum, sn)
# Function that processes a raw record.
#
# A raw record requires an array of record lines. Trailing whitespace
# and line breaks should have been stripped from these lines already.
# Furthermore, continuation lines should be assembled so that the lines
# here are logical record lines rather than the physical record lines
# that occur in the file.
#
# The module-level rec variable must already be defined as a list. If
# successful, this function adds the parsed record on to the list.
#
# Parameters:
#
# lnum : int - a line number, greater than zero, at which the record
# starts, which is used for error reporting
#
# lines : list of strings - the logical lines of the record
#
def raw_record(lnum, lines):
    """Parse one raw record into a field dictionary and append it to rec.

    Each element of lines must be a logical record line (continuations
    already joined, trailing whitespace already stripped).  The module-level
    rec list must already exist; on success a (lnum, fields) pair is
    appended to it after check_record() validates the fields.

    Parameters:

      lnum : int - line number, greater than zero, at which the record
      starts, used for error reporting

      lines : list of strings - the logical lines of the record
    """
    global rec
    # Module state and both parameters must be well-formed; anything else
    # is an internal logic error rather than a data-file problem
    if not isinstance(rec, list):
        raise LogicError()
    if not isinstance(lnum, int):
        raise LogicError()
    if lnum < 1:
        raise LogicError()
    if not isinstance(lines, list):
        raise LogicError()
    for ln in lines:
        if not isinstance(ln, str):
            raise LogicError()
    # Build the mapping of lowercased field names to field values
    fields = dict()
    for ln in lines:
        # Every logical line needs a colon separating name from value
        name_part, sep, value_part = ln.partition(':')
        if not sep:
            raise NoColonError(lnum)
        # Trim both sides and normalize the field name to lowercase
        fname = name_part.strip(' \t\n').lower()
        fval = value_part.strip(' \t\n')
        if fname in ('description', 'comments', 'prefix'):
            # Repeatable fields accumulate all their values in a list
            fields.setdefault(fname, []).append(fval)
        elif fname in fields:
            # Any other field may only occur once per record
            raise MultiFieldError(lnum, fname)
        else:
            fields[fname] = fval
    # Validate the assembled record, then store it with its line number
    check_record(lnum, fields)
    rec.append((lnum, fields))
#
# Public functions
# ----------------
#
# Parse the given subtag data file and store the parsed result in the
# module-level rec variable.
#
# See the module documentation and the documentation of the rec variable
# for further information.
#
# If the rec value is already set, this function call will be ignored.
#
# If the function fails, the rec value will be set to None.
#
# Parameters:
#
# fpath : string - the path to the subtag data file
#
def parse(fpath):
    """Parse the subtag registry data file and populate the module-level rec list.

    The call is ignored if rec is already set.  On success rec holds
    (line_number, field_dict) pairs for every record except the special
    first (timestamp) record.  On any failure rec is reset to None and a
    SubtagError subclass is raised.

    After reading, this function also builds per-category indices and
    verifies cross-record integrity: suppress-script and prefix references,
    and preferred-value mappings (including the special en-GB-oxendict,
    cmn-Hans, and cmn-Hant cases).

    Parameters:

      fpath : string - the path to the subtag data file
    """
    global rec
    # Ignore call if rec already set
    if rec is not None:
        return
    # Check parameter
    if not isinstance(fpath, str):
        rec = None
        raise LogicError()
    # Clear the records variable to an empty list
    rec = []
    # Open the input file as a text file in UTF-8 encoding and parse all
    # the records
    try:
        with open(fpath, mode='rt',
                  encoding='utf-8', errors='strict') as fin:
            # We have the input file open -- read line by line
            lbuf = [] # Buffers record lines
            line_num = 0 # Current line number
            rec_line = 1 # Line at start of current record
            for line in fin:
                # Update line count
                line_num = line_num + 1
                # Trim trailing whitespace and linebreaks, but NOT leading
                # whitespace, which is significant in case of line continuations
                line = line.rstrip(' \t\n')
                # Filter out blank lines that are empty or contain only spaces,
                # tabs, and line breaks
                if len(line) < 1:
                    continue
                # If this line is %% then handle end of record and continue to
                # next record
                if line == '%%':
                    # If the record line of this record is 1, then just clear the
                    # line buffer, update the record line, and continue to next
                    # line without any further processing so that we skip the
                    # special first record
                    if rec_line <= 1:
                        lbuf = []
                        rec_line = line_num + 1
                        continue
                    # If we got here, we're not in the special case of the first
                    # record, so we want to process the raw record
                    raw_record(rec_line, lbuf)
                    # Clear the line buffer and update the record line
                    lbuf = []
                    rec_line = line_num + 1
                    # Continue on to next line
                    continue
                # If the first character of this line is a tab or a space, then
                # we have a continuation line, so process that and continue to
                # next line
                fchar = line[0]
                if (fchar == ' ') or (fchar == '\t'):
                    # Continuation line, so this must not be first line of record
                    if len(lbuf) < 1:
                        raise BadContinueLine(line_num)
                    # Drop leading whitespace and replace with a single leading
                    # space
                    line = ' ' + line.lstrip(' \t')
                    # Add this line to the end of the last line in the record line
                    # buffer
                    lbuf[-1] = lbuf[-1] + line
                    # Continue on to next line
                    continue
                # If we got here, we have a regular record line, so just add
                # that to the line buffer
                lbuf.append(line)
            # If after the loop will still have something in the record
            # buffer, flush this last record
            if len(lbuf) > 0:
                raw_record(rec_line, lbuf)
                lbuf = []
            # All records have been read in and *individually* verified; now we
            # need to build indices of each record type so we can begin
            # validating table consistency across all records
            index_language = dict()
            index_extlang = dict()
            index_script = dict()
            index_region = dict()
            index_variant = dict()
            index_grandfathered = dict()
            index_redundant = dict()
            # Build the indices
            rlen = len(rec)
            for i in range(0, rlen):
                r = rec[i][1]
                vt = r['type']
                if vt == 'language':
                    vn = r['subtag']
                    if vn in index_language:
                        raise RedefinitionError(rec[i][0])
                    index_language[vn] = i
                elif vt == 'extlang':
                    vn = r['subtag']
                    if vn in index_extlang:
                        raise RedefinitionError(rec[i][0])
                    index_extlang[vn] = i
                elif vt == 'script':
                    vn = r['subtag']
                    if vn in index_script:
                        raise RedefinitionError(rec[i][0])
                    index_script[vn] = i
                elif vt == 'region':
                    vn = r['subtag']
                    if vn in index_region:
                        raise RedefinitionError(rec[i][0])
                    index_region[vn] = i
                elif vt == 'variant':
                    vn = r['subtag']
                    if vn in index_variant:
                        raise RedefinitionError(rec[i][0])
                    index_variant[vn] = i
                elif vt == 'grandfathered':
                    vn = r['tag']
                    if vn in index_grandfathered:
                        raise RedefinitionError(rec[i][0])
                    index_grandfathered[vn] = i
                elif vt == 'redundant':
                    vn = r['tag']
                    if vn in index_redundant:
                        raise RedefinitionError(rec[i][0])
                    index_redundant[vn] = i
                else:
                    raise LogicError()
            # Now we can verify the foreign keys in each record to finish
            # verifying the structural integrity of the data
            for rf in rec:
                r = rf[1]
                rt = r['type']
                # If record has a suppress-script field, make sure that it
                # references an existing script
                if 'suppress-script' in r:
                    if r['suppress-script'] not in index_script:
                        raise MissingKeyError(rf[0])
                # If we have a prefix in an extlang record, make sure it
                # references a language
                if ('prefix' in r) and (rt == 'extlang'):
                    for p in r['prefix']:
                        if p not in index_language:
                            raise MissingKeyError(rf[0])
                # If we have prefixes in a variant record, check their references
                if ('prefix' in r) and (rt == 'variant'):
                    for p in r['prefix']:
                        # Split prefix into components around the hyphens
                        pa = p.split('-')
                        # Make sure first component is a defined language
                        if pa[0] not in index_language:
                            raise MissingKeyError(rf[0])
                        # Start at next component (if there is one) and proceed until
                        # all components checked
                        i = 1
                        pt = 'extlang'
                        while i < len(pa):
                            if pt == 'extlang':
                                # Check any extlang tags
                                if (len(pa[i]) == 3) and is_lower_letter(pa[i][0]):
                                    if pa[i] in index_extlang:
                                        i = i + 1
                                        pt = 'script'
                                    else:
                                        raise MissingKeyError(rf[0])
                                else:
                                    pt = 'script'
                            elif pt == 'script':
                                # Check any script tags
                                if (len(pa[i]) == 4) and is_upper_letter(pa[i][0]):
                                    if pa[i] in index_script:
                                        i = i + 1
                                        pt = 'region'
                                    else:
                                        raise MissingKeyError(rf[0])
                                else:
                                    pt = 'region'
                            elif pt == 'region':
                                # Check any region tags
                                if (len(pa[i]) == 2) or \
                                        ((len(pa[i]) == 3) and is_digit(pa[i][0])):
                                    if pa[i] in index_region:
                                        i = i + 1
                                        pt = 'variant'
                                    else:
                                        raise MissingKeyError(rf[0])
                                else:
                                    pt = 'variant'
                            elif pt == 'variant':
                                # Check any variant tags
                                if ((len(pa[i]) == 4) and is_digit(pa[i][0])) or \
                                        (len(pa[i]) > 4):
                                    if pa[i] in index_variant:
                                        i = i + 1
                                    else:
                                        raise MissingKeyError(rf[0])
                                else:
                                    raise MissingKeyError(rf[0])
                            else:
                                raise LogicError()
                # If we have a preferred-value mapping, check that it references
                # a record, and that the referenced record does not itself have a
                # preferred value
                if 'preferred-value' in r:
                    pv = r['preferred-value']
                    if rt == 'language':
                        # Language must refer to an existing language
                        if pv not in index_language:
                            raise MissingKeyError(rf[0])
                        # Referenced language must not have preferred value
                        if 'preferred-value' in rec[index_language[pv]][1]:
                            raise RecursiveMappingError(rf[0])
                    elif rt == 'script':
                        # Script must refer to an existing script
                        if pv not in index_script:
                            raise MissingKeyError(rf[0])
                        # Referenced script must not have preferred value
                        if 'preferred-value' in rec[index_script[pv]][1]:
                            raise RecursiveMappingError(rf[0])
                    elif rt == 'region':
                        # Region must refer to an existing region
                        if pv not in index_region:
                            raise MissingKeyError(rf[0])
                        # Referenced region must not have preferred value
                        if 'preferred-value' in rec[index_region[pv]][1]:
                            raise RecursiveMappingError(rf[0])
                    elif rt == 'variant':
                        # Variant must refer to an existing variant
                        if pv not in index_variant:
                            raise MissingKeyError(rf[0])
                        # Referenced variant must not have preferred value
                        if 'preferred-value' in rec[index_variant[pv]][1]:
                            raise RecursiveMappingError(rf[0])
                    elif rt == 'extlang':
                        # extlang must refer to an existing language
                        if pv not in index_language:
                            raise MissingKeyError(rf[0])
                        # Referenced language must not have preferred value
                        if 'preferred-value' in rec[index_language[pv]][1]:
                            raise RecursiveMappingError(rf[0])
                    elif rt == 'grandfathered':
                        # Grandfathered records must map to language that doesn't have
                        # its own preferred mapping; the weird en-GB-oxendict
                        # preferred value is an exception
                        if pv in index_language:
                            if 'preferred-value' in rec[index_language[pv]][1]:
                                raise RecursiveMappingError(rf[0])
                        elif pv == 'en-GB-oxendict':
                            if 'en' not in index_language:
                                raise MissingKeyError(rf[0])
                            if 'GB' not in index_region:
                                raise MissingKeyError(rf[0])
                            if 'oxendict' not in index_variant:
                                raise MissingKeyError(rf[0])
                            if ('preferred-value' in rec[index_language['en']][1]) or \
                                    ('preferred-value' in rec[index_region['GB']][1]) or \
                                    ('preferred-value' in \
                                        rec[index_variant['oxendict']][1]):
                                raise RecursiveMappingError(rf[0])
                        else:
                            raise MissingKeyError(rf[0])
                    elif rt == 'redundant':
                        # Redundant mappings must refer to existing language that is
                        # not itself remapped, except for cmn-Hans and cmn-Hant
                        if pv in index_language:
                            if 'preferred-value' in rec[index_language[pv]][1]:
                                raise RecursiveMappingError(rf[0])
                        elif pv == 'cmn-Hans':
                            if 'cmn' not in index_language:
                                raise MissingKeyError(rf[0])
                            if 'Hans' not in index_script:
                                raise MissingKeyError(rf[0])
                            if ('preferred-value' in rec[index_language['cmn']][1]) or \
                                    ('preferred-value' in rec[index_script['Hans']][1]):
                                raise RecursiveMappingError(rf[0])
                        elif pv == 'cmn-Hant':
                            if 'cmn' not in index_language:
                                raise MissingKeyError(rf[0])
                            if 'Hant' not in index_script:
                                raise MissingKeyError(rf[0])
                            if ('preferred-value' in rec[index_language['cmn']][1]) or \
                                    ('preferred-value' in rec[index_script['Hant']][1]):
                                raise RecursiveMappingError(rf[0])
                        else:
                            raise MissingKeyError(rf[0])
                    else:
                        raise LogicError()
                # If record is for an extlang, make sure it has a preferred-value
                # and that the preferred-value (language) is equal to the extlang
                # subtag
                if rt == 'extlang':
                    if 'preferred-value' not in r:
                        raise BadExtlangRemap(rf[0])
                    if r['preferred-value'] != r['subtag']:
                        raise BadExtlangRemap(rf[0])
    except FileNotFoundError:
        rec = None
        raise NoDataFileError(fpath)
    except ValueError:
        rec = None
        raise BadDataFile()
    except SubtagError as se:
        rec = None
        raise se
    except Exception as exc:
        rec = None
        raise SubtagError() from exc
class SubtagError(Exception):
    """Base class for all subtag parsing errors raised by this module.

    The name must be SubtagError: the parser raises and catches it under
    that spelling (e.g. `raise SubtagError() from exc`), and every other
    exception class in this module subclasses it.  Also removes the stray
    '| ' artifact that made the class line a syntax error.
    """
    def __str__(self):
        return 'Unknown subtag parsing error!'
class BadContinueLine(SubtagError):
    """Raised when a continuation line appears with no record line before it.

    Renamed from Badcontinueline: the parser raises BadContinueLine(line_num).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag data line ' + str(self.m_line) + ': ' + 'Invalid location for continuation line!'
        else:
            return 'Invalid location for continuation line!'
class BadDataFile(SubtagError):
    """Raised when the data file is not valid UTF-8.

    Renamed from Baddatafile: the parser raises BadDataFile().
    """
    def __str__(self):
        return 'Subtag data file has invalid UTF-8 encoding!'
class BadExtlangRemap(SubtagError):
    """Raised when an extlang record lacks a matching preferred-value mapping.

    Renamed from Badextlangremap: the parser raises BadExtlangRemap(rf[0]).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'extlang record has improper remap!'
        else:
            return 'extlang record has improper remap!'
class BadExtlangSubtag(SubtagError):
    """Raised when an extlang record's subtag value is malformed.

    Renamed from Badextlangsubtag: the validator raises BadExtlangSubtag(lnum, sv).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'extlang subtag ' + self.m_tname + ' is invalid!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid extlang subtag!'
        else:
            return 'Record has invalid extlang subtag!'
class BadLanguageSubtag(SubtagError):
    """Raised when a language record's subtag value is malformed.

    Renamed from Badlanguagesubtag: the validator raises BadLanguageSubtag(lnum, sv).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Language subtag ' + self.m_tname + ' is invalid!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid language subtag!'
        else:
            return 'Record has invalid language subtag!'
class BadPrefix(SubtagError):
    """Raised when a prefix field value is malformed for its record type.

    Renamed from Badprefix: the validator raises BadPrefix(lnum, p).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Prefix ' + self.m_tname + ' is invalid!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid prefix!'
        else:
            return 'Record has invalid prefix!'
class BadRecordType(SubtagError):
    """Raised when a record's Type field is not a recognized category.

    Renamed from Badrecordtype: the validator raises BadRecordType(lnum, f['type']).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record type ' + self.m_tname + ' is unrecognized!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has unrecognized type!'
        else:
            return 'Record has unrecognized type!'
class BadRegionSubtag(SubtagError):
    """Raised when a region record's subtag value is malformed.

    Renamed from Badregionsubtag: the validator raises BadRegionSubtag(lnum, sv).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Region subtag ' + self.m_tname + ' is invalid!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid region subtag!'
        else:
            return 'Record has invalid region subtag!'
class BadScriptSubtag(SubtagError):
    """Raised when a script record's subtag value is malformed.

    Renamed from Badscriptsubtag: the validator raises BadScriptSubtag(lnum, sv).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Script subtag ' + self.m_tname + ' is invalid!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid script subtag!'
        else:
            return 'Record has invalid script subtag!'
class BadScriptSuppress(SubtagError):
    """Raised when a Suppress-Script field value is malformed.

    Renamed from Badscriptsuppress: the validator raises BadScriptSuppress(lnum, sn).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Script suppression ' + self.m_tname + ' is invalid!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid script suppression!'
        else:
            return 'Record has invalid script suppression!'
class BadTagFormat(SubtagError):
    """Raised when a grandfathered/redundant record's Tag value is malformed.

    Renamed from Badtagformat: the validator raises BadTagFormat(lnum, f['tag']).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Tag ' + self.m_tname + ' has invalid format!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid tag format!'
        else:
            return 'Record has invalid tag format!'
class BadVariantSubtag(SubtagError):
    """Raised when a variant record's subtag value is malformed.

    Renamed from Badvariantsubtag: the validator raises BadVariantSubtag(lnum, sv).
    """
    def __init__(self, line=None, tname=None):
        self.m_line = line
        self.m_tname = tname
    def __str__(self):
        if self.m_line is not None and self.m_tname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Variant subtag ' + self.m_tname + ' is invalid!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid variant subtag!'
        else:
            return 'Record has invalid variant subtag!'
class EmptyFieldName(SubtagError):
    """Raised when a record contains an empty field name.

    Renamed from Emptyfieldname: the validator raises EmptyFieldName(lnum).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'One of the record field names is empty!'
        else:
            return 'One of the record field names is empty!'
class InvalidFieldName(SubtagError):
    """Raised when a record field name fails the lowercase-name check.

    Renamed from Invalidfieldname: the validator raises InvalidFieldName(lnum, k).
    """
    def __init__(self, line=None, fname=None):
        self.m_line = line
        self.m_fname = fname
    def __str__(self):
        if self.m_line is not None and self.m_fname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record field ' + self.m_fname + ' has invalid field name!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has invalid field name!'
        else:
            return 'Record has invalid field name!'
class LogicError(SubtagError):
    """Raised for conditions that should be impossible for any input data.

    Renamed from Logicerror: the module raises LogicError() throughout.
    """
    def __str__(self):
        return 'Internal logic error within subtag module!'
class MissingKeyError(SubtagError):
    """Raised when a record references a subtag or tag that is not defined.

    Renamed from Missingkeyerror: the parser raises MissingKeyError(rf[0]).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has broken foreign key!'
        else:
            return 'Record has broken foreign key!'
class MissingTypeError(SubtagError):
    """Raised when a record does not carry a Type field.

    Renamed from Missingtypeerror: the validator raises MissingTypeError(lnum).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record is missing a Type field!'
        else:
            return 'Record is missing a Type field!'
class MultiFieldError(SubtagError):
    """Raised when a non-repeatable field occurs more than once in a record.

    Renamed from Multifielderror: the parser raises MultiFieldError(lnum, fname).
    """
    def __init__(self, line=None, fname=None):
        self.m_line = line
        self.m_fname = fname
    def __str__(self):
        if self.m_line is not None and self.m_fname is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record field ' + self.m_fname + ' is defined more than once!'
        elif self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has redefined field!'
        else:
            return 'Record has redefined field!'
class NoColonError(SubtagError):
    """Raised when a record line contains no colon separator.

    Renamed from Nocolonerror: the parser raises NoColonError(lnum).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag record at line ' + str(self.m_line) + ': ' + 'Record has line without colon!'
        else:
            return 'Record has line without colon!'
class NoDataFileError(SubtagError):
    """Raised when the subtag data file cannot be found at the given path.

    Renamed from Nodatafileerror: parse() raises NoDataFileError(fpath).
    """
    def __init__(self, fpath=None):
        self.m_fpath = fpath
    def __str__(self):
        if self.m_fpath is not None:
            return "Can't find subtag data file " + self.m_fpath
        else:
            return "Can't find subtag data file!"
class PrefixContextError(SubtagError):
    """Raised when a Prefix field appears on a record type that forbids it.

    Renamed from Prefixcontexterror: the validator raises PrefixContextError(lnum).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag data line ' + str(self.m_line) + ': ' + "Prefix field can't be used with this kind of record!"
        else:
            return "Prefix field can't be used with this kind of record!"
class PrefixMultiError(SubtagError):
    """Raised when an extlang record carries more than one Prefix field.

    Renamed from Prefixmultierror: the validator raises PrefixMultiError(lnum).
    """
    def __init__(self, line=None):
        self.m_line = line
    def __str__(self):
        if self.m_line is not None:
            return 'Subtag data line ' + str(self.m_line) + ': ' + "Multiple prefixes can't be used on this kind of record!"
        else:
            return "Multiple prefixes can't be used on this kind of record!"
class Recursivemappingerror(SubtagError):
    """Raised when a Preferred-Value target itself has a Preferred-Value."""

    def __init__(self, line=None):
        # Source line of the offending record, when known.
        self.m_line = line

    def __str__(self):
        base = 'Record has recursive remapping!'
        if self.m_line is None:
            return base
        return 'Subtag data line ' + str(self.m_line) + ': ' + base
class Redefinitionerror(SubtagError):
    """Raised when a record reuses a key an earlier record already defined."""

    def __init__(self, line=None):
        # Source line of the offending record, when known.
        self.m_line = line

    def __str__(self):
        base = 'Record redefines key from previous record!'
        if self.m_line is None:
            return base
        return 'Subtag data line ' + str(self.m_line) + ': ' + base
class Scriptcontexterror(SubtagError):
    """Raised when Suppress-Script appears on a record type that forbids it."""

    def __init__(self, line=None):
        # Source line of the offending record, when known.
        self.m_line = line

    def __str__(self):
        base = "Script suppression can't be used on this kind of record!"
        if self.m_line is None:
            return base
        return 'Subtag data line ' + str(self.m_line) + ': ' + base
class Wrongtagtypeerror(SubtagError):
    """Raised when a record mixes up the Tag and Subtag fields."""

    def __init__(self, line=None):
        # Source line of the offending record, when known.
        self.m_line = line

    def __str__(self):
        base = 'Record has wrong type of tag data!'
        if self.m_line is None:
            return base
        return 'Subtag data line ' + str(self.m_line) + ': ' + base
# Module-level cache of parsed registry records: None until parse() succeeds,
# afterwards a list of (starting_line_number, fields_dict) tuples in file order.
rec = None
def is_lower_letter(c):
    """Return True when c is a single ASCII lowercase letter (a-z)."""
    if not isinstance(c, str) or len(c) != 1:
        return False
    return 'a' <= c <= 'z'
def is_upper_letter(c):
    """Return True when c is a single ASCII uppercase letter (A-Z)."""
    if not isinstance(c, str) or len(c) != 1:
        return False
    return 'A' <= c <= 'Z'
def is_digit(c):
    """Return True when c is a single ASCII decimal digit (0-9)."""
    if not isinstance(c, str) or len(c) != 1:
        return False
    return '0' <= c <= '9'
def is_format_tag(t):
    """Return True when t is a structurally well-formed language tag.

    Checks performed (in order):
      1. t is a non-empty str of ASCII letters, digits, and hyphens only.
      2. Hyphens never appear first, last, or doubled.
      3. Per-subtag casing rules, applied to the hyphen-split parts
         (presumably the BCP 47 casing conventions - confirm):
         - the first (primary language) subtag must be all non-uppercase;
         - once a singleton (1-char subtag) is seen, every following
           subtag must contain no uppercase letters;
         - otherwise: 2-char subtags must contain no lowercase letters,
           4-char subtags must be title-cased (first char not lowercase,
           the rest not uppercase), and all other lengths must contain
           no uppercase letters.
    """
    if not isinstance(t, str):
        return False
    if len(t) < 1:
        return False
    tl = len(t)
    # Pass 1: character set and hyphen placement.
    for x in range(0, tl):
        c = t[x]
        if not is_upper_letter(c) and (not is_lower_letter(c)) and (not is_digit(c)) and (c != '-'):
            return False
        if c == '-':
            if x < 1 or x >= tl - 1:
                return False
            if t[x - 1] == '-':
                return False
    # Pass 2: per-subtag casing rules.
    ta = t.split('-')
    first_tag = True
    found_singleton = False
    for tg in ta:
        if first_tag:
            # Primary language subtag: lowercase/digits only.
            first_tag = False
            for c in tg:
                if is_upper_letter(c):
                    return False
            if len(tg) <= 1:
                found_singleton = True
            continue
        if found_singleton:
            # Extension/private-use region of the tag: no uppercase anywhere.
            for c in tg:
                if is_upper_letter(c):
                    return False
            continue
        if len(tg) < 2:
            # A singleton switches all later subtags to the rule above.
            if is_upper_letter(tg):
                return False
            found_singleton = True
        elif len(tg) == 2:
            # Two-character (region-style) subtags: no lowercase letters.
            for c in tg:
                if is_lower_letter(c):
                    return False
        elif len(tg) == 4:
            # Four-character (script-style) subtags: title case.
            if is_lower_letter(tg[0]) or is_upper_letter(tg[1]) or is_upper_letter(tg[2]) or is_upper_letter(tg[3]):
                return False
        else:
            # Everything else (e.g. variants): no uppercase letters.
            for c in tg:
                if is_upper_letter(c):
                    return False
    return True
def is_core_tag(t):
    """Return True when t is a well-formed tag containing no singleton
    subtags, i.e. every hyphen-separated part is at least two characters."""
    if not is_format_tag(t):
        return False
    return all(len(part) >= 2 for part in t.split('-'))
def valid_category(cname):
    """Return True when cname names a known registry record category.

    Non-string input is rejected (returns False) rather than raising.
    Replaces a seven-way chained `or` comparison with a single
    membership test; accepted values are unchanged.
    """
    if not isinstance(cname, str):
        return False
    return cname in ('language', 'extlang', 'script', 'region',
                     'variant', 'grandfathered', 'redundant')
def has_padding(s):
    """Return True when the string s starts or ends with a space, tab,
    or newline; interior whitespace is ignored.

    Raises on non-string input.
    NOTE(review): logic_error is expected to be defined earlier in this
    file; the exception classes visible nearby use CamelCase names - confirm.
    """
    if not isinstance(s, str):
        raise logic_error()
    if not s:
        return False
    padding = (' ', '\t', '\n')
    return s[0] in padding or s[-1] in padding
def check_record(lnum, f):
    """Validate one parsed registry record.

    lnum: 1-based line number where the record starts (used in errors).
    f: dict mapping lower-case field names to a string value, or to a
       non-empty list of strings for the repeatable fields
       'description', 'comments', and 'prefix'.

    Raises a subtag error on any structural violation; returns None when
    the record is acceptable.
    NOTE(review): the snake_case error constructors raised below
    (logic_error, missing_type_error, bad_record_type, ...) are expected
    to be defined earlier in this file; the exception classes visible
    nearby use CamelCase names - confirm they resolve at runtime.
    """
    if not isinstance(lnum, int):
        raise logic_error()
    if lnum < 1:
        raise logic_error()
    if not isinstance(f, dict):
        raise logic_error()
    # Generic field-shape checks: lower-case unpadded names; list-valued
    # repeatable fields; plain unpadded strings everywhere else.
    for k in list(f):
        if not isinstance(k, str):
            raise logic_error()
        if len(k) < 1:
            raise empty_field_name(lnum)
        if has_padding(k):
            raise logic_error()
        if not k.islower():
            raise invalid_field_name(lnum, k)
        val = f[k]
        if k == 'description' or k == 'comments' or k == 'prefix':
            if not isinstance(val, list):
                raise logic_error()
            if len(val) < 1:
                raise logic_error()
            for e in val:
                if not isinstance(e, str):
                    raise logic_error()
                if has_padding(e):
                    raise logic_error()
        else:
            if not isinstance(val, str):
                raise logic_error()
            if has_padding(val):
                raise logic_error()
    # Every record needs a known type; grandfathered/redundant records
    # carry 'tag', every other type carries 'subtag' (never both).
    if 'type' not in f:
        raise missing_type_error(lnum)
    if not valid_category(f['type']):
        raise bad_record_type(lnum, f['type'])
    if f['type'] == 'grandfathered' or f['type'] == 'redundant':
        if 'tag' not in f or 'subtag' in f:
            raise wrong_tag_type_error(lnum)
    elif 'subtag' not in f or 'tag' in f:
        raise wrong_tag_type_error(lnum)
    # Per-type syntax of the subtag value.  The 8- and 10-character forms
    # with '..' in the middle are registry range entries (e.g. 'qaa..qtz').
    if 'subtag' in f:
        ft = f['type']
        sv = f['subtag']
        if ft == 'language':
            # 2-3 lowercase letters, or an 8-char 'xxx..yyy' range.
            if (len(sv) < 2 or len(sv) > 3) and len(sv) != 8:
                raise bad_language_subtag(lnum, sv)
            if len(sv) == 8:
                if not is_lower_letter(sv[0]) or not is_lower_letter(sv[1]) or (not is_lower_letter(sv[2])) or (sv[3] != '.') or (sv[4] != '.') or (not is_lower_letter(sv[5])) or (not is_lower_letter(sv[6])) or (not is_lower_letter(sv[7])):
                    raise bad_language_subtag(lnum, sv)
            else:
                for c in sv:
                    if not is_lower_letter(c):
                        raise bad_language_subtag(lnum, sv)
        elif ft == 'extlang':
            # Exactly 3 lowercase letters.
            if len(sv) != 3:
                raise bad_extlang_subtag(lnum, sv)
            for c in sv:
                if not is_lower_letter(c):
                    raise bad_extlang_subtag(lnum, sv)
        elif ft == 'script':
            # Title-case 4 letters, or a 10-char 'Xxxx..Yyyy' range.
            if len(sv) == 4:
                if not is_upper_letter(sv[0]) or not is_lower_letter(sv[1]) or (not is_lower_letter(sv[2])) or (not is_lower_letter(sv[3])):
                    raise bad_script_subtag(lnum, sv)
            elif len(sv) == 10:
                if not is_upper_letter(sv[0]) or not is_lower_letter(sv[1]) or (not is_lower_letter(sv[2])) or (not is_lower_letter(sv[3])) or (sv[4] != '.') or (sv[5] != '.') or (not is_upper_letter(sv[6])) or (not is_lower_letter(sv[7])) or (not is_lower_letter(sv[8])) or (not is_lower_letter(sv[9])):
                    raise bad_script_subtag(lnum, sv)
            else:
                raise bad_script_subtag(lnum, sv)
        elif ft == 'region':
            # 2 uppercase letters, 3 digits, or a 6-char 'XX..YY' range.
            if len(sv) == 2:
                if not is_upper_letter(sv[0]) or not is_upper_letter(sv[1]):
                    raise bad_region_subtag(lnum, sv)
            elif len(sv) == 3:
                for c in sv:
                    if not is_digit(c):
                        raise bad_region_subtag(lnum, sv)
            elif len(sv) == 6:
                if not is_upper_letter(sv[0]) or not is_upper_letter(sv[1]) or sv[2] != '.' or (sv[3] != '.') or (not is_upper_letter(sv[4])) or (not is_upper_letter(sv[5])):
                    raise bad_region_subtag(lnum, sv)
            else:
                raise bad_region_subtag(lnum, sv)
        elif ft == 'variant':
            # 4-8 lowercase letters/digits; 4-char variants must begin
            # with a digit.
            if len(sv) < 4 or len(sv) > 8:
                raise bad_variant_subtag(lnum, sv)
            if len(sv) == 4:
                if not is_digit(sv[0]):
                    raise bad_variant_subtag(lnum, sv)
            for c in sv:
                if not is_lower_letter(c) and (not is_digit(c)):
                    raise bad_variant_subtag(lnum, sv)
        else:
            raise logic_error()
    # 'tag' values just need to be well-formed full tags.
    if 'tag' in f:
        if not is_format_tag(f['tag']):
            raise bad_tag_format(lnum, f['tag'])
    # 'prefix' is only legal on extlang (at most one, 2-3 lowercase
    # letters) and variant (any number of core tags) records.
    if 'prefix' in f:
        if f['type'] != 'extlang' and f['type'] != 'variant':
            raise prefix_context_error(lnum)
        if f['type'] == 'extlang':
            if len(f['prefix']) > 1:
                raise prefix_multi_error(lnum)
        if f['type'] == 'extlang':
            for p in f['prefix']:
                if len(p) < 2 or len(p) > 3:
                    raise bad_prefix(lnum, p)
                for c in p:
                    if not is_lower_letter(c):
                        raise bad_prefix(lnum, p)
        if f['type'] == 'variant':
            for p in f['prefix']:
                if not is_core_tag(p):
                    raise bad_prefix(lnum, p)
    # 'suppress-script' is only legal on language/extlang records and
    # must be a title-case 4-letter script code.
    if 'suppress-script' in f:
        if f['type'] != 'language' and f['type'] != 'extlang':
            raise script_context_error(lnum)
        sn = f['suppress-script']
        if len(sn) != 4:
            raise bad_script_suppress(lnum, sn)
        if not is_upper_letter(sn[0]) or not is_lower_letter(sn[1]) or (not is_lower_letter(sn[2])) or (not is_lower_letter(sn[3])):
            raise bad_script_suppress(lnum, sn)
def raw_record(lnum, lines):
    """Parse one record's raw lines into a field dict and append it to rec.

    lnum: 1-based line number where the record starts.
    lines: list of 'Name: value' strings (continuations already folded).

    Each line is split at its first ':'; names are stripped, lower-cased,
    and the repeatable fields 'description'/'comments'/'prefix' accumulate
    into lists while any other repeated name is an error.  The assembled
    dict is validated by check_record() before being cached.
    """
    global rec
    if not isinstance(rec, list):
        raise logic_error()
    if not isinstance(lnum, int):
        raise logic_error()
    if lnum < 1:
        raise logic_error()
    if not isinstance(lines, list):
        raise logic_error()
    for e in lines:
        if not isinstance(e, str):
            raise logic_error()
    rp = dict()
    for e in lines:
        ci = e.find(':')
        if ci < 0:
            raise no_colon_error(lnum)
        fname = ''
        fval = ''
        if ci > 0:
            fname = e[0:ci]
        if ci < len(e) - 1:
            fval = e[ci + 1:]
        fname = fname.strip(' \t\n')
        fval = fval.strip(' \t\n')
        # Field names are case-insensitive; normalize to lower case.
        fname = fname.lower()
        if fname == 'description' or fname == 'comments' or fname == 'prefix':
            # Repeatable fields accumulate in file order.
            if fname in rp:
                rp[fname].append(fval)
            else:
                rp[fname] = [fval]
        else:
            if fname in rp:
                raise multi_field_error(lnum, fname)
            rp[fname] = fval
    check_record(lnum, rp)
    rec.append((lnum, rp))
def parse(fpath):
    """Load and validate the subtag registry file at fpath.

    Populates the module cache 'rec' with (line_number, fields) tuples;
    a second call is a no-op once 'rec' is set.  On any failure 'rec' is
    reset to None and a subtag error is raised.

    File format as handled below: records are separated by '%%' lines,
    blank lines are skipped, and lines starting with a space or tab are
    folded into the previous field line.  The text before the first '%%'
    separator is discarded without validation (the rec_line <= 1 branch) -
    presumably the registry's File-Date header record; confirm.

    After splitting, one key index per record type is built (rejecting
    duplicate keys), then cross-record checks run: Suppress-Script and
    Prefix values must resolve against the indexes, and Preferred-Value
    targets must exist and must not themselves carry a Preferred-Value.
    NOTE(review): the snake_case error constructors used here are expected
    to be defined earlier in this file; the exception classes visible
    nearby use CamelCase names - confirm they resolve at runtime.
    """
    global rec
    if rec is not None:
        return
    if not isinstance(fpath, str):
        rec = None
        raise logic_error()
    rec = []
    try:
        with open(fpath, mode='rt', encoding='utf-8', errors='strict') as fin:
            # --- Pass 1: split the file into raw records. ---
            lbuf = []
            line_num = 0
            rec_line = 1
            for line in fin:
                line_num = line_num + 1
                line = line.rstrip(' \t\n')
                if len(line) < 1:
                    continue
                if line == '%%':
                    if rec_line <= 1:
                        # Content before the first separator is dropped.
                        lbuf = []
                        rec_line = line_num + 1
                        continue
                    raw_record(rec_line, lbuf)
                    lbuf = []
                    rec_line = line_num + 1
                    continue
                fchar = line[0]
                if fchar == ' ' or fchar == '\t':
                    # Continuation: fold into the previous field line,
                    # collapsing the leading whitespace to one space.
                    if len(lbuf) < 1:
                        raise bad_continue_line(line_num)
                    line = ' ' + line.lstrip(' \t')
                    lbuf[-1] = lbuf[-1] + line
                    continue
                lbuf.append(line)
            if len(lbuf) > 0:
                # Flush the final record (no trailing '%%' in the file).
                raw_record(rec_line, lbuf)
                lbuf = []
            # --- Pass 2: index each record type by its key, rejecting
            # duplicates within a type. ---
            index_language = dict()
            index_extlang = dict()
            index_script = dict()
            index_region = dict()
            index_variant = dict()
            index_grandfathered = dict()
            index_redundant = dict()
            rlen = len(rec)
            for i in range(0, rlen):
                r = rec[i][1]
                vt = r['type']
                if vt == 'language':
                    vn = r['subtag']
                    if vn in index_language:
                        raise redefinition_error(rec[i][0])
                    index_language[vn] = i
                elif vt == 'extlang':
                    vn = r['subtag']
                    if vn in index_extlang:
                        raise redefinition_error(rec[i][0])
                    index_extlang[vn] = i
                elif vt == 'script':
                    vn = r['subtag']
                    if vn in index_script:
                        raise redefinition_error(rec[i][0])
                    index_script[vn] = i
                elif vt == 'region':
                    vn = r['subtag']
                    if vn in index_region:
                        raise redefinition_error(rec[i][0])
                    index_region[vn] = i
                elif vt == 'variant':
                    vn = r['subtag']
                    if vn in index_variant:
                        raise redefinition_error(rec[i][0])
                    index_variant[vn] = i
                elif vt == 'grandfathered':
                    vn = r['tag']
                    if vn in index_grandfathered:
                        raise redefinition_error(rec[i][0])
                    index_grandfathered[vn] = i
                elif vt == 'redundant':
                    vn = r['tag']
                    if vn in index_redundant:
                        raise redefinition_error(rec[i][0])
                    index_redundant[vn] = i
                else:
                    raise logic_error()
            # --- Pass 3: cross-record referential checks. ---
            for rf in rec:
                r = rf[1]
                rt = r['type']
                if 'suppress-script' in r:
                    if r['suppress-script'] not in index_script:
                        raise missing_key_error(rf[0])
                if 'prefix' in r and rt == 'extlang':
                    for p in r['prefix']:
                        if p not in index_language:
                            raise missing_key_error(rf[0])
                if 'prefix' in r and rt == 'variant':
                    # Walk each prefix tag left to right; every subtag is
                    # classified by length/shape and, when it matches the
                    # expected class, must exist in that class's index.
                    for p in r['prefix']:
                        pa = p.split('-')
                        if pa[0] not in index_language:
                            raise missing_key_error(rf[0])
                        i = 1
                        pt = 'extlang'
                        while i < len(pa):
                            if pt == 'extlang':
                                if len(pa[i]) == 3 and is_lower_letter(pa[i][0]):
                                    if pa[i] in index_extlang:
                                        i = i + 1
                                        pt = 'script'
                                    else:
                                        raise missing_key_error(rf[0])
                                else:
                                    pt = 'script'
                            elif pt == 'script':
                                if len(pa[i]) == 4 and is_upper_letter(pa[i][0]):
                                    if pa[i] in index_script:
                                        i = i + 1
                                        pt = 'region'
                                    else:
                                        raise missing_key_error(rf[0])
                                else:
                                    pt = 'region'
                            elif pt == 'region':
                                if len(pa[i]) == 2 or (len(pa[i]) == 3 and is_digit(pa[i][0])):
                                    if pa[i] in index_region:
                                        i = i + 1
                                        pt = 'variant'
                                    else:
                                        raise missing_key_error(rf[0])
                                else:
                                    pt = 'variant'
                            elif pt == 'variant':
                                if len(pa[i]) == 4 and is_digit(pa[i][0]) or len(pa[i]) > 4:
                                    if pa[i] in index_variant:
                                        i = i + 1
                                    else:
                                        raise missing_key_error(rf[0])
                                else:
                                    raise missing_key_error(rf[0])
                            else:
                                raise logic_error()
                if 'preferred-value' in r:
                    # The remap target must exist in the appropriate index
                    # and must not itself be remapped (no chains).
                    pv = r['preferred-value']
                    if rt == 'language':
                        if pv not in index_language:
                            raise missing_key_error(rf[0])
                        if 'preferred-value' in rec[index_language[pv]][1]:
                            raise recursive_mapping_error(rf[0])
                    elif rt == 'script':
                        if pv not in index_script:
                            raise missing_key_error(rf[0])
                        if 'preferred-value' in rec[index_script[pv]][1]:
                            raise recursive_mapping_error(rf[0])
                    elif rt == 'region':
                        if pv not in index_region:
                            raise missing_key_error(rf[0])
                        if 'preferred-value' in rec[index_region[pv]][1]:
                            raise recursive_mapping_error(rf[0])
                    elif rt == 'variant':
                        if pv not in index_variant:
                            raise missing_key_error(rf[0])
                        if 'preferred-value' in rec[index_variant[pv]][1]:
                            raise recursive_mapping_error(rf[0])
                    elif rt == 'extlang':
                        # Extlang remaps point at a primary language.
                        if pv not in index_language:
                            raise missing_key_error(rf[0])
                        if 'preferred-value' in rec[index_language[pv]][1]:
                            raise recursive_mapping_error(rf[0])
                    elif rt == 'grandfathered':
                        # Grandfathered remaps are single language subtags,
                        # except the hard-coded 'en-GB-oxendict' compound.
                        if pv in index_language:
                            if 'preferred-value' in rec[index_language[pv]][1]:
                                raise recursive_mapping_error(rf[0])
                        elif pv == 'en-GB-oxendict':
                            if 'en' not in index_language:
                                raise missing_key_error(rf[0])
                            if 'GB' not in index_region:
                                raise missing_key_error(rf[0])
                            if 'oxendict' not in index_variant:
                                raise missing_key_error(rf[0])
                            if 'preferred-value' in rec[index_language['en']][1] or 'preferred-value' in rec[index_region['GB']][1] or 'preferred-value' in rec[index_variant['oxendict']][1]:
                                raise recursive_mapping_error(rf[0])
                        else:
                            raise missing_key_error(rf[0])
                    elif rt == 'redundant':
                        # Redundant remaps are single language subtags,
                        # except the hard-coded cmn-Hans/cmn-Hant compounds.
                        if pv in index_language:
                            if 'preferred-value' in rec[index_language[pv]][1]:
                                raise recursive_mapping_error(rf[0])
                        elif pv == 'cmn-Hans':
                            if 'cmn' not in index_language:
                                raise missing_key_error(rf[0])
                            if 'Hans' not in index_script:
                                raise missing_key_error(rf[0])
                            if 'preferred-value' in rec[index_language['cmn']][1] or 'preferred-value' in rec[index_script['Hans']][1]:
                                raise recursive_mapping_error(rf[0])
                        elif pv == 'cmn-Hant':
                            if 'cmn' not in index_language:
                                raise missing_key_error(rf[0])
                            if 'Hant' not in index_script:
                                raise missing_key_error(rf[0])
                            if 'preferred-value' in rec[index_language['cmn']][1] or 'preferred-value' in rec[index_script['Hant']][1]:
                                raise recursive_mapping_error(rf[0])
                        else:
                            raise missing_key_error(rf[0])
                    else:
                        raise logic_error()
                if rt == 'extlang':
                    # Every extlang record must remap, and its target must
                    # equal its own subtag value.
                    if 'preferred-value' not in r:
                        raise bad_extlang_remap(rf[0])
                    if r['preferred-value'] != r['subtag']:
                        raise bad_extlang_remap(rf[0])
    except FileNotFoundError:
        rec = None
        raise no_data_file_error(fpath)
    except ValueError:
        # Includes UnicodeDecodeError from the strict-UTF-8 read.
        rec = None
        raise bad_data_file()
    except SubtagError as se:
        rec = None
        raise se
    except Exception as exc:
        rec = None
        raise subtag_error() from exc
# For each test case: read an operation string ('R' toggles direction, any
# other character deletes the current front element), a count, and a
# serialized list like [1,2,3]; print the surviving window, or 'error' when
# a delete is attempted on an empty window.
for _ in range(int(input())):
    ops = input()
    count = int(input())
    items = input()[1:-1].split(',')
    left, right = 0, count - 1
    flipped = 0
    failed = False
    for op in ops:
        if op == 'R':
            flipped = (flipped + 1) % 2
        elif left > right:
            print('error')
            failed = True
            break
        elif flipped == 0:
            left += 1
        else:
            right -= 1
    if failed:
        continue
    window = items[left:right + 1]
    if flipped:
        window.reverse()
    print('[' + ','.join(window) + ']')
# Duplicate of the reverse/delete simulation above, kept self-contained.
for _ in range(int(input())):
    ops = input()
    count = int(input())
    items = input()[1:-1].split(',')
    left, right = 0, count - 1
    flipped = 0
    failed = False
    for op in ops:
        if op == 'R':
            flipped = (flipped + 1) % 2
        elif left > right:
            print('error')
            failed = True
            break
        elif flipped == 0:
            left += 1
        else:
            right -= 1
    if failed:
        continue
    window = items[left:right + 1]
    if flipped:
        window.reverse()
    print('[' + ','.join(window) + ']')
# String-reversal demo: an explicit backwards walk vs. slice notation.
my_string = 'abcde fgh'


def reverse_string(string):
    """Return string reversed, built one character at a time."""
    reversed_chars = ''
    index = len(string) - 1
    while index >= 0:
        reversed_chars += string[index]
        index -= 1
    return reversed_chars


def reverse_string2(string):
    """Return string reversed via extended slice notation."""
    return string[::-1]


print(reverse_string(my_string))
print(reverse_string2(my_string))
# Duplicate of the string-reversal demo above, kept self-contained.
my_string = 'abcde fgh'


def reverse_string(string):
    """Return string reversed, built one character at a time."""
    chars = list(string)
    result = ''
    while chars:
        result += chars.pop()
    return result


def reverse_string2(string):
    """Return string reversed via extended slice notation."""
    return string[::-1]


print(reverse_string(my_string))
print(reverse_string2(my_string))
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution:
    def middleNode(self, head: ListNode) -> ListNode:
        """Return the middle node of the list; for an even-length list
        this is the second of the two central nodes."""
        middle = head
        seen = 1
        node = head.next
        while node:
            seen += 1
            # Advance the candidate once for every two nodes visited.
            if seen % 2 == 0:
                middle = middle.next
            node = node.next
        return middle
def middle_node(self, head: ListNode) -> ListNode:
d = 1
current = head.next
middle = head
while current:
d += 1
if d % 2 == 0:
middle = middle.next
current = current.next
return middle |
# Demonstrates building a sentence with str.format placeholders.
name = 'Sina'
last = 'Bakhshandeh'
age = 29
nationality = 'Iran'
a = 'I am Sina Bakhshandeh 29 years old, from iran.'
b = 'I am {} {} {} years old, from {}.'
message = b.format(name, last, age, nationality)
print(message)
# Duplicate of the str.format demo above, kept self-contained.
name = 'Sina'
last = 'Bakhshandeh'
age = 29
nationality = 'Iran'
a = 'I am Sina Bakhshandeh 29 years old, from iran.'
b = 'I am {} {} {} years old, from {}.'
print(b.format(name, last, age, nationality))
#!/usr/bin/env python3
# Count how many lines of the input file are numerically greater than the
# line before them (Advent of Code 2021, day 1, part 1).
# Fixes: the shebang was '#!usr/bin/python3' (missing leading slash, so the
# script could not be executed directly); readlines() materialized the whole
# file where lazy iteration suffices.  Printed output is unchanged.
with open('01/input.txt', 'r') as file:
    increases = 0
    previous = file.readline()
    for line in file:
        if int(line) - int(previous) > 0:
            increases += 1
        previous = line
print(increases)
# Duplicate of the depth-increase counter above, kept self-contained.
with open('01/input.txt', 'r') as file:
    increases = 0
    previous = file.readline()
    for line in file.readlines():
        delta = int(line) - int(previous)
        if delta > 0:
            increases += 1
        previous = line
print(increases)
"""
RHGamestation manager API

Strictly speaking this is not a full API: it is mostly a set of JSON views
for special jobs, such as executing command scripts.
""" | """
RHGamestation manager API

Strictly speaking this is not a full API: it is mostly a set of JSON views
for special jobs, such as executing command scripts.
""" |
# Tutorial of Python comparison, boolean, identity, and membership
# operators.  The bare expressions are evaluated and discarded; the
# trailing comments show the value each one produces.
# Basics
5 == 5 # True
5 == 4 # False
5 != 4 # True
5 > 3 # True
3 < 5 # True
5 >= 3 # True
5 >= 5 # True
[1, 2, 4] > [1, 2, 3] # True (lists compare element by element)
1 < 2 and 5 > 4 # True
(1 < 2) and (5 > 4) # True
1 > 2 or 5 > 4 # True
# Chaining (comparisons can be chained without 'and')
x = 4
x > 3 and x < 5 # True
3 < x < 5 # True
# isinstance
isinstance("Will", str) # True
isinstance("Will", int) # False
isinstance(4.0, float) # True
# is operator checks for exact match (same object identity, not equality)
a = True
b = True
a is b # True, the id for a and b are the same
x = [1, 2, 3]
y = [1, 2, 3]
x is y # False, the id for x and y are different
# in (membership test for sequences and mapping keys)
x = [1, 2, 3]
3 in x # True
5 in x # False
x = [1, 2, 3]
for value in x:
    if (value == 2):
        print("Value is 2") # Value is 2
car = { "model": "chevy", "year": 1970, "color": "red" }
if ("model" in car):
    print("This is a {0}".format(car["model"])) # This is a chevy
| 5 == 5
# Stripped duplicate of the operator tutorial above; each bare expression
# is evaluated and discarded, with its value noted alongside.
5 == 4 # False
5 != 4 # True
5 > 3 # True
3 < 5 # True
5 >= 3 # True
5 >= 5 # True
[1, 2, 4] > [1, 2, 3] # True
1 < 2 and 5 > 4 # True
1 < 2 and 5 > 4 # True
1 > 2 or 5 > 4 # True
x = 4
x > 3 and x < 5 # True
3 < x < 5 # True (chained comparison)
isinstance('Will', str) # True
isinstance('Will', int) # False
isinstance(4.0, float) # True
a = True
b = True
a is b # True (identity)
x = [1, 2, 3]
y = [1, 2, 3]
x is y # False (equal but distinct objects)
x = [1, 2, 3]
3 in x # True
5 in x # False
x = [1, 2, 3]
for value in x:
    if value == 2:
        print('Value is 2')
car = {'model': 'chevy', 'year': 1970, 'color': 'red'}
if 'model' in car:
    print('This is a {0}'.format(car['model']))
#!/usr/bin/python
# -*- coding: utf8
"""
Keywords reserved in any SQL standard
From http://www.postgresql.org/docs/9.4/static/sql-keywords-appendix.html
"""
# The keywords live in one whitespace-separated literal and are split once at
# import time; the resulting list has exactly the same members, in the same
# order, as the former explicit list literal.
sql_reserved_words = """
ABS ABSOLUTE ACTION ADD ALL ALLOCATE ALTER ANALYSE
ANALYZE AND ANY ARE ARRAY ARRAY_AGG ARRAY_MAX_CARDINALITY AS
ASC ASENSITIVE ASSERTION ASYMMETRIC AT ATOMIC AUTHORIZATION AVG
BEGIN BEGIN_FRAME BEGIN_PARTITION BETWEEN BIGINT BINARY BIT BIT_LENGTH
BLOB BOOLEAN BOTH BY CALL CALLED CARDINALITY CASCADE
CASCADED CASE CAST CATALOG CEIL CEILING CHAR CHARACTER
CHARACTER_LENGTH CHAR_LENGTH CHECK CLOB CLOSE COALESCE COLLATE COLLATION
COLLECT COLUMN COMMIT CONDITION CONNECT CONNECTION CONSTRAINT CONSTRAINTS
CONTAINS CONTINUE CONVERT CORR CORRESPONDING COUNT COVAR_POP COVAR_SAMP
CREATE CROSS CUBE CUME_DIST CURRENT CURRENT_CATALOG CURRENT_DATE CURRENT_DEFAULT_TRANSFORM_GROUP
CURRENT_PATH CURRENT_ROLE CURRENT_ROW CURRENT_SCHEMA CURRENT_TIME CURRENT_TIMESTAMP CURRENT_TRANSFORM_GROUP_FOR_TYPE CURRENT_USER
CURSOR CYCLE DATALINK DATE DAY DEALLOCATE DEC DECIMAL
DECLARE DEFAULT DEFERRABLE DEFERRED DELETE DENSE_RANK DEREF DESC
DESCRIBE DESCRIPTOR DETERMINISTIC DIAGNOSTICS DISCONNECT DISTINCT DLNEWCOPY DLPREVIOUSCOPY
DLURLCOMPLETE DLURLCOMPLETEONLY DLURLCOMPLETEWRITE DLURLPATH DLURLPATHONLY DLURLPATHWRITE DLURLSCHEME DLURLSERVER
DLVALUE DO DOMAIN DOUBLE DROP DYNAMIC EACH ELEMENT
ELSE END END-EXEC END_FRAME END_PARTITION EQUALS ESCAPE EVERY
EXCEPT EXCEPTION EXEC EXECUTE EXISTS EXP EXTERNAL EXTRACT
FALSE FETCH FILTER FIRST FIRST_VALUE FLOAT FLOOR FOR
FOREIGN FOUND FRAME_ROW FREE FROM FULL FUNCTION FUSION
GET GLOBAL GO GOTO GRANT GROUP GROUPING GROUPS
HAVING HOLD HOUR IDENTITY IMMEDIATE IMPORT IN INDICATOR
INITIALLY INNER INOUT INPUT INSENSITIVE INSERT INT INTEGER
INTERSECT INTERSECTION INTERVAL INTO IS ISOLATION JOIN KEY
LAG LANGUAGE LARGE LAST LAST_VALUE LATERAL LEAD LEADING
LEFT LEVEL LIKE LIKE_REGEX LIMIT LN LOCAL LOCALTIME
LOCALTIMESTAMP LOWER MATCH MAX MAX_CARDINALITY MEMBER MERGE METHOD
MIN MINUTE MOD MODIFIES MODULE MONTH MULTISET NAMES
NATIONAL NATURAL NCHAR NCLOB NEW NEXT NO NONE
NORMALIZE NOT NTH_VALUE NTILE NULL NULLIF NUMERIC OCCURRENCES_REGEX
OCTET_LENGTH OF OFFSET OLD ON ONLY OPEN OPTION
OR ORDER OUT OUTER OUTPUT OVER OVERLAPS OVERLAY
PAD PARAMETER PARTIAL PARTITION PERCENT PERCENTILE_CONT PERCENTILE_DISC PERCENT_RANK
PERIOD PLACING PORTION POSITION POSITION_REGEX POWER PRECEDES PRECISION
PREPARE PRESERVE PRIMARY PRIOR PRIVILEGES PROCEDURE PUBLIC RANGE
RANK READ READS REAL RECURSIVE REF REFERENCES REFERENCING
REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY
REGR_SYY RELATIVE RELEASE RESTRICT RESULT RETURN RETURNING RETURNS
REVOKE RIGHT ROLLBACK ROLLUP ROW ROWS ROW_NUMBER SAVEPOINT
SCHEMA SCOPE SCROLL SEARCH SECOND SECTION SELECT SENSITIVE
SESSION SESSION_USER SET SIMILAR SIZE SMALLINT SOME SPACE
SPECIFIC SPECIFICTYPE SQL SQLCODE SQLERROR SQLEXCEPTION SQLSTATE SQLWARNING
SQRT START STATIC STDDEV_POP STDDEV_SAMP SUBMULTISET SUBSTRING SUBSTRING_REGEX
SUCCEEDS SUM SYMMETRIC SYSTEM SYSTEM_TIME SYSTEM_USER TABLE TABLESAMPLE
TEMPORARY THEN TIME TIMESTAMP TIMEZONE_HOUR TIMEZONE_MINUTE TO TRAILING
TRANSACTION TRANSLATE TRANSLATE_REGEX TRANSLATION TREAT TRIGGER TRIM TRIM_ARRAY
TRUE TRUNCATE UESCAPE UNION UNIQUE UNKNOWN UNNEST UPDATE
UPPER USAGE USER USING VALUE VALUES VALUE_OF VARBINARY
VARCHAR VARIADIC VARYING VAR_POP VAR_SAMP VERSIONING VIEW WHEN
WHENEVER WHERE WIDTH_BUCKET WINDOW WITH WITHIN WITHOUT WORK
WRITE XML XMLAGG XMLATTRIBUTES XMLBINARY XMLCAST XMLCOMMENT XMLCONCAT
XMLDOCUMENT XMLELEMENT XMLEXISTS XMLFOREST XMLITERATE XMLNAMESPACES XMLPARSE XMLPI
XMLQUERY XMLSERIALIZE XMLTABLE XMLTEXT XMLVALIDATE YEAR ZONE
""".split()
"""
Keywords reserved in any SQL standard
From http://www.postgresql.org/docs/9.4/static/sql-keywords-appendix.html
"""
# Duplicate of the keyword table above, stored as one whitespace-separated
# literal and split once at import time (same members, same order).
sql_reserved_words = """
ABS ABSOLUTE ACTION ADD ALL ALLOCATE ALTER ANALYSE
ANALYZE AND ANY ARE ARRAY ARRAY_AGG ARRAY_MAX_CARDINALITY AS
ASC ASENSITIVE ASSERTION ASYMMETRIC AT ATOMIC AUTHORIZATION AVG
BEGIN BEGIN_FRAME BEGIN_PARTITION BETWEEN BIGINT BINARY BIT BIT_LENGTH
BLOB BOOLEAN BOTH BY CALL CALLED CARDINALITY CASCADE
CASCADED CASE CAST CATALOG CEIL CEILING CHAR CHARACTER
CHARACTER_LENGTH CHAR_LENGTH CHECK CLOB CLOSE COALESCE COLLATE COLLATION
COLLECT COLUMN COMMIT CONDITION CONNECT CONNECTION CONSTRAINT CONSTRAINTS
CONTAINS CONTINUE CONVERT CORR CORRESPONDING COUNT COVAR_POP COVAR_SAMP
CREATE CROSS CUBE CUME_DIST CURRENT CURRENT_CATALOG CURRENT_DATE CURRENT_DEFAULT_TRANSFORM_GROUP
CURRENT_PATH CURRENT_ROLE CURRENT_ROW CURRENT_SCHEMA CURRENT_TIME CURRENT_TIMESTAMP CURRENT_TRANSFORM_GROUP_FOR_TYPE CURRENT_USER
CURSOR CYCLE DATALINK DATE DAY DEALLOCATE DEC DECIMAL
DECLARE DEFAULT DEFERRABLE DEFERRED DELETE DENSE_RANK DEREF DESC
DESCRIBE DESCRIPTOR DETERMINISTIC DIAGNOSTICS DISCONNECT DISTINCT DLNEWCOPY DLPREVIOUSCOPY
DLURLCOMPLETE DLURLCOMPLETEONLY DLURLCOMPLETEWRITE DLURLPATH DLURLPATHONLY DLURLPATHWRITE DLURLSCHEME DLURLSERVER
DLVALUE DO DOMAIN DOUBLE DROP DYNAMIC EACH ELEMENT
ELSE END END-EXEC END_FRAME END_PARTITION EQUALS ESCAPE EVERY
EXCEPT EXCEPTION EXEC EXECUTE EXISTS EXP EXTERNAL EXTRACT
FALSE FETCH FILTER FIRST FIRST_VALUE FLOAT FLOOR FOR
FOREIGN FOUND FRAME_ROW FREE FROM FULL FUNCTION FUSION
GET GLOBAL GO GOTO GRANT GROUP GROUPING GROUPS
HAVING HOLD HOUR IDENTITY IMMEDIATE IMPORT IN INDICATOR
INITIALLY INNER INOUT INPUT INSENSITIVE INSERT INT INTEGER
INTERSECT INTERSECTION INTERVAL INTO IS ISOLATION JOIN KEY
LAG LANGUAGE LARGE LAST LAST_VALUE LATERAL LEAD LEADING
LEFT LEVEL LIKE LIKE_REGEX LIMIT LN LOCAL LOCALTIME
LOCALTIMESTAMP LOWER MATCH MAX MAX_CARDINALITY MEMBER MERGE METHOD
MIN MINUTE MOD MODIFIES MODULE MONTH MULTISET NAMES
NATIONAL NATURAL NCHAR NCLOB NEW NEXT NO NONE
NORMALIZE NOT NTH_VALUE NTILE NULL NULLIF NUMERIC OCCURRENCES_REGEX
OCTET_LENGTH OF OFFSET OLD ON ONLY OPEN OPTION
OR ORDER OUT OUTER OUTPUT OVER OVERLAPS OVERLAY
PAD PARAMETER PARTIAL PARTITION PERCENT PERCENTILE_CONT PERCENTILE_DISC PERCENT_RANK
PERIOD PLACING PORTION POSITION POSITION_REGEX POWER PRECEDES PRECISION
PREPARE PRESERVE PRIMARY PRIOR PRIVILEGES PROCEDURE PUBLIC RANGE
RANK READ READS REAL RECURSIVE REF REFERENCES REFERENCING
REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY
REGR_SYY RELATIVE RELEASE RESTRICT RESULT RETURN RETURNING RETURNS
REVOKE RIGHT ROLLBACK ROLLUP ROW ROWS ROW_NUMBER SAVEPOINT
SCHEMA SCOPE SCROLL SEARCH SECOND SECTION SELECT SENSITIVE
SESSION SESSION_USER SET SIMILAR SIZE SMALLINT SOME SPACE
SPECIFIC SPECIFICTYPE SQL SQLCODE SQLERROR SQLEXCEPTION SQLSTATE SQLWARNING
SQRT START STATIC STDDEV_POP STDDEV_SAMP SUBMULTISET SUBSTRING SUBSTRING_REGEX
SUCCEEDS SUM SYMMETRIC SYSTEM SYSTEM_TIME SYSTEM_USER TABLE TABLESAMPLE
TEMPORARY THEN TIME TIMESTAMP TIMEZONE_HOUR TIMEZONE_MINUTE TO TRAILING
TRANSACTION TRANSLATE TRANSLATE_REGEX TRANSLATION TREAT TRIGGER TRIM TRIM_ARRAY
TRUE TRUNCATE UESCAPE UNION UNIQUE UNKNOWN UNNEST UPDATE
UPPER USAGE USER USING VALUE VALUES VALUE_OF VARBINARY
VARCHAR VARIADIC VARYING VAR_POP VAR_SAMP VERSIONING VIEW WHEN
WHENEVER WHERE WIDTH_BUCKET WINDOW WITH WITHIN WITHOUT WORK
WRITE XML XMLAGG XMLATTRIBUTES XMLBINARY XMLCAST XMLCOMMENT XMLCONCAT
XMLDOCUMENT XMLELEMENT XMLEXISTS XMLFOREST XMLITERATE XMLNAMESPACES XMLPARSE XMLPI
XMLQUERY XMLSERIALIZE XMLTABLE XMLTEXT XMLVALIDATE YEAR ZONE
""".split()
# The comment list below appears to be a legend of diagram container types
# ordered from outermost to innermost nesting - TODO confirm against the
# code that consumes 'groups'.
# Region
# VPC
# Private Subnet
# Public Subnet
# Security Group
# Availability Zone
# AWS Step Functions Workflow
# Elastic Beanstalk container
# Auto Scaling Group
# Server contents
# EC2 instance contents
# Spot Fleet
# Maps a CloudFormation resource-type prefix to its nesting level; smaller
# levels are outer containers, 'Default' applies to anything unmatched.
groups = {
    'AWS::AccountId': {'level': 0},
    'AWS::Region': {'level': 1},
    'AWS::IAM::': {'level': 2},
    'AWS::EC2::VPC': {'level': 3},
    'AvailabilityZone': {'level': 4},
    'AWS::EC2::SecurityGroup': {'level': 4},
    'AWS::EC2::Subnet': {'level': 5},
    'Default': {'level': 6} # e.g. EC2 instance
}
# Resource types excluded from rendering.
blacklist_resource_types = [
    'AWS::SSM::Parameter',
    'AWS::Lambda::Permission',
    'AWS::ApiGateway::Deployment',
    'AWS::Lambda::Version',
    'AWS::Lambda::LayerVersionPermission',
    'AWS::IAM::ManagedPolicy',
    'AWS::SQS::QueuePolicy'
]
# Icon file per resource type, taken from the official AWS icon set:
# https://aws.amazon.com/architecture/icons/
resource_type_image = {
    'AWS::Serverless::Function': 'AWS-Lambda_Lambda-Function_light-bg@4x.png',
    'AWS::Serverless::LayerVersion': 'AWS-Lambda@4x.png',
    # 'AWS::Lambda::LayerVersionPermission': '',
    'AWS::Serverless::Api': 'Amazon-API-Gateway_Endpoint_light-bg@4x.png',
    'AWS::IAM::Role': 'AWS-Identity-and-Access-Management-IAM_Role_light-bg@4x.png',
    'AWS::ApiGateway::Account': 'Amazon-API-Gateway_Endpoint_light-bg@4x.png',
    'AWS::Logs::LogGroup': 'Amazon-CloudWatch@4x.png',
    'AWS::S3::Bucket': 'Amazon-Simple-Storage-Service-S3_Bucket_light-bg@4x.png',
    'AWS::SNS::Topic': 'Amazon-Simple-Notification-Service-SNS_Topic_light-bg@4x.png',
    'AWS::SQS::Queue': 'Amazon-Simple-Queue-Service-SQS_Queue_light-bg@4x.png',
    # 'AWS::SQS::QueuePolicy': '',
    'AWS::SNS::Subscription': 'Amazon-Simple-Notification-Service-SNS_light-bg@4x.png',
    'AWS::IAM::ManagedPolicy': 'AWS-Identity-and-Access-Management-IAM_Permissions_light-bg@4x.png',
    'AWS::SSM::Parameter': 'AWS-Systems-Manager_Parameter-Store_light-bg@4x.png'
}
# Nesting level per CloudFormation resource-type prefix ('Default' applies
# to anything unmatched).
groups = {
    'AWS::AccountId': {'level': 0},
    'AWS::Region': {'level': 1},
    'AWS::IAM::': {'level': 2},
    'AWS::EC2::VPC': {'level': 3},
    'AvailabilityZone': {'level': 4},
    'AWS::EC2::SecurityGroup': {'level': 4},
    'AWS::EC2::Subnet': {'level': 5},
    'Default': {'level': 6},
}
# Resource types excluded from rendering.
blacklist_resource_types = [
    'AWS::SSM::Parameter',
    'AWS::Lambda::Permission',
    'AWS::ApiGateway::Deployment',
    'AWS::Lambda::Version',
    'AWS::Lambda::LayerVersionPermission',
    'AWS::IAM::ManagedPolicy',
    'AWS::SQS::QueuePolicy',
]
# Icon file per resource type (AWS architecture icon set).
resource_type_image = {
    'AWS::Serverless::Function': 'AWS-Lambda_Lambda-Function_light-bg@4x.png',
    'AWS::Serverless::LayerVersion': 'AWS-Lambda@4x.png',
    'AWS::Serverless::Api': 'Amazon-API-Gateway_Endpoint_light-bg@4x.png',
    'AWS::IAM::Role': 'AWS-Identity-and-Access-Management-IAM_Role_light-bg@4x.png',
    'AWS::ApiGateway::Account': 'Amazon-API-Gateway_Endpoint_light-bg@4x.png',
    'AWS::Logs::LogGroup': 'Amazon-CloudWatch@4x.png',
    'AWS::S3::Bucket': 'Amazon-Simple-Storage-Service-S3_Bucket_light-bg@4x.png',
    'AWS::SNS::Topic': 'Amazon-Simple-Notification-Service-SNS_Topic_light-bg@4x.png',
    'AWS::SQS::Queue': 'Amazon-Simple-Queue-Service-SQS_Queue_light-bg@4x.png',
    'AWS::SNS::Subscription': 'Amazon-Simple-Notification-Service-SNS_light-bg@4x.png',
    'AWS::IAM::ManagedPolicy': 'AWS-Identity-and-Access-Management-IAM_Permissions_light-bg@4x.png',
    'AWS::SSM::Parameter': 'AWS-Systems-Manager_Parameter-Store_light-bg@4x.png',
}
# Writes 5000 copies of a fixed lorem-ipsum paragraph to the file
# 'latin_text'.
# NOTE(review): xrange implies Python 2; under Python 3 this raises
# NameError.  Also, 'rebummediocrem' lost a space where the original
# literal was split across lines - confirm whether that is intended.
paragraph = 'Lorem ipsum dolor sit amet, est malis molestiae no,\nrebummediocrem vituperatoribus qui et. Quando intellegam ne mea, utroque\n voluptua sensibus nam te. In duo accusam accusamus, mea ad iriure detracto\nsigniferumque. Veri complectitur concludaturque te sed. Ad pri intellegam\ncomprehensam. Detracto pertinax pri ex, usu ne animal mandamus, sit ut\ndelectus forensibus.\n\n'
f = open('latin_text', 'w')
for _ in xrange(5000):
    # The paragraph is invariant, so it is built once above the loop.
    f.write(paragraph)
f.close()
# Duplicate of the lorem-ipsum writer above, kept self-contained.
# NOTE(review): xrange implies Python 2; under Python 3 this raises NameError.
f = open('latin_text', 'w')
paragraph = 'Lorem ipsum dolor sit amet, est malis molestiae no,\nrebummediocrem vituperatoribus qui et. Quando intellegam ne mea, utroque\n voluptua sensibus nam te. In duo accusam accusamus, mea ad iriure detracto\nsigniferumque. Veri complectitur concludaturque te sed. Ad pri intellegam\ncomprehensam. Detracto pertinax pri ex, usu ne animal mandamus, sit ut\ndelectus forensibus.\n\n'
for _ in xrange(5000):
    f.write(paragraph)
f.close()
'''
Created on Dec 5, 2012
@author: arnaud
'''
# NOTE: the entire Selenium integration test below is disabled by being kept
# inside a module-level string literal; nothing in it executes at import
# time.  It exercises a Facebook-based student login flow against a live
# Django test server.
"""
from UniShared_python.website.models import UserProfile
from django.contrib.auth.models import User
from django.test.testcases import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from social_auth.db.django_models import UserSocialAuth
class IntegrationLoginTest(LiveServerTestCase):
    @classmethod
    def setUpClass(cls):
        cls.selenium = webdriver.Chrome('/Users/arnaud/Downloads/chromedriver')
        super(IntegrationLoginTest, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        UserProfile.objects.all().delete()
        UserSocialAuth.objects.all().delete()
        User.objects.all().delete()
        cls.selenium.quit()
        super(IntegrationLoginTest, cls).tearDownClass()
    def test_login_student(self):
        self.selenium.get('%s%s' % (self.live_server_url, '/'))
        self.selenium.find_element_by_css_selector("a.fb_connect").click()
        self.selenium.find_element_by_id("email").clear()
        self.selenium.find_element_by_id("email").send_keys("arnaud@unishared.com")
        self.selenium.find_element_by_id("pass").clear()
        self.selenium.find_element_by_id("pass").send_keys("makesense")
        self.selenium.find_element_by_id("loginbutton").click()
        self.selenium.find_element_by_link_text("Student").click()
        school_field = self.selenium.find_element_by_id("school")
        WebDriverWait(self.selenium, 10).until(
            lambda driver : school_field.is_displayed()
        )
        school_field.send_keys("test")
        self.selenium.find_element_by_xpath("(//button[@type='submit'])[1]").click()
        email_field = self.selenium.find_element_by_id("email")
        WebDriverWait(self.selenium, 10).until(
            lambda driver : email_field.is_displayed()
        )
        email_field.send_keys("arnaud@unishared.com")
        self.selenium.find_element_by_xpath("(//button[@type='submit'])[2]").click()
        WebDriverWait(self.selenium, 10).until(
            lambda driver : "profile" in self.selenium.current_url
        )
        self.assertTrue("profile" in self.selenium.current_url, 'Not on profile : {0}'.format(self.selenium.current_url))
"""
Created on Dec 5, 2012
@author: arnaud
"""
'\nfrom UniShared_python.website.models import UserProfile\nfrom django.contrib.auth.models import User\nfrom django.test.testcases import LiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom social_auth.db.django_models import UserSocialAuth\n\nclass IntegrationLoginTest(LiveServerTestCase):\n @classmethod\n def setUpClass(cls):\n cls.selenium = webdriver.Chrome(\'/Users/arnaud/Downloads/chromedriver\')\n super(IntegrationLoginTest, cls).setUpClass()\n\n @classmethod\n def tearDownClass(cls):\n UserProfile.objects.all().delete()\n UserSocialAuth.objects.all().delete()\n User.objects.all().delete()\n \n cls.selenium.quit()\n super(IntegrationLoginTest, cls).tearDownClass()\n\n def test_login_student(self):\n self.selenium.get(\'%s%s\' % (self.live_server_url, \'/\'))\n\n self.selenium.find_element_by_css_selector("a.fb_connect").click()\n self.selenium.find_element_by_id("email").clear()\n self.selenium.find_element_by_id("email").send_keys("arnaud@unishared.com")\n self.selenium.find_element_by_id("pass").clear()\n self.selenium.find_element_by_id("pass").send_keys("makesense")\n self.selenium.find_element_by_id("loginbutton").click()\n self.selenium.find_element_by_link_text("Student").click()\n \n school_field = self.selenium.find_element_by_id("school")\n \n WebDriverWait(self.selenium, 10).until(\n lambda driver : school_field.is_displayed()\n )\n \n school_field.send_keys("test")\n \n self.selenium.find_element_by_xpath("(//button[@type=\'submit\'])[1]").click()\n \n email_field = self.selenium.find_element_by_id("email")\n \n WebDriverWait(self.selenium, 10).until(\n lambda driver : email_field.is_displayed()\n )\n email_field.send_keys("arnaud@unishared.com")\n self.selenium.find_element_by_xpath("(//button[@type=\'submit\'])[2]").click()\n \n WebDriverWait(self.selenium, 10).until(\n lambda driver : "profile" in self.selenium.current_url \n )\n \n self.assertTrue("profile" in 
self.selenium.current_url, \'Not on profile : {0}\'.format(self.selenium.current_url))\n' |
def multi_inverse(b, n):
    """Return the multiplicative inverse of ``b`` modulo ``n``.

    Uses the extended Euclidean algorithm. The result is normalized into
    the range [0, n), so callers always receive a canonical inverse
    (the previous version could return a negative representative).

    Args:
        b: value to invert.
        n: modulus.

    Returns:
        The integer t in [0, n) with (b * t) % n == 1.

    Raises:
        ValueError: if gcd(b, n) != 1, i.e. no inverse exists (the
            previous version crashed with ZeroDivisionError or
            UnboundLocalError in that case).
    """
    r1, r2 = n, b
    t1, t2 = 0, 1
    # Standard extended Euclid: maintain t1, t2 so that t2 * b ≡ r2 (mod n).
    while r2 > 0:
        q, r = divmod(r1, r2)
        r1, r2 = r2, r
        t1, t2 = t2, t1 - q * t2
    if r1 != 1:
        raise ValueError("%d has no inverse modulo %d" % (b, n))
    return t1 % n
| def multi_inverse(b, n):
r1 = n
r2 = b
t1 = 0
t2 = 1
while r1 > 0:
q = int(r1 / r2)
r = r1 - q * r2
r1 = r2
r2 = r
t = t1 - q * t2
t1 = t2
t2 = t
if r1 == 1:
inv_t = t1
break
return inv_t |
#Data : 2018-10-15
#Author : Fengyuan Zhang (Franklin)
#Email : franklinzhang@foxmail.com
class ModelDataHandler:
def __init__(self, context):
self.mContext = context
self.mExecutionPath = ''
self.mZipExecutionPath = ''
self.mExecutionName = ''
self.mSavePath = ''
self.mSaveName = ''
self.mReturnFileFullName = ''
def connectDataMappingMethod(self, execName):
self.mExecutionName = execName
self.mZipExecutionPath = self.mContext.getMappingLibrary()
if self.mZipExecutionPath[ : -1] != '\\':
self.mZipExecutionPath = self.mZipExecutionPath + '\\'
self.mExecutionPath = self.mContext.onGetModelAssembly(execName)
# self.mContext.onPostMessageInfo(execName)
if self.mExecutionPath[ : -1] != '\\':
self.mExecutionPath = self.mExecutionPath + '\\'
self.mSaveName = ''
self.mSavePath = ''
self.mReturnFileFullName = ''
def configureWorkingDirection(self, savePath):
if savePath == '':
self.mSavePath = self.mContext.getModelInstanceDirectory()
else :
self.mSavePath = savePath
if self.mSavePath[ : -1] != '\\':
self.mSavePath = self.mSavePath + '\\'
def conductUDXMapping(self, resultSaveName):
pass
def conductFileMapping(self, list_rawFiles):
pass
def getRealResultSaveName(self):
pass
def doRequestEvent_MappingData(self, resultSaveName, name, value, type):
pass
| class Modeldatahandler:
def __init__(self, context):
self.mContext = context
self.mExecutionPath = ''
self.mZipExecutionPath = ''
self.mExecutionName = ''
self.mSavePath = ''
self.mSaveName = ''
self.mReturnFileFullName = ''
def connect_data_mapping_method(self, execName):
self.mExecutionName = execName
self.mZipExecutionPath = self.mContext.getMappingLibrary()
if self.mZipExecutionPath[:-1] != '\\':
self.mZipExecutionPath = self.mZipExecutionPath + '\\'
self.mExecutionPath = self.mContext.onGetModelAssembly(execName)
if self.mExecutionPath[:-1] != '\\':
self.mExecutionPath = self.mExecutionPath + '\\'
self.mSaveName = ''
self.mSavePath = ''
self.mReturnFileFullName = ''
def configure_working_direction(self, savePath):
if savePath == '':
self.mSavePath = self.mContext.getModelInstanceDirectory()
else:
self.mSavePath = savePath
if self.mSavePath[:-1] != '\\':
self.mSavePath = self.mSavePath + '\\'
def conduct_udx_mapping(self, resultSaveName):
pass
def conduct_file_mapping(self, list_rawFiles):
pass
def get_real_result_save_name(self):
pass
def do_request_event__mapping_data(self, resultSaveName, name, value, type):
pass |
"""Path counting solutions."""
def count_path_recursive(m, n):
"""Count number of paths with the recursive method."""
def traverse(m, n, location=[1, 1]):
# return 0 if past edge
if location[0] > m or location[1] > n:
return 0
# return 1 if at end position
if location == [m, n]:
return 1
return traverse(m, n, [location[0] + 1, location[1]]) + traverse(m, n, [location[0], location[1] + 1])
return traverse(m, n)
def count_path_dynamic(m, n):
"""Count number of paths with dynamic method."""
# create 2d array to store values
paths = [[0 for x in range(n)] for y in range(m)]
# set num of paths on edges to one
for i in range(m):
paths[i][0] = 1
for i in range(n):
paths[0][i] = 1
# calculate num paths
for i in range(1, m):
for j in range(n):
paths[i][j] = paths[i - 1][j] + paths[i][j - 1]
return paths[m - 1][n - 1]
| """Path counting solutions."""
def count_path_recursive(m, n):
"""Count number of paths with the recursive method."""
def traverse(m, n, location=[1, 1]):
if location[0] > m or location[1] > n:
return 0
if location == [m, n]:
return 1
return traverse(m, n, [location[0] + 1, location[1]]) + traverse(m, n, [location[0], location[1] + 1])
return traverse(m, n)
def count_path_dynamic(m, n):
"""Count number of paths with dynamic method."""
paths = [[0 for x in range(n)] for y in range(m)]
for i in range(m):
paths[i][0] = 1
for i in range(n):
paths[0][i] = 1
for i in range(1, m):
for j in range(n):
paths[i][j] = paths[i - 1][j] + paths[i][j - 1]
return paths[m - 1][n - 1] |
class Solution:
    def trap(self, height: List[int]) -> int:
        """Total rain water trapped between the bars of ``height``.

        Uses the prefix/suffix running-maximum formulation: the water
        held above bar i is bounded by the smaller of the tallest bar
        to its left and to its right (inclusive of i), minus the bar's
        own height.
        """
        if len(height) < 3:
            return 0
        # Running maxima including the current bar, from each direction.
        left_max = list(accumulate(height, max))
        right_max = list(accumulate(reversed(height), max))[::-1]
        return sum(
            min(lo, hi) - h
            for lo, hi, h in zip(left_max, right_max, height)
        )
| class Solution:
def trap(self, height: List[int]) -> int:
if len(height) < 3:
return 0
max_left = [0] * len(height)
max_right = [0] * len(height)
for i in range(1, len(height)):
max_left[i] = max(height[i - 1], max_left[i - 1])
for i in range(len(height) - 2, 0, -1):
max_right[i] = max(height[i + 1], max_right[i + 1])
res = 0
for i in range(1, len(height) - 1):
min_height = min(max_left[i], max_right[i])
if min_height > height[i]:
res += min_height - height[i]
return res |
__version__ = "0.2.2"
__license__ = "MIT License"
__website__ = "https://code.exrny.com/opensource/vulcan-builder/"
__download_url__ = ('https://github.com/exrny/vulcan-builder/archive/'
'{}.tar.gz'.format(__version__))
| __version__ = '0.2.2'
__license__ = 'MIT License'
__website__ = 'https://code.exrny.com/opensource/vulcan-builder/'
__download_url__ = 'https://github.com/exrny/vulcan-builder/archive/{}.tar.gz'.format(__version__) |
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
class Tag:
"""Represents a single Gherkin Tag"""
def __init__(self, name: str, path: str, line: int) -> None:
self.name = name
self.path = path
self.line = line
def __repr__(self) -> str:
return "<Tag: {name} @ {path}:{line}>".format(
name=self.name, path=self.path, line=self.line
) # pragma: no cover
| """
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
class Tag:
"""Represents a single Gherkin Tag"""
def __init__(self, name: str, path: str, line: int) -> None:
self.name = name
self.path = path
self.line = line
def __repr__(self) -> str:
return '<Tag: {name} @ {path}:{line}>'.format(name=self.name, path=self.path, line=self.line) |
"""
Created on Mar 27, 2015
@author: maxz
"""


def lim(x, perc=.1):
    """Return (lo, hi) plot limits for ``x``, padded by ``perc`` of its range.

    ``x`` must expose array-style min()/max() methods (e.g. a numpy array).
    """
    lo, hi = x.min(), x.max()
    pad = perc * (hi - lo)
    return lo - pad, hi + pad
| """
Created on Mar 27, 2015
@author: maxz
"""
def lim(x, perc=0.1):
r = x.max() - x.min()
return (x.min() - perc * r, x.max() + perc * r) |
def most_frequent(data: list) -> str:
    """Return the most frequently occurring string in ``data``.

    Counting once with a Counter makes this O(n) instead of the O(n**2)
    of ``max(data, key=data.count)``.  Ties are still broken in favor of
    the earliest element of ``data`` with the maximal count, exactly as
    before (``max`` keeps the first maximum it sees).
    """
    from collections import Counter

    counts = Counter(data)
    return max(data, key=lambda item: counts[item])
if __name__ == '__main__':
    # Self-checks only; the auto-grader does not rely on these asserts.
    print('Example:')
    sample = ['a', 'b', 'c', 'a', 'b', 'a']
    print(most_frequent(sample))
    assert most_frequent(sample) == 'a'
    assert most_frequent(['a', 'a', 'bi', 'bi', 'bi']) == 'bi'
    print('Done')
| def most_frequent(data: list) -> str:
"""
determines the most frequently occurring string in the sequence.
"""
return max(data, key=lambda x: data.count(x))
if __name__ == '__main__':
print('Example:')
print(most_frequent(['a', 'b', 'c', 'a', 'b', 'a']))
assert most_frequent(['a', 'b', 'c', 'a', 'b', 'a']) == 'a'
assert most_frequent(['a', 'a', 'bi', 'bi', 'bi']) == 'bi'
print('Done') |
def filter_positive_even_numbers(numbers):
    """Receives a list of numbers, and returns a filtered list of only the
    numbers that are both positive and even (divisible by 2), try to use a
    list comprehension."""
    return [n for n in numbers if n > 0 and n % 2 == 0]


numbers = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
x = filter_positive_even_numbers(numbers)
print(x)
| def filter_positive_even_numbers(numbers):
"""Receives a list of numbers, and returns a filtered list of only the
numbers that are both positive and even (divisible by 2), try to use a
list comprehension."""
positive_even_numbers = [x for x in numbers if x > 0 and (not x % 2)]
return positive_even_numbers
numbers = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
x = filter_positive_even_numbers(numbers)
print(x) |
# Copyright 2019 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Benchmark adapted from https://github.com/d5/tengobench/
doc="fib tail call recursion test"
def fib(n, a, b):
if n == 0:
return a
elif n == 1:
return b
return fib(n-1, b, a+b)
fib(35, 0, 1)
doc="finished"
| doc = 'fib tail call recursion test'
def fib(n, a, b):
if n == 0:
return a
elif n == 1:
return b
return fib(n - 1, b, a + b)
fib(35, 0, 1)
doc = 'finished' |
# Given a collection of distinct integers, return all possible permutations.
# Example:
# Input: [1,2,3]
# Output:
# [
# [1,2,3],
# [1,3,2],
# [2,1,3],
# [2,3,1],
# [3,1,2],
# [3,2,1]
# ]
class Solution:
    def permute(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        out = []
        self.helper(out, [], nums)
        return out

    def helper(self, res, cur, nums):
        # A full-length partial permutation is finished; store a copy.
        if len(cur) == len(nums):
            res.append(list(cur))
            return
        # Integers are distinct, so membership in `cur` marks "already used".
        for candidate in nums:
            if candidate not in cur:
                cur.append(candidate)
                self.helper(res, cur, nums)
                cur.pop()
# Time: O(n!)
# Space: O(n * n!)
# Difficulty: medium
| class Solution:
def permute(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
res = []
self.helper(res, [], nums)
return res
def helper(self, res, cur, nums):
if len(cur) == len(nums):
res.append(cur + [])
return
for i in nums:
if i in cur:
continue
cur.append(i)
self.helper(res, cur, nums)
cur.pop() |
class ParsingError(Exception):
    """Exception for parse failures (no extra state beyond Exception)."""
    pass
| class Parsingerror(Exception):
pass |
__all__ = [
    "ConsulError",
    "ConflictError",
    "NotFound",
    "SupportDisabled",
    "TransactionError",
    "UnauthorizedError",
]


class ConsulError(Exception):
    """Base class for errors reported by Consul.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """

    def __init__(self, msg, *, meta=None):
        self.value = msg
        self.meta = meta or {}
        # The exception message is always text, even for a bytes payload.
        text = msg.decode("utf-8") if isinstance(msg, bytes) else msg
        super().__init__(text)


class NotFound(ConsulError):
    """Raised when an object does not exist.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """


class ConflictError(ConsulError):
    """Raised when there is a conflict in the agent.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """


class UnauthorizedError(ConsulError):
    """Raised when a session with sufficient rights is required.

    Attributes:
        value (Object): object of the error
        meta (Meta): meta of the error
    """


class SupportDisabled(Exception):
    """Raised when the endpoint is not active."""


class TransactionError(Exception):
    """Raised by a failing transaction.

    Attributes:
        errors (Mapping): errors keyed by index into ``operations``
        operations (Collection): the attempted operations
        meta (Meta): meta of the error

    For example, a token lacking write rights for a key::

        errors = {
            0: {"OpIndex": 0, "What": "Permission denied"}
        }
        operations = [
            {"Verb": "get", "Key": "foo"},
            {"Verb": "set", "Key": "bar", "Value": "YmFy", "Flags": None}
        ]
    """

    def __init__(self, errors, operations, meta, *, msg=None):
        self.errors = errors
        self.operations = operations
        self.meta = meta
        super().__init__(msg or "Transaction failed")
| __all__ = ['ConsulError', 'ConflictError', 'NotFound', 'SupportDisabled', 'TransactionError', 'UnauthorizedError']
class Consulerror(Exception):
"""Consul base error
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
def __init__(self, msg, *, meta=None):
self.value = msg
self.meta = meta or {}
if isinstance(msg, bytes):
msg = msg.decode('utf-8')
super().__init__(msg)
class Notfound(ConsulError):
"""Raised when object does not exists
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
class Conflicterror(ConsulError):
"""Raised when there is a conflict in agent
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
class Unauthorizederror(ConsulError):
"""Raised when session with sufficent rights is required
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
class Supportdisabled(Exception):
"""Endpoint is not active.
"""
class Transactionerror(Exception):
"""Raised by failing transaction
Attributes:
errors (Mapping): The errors where index is the index in operations
operations (Collection): The operations
meta (Meta): meta of the error
For example token has not the sufficient rights for writing key::
errors = {
0: {"OpIndex": 0, "What": "Permission denied"}
}
operations = [
{"Verb": "get", "Key": "foo"},
{"Verb": "set", "Key": "bar", "Value": "YmFy", "Flags": None}
]
"""
def __init__(self, errors, operations, meta, *, msg=None):
self.errors = errors
self.operations = operations
self.meta = meta
msg = msg or 'Transaction failed'
super().__init__(msg) |
load("@bazelruby_rules_ruby//ruby:defs.bzl", "ruby_test")
# `dir` is path from WORKSPACE root.
def steep_check(name, bin, srcs, deps, dir = ".", rubyopt = []):
ruby_test(
name = name,
srcs = srcs,
deps = deps,
main = bin,
args = [
"check",
"--steepfile={}/Steepfile".format(dir),
"--steep-command={}/{}".format(dir, name),
],
rubyopt = rubyopt,
)
| load('@bazelruby_rules_ruby//ruby:defs.bzl', 'ruby_test')
def steep_check(name, bin, srcs, deps, dir='.', rubyopt=[]):
ruby_test(name=name, srcs=srcs, deps=deps, main=bin, args=['check', '--steepfile={}/Steepfile'.format(dir), '--steep-command={}/{}'.format(dir, name)], rubyopt=rubyopt) |
######################################################################
#
# File: b2sdk/transfer/inbound/file_metadata.py
#
# Copyright 2020 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
class FileMetadata(object):
    """Metadata for a file which is being downloaded."""

    # Prefix marking a checksum that the server did not verify.
    UNVERIFIED_CHECKSUM_PREFIX = 'unverified:'

    __slots__ = (
        'file_id',
        'file_name',
        'content_type',
        'content_length',
        'content_sha1',
        'content_sha1_verified',
        'file_info',
    )

    def __init__(self, file_id, file_name, content_type, content_length,
                 content_sha1, file_info):
        self.file_id = file_id
        self.file_name = file_name
        self.content_type = content_type
        self.content_length = content_length
        # Split the possibly-prefixed checksum into value + verified flag.
        sha1, verified = self._decode_content_sha1(content_sha1)
        self.content_sha1 = sha1
        self.content_sha1_verified = verified
        self.file_info = file_info

    @classmethod
    def from_response(cls, response):
        """Build metadata from the headers of a download response."""
        headers = response.headers
        file_info = {
            key[10:]: headers[key]
            for key in headers
            if key.startswith('x-bz-info-')
        }
        return cls(
            file_id=headers['x-bz-file-id'],
            file_name=headers['x-bz-file-name'],
            content_type=headers['content-type'],
            content_length=int(headers['content-length']),
            content_sha1=headers['x-bz-content-sha1'],
            file_info=file_info,
        )

    def as_info_dict(self):
        """Serialize back into the camelCase info-dict representation."""
        return {
            'fileId': self.file_id,
            'fileName': self.file_name,
            'contentType': self.content_type,
            'contentLength': self.content_length,
            'contentSha1': self._encode_content_sha1(
                self.content_sha1, self.content_sha1_verified
            ),
            'fileInfo': self.file_info,
        }

    @classmethod
    def _decode_content_sha1(cls, content_sha1):
        # Returns (sha1, verified): strip the marker prefix when present.
        prefix = cls.UNVERIFIED_CHECKSUM_PREFIX
        if content_sha1.startswith(prefix):
            return content_sha1[len(prefix):], False
        return content_sha1, True

    @classmethod
    def _encode_content_sha1(cls, content_sha1, content_sha1_verified):
        # Inverse of _decode_content_sha1.
        if content_sha1_verified:
            return content_sha1
        return '%s%s' % (cls.UNVERIFIED_CHECKSUM_PREFIX, content_sha1)
| class Filemetadata(object):
"""
Hold information about a file which is being downloaded.
"""
unverified_checksum_prefix = 'unverified:'
__slots__ = ('file_id', 'file_name', 'content_type', 'content_length', 'content_sha1', 'content_sha1_verified', 'file_info')
def __init__(self, file_id, file_name, content_type, content_length, content_sha1, file_info):
self.file_id = file_id
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
(self.content_sha1, self.content_sha1_verified) = self._decode_content_sha1(content_sha1)
self.file_info = file_info
@classmethod
def from_response(cls, response):
info = response.headers
return cls(file_id=info['x-bz-file-id'], file_name=info['x-bz-file-name'], content_type=info['content-type'], content_length=int(info['content-length']), content_sha1=info['x-bz-content-sha1'], file_info=dict(((k[10:], info[k]) for k in info if k.startswith('x-bz-info-'))))
def as_info_dict(self):
return {'fileId': self.file_id, 'fileName': self.file_name, 'contentType': self.content_type, 'contentLength': self.content_length, 'contentSha1': self._encode_content_sha1(self.content_sha1, self.content_sha1_verified), 'fileInfo': self.file_info}
@classmethod
def _decode_content_sha1(cls, content_sha1):
if content_sha1.startswith(cls.UNVERIFIED_CHECKSUM_PREFIX):
return (content_sha1[len(cls.UNVERIFIED_CHECKSUM_PREFIX):], False)
return (content_sha1, True)
@classmethod
def _encode_content_sha1(cls, content_sha1, content_sha1_verified):
if not content_sha1_verified:
return '%s%s' % (cls.UNVERIFIED_CHECKSUM_PREFIX, content_sha1)
return content_sha1 |
class Location:
    """Location base type (empty placeholder; no behavior yet)."""
    pass


# NOTE(review): the two classes below do not inherit from Location —
# confirm whether that is intended before relying on isinstance checks.
class DecimalLocation:
    """Placeholder for a location in decimal coordinates."""
    pass


class GridLocation:
    """Placeholder for a location in grid coordinates."""
    pass
| class Location:
pass
class Decimallocation:
pass
class Gridlocation:
pass |
# -*- coding: utf-8 -*-
"""Exceptions.
"""
class TinderException(Exception):
    """Base class for all Tinder client errors."""


class TinderAuthenticationException(TinderException):
    """Error raised for authentication failures."""


class TinderConnectionException(TinderException):
    """Error raised for connection failures."""
| """Exceptions.
"""
class Tinderexception(Exception):
"""
"""
pass
class Tinderauthenticationexception(TinderException):
"""
"""
pass
class Tinderconnectionexception(TinderException):
"""
"""
pass |
def make_complex1(*args):
    """Pack the two positional args into a dict via locals().

    NOTE: locals() captures every local name, so the result also contains
    the 'args' tuple itself: {'args': (x, y), 'x': x, 'y': y} — not just
    the x/y keys.  Compare with make_complex2 below.
    """
    x, y = args
    return dict(**locals())


def make_complex2(x, y):
    """Return {'x': x, 'y': y} explicitly — the clearer equivalent."""
    return {'x': x, 'y': y}


print(make_complex1(5, 6))
print(make_complex2(5, 6))
| def make_complex1(*args):
(x, y) = args
return dict(**locals())
def make_complex2(x, y):
return {'x': x, 'y': y}
print(make_complex1(5, 6))
print(make_complex2(5, 6)) |
# -*- coding: utf-8 -*-
"""
fbone.modules.frontend
~~~~~~~~~~~~~~~~~~~~~~~~
frontend management commands
"""
| """
fbone.modules.frontend
~~~~~~~~~~~~~~~~~~~~~~~~
frontend management commands
""" |
"""
* @author: Shashank Jain
* @date: 25/12/2018
"""
a=input("Enter the string to count no. of vowels?")
b=list(a.replace(" ","").lower())
c=['a','e','i','o','u']
count=0
for i in b:
for j in c:
if (j==i):
count=count+1
print(count)
| """
* @author: Shashank Jain
* @date: 25/12/2018
"""
a = input('Enter the string to count no. of vowels?')
b = list(a.replace(' ', '').lower())
c = ['a', 'e', 'i', 'o', 'u']
count = 0
for i in b:
for j in c:
if j == i:
count = count + 1
print(count) |
# -*- coding: utf-8 -*-
"""
Student Do: Trading Log.
This script demonstrates how to perform basic analysis of trading profits/losses
over the course of a month (20 business days).
"""
# @TODO: Initialize the metric variables
# @TODO: Initialize lists to hold profitable and unprofitable day profits/losses
# List of trading profits/losses
trading_pnl = [ -224, 352, 252, 354, -544,
-650, 56, 123, -43, 254,
325, -123, 47, 321, 123,
133, -151, 613, 232, -311 ]
# @TODO: Iterate over each element of the list
# @TODO: Cumulatively sum up the total and count
# @TODO: Write logic to determine minimum and maximum values
# @TODO: Write logic to determine profitable vs. unprofitable days
# @TODO: Calculate the average
# @TODO: Calculate count metrics
# @TODO: Calculate percentage metrics
# @TODO: Print out the summary statistics
| """
Student Do: Trading Log.
This script demonstrates how to perform basic analysis of trading profits/losses
over the course of a month (20 business days).
"""
trading_pnl = [-224, 352, 252, 354, -544, -650, 56, 123, -43, 254, 325, -123, 47, 321, 123, 133, -151, 613, 232, -311] |
"""
This file defines the structure of the JSON
responses expected by the Searcher module.
They are generated using the JSON Schema Tool,
available here https://jsonschema.net/
"""
# pylint: skip-file
SEARCH_RESULT_SCHEMA = {
"definitions": {},
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "http://example.com/root.json",
"type": "object",
"title": "The Root Schema",
"required": ["header", "esearchresult"],
"properties": {
"header": {
"$id": "#/properties/header",
"type": "object",
"title": "The Header Schema",
"required": ["type", "version"],
"properties": {
"type": {
"$id": "#/properties/header/properties/type",
"type": "string",
"title": "The Type Schema",
"default": "",
"examples": ["esearch"],
"pattern": "^(.*)$",
},
"version": {
"$id": "#/properties/header/properties/version",
"type": "string",
"title": "The Version Schema",
"default": "",
"examples": ["0.3"],
"pattern": "^(.*)$",
},
},
},
"esearchresult": {
"$id": "#/properties/esearchresult",
"type": "object",
"title": "The Esearchresult Schema",
"required": [
"count",
"retmax",
"retstart",
"idlist",
"translationset",
"translationstack",
"querytranslation",
],
"properties": {
"count": {
"$id": "#/properties/esearchresult/properties/count",
"type": "string",
"title": "The Count Schema",
"default": "",
"examples": ["2211223"],
"pattern": "^(.*)$",
},
"retmax": {
"$id": "#/properties/esearchresult/properties/retmax",
"type": "string",
"title": "The Retmax Schema",
"default": "",
"examples": ["3"],
"pattern": "^(.*)$",
},
"retstart": {
"$id": "#/properties/esearchresult/properties/retstart",
"type": "string",
"title": "The Retstart Schema",
"default": "",
"examples": ["0"],
"pattern": "^(.*)$",
},
"idlist": {
"$id": "#/properties/esearchresult/properties/idlist",
"type": "array",
"title": "The Idlist Schema",
"items": {
"$id": "#/properties/esearchresult/properties/idlist/items",
"type": "string",
"title": "The Items Schema",
"default": "",
"examples": ["8818505", "8818504", "8818503"],
"pattern": "^(.*)$",
},
},
"translationset": {
"$id": "#/properties/esearchresult/properties/translationset",
"type": "array",
"title": "The Translationset Schema",
"items": {
"$id": "#/properties/esearchresult/properties/translationset/items",
"type": "object",
"title": "The Items Schema",
"required": ["from", "to"],
"properties": {
"from": {
"$id": "#/properties/esearchresult/properties/translationset/items/properties/from",
"type": "string",
"title": "The From Schema",
"default": "",
"examples": ["Human"],
"pattern": "^(.*)$",
},
"to": {
"$id": "#/properties/esearchresult/properties/translationset/items/properties/to",
"type": "string",
"title": "The To Schema",
"default": "",
"examples": [
"'Homo sapiens' [Organism] OR Human[All Fields]"
],
"pattern": "^(.*)$",
},
},
},
},
"translationstack": {
"$id": "#/properties/esearchresult/properties/translationstack",
"type": "array",
"title": "The Translationstack Schema",
},
"querytranslation": {
"$id": "#/properties/esearchresult/properties/querytranslation",
"type": "string",
"title": "The Querytranslation Schema",
"default": "",
"examples": ["'Homo sapiens'[Organism] OR Human[All Fields]"],
"pattern": "^(.*)$",
},
},
},
},
}
| """
This file defines the structure of the JSON
responses expected by the Searcher module.
They are generated using the JSON Schema Tool,
available here https://jsonschema.net/
"""
search_result_schema = {'definitions': {}, '$schema': 'http://json-schema.org/draft-07/schema#', '$id': 'http://example.com/root.json', 'type': 'object', 'title': 'The Root Schema', 'required': ['header', 'esearchresult'], 'properties': {'header': {'$id': '#/properties/header', 'type': 'object', 'title': 'The Header Schema', 'required': ['type', 'version'], 'properties': {'type': {'$id': '#/properties/header/properties/type', 'type': 'string', 'title': 'The Type Schema', 'default': '', 'examples': ['esearch'], 'pattern': '^(.*)$'}, 'version': {'$id': '#/properties/header/properties/version', 'type': 'string', 'title': 'The Version Schema', 'default': '', 'examples': ['0.3'], 'pattern': '^(.*)$'}}}, 'esearchresult': {'$id': '#/properties/esearchresult', 'type': 'object', 'title': 'The Esearchresult Schema', 'required': ['count', 'retmax', 'retstart', 'idlist', 'translationset', 'translationstack', 'querytranslation'], 'properties': {'count': {'$id': '#/properties/esearchresult/properties/count', 'type': 'string', 'title': 'The Count Schema', 'default': '', 'examples': ['2211223'], 'pattern': '^(.*)$'}, 'retmax': {'$id': '#/properties/esearchresult/properties/retmax', 'type': 'string', 'title': 'The Retmax Schema', 'default': '', 'examples': ['3'], 'pattern': '^(.*)$'}, 'retstart': {'$id': '#/properties/esearchresult/properties/retstart', 'type': 'string', 'title': 'The Retstart Schema', 'default': '', 'examples': ['0'], 'pattern': '^(.*)$'}, 'idlist': {'$id': '#/properties/esearchresult/properties/idlist', 'type': 'array', 'title': 'The Idlist Schema', 'items': {'$id': '#/properties/esearchresult/properties/idlist/items', 'type': 'string', 'title': 'The Items Schema', 'default': '', 'examples': ['8818505', '8818504', '8818503'], 'pattern': '^(.*)$'}}, 'translationset': {'$id': '#/properties/esearchresult/properties/translationset', 'type': 'array', 'title': 'The Translationset Schema', 'items': {'$id': '#/properties/esearchresult/properties/translationset/items', 
'type': 'object', 'title': 'The Items Schema', 'required': ['from', 'to'], 'properties': {'from': {'$id': '#/properties/esearchresult/properties/translationset/items/properties/from', 'type': 'string', 'title': 'The From Schema', 'default': '', 'examples': ['Human'], 'pattern': '^(.*)$'}, 'to': {'$id': '#/properties/esearchresult/properties/translationset/items/properties/to', 'type': 'string', 'title': 'The To Schema', 'default': '', 'examples': ["'Homo sapiens' [Organism] OR Human[All Fields]"], 'pattern': '^(.*)$'}}}}, 'translationstack': {'$id': '#/properties/esearchresult/properties/translationstack', 'type': 'array', 'title': 'The Translationstack Schema'}, 'querytranslation': {'$id': '#/properties/esearchresult/properties/querytranslation', 'type': 'string', 'title': 'The Querytranslation Schema', 'default': '', 'examples': ["'Homo sapiens'[Organism] OR Human[All Fields]"], 'pattern': '^(.*)$'}}}}} |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def addOneRow(self, root, v, d):
"""
:type root: TreeNode
:type v: int
:type d: int
:rtype: TreeNode
"""
if d ==1:
temp = TreeNode(v)
temp.left = root
return temp
self.helper(root, v, d)
return root
def helper (self, root, v, d):
if not root:
return
if d == 2:
rootLeft = root.left
rootRight = root.right
root.left = TreeNode(v)
root.right = TreeNode(v)
root.left.left = rootLeft
root.right.right = rootRight
return
self.helper(root.left, v, d-1)
self.helper(root.right, v, d-1) | class Solution(object):
def add_one_row(self, root, v, d):
"""
:type root: TreeNode
:type v: int
:type d: int
:rtype: TreeNode
"""
if d == 1:
temp = tree_node(v)
temp.left = root
return temp
self.helper(root, v, d)
return root
def helper(self, root, v, d):
if not root:
return
if d == 2:
root_left = root.left
root_right = root.right
root.left = tree_node(v)
root.right = tree_node(v)
root.left.left = rootLeft
root.right.right = rootRight
return
self.helper(root.left, v, d - 1)
self.helper(root.right, v, d - 1) |
if __name__ == '__main__':
    def uninit_switch(*args):
        # Sentinel comparator: calling `case(...)` is only legal while the
        # switch's for-loop body is executing (see switch.__iter__).
        raise TypeError("executed a case stmt outside switch's context")
    class switch:
        # C-style switch emulation: `for case in switch(x):` yields the
        # switch object once; each `case(v)` call compares v against x and,
        # after the first match, "falls through" (every later case matches).
        @property
        def default(self):
            # Always-matching fallthrough terminator; only one allowed.
            if self.finished:
                raise SyntaxError("multiple 'default' cases were provided")
            self.finished = True
            return True
        def __init__(self, target):
            self.__targ = target
            # Comparisons are illegal until iteration installs a comparator.
            self.compare = uninit_switch
        def __iter__(self):
            self.islocked = True # when True, must test for equality on each
            self.finished = False # default has been evaluated
            def cmp_unlocked(targ):
                # Post-match comparator: emulate fallthrough by matching
                # everything until `default` has run.
                assert not self.islocked
                if self.finished:
                    raise SyntaxError("switch continued after execution of default")
                return True
            def cmp_locked(targ, cmp=self.__targ.__eq__):
                # Pre-match comparator: on the first equal case, unlock so
                # subsequent cases fall through via cmp_unlocked.
                assert self.islocked
                if self.islocked and cmp(targ):
                    self.islocked = False
                    self.compare = cmp_unlocked
                    return True
                return False
            self.compare = cmp_locked
            yield self
        def __call__(self, comp):
            # `case(value)` delegates to whichever comparator is installed.
            return self.compare(comp)
    def switch_demo(x):
        # With x == 1 this prints 1, 2 and 'default' (fallthrough from 1).
        for case in switch(x):
            if case(-1): print('below zero')
            case(0);
            if case(1): print(1)
            if case(2): print(2)
            case.default
            print('default')
    switch_demo(1)
if __name__ == '__main__':
    def uninit_switch(*args):
        # Sentinel comparator: using `case(...)` before entering the
        # switch via iteration is a programming error.
        # Fixed: raise the real TypeError (was undefined `type_error`).
        raise TypeError("executed a case stmt outside switch's context")

    class Switch:
        """One-shot C-style switch with fall-through semantics."""

        @property
        def default(self):
            # `case.default` is allowed at most once per switch body.
            if self.finished:
                # Fixed: raise SyntaxError (was undefined `syntax_error`).
                raise SyntaxError("multiple 'default' cases were provided")
            self.finished = True
            return True

        def __init__(self, target):
            self.__targ = target
            self.compare = uninit_switch  # replaced once iteration begins

        def __iter__(self):
            self.islocked = True  # still comparing; no case has matched yet
            self.finished = False  # set once `default` has been evaluated

            def cmp_unlocked(targ):
                # Post-match mode: emulate C fall-through.
                assert not self.islocked
                if self.finished:
                    # Fixed: was undefined `syntax_error`.
                    raise SyntaxError('switch continued after execution of default')
                return True

            def cmp_locked(targ, cmp=self.__targ.__eq__):
                # Pre-match mode: compare against the target; the first
                # hit unlocks fall-through.
                assert self.islocked
                if self.islocked and cmp(targ):
                    self.islocked = False
                    self.compare = cmp_unlocked
                    return True
                return False
            self.compare = cmp_locked
            yield self  # the for-loop body runs exactly once

        def __call__(self, comp):
            return self.compare(comp)

    def switch_demo(x):
        # Fixed: the class was renamed to `Switch` but this call site still
        # said `switch(x)`, which would raise NameError at runtime.
        for case in Switch(x):
            if case(-1):
                print('below zero')
            case(0)
            if case(1):
                print(1)
            if case(2):
                print(2)
            case.default
            print('default')
    switch_demo(1)
stamina = 6  # module-level default; not read below — fight() uses the prompted value
alive=False  # never read again in this script; appears to be dead state
def report(stamina):
    """Print a status line describing the alien's condition for `stamina`."""
    # Guard-clause ladder: first matching threshold wins, mirroring the
    # original elif chain exactly.
    if stamina > 8:
        print("The alien is strong! It resists your pathetic attack!")
        return
    if stamina > 5:
        print("With a loud grunt, the alien stands firm.")
        return
    if stamina > 3:
        print("Your attack seems to be having an effect! The alien stumbles!")
        return
    if stamina > 0:
        print("The alien is certain to fall soon! It staggers and reels!")
        return
    print("That's it! The alien is finished! ")
def fight(stamina):
    """Run the interactive fight loop.

    Prompts for a move each round until `stamina` reaches 0.
    Returns True if the player entered an unrecognized move (alien zaps
    them), False if the loop ran `stamina` rounds to completion.
    """
    while stamina > 0:
        response = input("> Enter a move 1.Hit 2.attack 3.fight 4.run--")
        if "hit" in response or "attack" in response:
            # Fixed: dropped the dead `less = ...` binding — report() only
            # prints and always returns None.
            report(stamina - 1)
        elif "fight" in response:
            print("Fight how? You have no weapons, silly space traveler!")
        elif "run" in response:
            # Fixed: removed the Python-2 trailing comma after print(...),
            # which in Python 3 built a throwaway (None,) tuple.
            print("Sadly, there is nowhere to run.")
            print("The spaceship is not very big.")
        else:
            print("The alien zaps you with its powerful ray gun!")
            return True
        stamina -= 1
    return False
# Read how many rounds of stamina to play with, then start the fight.
n=int(input("how many times to want play code--"))
fight(n)
print ("A threatening alien wants to fight you!\n") | stamina = 6
alive = False  # never read again in this script; appears to be dead state
def report(stamina):
    """Print a status line describing the alien's condition for `stamina`."""
    # Table-driven equivalent of the original elif chain: the first
    # threshold that `stamina` exceeds selects the message.
    thresholds = (
        (8, 'The alien is strong! It resists your pathetic attack!'),
        (5, 'With a loud grunt, the alien stands firm.'),
        (3, 'Your attack seems to be having an effect! The alien stumbles!'),
        (0, 'The alien is certain to fall soon! It staggers and reels!'),
    )
    for limit, message in thresholds:
        if stamina > limit:
            print(message)
            return
    print("That's it! The alien is finished! ")
def fight(stamina):
    """Run the interactive fight loop.

    Prompts for a move each round until `stamina` reaches 0.
    Returns True if the player entered an unrecognized move (alien zaps
    them), False if the loop ran `stamina` rounds to completion.
    """
    while stamina > 0:
        response = input('> Enter a move 1.Hit 2.attack 3.fight 4.run--')
        if 'hit' in response or 'attack' in response:
            # Fixed: dropped the dead `less = ...` binding — report() only
            # prints and always returns None.
            report(stamina - 1)
        elif 'fight' in response:
            print('Fight how? You have no weapons, silly space traveler!')
        elif 'run' in response:
            # Fixed: removed the pointless `(print(...),)` tuple wrapper,
            # a leftover of a Python-2 trailing-comma print.
            print('Sadly, there is nowhere to run.')
            print('The spaceship is not very big.')
        else:
            print('The alien zaps you with its powerful ray gun!')
            return True
        stamina -= 1
    return False
# Read how many rounds of stamina to play with, then start the fight.
n = int(input('how many times to want play code--'))
fight(n)
print('A threatening alien wants to fight you!\n') |
class EpisodeQuality:
    """Value object pairing an episode's quality title with its URL."""

    def __init__(self, title: str, url: str):
        # Both fields are stored verbatim; no validation is performed.
        self.title, self.url = title, url
| class Episodequality:
    def __init__(self, title: str, url: str):
        """Store the quality title and URL for one episode variant."""
        self.title = title
        self.url = url
#
# PySNMP MIB module APSLB-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/APSLB-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:24:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: this module is machine-generated by pysmi. Do not hand-edit the
# definitions below; regenerate them from the ASN.1 MIB source instead.
#
acmepacketMgmt, = mibBuilder.importSymbols("ACMEPACKET-SMI", "acmepacketMgmt")
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
InetAddress, InetAddressPrefixLength, InetVersion, InetZoneIndex, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressPrefixLength", "InetVersion", "InetZoneIndex", "InetAddressType")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
ObjectIdentity, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Gauge32, IpAddress, iso, MibIdentifier, Bits, Counter64, NotificationType, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Gauge32", "IpAddress", "iso", "MibIdentifier", "Bits", "Counter64", "NotificationType", "Counter32", "Unsigned32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
apSLBModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 9148, 3, 11))
if mibBuilder.loadTexts: apSLBModule.setLastUpdated('201103090000Z')
if mibBuilder.loadTexts: apSLBModule.setOrganization('Acme Packet, Inc')
if mibBuilder.loadTexts: apSLBModule.setContactInfo(' Customer Service Postal: Acme Packet, Inc 100 Crosby Drive Bedford, MA 01730 US Tel: 1-781-328-4400 E-mail: support@acmepacket.com')
if mibBuilder.loadTexts: apSLBModule.setDescription('The Session Load Balancer MIB for Acme Packet.')
apSLBMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1))
apSLBNotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 2))
apSLBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3))
apSLBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4))
apSLBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0))
apSLBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 1))
apSLBNotificationGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 2))
apSLBMIBGeneralObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1))
apSLBStatsEndpointsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 1), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBStatsEndpointsCurrent.setStatus('current')
if mibBuilder.loadTexts: apSLBStatsEndpointsCurrent.setDescription('Number of endpoints currently on the SLB.')
apSLBStatsEndpointsDenied = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 2), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBStatsEndpointsDenied.setStatus('current')
if mibBuilder.loadTexts: apSLBStatsEndpointsDenied.setDescription('Number of endpoints denied by the SLB because the system has reached the maximum endpoint capacity.')
apSLBEndpointCapacity = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 3), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBEndpointCapacity.setStatus('current')
if mibBuilder.loadTexts: apSLBEndpointCapacity.setDescription('The maximum number of endpoints allowed on the SLB. This value is based on the installed SLB license(s).')
apSLBEndpointCapacityUpperThresh = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBEndpointCapacityUpperThresh.setStatus('current')
if mibBuilder.loadTexts: apSLBEndpointCapacityUpperThresh.setDescription('The configured endpoint capacity upper threshold percentage.')
apSLBEndpointCapacityLowerThresh = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBEndpointCapacityLowerThresh.setStatus('current')
if mibBuilder.loadTexts: apSLBEndpointCapacityLowerThresh.setDescription('The configured endpoint capacity lower threshold percentage.')
apSLBStatsUntrustedEndpointsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 6), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBStatsUntrustedEndpointsCurrent.setStatus('current')
if mibBuilder.loadTexts: apSLBStatsUntrustedEndpointsCurrent.setDescription('Number of untrusted endpoints currently on the SLB.')
apSLBStatsTrustedEndpointsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 7), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBStatsTrustedEndpointsCurrent.setStatus('current')
if mibBuilder.loadTexts: apSLBStatsTrustedEndpointsCurrent.setDescription('Number of trusted endpoints currently on the SLB.')
apSLBStatsUntrustedEndpointsDenied = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 8), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBStatsUntrustedEndpointsDenied.setStatus('current')
if mibBuilder.loadTexts: apSLBStatsUntrustedEndpointsDenied.setDescription('The number of untrusted endpoints denied by the SLB due to the total number of untrusted endpoints exceeding the configured maximum allowed.')
apSLBStatsUntrustedEndpointsAgedOut = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 9), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBStatsUntrustedEndpointsAgedOut.setStatus('current')
if mibBuilder.loadTexts: apSLBStatsUntrustedEndpointsAgedOut.setDescription('The number of untrusted endpoints aged out of the system because they were not authenticated within the configured grace period.')
apSLBUntrustedEndpointCapacity = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 10), Unsigned32()).setUnits('endpoints').setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacity.setStatus('current')
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacity.setDescription('The maximum number of untrusted endpoints allowed on the SLB. This value is a configured percentage of the maximum endpoint capacity of the system.')
apSLBUntrustedEndpointCapacityUpperThresh = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityUpperThresh.setStatus('current')
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityUpperThresh.setDescription('The configured untrusted endpoint capacity upper threshold percentage.')
apSLBUntrustedEndpointCapacityLowerThresh = MibScalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityLowerThresh.setStatus('current')
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityLowerThresh.setDescription('The configured untrusted endpoint capacity lower threshold percentage.')
apSLBEndpointCapacityThresholdTrap = NotificationType((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 1)).setObjects(("APSLB-MIB", "apSLBStatsEndpointsCurrent"), ("APSLB-MIB", "apSLBEndpointCapacity"), ("APSLB-MIB", "apSLBEndpointCapacityUpperThresh"), ("APSLB-MIB", "apSLBEndpointCapacityLowerThresh"))
if mibBuilder.loadTexts: apSLBEndpointCapacityThresholdTrap.setStatus('current')
if mibBuilder.loadTexts: apSLBEndpointCapacityThresholdTrap.setDescription('The trap will be generated when the number of endpoints on the SLB exceeds the configured threshold.')
apSLBEndpointCapacityThresholdClearTrap = NotificationType((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 2)).setObjects(("APSLB-MIB", "apSLBStatsEndpointsCurrent"), ("APSLB-MIB", "apSLBEndpointCapacity"), ("APSLB-MIB", "apSLBEndpointCapacityUpperThresh"), ("APSLB-MIB", "apSLBEndpointCapacityLowerThresh"))
if mibBuilder.loadTexts: apSLBEndpointCapacityThresholdClearTrap.setStatus('current')
if mibBuilder.loadTexts: apSLBEndpointCapacityThresholdClearTrap.setDescription('The trap will be generated when the number of endpoints on the SLB falls below the configured threshold.')
apSLBUntrustedEndpointCapacityThresholdTrap = NotificationType((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 3)).setObjects(("APSLB-MIB", "apSLBStatsUntrustedEndpointsCurrent"), ("APSLB-MIB", "apSLBStatsUntrustedEndpointsDenied"), ("APSLB-MIB", "apSLBStatsUntrustedEndpointsAgedOut"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacity"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacityUpperThresh"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacityLowerThresh"))
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityThresholdTrap.setStatus('current')
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityThresholdTrap.setDescription('The trap will be generated when the number of untrusted endpoints on the SLB exceeds the configured threshold.')
apSLBUntrustedEndpointCapacityThresholdClearTrap = NotificationType((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 4)).setObjects(("APSLB-MIB", "apSLBStatsUntrustedEndpointsCurrent"), ("APSLB-MIB", "apSLBStatsUntrustedEndpointsDenied"), ("APSLB-MIB", "apSLBStatsUntrustedEndpointsAgedOut"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacity"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacityUpperThresh"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacityLowerThresh"))
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityThresholdClearTrap.setStatus('current')
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityThresholdClearTrap.setDescription('The trap will be generated when the number of untrusted endpoints on the SLB falls below the configured threshold.')
apSLBEndpointCapacityGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 1, 1)).setObjects(("APSLB-MIB", "apSLBStatsEndpointsCurrent"), ("APSLB-MIB", "apSLBStatsEndpointsDenied"), ("APSLB-MIB", "apSLBEndpointCapacity"), ("APSLB-MIB", "apSLBEndpointCapacityUpperThresh"), ("APSLB-MIB", "apSLBEndpointCapacityLowerThresh"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apSLBEndpointCapacityGroup = apSLBEndpointCapacityGroup.setStatus('current')
if mibBuilder.loadTexts: apSLBEndpointCapacityGroup.setDescription('Objects for monitoring SLB endpoint capacity.')
apSLBUntrustedEndpointCapacityGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 1, 2)).setObjects(("APSLB-MIB", "apSLBStatsUntrustedEndpointsCurrent"), ("APSLB-MIB", "apSLBStatsTrustedEndpointsCurrent"), ("APSLB-MIB", "apSLBStatsUntrustedEndpointsDenied"), ("APSLB-MIB", "apSLBStatsUntrustedEndpointsAgedOut"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacity"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacityUpperThresh"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacityLowerThresh"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apSLBUntrustedEndpointCapacityGroup = apSLBUntrustedEndpointCapacityGroup.setStatus('current')
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityGroup.setDescription('Objects for monitoring SLB untrusted endpoint capacity.')
apSLBEndpointCapacityNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 2, 1)).setObjects(("APSLB-MIB", "apSLBEndpointCapacityThresholdTrap"), ("APSLB-MIB", "apSLBEndpointCapacityThresholdClearTrap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apSLBEndpointCapacityNotificationsGroup = apSLBEndpointCapacityNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: apSLBEndpointCapacityNotificationsGroup.setDescription('Traps to monitor SLB endpoint capacity threshold crossings.')
apSLBUntrustedEndpointCapacityNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 2, 2)).setObjects(("APSLB-MIB", "apSLBUntrustedEndpointCapacityThresholdTrap"), ("APSLB-MIB", "apSLBUntrustedEndpointCapacityThresholdClearTrap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
apSLBUntrustedEndpointCapacityNotificationsGroup = apSLBUntrustedEndpointCapacityNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts: apSLBUntrustedEndpointCapacityNotificationsGroup.setDescription('Traps to monitor SLB untrusted endpoint capacity threshold crossings.')
mibBuilder.exportSymbols("APSLB-MIB", apSLBEndpointCapacityNotificationsGroup=apSLBEndpointCapacityNotificationsGroup, apSLBEndpointCapacityLowerThresh=apSLBEndpointCapacityLowerThresh, apSLBUntrustedEndpointCapacityLowerThresh=apSLBUntrustedEndpointCapacityLowerThresh, apSLBStatsUntrustedEndpointsAgedOut=apSLBStatsUntrustedEndpointsAgedOut, apSLBEndpointCapacityThresholdTrap=apSLBEndpointCapacityThresholdTrap, apSLBUntrustedEndpointCapacityGroup=apSLBUntrustedEndpointCapacityGroup, apSLBUntrustedEndpointCapacity=apSLBUntrustedEndpointCapacity, apSLBUntrustedEndpointCapacityThresholdTrap=apSLBUntrustedEndpointCapacityThresholdTrap, apSLBStatsUntrustedEndpointsCurrent=apSLBStatsUntrustedEndpointsCurrent, apSLBNotifications=apSLBNotifications, apSLBModule=apSLBModule, apSLBNotificationPrefix=apSLBNotificationPrefix, apSLBConformance=apSLBConformance, apSLBMIBObjects=apSLBMIBObjects, apSLBGroups=apSLBGroups, apSLBMIBGeneralObjects=apSLBMIBGeneralObjects, apSLBStatsEndpointsCurrent=apSLBStatsEndpointsCurrent, apSLBUntrustedEndpointCapacityUpperThresh=apSLBUntrustedEndpointCapacityUpperThresh, apSLBEndpointCapacity=apSLBEndpointCapacity, apSLBStatsTrustedEndpointsCurrent=apSLBStatsTrustedEndpointsCurrent, apSLBUntrustedEndpointCapacityNotificationsGroup=apSLBUntrustedEndpointCapacityNotificationsGroup, apSLBStatsUntrustedEndpointsDenied=apSLBStatsUntrustedEndpointsDenied, apSLBNotificationGroups=apSLBNotificationGroups, PYSNMP_MODULE_ID=apSLBModule, apSLBEndpointCapacityThresholdClearTrap=apSLBEndpointCapacityThresholdClearTrap, apSLBUntrustedEndpointCapacityThresholdClearTrap=apSLBUntrustedEndpointCapacityThresholdClearTrap, apSLBStatsEndpointsDenied=apSLBStatsEndpointsDenied, apSLBNotificationObjects=apSLBNotificationObjects, apSLBEndpointCapacityGroup=apSLBEndpointCapacityGroup, apSLBEndpointCapacityUpperThresh=apSLBEndpointCapacityUpperThresh)
| (acmepacket_mgmt,) = mibBuilder.importSymbols('ACMEPACKET-SMI', 'acmepacketMgmt')
(object_identifier, octet_string, integer) = mibBuilder.importSymbols('ASN1', 'ObjectIdentifier', 'OctetString', 'Integer')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(single_value_constraint, constraints_intersection, value_size_constraint, constraints_union, value_range_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'SingleValueConstraint', 'ConstraintsIntersection', 'ValueSizeConstraint', 'ConstraintsUnion', 'ValueRangeConstraint')
(inet_address, inet_address_prefix_length, inet_version, inet_zone_index, inet_address_type) = mibBuilder.importSymbols('INET-ADDRESS-MIB', 'InetAddress', 'InetAddressPrefixLength', 'InetVersion', 'InetZoneIndex', 'InetAddressType')
(module_compliance, object_group, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'ObjectGroup', 'NotificationGroup')
(object_identity, time_ticks, integer32, mib_scalar, mib_table, mib_table_row, mib_table_column, module_identity, gauge32, ip_address, iso, mib_identifier, bits, counter64, notification_type, counter32, unsigned32) = mibBuilder.importSymbols('SNMPv2-SMI', 'ObjectIdentity', 'TimeTicks', 'Integer32', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'ModuleIdentity', 'Gauge32', 'IpAddress', 'iso', 'MibIdentifier', 'Bits', 'Counter64', 'NotificationType', 'Counter32', 'Unsigned32')
(display_string, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention')
ap_slb_module = module_identity((1, 3, 6, 1, 4, 1, 9148, 3, 11))
if mibBuilder.loadTexts:
apSLBModule.setLastUpdated('201103090000Z')
if mibBuilder.loadTexts:
apSLBModule.setOrganization('Acme Packet, Inc')
if mibBuilder.loadTexts:
apSLBModule.setContactInfo(' Customer Service Postal: Acme Packet, Inc 100 Crosby Drive Bedford, MA 01730 US Tel: 1-781-328-4400 E-mail: support@acmepacket.com')
if mibBuilder.loadTexts:
apSLBModule.setDescription('The Session Load Balancer MIB for Acme Packet.')
ap_slbmib_objects = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1))
ap_slb_notification_objects = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 2))
ap_slb_notification_prefix = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3))
ap_slb_conformance = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4))
ap_slb_notifications = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0))
ap_slb_groups = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 1))
ap_slb_notification_groups = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 2))
ap_slbmib_general_objects = mib_identifier((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1))
ap_slb_stats_endpoints_current = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 1), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBStatsEndpointsCurrent.setStatus('current')
if mibBuilder.loadTexts:
apSLBStatsEndpointsCurrent.setDescription('Number of endpoints currently on the SLB.')
ap_slb_stats_endpoints_denied = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 2), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBStatsEndpointsDenied.setStatus('current')
if mibBuilder.loadTexts:
apSLBStatsEndpointsDenied.setDescription('Number of endpoints denied by the SLB because the system has reached the maximum endpoint capacity.')
ap_slb_endpoint_capacity = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 3), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBEndpointCapacity.setStatus('current')
if mibBuilder.loadTexts:
apSLBEndpointCapacity.setDescription('The maximum number of endpoints allowed on the SLB. This value is based on the installed SLB license(s).')
ap_slb_endpoint_capacity_upper_thresh = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 4), unsigned32().subtype(subtypeSpec=value_range_constraint(0, 100))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBEndpointCapacityUpperThresh.setStatus('current')
if mibBuilder.loadTexts:
apSLBEndpointCapacityUpperThresh.setDescription('The configured endpoint capacity upper threshold percentage.')
ap_slb_endpoint_capacity_lower_thresh = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 5), unsigned32().subtype(subtypeSpec=value_range_constraint(0, 100))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBEndpointCapacityLowerThresh.setStatus('current')
if mibBuilder.loadTexts:
apSLBEndpointCapacityLowerThresh.setDescription('The configured endpoint capacity lower threshold percentage.')
ap_slb_stats_untrusted_endpoints_current = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 6), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBStatsUntrustedEndpointsCurrent.setStatus('current')
if mibBuilder.loadTexts:
apSLBStatsUntrustedEndpointsCurrent.setDescription('Number of untrusted endpoints currently on the SLB.')
ap_slb_stats_trusted_endpoints_current = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 7), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBStatsTrustedEndpointsCurrent.setStatus('current')
if mibBuilder.loadTexts:
apSLBStatsTrustedEndpointsCurrent.setDescription('Number of trusted endpoints currently on the SLB.')
ap_slb_stats_untrusted_endpoints_denied = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 8), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBStatsUntrustedEndpointsDenied.setStatus('current')
if mibBuilder.loadTexts:
apSLBStatsUntrustedEndpointsDenied.setDescription('The number of untrusted endpoints denied by the SLB due to the total number of untrusted endpoints exceeding the configured maximum allowed.')
ap_slb_stats_untrusted_endpoints_aged_out = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 9), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBStatsUntrustedEndpointsAgedOut.setStatus('current')
if mibBuilder.loadTexts:
apSLBStatsUntrustedEndpointsAgedOut.setDescription('The number of untrusted endpoints aged out of the system because they were not authenticated within the configured grace period.')
ap_slb_untrusted_endpoint_capacity = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 10), unsigned32()).setUnits('endpoints').setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacity.setStatus('current')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacity.setDescription('The maximum number of untrusted endpoints allowed on the SLB. This value is a configured percentage of the maximum endpoint capacity of the system.')
ap_slb_untrusted_endpoint_capacity_upper_thresh = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 11), unsigned32().subtype(subtypeSpec=value_range_constraint(0, 100))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityUpperThresh.setStatus('current')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityUpperThresh.setDescription('The configured untrusted endpoint capacity upper threshold percentage.')
ap_slb_untrusted_endpoint_capacity_lower_thresh = mib_scalar((1, 3, 6, 1, 4, 1, 9148, 3, 11, 1, 1, 12), unsigned32().subtype(subtypeSpec=value_range_constraint(0, 100))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityLowerThresh.setStatus('current')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityLowerThresh.setDescription('The configured untrusted endpoint capacity lower threshold percentage.')
ap_slb_endpoint_capacity_threshold_trap = notification_type((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 1)).setObjects(('APSLB-MIB', 'apSLBStatsEndpointsCurrent'), ('APSLB-MIB', 'apSLBEndpointCapacity'), ('APSLB-MIB', 'apSLBEndpointCapacityUpperThresh'), ('APSLB-MIB', 'apSLBEndpointCapacityLowerThresh'))
if mibBuilder.loadTexts:
apSLBEndpointCapacityThresholdTrap.setStatus('current')
if mibBuilder.loadTexts:
apSLBEndpointCapacityThresholdTrap.setDescription('The trap will be generated when the number of endpoints on the SLB exceeds the configured threshold.')
ap_slb_endpoint_capacity_threshold_clear_trap = notification_type((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 2)).setObjects(('APSLB-MIB', 'apSLBStatsEndpointsCurrent'), ('APSLB-MIB', 'apSLBEndpointCapacity'), ('APSLB-MIB', 'apSLBEndpointCapacityUpperThresh'), ('APSLB-MIB', 'apSLBEndpointCapacityLowerThresh'))
if mibBuilder.loadTexts:
apSLBEndpointCapacityThresholdClearTrap.setStatus('current')
if mibBuilder.loadTexts:
apSLBEndpointCapacityThresholdClearTrap.setDescription('The trap will be generated when the number of endpoints on the SLB falls below the configured threshold.')
ap_slb_untrusted_endpoint_capacity_threshold_trap = notification_type((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 3)).setObjects(('APSLB-MIB', 'apSLBStatsUntrustedEndpointsCurrent'), ('APSLB-MIB', 'apSLBStatsUntrustedEndpointsDenied'), ('APSLB-MIB', 'apSLBStatsUntrustedEndpointsAgedOut'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacity'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacityUpperThresh'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacityLowerThresh'))
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityThresholdTrap.setStatus('current')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityThresholdTrap.setDescription('The trap will be generated when the number of untrusted endpoints on the SLB exceeds the configured threshold.')
ap_slb_untrusted_endpoint_capacity_threshold_clear_trap = notification_type((1, 3, 6, 1, 4, 1, 9148, 3, 11, 3, 0, 4)).setObjects(('APSLB-MIB', 'apSLBStatsUntrustedEndpointsCurrent'), ('APSLB-MIB', 'apSLBStatsUntrustedEndpointsDenied'), ('APSLB-MIB', 'apSLBStatsUntrustedEndpointsAgedOut'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacity'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacityUpperThresh'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacityLowerThresh'))
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityThresholdClearTrap.setStatus('current')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityThresholdClearTrap.setDescription('The trap will be generated when the number of untrusted endpoints on the SLB falls below the configured threshold.')
ap_slb_endpoint_capacity_group = object_group((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 1, 1)).setObjects(('APSLB-MIB', 'apSLBStatsEndpointsCurrent'), ('APSLB-MIB', 'apSLBStatsEndpointsDenied'), ('APSLB-MIB', 'apSLBEndpointCapacity'), ('APSLB-MIB', 'apSLBEndpointCapacityUpperThresh'), ('APSLB-MIB', 'apSLBEndpointCapacityLowerThresh'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ap_slb_endpoint_capacity_group = apSLBEndpointCapacityGroup.setStatus('current')
if mibBuilder.loadTexts:
apSLBEndpointCapacityGroup.setDescription('Objects for monitoring SLB endpoint capacity.')
ap_slb_untrusted_endpoint_capacity_group = object_group((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 1, 2)).setObjects(('APSLB-MIB', 'apSLBStatsUntrustedEndpointsCurrent'), ('APSLB-MIB', 'apSLBStatsTrustedEndpointsCurrent'), ('APSLB-MIB', 'apSLBStatsUntrustedEndpointsDenied'), ('APSLB-MIB', 'apSLBStatsUntrustedEndpointsAgedOut'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacity'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacityUpperThresh'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacityLowerThresh'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ap_slb_untrusted_endpoint_capacity_group = apSLBUntrustedEndpointCapacityGroup.setStatus('current')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityGroup.setDescription('Objects for monitoring SLB untrusted endpoint capacity.')
ap_slb_endpoint_capacity_notifications_group = notification_group((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 2, 1)).setObjects(('APSLB-MIB', 'apSLBEndpointCapacityThresholdTrap'), ('APSLB-MIB', 'apSLBEndpointCapacityThresholdClearTrap'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ap_slb_endpoint_capacity_notifications_group = apSLBEndpointCapacityNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts:
apSLBEndpointCapacityNotificationsGroup.setDescription('Traps to monitor SLB endpoint capacity threshold crossings.')
ap_slb_untrusted_endpoint_capacity_notifications_group = notification_group((1, 3, 6, 1, 4, 1, 9148, 3, 11, 4, 2, 2)).setObjects(('APSLB-MIB', 'apSLBUntrustedEndpointCapacityThresholdTrap'), ('APSLB-MIB', 'apSLBUntrustedEndpointCapacityThresholdClearTrap'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ap_slb_untrusted_endpoint_capacity_notifications_group = apSLBUntrustedEndpointCapacityNotificationsGroup.setStatus('current')
if mibBuilder.loadTexts:
apSLBUntrustedEndpointCapacityNotificationsGroup.setDescription('Traps to monitor SLB untrusted endpoint capacity threshold crossings.')
mibBuilder.exportSymbols('APSLB-MIB', apSLBEndpointCapacityNotificationsGroup=apSLBEndpointCapacityNotificationsGroup, apSLBEndpointCapacityLowerThresh=apSLBEndpointCapacityLowerThresh, apSLBUntrustedEndpointCapacityLowerThresh=apSLBUntrustedEndpointCapacityLowerThresh, apSLBStatsUntrustedEndpointsAgedOut=apSLBStatsUntrustedEndpointsAgedOut, apSLBEndpointCapacityThresholdTrap=apSLBEndpointCapacityThresholdTrap, apSLBUntrustedEndpointCapacityGroup=apSLBUntrustedEndpointCapacityGroup, apSLBUntrustedEndpointCapacity=apSLBUntrustedEndpointCapacity, apSLBUntrustedEndpointCapacityThresholdTrap=apSLBUntrustedEndpointCapacityThresholdTrap, apSLBStatsUntrustedEndpointsCurrent=apSLBStatsUntrustedEndpointsCurrent, apSLBNotifications=apSLBNotifications, apSLBModule=apSLBModule, apSLBNotificationPrefix=apSLBNotificationPrefix, apSLBConformance=apSLBConformance, apSLBMIBObjects=apSLBMIBObjects, apSLBGroups=apSLBGroups, apSLBMIBGeneralObjects=apSLBMIBGeneralObjects, apSLBStatsEndpointsCurrent=apSLBStatsEndpointsCurrent, apSLBUntrustedEndpointCapacityUpperThresh=apSLBUntrustedEndpointCapacityUpperThresh, apSLBEndpointCapacity=apSLBEndpointCapacity, apSLBStatsTrustedEndpointsCurrent=apSLBStatsTrustedEndpointsCurrent, apSLBUntrustedEndpointCapacityNotificationsGroup=apSLBUntrustedEndpointCapacityNotificationsGroup, apSLBStatsUntrustedEndpointsDenied=apSLBStatsUntrustedEndpointsDenied, apSLBNotificationGroups=apSLBNotificationGroups, PYSNMP_MODULE_ID=apSLBModule, apSLBEndpointCapacityThresholdClearTrap=apSLBEndpointCapacityThresholdClearTrap, apSLBUntrustedEndpointCapacityThresholdClearTrap=apSLBUntrustedEndpointCapacityThresholdClearTrap, apSLBStatsEndpointsDenied=apSLBStatsEndpointsDenied, apSLBNotificationObjects=apSLBNotificationObjects, apSLBEndpointCapacityGroup=apSLBEndpointCapacityGroup, apSLBEndpointCapacityUpperThresh=apSLBEndpointCapacityUpperThresh) |
# Project Euler 18: maximum top-to-bottom path sum in the triangle,
# computed by collapsing the triangle bottom-up (dynamic programming).
num = """\
75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""


def _max_path_sum(triangle):
    """Collapse `triangle` (list of int rows) bottom-up in place and
    return the maximum apex-to-base path sum (left in triangle[0][0])."""
    for row in range(len(triangle) - 1, 0, -1):
        for col in range(len(triangle[row]) - 1):
            # Each parent absorbs the better of its two children.
            triangle[row - 1][col] += max(triangle[row][col],
                                          triangle[row][col + 1])
    return triangle[0][0]


num = [[int(tok) for tok in line.split()] for line in num.split('\n')]
print(_max_path_sum(num))
| num = '75\n95 64\n17 47 82\n18 35 87 10\n20 04 82 47 65\n19 01 23 75 03 34\n88 02 77 73 07 63 67\n99 65 04 28 06 16 70 92\n41 41 26 56 83 40 80 70 33\n41 48 72 33 47 32 37 16 94 29\n53 71 44 65 25 43 91 52 97 51 14\n70 11 33 28 77 73 17 78 39 68 17 57\n91 71 52 38 17 14 91 43 58 50 27 29 48\n63 66 04 68 89 53 67 30 73 16 69 87 40 31\n04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'
# Parse the whitespace-separated rows into a triangle of ints.
num = [[int(i) for i in row.split()] for row in num.split('\n')]
# Bottom-up DP: fold each row into the row above, each parent absorbing
# the larger of its two children; num[0][0] ends up as the best path sum.
for i in range(len(num) - 1, 0, -1):
    for j in range(len(num[i]) - 1):
        num[i - 1][j] += max(num[i][j], num[i][j + 1])
print(num[0][0]) |
# NOTE: the F/B and L/R seat notation is plain binary (F/L -> 0, B/R -> 1),
# so the bisection below is equivalent to a base-2 conversion.
def ticketCheck(line):
    """Decode a boarding pass like 'FBFBBFFRLR' into (row, column, seat ID).

    The F/B and L/R characters are binary digits (F/L -> 0, B/R -> 1): the
    first 7 characters encode the row (0-127) and the next 3 the column
    (0-7).  The seat ID is row * 8 + column.  Replaces the previous manual
    interval bisection, which computed the same values the long way.
    """
    row = int(line[:7].translate(str.maketrans('FB', '01')), 2)
    col = int(line[7:10].translate(str.maketrans('LR', '01')), 2)
    return (row, col, row * 8 + col)
# Collect (row, column, id) for every boarding pass in the input file.
iDs = []
rows = []
cols = []
with open('marcomole00/5/input.txt') as f:
    for line in f:
        tempRow, tempCol, tempId = ticketCheck(line.strip('\n'))
        rows.append(tempRow)
        cols.append(tempCol)
        iDs.append(tempId)
iDs.sort()
# My seat is the single missing ID whose two neighbours both exist.
for i in range(len(iDs) - 1):
    if iDs[i + 1] - iDs[i] == 2 and (iDs[i] + 1) not in iDs:
        print('my ID is :', iDs[i] + 1)
# Highest seat ID seen on any boarding pass.
print(max(iDs))
# Simple visualization of the seat map, to confirm there is only one 'hole'.
for row in range(128):
    print(f'{row})', end='')
    for col in range(8):
        print('x' if (row * 8 + col) in iDs else 'o', end='')
    print('')
def ticket_check(line):
    """Decode a boarding pass string into (row, column, seat ID).

    Each 'F'/'B' halves the remaining row interval [0, 127] and each
    'L'/'R' halves the column interval [0, 7]; after all steps the bounds
    coincide on the encoded seat.

    Bug fix: the auto-renamed version assigned snake_case names but kept
    reading the old camelCase ones (row_min vs rowMin, ID vs seat id), so
    every call raised NameError.  Names are now consistent throughout.
    """
    row_min = 0
    row_max = 127
    col_min = 0
    col_max = 7
    for i in range(7):
        if line[i] == 'F':
            row_max = row_min + abs(int((row_max - row_min) / 2))
        else:
            row_min = row_max - abs(int((row_max - row_min) / 2))
    for i in range(3):
        if line[7 + i] == 'L':
            col_max = col_min + abs(int((col_max - col_min) / 2))
        else:
            col_min = col_max - abs(int((col_max - col_min) / 2))
    seat_id = row_max * 8 + col_max
    return (row_max, col_max, seat_id)
# Bug fix: the auto-renamed script assigned snake_case names (i_ds,
# temp_row, ...) but kept reading the old camelCase ones (iDs, tempRow),
# so everything after the loop raised NameError.  Names are now consistent.
i_ds = []
rows = []
cols = []
with open('marcomole00/5/input.txt') as f:
    for line in f:
        line = line.strip('\n')
        (temp_row, temp_col, temp_id) = ticket_check(line)
        rows.append(temp_row)
        cols.append(temp_col)
        i_ds.append(temp_id)
i_ds.sort()
# My seat: the single missing ID whose two neighbours both exist.
for i in range(len(i_ds) - 1):
    if i_ds[i + 1] - i_ds[i] == 2 and i_ds[i] + 1 not in i_ds:
        print('my ID is :', i_ds[i] + 1)
print(max(i_ds))
# Visual check that the occupied-seat map has a single hole.
for row in range(128):
    print(f'{row})', end='')
    for col in range(8):
        if row * 8 + col in i_ds:
            print('x', end='')
        else:
            print('o', end='')
    print('')
# Find the smallest element with a manual scan (teaching example).
# Bug fix: the sequence was named `list`, shadowing the builtin; renamed.
numbers = [50, 100, 150, 200, 250, 300]
mininumber = numbers[0]
for x in numbers:
    # keep the smaller of the running minimum and the current element
    if mininumber > x:
        mininumber = x
print("mininumber is ", mininumber)
# Manual minimum scan over a fixed sample (teaching example).
# NOTE(review): `list` shadows the builtin of the same name; prefer renaming.
list = [50, 100, 150, 200, 250, 300]
mininumber = list[0]
for x in list:
    # keep the smaller of the running minimum and the current element
    if mininumber > x:
        mininumber = x
print('mininumber is ', mininumber)
# File: config.py
# Author: Qian Ge <geqian1001@gmail.com>
# Path configuration for VGG training/inference; all paths are relative
# to the working directory of the calling script.
# directory of pre-trained vgg parameters
vgg_dir = '../../data/pretrain/vgg/vgg19.npy'
# directory of training data
data_dir = '../../data/dataset/256_ObjectCategories/'
# directory of testing data
# NOTE(review): test/infer dirs use '../data/' while every other path uses
# '../../data/' -- confirm the differing depth is intentional.
test_data_dir = '../data/'
# directory of inference data
infer_data_dir = '../data/'
# directory for saving inference data
infer_dir = '../../data/tmp/'
# directory for saving summary
summary_dir = '../../data/tmp/'
# directory for saving checkpoint
checkpoint_dir = '../../data/tmp/'
# directory for restoring checkpoint
model_dir = '../../data/tmp/'
# directory for saving prediction results
result_dir = '../../data/tmp/'
# directory of pre-trained vgg parameters
vgg_dir = '../../data/pretrain/vgg/vgg19.npy'
# directory of training data
data_dir = '../../data/dataset/256_ObjectCategories/'
# directory of testing data
test_data_dir = '../data/'
# directory of inference data
infer_data_dir = '../data/'
# directory for saving inference data
infer_dir = '../../data/tmp/'
# directory for saving summary
summary_dir = '../../data/tmp/'
# directory for saving checkpoint
checkpoint_dir = '../../data/tmp/'
# directory for restoring checkpoint
model_dir = '../../data/tmp/'
# directory for saving prediction results
result_dir = '../../data/tmp/'
# Solution 1
# O(n^2) time | O(n) space
def longestIncreasingSubsequence(array):
    """Return one longest strictly-increasing subsequence of `array`.

    Classic O(n^2) DP: lengths[i] is the best subsequence length ending at
    index i, and sequences[i] is the predecessor index used to reach it.

    Bug fixes: maxLenIdx used to be assigned the current index whenever any
    extension happened, so a short tail sequence could clobber the real
    answer (e.g. [1, 2, 3, 9, 0, 1] returned [0, 1]); it now tracks the
    index with the greatest length.  Debug prints removed.
    """
    if len(array) <= 1:
        return array
    sequences = [None for _ in range(len(array))]
    lengths = [1 for _ in range(len(array))]
    maxLenIdx = 0
    for i in range(len(array)):
        curNum = array[i]
        for j in range(i):
            if array[j] < curNum and lengths[i] < lengths[j] + 1:
                lengths[i] = lengths[j] + 1
                sequences[i] = j
        # remember the index holding the longest subsequence seen so far
        if lengths[i] > lengths[maxLenIdx]:
            maxLenIdx = i
    return buildSequence(array, sequences, maxLenIdx)

def buildSequence(nums, sequences, maxLenIdx):
    """Walk the predecessor chain back from maxLenIdx; return it front-to-back."""
    result = [nums[maxLenIdx]]
    while sequences[maxLenIdx] is not None:
        maxLenIdx = sequences[maxLenIdx]
        result.append(nums[maxLenIdx])
    return list(reversed(result))
def longest_increasing_subsequence(array):
    """Return one longest strictly-increasing subsequence of `array`.

    Classic O(n^2) DP: lengths[i] is the best subsequence length ending at
    index i, sequences[i] the predecessor index used to reach it.

    Bug fixes: the auto-renamed version still read the old camelCase names
    (otherNum, curNum, maxLenIdx), raising NameError; the answer index was
    assigned unconditionally instead of tracking the greatest length; and
    build_sequence never advanced its index (infinite loop).  Debug prints
    removed.
    """
    if len(array) <= 1:
        return array
    sequences = [None for _ in range(len(array))]
    lengths = [1 for _ in range(len(array))]
    max_len_idx = 0
    for i in range(len(array)):
        cur_num = array[i]
        for j in range(i):
            if array[j] < cur_num and lengths[i] < lengths[j] + 1:
                lengths[i] = lengths[j] + 1
                sequences[i] = j
        # remember the index holding the longest subsequence seen so far
        if lengths[i] > lengths[max_len_idx]:
            max_len_idx = i
    return build_sequence(array, sequences, max_len_idx)

def build_sequence(nums, sequences, max_len_idx):
    """Walk the predecessor chain back from max_len_idx; return it front-to-back."""
    result = [nums[max_len_idx]]
    while sequences[max_len_idx] is not None:
        max_len_idx = sequences[max_len_idx]
        result.append(nums[max_len_idx])
    return list(reversed(result))
class Auth:
    """Authentication artifacts: a general API token and the live-auth result."""
    class general:
        token = None # Token for general authentication
    class live:
        result = None # JSON result of the Live Auth request
class Me(object):
    """Container for the current user's identity and auth state."""
    def __init__(self):
        # NOTE(review): presumably populated after login -- confirm with caller.
        self.id = None
        self.username = None
        # the Auth class object itself (shared), not an instance
        self.auth = Auth
class HTTP_Request:
    """Request constants (URIs and headers) for the Triller HTTP API."""
    class Login:
        # login/auth endpoint
        uri = "https://social.triller.co/v1.5/user/auth"
        headers = {
            "origin": "https://triller.co",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36"
        }
    class Live:
        # Live (halogen) auth endpoint
        uri = "https://api.live.triller.co/_ah/api/halogen/v1/auth/triller"
    base_uri = "https://social.triller.co"
class Auth:
    """Authentication artifacts: a general API token and the live-auth result.

    NOTE(review): inner classes were auto-renamed (general -> General,
    live -> Live); callers using the old lowercase names will break.
    """
    class General:
        # token for general authentication
        token = None
    class Live:
        # JSON result of the Live Auth request
        result = None
class Me(object):
    """Container for the current user's identity and auth state."""
    def __init__(self):
        # NOTE(review): presumably populated after login -- confirm with caller.
        self.id = None
        self.username = None
        # the Auth class object itself (shared), not an instance
        self.auth = Auth
class Http_Request:
    """Request constants (URIs and headers) for the Triller HTTP API.

    NOTE(review): auto-renamed from HTTP_Request; importers of the old
    name will break.
    """
    class Login:
        # login/auth endpoint
        uri = 'https://social.triller.co/v1.5/user/auth'
        headers = {'origin': 'https://triller.co', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.82 Safari/537.36'}
    class Live:
        # Live (halogen) auth endpoint
        uri = 'https://api.live.triller.co/_ah/api/halogen/v1/auth/triller'
    base_uri = 'https://social.triller.co'
# MIT License
#
# Copyright (c) 2020 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class AttesterSlashing(object):
    """Plain data holder for two attestations: one attesting-indices list
    and eight scalar fields per attestation (attestation_1_* / _2_*)."""

    def __init__(self):
        # Create the identical field set for both attestations: a fresh,
        # independent index list plus scalar fields defaulting to None.
        scalar_fields = ('slot', 'index', 'beacon_block_root',
                         'source_epoch', 'source_root',
                         'target_epoch', 'target_root', 'signature')
        for prefix in ('attestation_1', 'attestation_2'):
            setattr(self, prefix + '_attesting_indices', [])
            for field in scalar_fields:
                setattr(self, prefix + '_' + field, None)
class Attesterslashing(object):
    """Plain data holder for two attestations (attestation_1_* / _2_* fields).

    NOTE(review): auto-renamed from AttesterSlashing; importers of the
    CapWords name will break.
    """
    def __init__(self):
        # attestation 1: validator index list plus scalar fields,
        # all empty/None until populated by the caller
        self.attestation_1_attesting_indices = []
        self.attestation_1_slot = None
        self.attestation_1_index = None
        self.attestation_1_beacon_block_root = None
        self.attestation_1_source_epoch = None
        self.attestation_1_source_root = None
        self.attestation_1_target_epoch = None
        self.attestation_1_target_root = None
        self.attestation_1_signature = None
        # attestation 2: same shape as attestation 1
        self.attestation_2_attesting_indices = []
        self.attestation_2_slot = None
        self.attestation_2_index = None
        self.attestation_2_beacon_block_root = None
        self.attestation_2_source_epoch = None
        self.attestation_2_source_root = None
        self.attestation_2_target_epoch = None
        self.attestation_2_target_root = None
        self.attestation_2_signature = None
"""
pyrtf-ng Errors and Exceptions
"""
class RTFError(Exception):
    """Base class for all pyrtf-ng errors."""
    pass
class ParseError(RTFError):
    """
    Raised when the RTF data cannot be parsed.
    """
| """
pyrtf-ng Errors and Exceptions
"""
class Rtferror(Exception):
    """Base class for pyrtf-ng errors (auto-renamed from RTFError)."""
    pass
# Bug fix: the base class was still referenced by its old name RTFError,
# which no longer exists after the rename to Rtferror (NameError on import).
class Parseerror(Rtferror):
    """
    Unable to parse the RTF data.
    """
# No-op placeholder stubs.  Per the workaround note below, these appear to
# stand in for optional external packages (samplers/models such as
# celerite, batman, dynesty) missing from the system -- confirm against
# the importing module.
def pytrades():
    pass
def DiffEvol():
    pass
def PyPolyChord():
    pass
def PolyChord():
    pass
def celerite():
    pass
def ttvfast():
    pass
def george():
    pass
def batman():
    pass
def dynesty():
    pass
## absurd workaround to fix the lack of celerite in the system
def Celerite_QuasiPeriodicActivity():
    pass
class dummy_one:
    """Placeholder object graph: dummy_one().terms.Term is reachable,
    apparently mimicking the attribute path of the absent celerite
    package (see the workaround note above) -- confirm."""
    def __init__(self):
        self.terms = dummy_two()
class dummy_two:
    """Middle level: exposes .Term, a dummy_three placeholder."""
    def __init__(self):
        self.Term = dummy_three(0, 0, 0, 0)
    #def Term(self):
    #    return
class dummy_three:
    """Leaf placeholder; accepts and ignores four positional arguments."""
    def __init__(self, a, b, c, d):
        self.Term = 0
# No-op placeholder stubs for optional external packages, auto-renamed to
# snake_case.
# NOTE(review): the renames (DiffEvol -> diff_evol, PolyChord -> poly_chord,
# Celerite_QuasiPeriodicActivity -> celerite__quasi_periodic_activity)
# break any caller still using the original spellings.
def pytrades():
    pass
def diff_evol():
    pass
def py_poly_chord():
    pass
def poly_chord():
    pass
def celerite():
    pass
def ttvfast():
    pass
def george():
    pass
def batman():
    pass
def dynesty():
    pass
def celerite__quasi_periodic_activity():
    pass
class Dummy_One:
    """Placeholder object graph root: Dummy_One().terms.Term is reachable."""
    def __init__(self):
        # Bug fix: referenced the old lowercase name dummy_two (NameError).
        self.terms = Dummy_Two()
class Dummy_Two:
    """Middle level: exposes .Term, a Dummy_Three placeholder."""
    def __init__(self):
        # Bug fix: referenced the old lowercase name dummy_three (NameError).
        self.Term = Dummy_Three(0, 0, 0, 0)
class Dummy_Three:
    """Leaf placeholder; accepts and ignores four positional arguments."""
    def __init__(self, a, b, c, d):
        self.Term = 0
# Role code constants, kept as wire-format strings;
# '' represents the absence of a role.
NO_ROLE_CODE = ''
TRUSTEE_CODE = '0'
STEWARD_CODE = '2'
TGB_CODE = '100'
TRUST_ANCHOR_CODE = '101'
# Role code constants, kept as wire-format strings;
# '' represents the absence of a role.
# NOTE(review): auto-lowercased from the original UPPER_SNAKE constants,
# which breaks importers of the old names and drops the constant naming
# convention.
no_role_code = ''
trustee_code = '0'
steward_code = '2'
tgb_code = '100'
trust_anchor_code = '101'
class PartialCumulativeClass:
'''
the concept:
'''
    def __init__(self, conf, shared):
        """Store collaborators and initialize all storage/result containers.

        conf   -- configuration object (paths, numpy/plt handles, shared I/O)
        shared -- helper providing averaging and exception detection
        """
        self.conf = conf
        self.shared = shared
        self.sharedAnalysis = None
        # define data source / destination
        self.dataSourceFilePath = ''
        self.dataSourceFileName = ''
        self.saveAnomaliesFilePath = self.conf.transactionsDataResultsPath + 'time_periods/'
        # 'CAT' is replaced by the classificator name when saving
        self.saveAnomaliesFileName = 'part_cumulCAT.tsv'
        # test purposes :: -1 == full analysis
        self.test_limit_rows = -1
        # storage vars
        self.transactionsData = {}
        self.transactionsDataSums = {}
        self.transactionsDataSumsWeights = {}
        self.transactionsDataSumsClassified = {}
        self.transactionsDataSumsWeightsClassified = {}
        self.transactionsDataSumsPartial = {}
        self.transactionsDataSumsPartialClassified = {}
        self.transactionsDataSumsPartialTransposed = {}
        self.transactionsDataSumsPartialTransposedClassified = {}
        self.weightsComparedTransposed = {}
        self.weightsComparedTransposedClassified = {}
        # number of consecutive transactions summed into one time frame
        self.transactionDataTimeFrameCluster = 3
        # analysis variables
        self._transactions_av_window_size = 5
        self._transactions_av_std_dev = 1.5
        # anomalies storage variables
        self.exceptionsDict = {}
        self.exceptionsClassifiedDict = {}
    def findAnomaliesThroughPartialCumulation(self):
        """Load transactions and run the partial-cumulation anomaly analysis.

        Reads the TSV at dataSourceFilePath + dataSourceFileName into
        self.transactionsData, then delegates to
        createTransactionTimeFrameSums(), which also fills
        self.exceptionsDict and self.exceptionsClassifiedDict.
        """
        # get transactions
        fullDataPathSource = self.dataSourceFilePath + self.dataSourceFileName
        self.transactionsData = self.conf.sharedCommon.readAndOrganizeTransactions2Dict(fullDataPathSource, '\t', self.test_limit_rows)
        # working area:
        # - create partial sums by summing self._transactionDataWindowSum consecutive transactions
        # -
        self.createTransactionTimeFrameSums()
        # NOTE(review): the triple-quoted block below is disabled legacy code
        # (derivative-based detection) kept for reference only.
        '''
        # working area; for every transaction list between two entities:
        # - calculate derivative
        # - average derivative
        # - identify anomalies
        # - add identified anomalies on two lists: (i) cumuylative anomaly list (ii) full anomalies list :: retain company classificatyion
        for classificator, transactionDict in self.transactionsData['data'].items():
            for companyIds, transactionList in transactionDict.items():
                companyIdList = companyIds.split('-')
                companyIdPublic = companyIdList[0]
                companyIdAny = companyIdList[1]
                # initialize
                # initialize
                if classificator not in self.exceptionsClassifiedDict:
                    self.exceptionsClassifiedDict[classificator] = [0] * (len(transactionList) + 1)
                    self.exceptionsClassifiedCompaniesDict[classificator] = [{} for _ in range(len(transactionList))]
                if len(self.exceptionsCumulatedList) == 0:
                    self.exceptionsCumulatedList = [0] * (len(transactionList) + 1)
                    self.exceptionsCumulatedCompaniesList = [{} for _ in range(len(transactionList))]
                # execute calculations
                # execute calculations
                tmp_derivatives = self.shared.getDerivatives(transactionList)
                tmp_derivatives_av = self.shared.getAveragedList(tmp_derivatives, self._transactions_av_window_size)
                tmp_exceptions = self.shared.getExceptionsLocal(tmp_derivatives, tmp_derivatives_av,
                                                                self._transactions_av_std_dev)
                # add exceptions to cumulative and classified dictionary
                # add exceptions to cumulative and classified dictionary
                for index in tmp_exceptions:
                    self.exceptionsClassifiedDict[classificator][index] = self.exceptionsClassifiedDict[classificator][
                                                                              index] + 1
                    self.exceptionsCumulatedList[index] = self.exceptionsCumulatedList[index] + 1
                    # associate companies to exceptions classification
                    # associate companies to exceptions classification
                    if companyIdAny not in self.exceptionsClassifiedCompaniesDict[classificator][index]:
                        self.exceptionsClassifiedCompaniesDict[classificator][index][companyIdAny] = 1
                    else:
                        self.exceptionsClassifiedCompaniesDict[classificator][index][companyIdAny] = \
                            self.exceptionsClassifiedCompaniesDict[classificator][index][companyIdAny] + 1
                    if companyIdAny not in self.exceptionsCumulatedCompaniesList[index]:
                        self.exceptionsCumulatedCompaniesList[index][companyIdAny] = 1
                    else:
                        self.exceptionsCumulatedCompaniesList[index][companyIdAny] = \
                            self.exceptionsCumulatedCompaniesList[index][companyIdAny] + 1
            if (plotSingleDerivativesGraph):
                # self.conf.plt.plot(data_av, color='red')
                self.conf.plt.plot(tmp_derivatives_av, color='orange')
                self.conf.plt.plot(tmp_derivatives, 'k.')
                self.conf.plt.plot(transactionList, 'k.', color='green')
                # add anomalies
                anomalies_x = tmp_exceptions.keys()
                anomalies_y = tmp_exceptions.values()
                self.conf.plt.scatter(anomalies_x, anomalies_y)
                #for brk in breaks:
                #    self.conf.plt.axvline(x=brk)
                self.conf.plt.show()
        # find maximums and associate company ids to it
        # find maximums and associate company ids to it
        maxList, self.anomaliesList = self.shared.extractDataFromListMaximums(self.exceptionsCumulatedList,
                                                                              self.exceptionsCumulatedCompaniesList,
                                                                              self._transactions_av_window_size,
                                                                              self._breaks_maximum_window_dev)
        for classificator in self.exceptionsClassifiedDict:
            self.anomaliesClassifiedDict[classificator] = []
            maxList, self.anomaliesClassifiedDict[classificator] = self.shared.extractDataFromListMaximums(
                self.exceptionsClassifiedDict[classificator], self.exceptionsClassifiedCompaniesDict[classificator],
                self._transactions_av_window_size, self._breaks_maximum_window_dev)
        '''
        return None
    def createTransactionTimeFrameSums(self):
        '''
        Sums every self.transactionDataTimeFrameCluster consecutive transactions
        per company pair into time-frame buckets, stored in
        self.transactionsDataSumsPartial (plus the Classified/Transposed
        variants); full per-pair sums go to self.transactionsDataSums.
        Afterwards converts the sums to weights and fills
        self.exceptionsDict / self.exceptionsClassifiedDict via
        identifyAnomalies().
        :return: None
        '''
        # list through classifiers
        for classifier in self.transactionsData['data']:
            # init transactions sum dictionary
            if classifier not in self.transactionsDataSumsClassified:
                self.transactionsDataSumsClassified[classifier] = {}
                self.transactionsDataSumsPartialClassified[classifier] = {}
                self.transactionsDataSumsPartialTransposedClassified[classifier] = []
                self.weightsComparedTransposedClassified[classifier] = []
            # list relations
            for companyIds in self.transactionsData['data'][classifier]:
                #if(companyIds == '5065402000-5486815000'):
                #    print(len(self.transactionsData['data'][classifier][companyIds]), companyIds, self.transactionsData['data'][classifier][companyIds])
                tmp_sum = 0.0
                tmp_sum_part = 0.0
                tmp_frame_index = 0
                tmp_sum_part_index = 0
                if companyIds not in self.transactionsDataSumsPartial:
                    # one slot per full time frame, plus one for the remainder
                    numOfslots = int(len(self.transactionsData['data'][classifier][companyIds]) / self.transactionDataTimeFrameCluster) + 1
                    self.transactionsDataSumsPartial[companyIds] = [0.0] * numOfslots
                    self.transactionsDataSumsPartialClassified[classifier][companyIds] = [0.0] * numOfslots
                    if len(self.transactionsDataSumsPartialTransposedClassified[classifier]) == 0:
                        self.transactionsDataSumsPartialTransposedClassified[classifier] = [{} for _ in range(numOfslots)]
                    if len(self.transactionsDataSumsPartialTransposed) == 0:
                        self.transactionsDataSumsPartialTransposed = [{} for _ in range(numOfslots)]
                    # data vars for storing compared weights
                    if len(self.weightsComparedTransposedClassified[classifier]) == 0:
                        self.weightsComparedTransposedClassified[classifier] = [{} for _ in range(numOfslots)]
                    if len(self.weightsComparedTransposed) == 0:
                        self.weightsComparedTransposed = [{} for _ in range(numOfslots)]
                # list individual transactions
                for trans in self.transactionsData['data'][classifier][companyIds]:
                    tmp_sum = tmp_sum + float(trans)
                    tmp_sum_part = tmp_sum_part + float(trans)
                    tmp_frame_index = tmp_frame_index + 1
                    if tmp_frame_index >= self.transactionDataTimeFrameCluster:
                        # save in a "natural" form
                        self.transactionsDataSumsPartial[companyIds][tmp_sum_part_index] = tmp_sum_part
                        self.transactionsDataSumsPartialClassified[classifier][companyIds][tmp_sum_part_index] = tmp_sum_part
                        # save in a "transposed" form
                        self.transactionsDataSumsPartialTransposed[tmp_sum_part_index][companyIds] = tmp_sum_part
                        self.transactionsDataSumsPartialTransposedClassified[classifier][tmp_sum_part_index][companyIds] = tmp_sum_part
                        tmp_sum_part = 0.0
                        tmp_frame_index = 0
                        tmp_sum_part_index = tmp_sum_part_index + 1
                # save trailing data
                if(tmp_frame_index > 0 and tmp_frame_index < self.transactionDataTimeFrameCluster):
                    # normal data
                    self.transactionsDataSumsPartial[companyIds][tmp_sum_part_index] = tmp_sum_part
                    self.transactionsDataSumsPartialClassified[classifier][companyIds][tmp_sum_part_index] = tmp_sum_part
                    # transposed data
                    self.transactionsDataSumsPartialTransposed[tmp_sum_part_index][companyIds] = tmp_sum_part
                    self.transactionsDataSumsPartialTransposedClassified[classifier][tmp_sum_part_index][companyIds] = tmp_sum_part
                self.transactionsDataSums[companyIds] = tmp_sum
                self.transactionsDataSumsClassified[classifier][companyIds] = tmp_sum
        # get base transaction weights
        self.transactionsDataSumsWeights = self.convertTransactions2Weights(self.transactionsDataSums)
        for classifier in self.transactionsDataSumsClassified:
            self.transactionsDataSumsWeightsClassified[classifier] = self.convertTransactions2Weights(self.transactionsDataSumsClassified[classifier])
        # compare partial transaction weights to base weights
        self.exceptionsDict = self.identifyAnomalies(self.transactionsDataSumsPartialTransposed, self.transactionsDataSumsWeights)
        # print(self.exceptionsDict)
        for classifier in self.transactionsDataSumsPartialTransposedClassified:
            self.exceptionsClassifiedDict[classifier] = self.identifyAnomalies(self.transactionsDataSumsPartialTransposedClassified[classifier], self.transactionsDataSumsWeightsClassified[classifier])
            #print(self.exceptionsClassifiedDict[classifier])
        return None
def convertTransactions2Weights(self, dataDict):
weightsList = {}
data_sum = sum(dataDict.values())
if data_sum == 0.0:
return weightsList
for ids,value in dataDict.items():
weightsList[ids] = value / data_sum
return weightsList
    def identifyAnomalies(self, partialTransactionSumsTransposed, transactionSumWeights):
        """Score per-company deviations of time-frame weights from base weights.

        For each time frame, company-pair weights are compared to the overall
        weights via log10(partial / base); frames with enough entries are run
        through the shared averaging/exception detector and the hits are
        folded into per-maticna scores.  Returns (maticna, score) pairs
        sorted by score, descending.

        NOTE(review): `if index not in anomalyDict` tests an int index
        against string (maticna) keys, so it is always true and each hit
        overwrites instead of accumulating -- the intended test was probably
        `maticnaList[1] not in anomalyDict`.  Also, if getExceptionsLocal
        returns a dict, enumerate() yields its keys as `value` -- confirm
        its return type before changing this.
        """
        anomalyDict = {}
        for partialDataSet in partialTransactionSumsTransposed:
            tmp_partialWeights = self.convertTransactions2Weights(partialDataSet)
            # get comparative weights
            tmp_comparedWeights = {}
            for ids in tmp_partialWeights:
                if transactionSumWeights[ids] > 0.0 and tmp_partialWeights[ids] > 0.0:
                    tmp_comparedWeights[ids] = self.conf.numpy.log10(tmp_partialWeights[ids] / transactionSumWeights[ids])
                    # tmp_comparedWeights[ids] = tmp_partialWeights[ids] / transactionSumWeights[ids]
            '''
            # plot histogram
            # plot histogram
            plotHistogram = False
            if plotHistogram:
                self.conf.plt.plot(list(tmp_comparedWeights.values()), 'k.')
                self.conf.plt.show()
                self.conf.plt.close()
                self.conf.plt.figure(figsize=(8, 6))
                self.conf.plt.style.use('seaborn-poster')
            '''
            # average comparative weights and identify anomalies,
            # requiring minimum amount of data
            if len(tmp_comparedWeights) < self._transactions_av_window_size:
                continue
            tmp_comparedWeights_av = self.shared.getAveragedList(list(tmp_comparedWeights.values()), self._transactions_av_window_size)
            anomaliesDetected = self.shared.getExceptionsLocal(list(tmp_comparedWeights.values()), tmp_comparedWeights_av, self._transactions_av_std_dev)
            idsList = list(tmp_comparedWeights.keys())
            for index, value in enumerate(anomaliesDetected):
                maticnaList = idsList[index].split('-')
                if index not in anomalyDict:
                    anomalyDict[maticnaList[1]] = float(value)
                else:
                    anomalyDict[maticnaList[1]] = anomalyDict[maticnaList[1]] + float(value)
            # add anomalies onto a main datasotre variable
            '''
            plotAnomalies = False
            if plotAnomalies:
                self.conf.plt.plot(tmp_comparedWeights_av, color='red')
                self.conf.plt.plot(list(tmp_comparedWeights.values()), 'k.')
                # self.conf.plt.hist(list(tmp_comparedWeights.values()), 1000)
                # add anomalies
                anomalies_x = anomaliesDetected.keys()
                anomalies_y = anomaliesDetected.values()
                self.conf.plt.scatter(anomalies_x, anomalies_y)
                # for brk in breaks:
                #    self.conf.plt.axvline(x=brk)
                self.conf.plt.show()
                self.conf.plt.close()
                self.conf.plt.figure(figsize=(8, 6))
                self.conf.plt.style.use('seaborn-poster')
            '''
        return sorted(anomalyDict.items(), key=lambda kv: kv[1], reverse=True)
# print results
# print results
    def saveAnomalies2File(self):
        '''
        Saves the detected anomalies to TSV files: one cumulative file and
        one per classificator ('CAT' in the configured file name is replaced
        by '' or the classificator, respectively).  Rows are enriched with
        company names before writing.
        :return: None
        '''
        # set dictionary in correct format
        finalDataDict = {}
        finalDataDict['head'] = ["maticna", "score"]
        finalDataDict['data'] = []
        for row in self.exceptionsDict:
            # row is a (maticna, score) pair; serialize both as strings
            tmp_row = [str(row[0]), str(row[1])]
            finalDataDict['data'].append(tmp_row)
        # enrich data
        fieldsDict = {'maticna': 'company_name'}
        finalDataDict = self.sharedAnalysis.appendAjpesOrganizationNames2Dict(finalDataDict, fieldsDict)
        fileName = self.saveAnomaliesFileName.replace('CAT', '')
        fullFileName = self.saveAnomaliesFilePath + fileName
        self.conf.sharedCommon.sendDict2Output(finalDataDict, fullFileName)
        # repeat for every company group, classified by their field of interest
        for classificator in self.exceptionsClassifiedDict:
            finalDataDict = {}
            finalDataDict['head'] = ["maticna", "score"]
            finalDataDict['data'] = []
            for row in self.exceptionsClassifiedDict[classificator]:
                tmp_row = [str(row[0]), str(row[1])]
                finalDataDict['data'].append(tmp_row)
            # enrich data
            fieldsDict = {'maticna': 'company_name'}
            finalDataDict = self.sharedAnalysis.appendAjpesOrganizationNames2Dict(finalDataDict, fieldsDict)
            # save data
            fileName = self.saveAnomaliesFileName.replace('CAT', classificator)
            fullFileName = self.saveAnomaliesFilePath + fileName
            self.conf.sharedCommon.sendDict2Output(finalDataDict, fullFileName)
        return None
| class Partialcumulativeclass:
"""
the concept:
"""
def __init__(self, conf, shared):
self.conf = conf
self.shared = shared
self.sharedAnalysis = None
self.dataSourceFilePath = ''
self.dataSourceFileName = ''
self.saveAnomaliesFilePath = self.conf.transactionsDataResultsPath + 'time_periods/'
self.saveAnomaliesFileName = 'part_cumulCAT.tsv'
self.test_limit_rows = -1
self.transactionsData = {}
self.transactionsDataSums = {}
self.transactionsDataSumsWeights = {}
self.transactionsDataSumsClassified = {}
self.transactionsDataSumsWeightsClassified = {}
self.transactionsDataSumsPartial = {}
self.transactionsDataSumsPartialClassified = {}
self.transactionsDataSumsPartialTransposed = {}
self.transactionsDataSumsPartialTransposedClassified = {}
self.weightsComparedTransposed = {}
self.weightsComparedTransposedClassified = {}
self.transactionDataTimeFrameCluster = 3
self._transactions_av_window_size = 5
self._transactions_av_std_dev = 1.5
self.exceptionsDict = {}
self.exceptionsClassifiedDict = {}
def find_anomalies_through_partial_cumulation(self):
full_data_path_source = self.dataSourceFilePath + self.dataSourceFileName
self.transactionsData = self.conf.sharedCommon.readAndOrganizeTransactions2Dict(fullDataPathSource, '\t', self.test_limit_rows)
self.createTransactionTimeFrameSums()
"\n # working area; for every transaction list between two entities:\n # - calculate derivative\n # - average derivative\n # - identify anomalies\n # - add identified anomalies on two lists: (i) cumuylative anomaly list (ii) full anomalies list :: retain company classificatyion\n\n for classificator, transactionDict in self.transactionsData['data'].items():\n for companyIds, transactionList in transactionDict.items():\n\n companyIdList = companyIds.split('-')\n companyIdPublic = companyIdList[0]\n companyIdAny = companyIdList[1]\n\n # initialize\n # initialize\n\n if classificator not in self.exceptionsClassifiedDict:\n self.exceptionsClassifiedDict[classificator] = [0] * (len(transactionList) + 1)\n self.exceptionsClassifiedCompaniesDict[classificator] = [{} for _ in range(len(transactionList))]\n if len(self.exceptionsCumulatedList) == 0:\n self.exceptionsCumulatedList = [0] * (len(transactionList) + 1)\n self.exceptionsCumulatedCompaniesList = [{} for _ in range(len(transactionList))]\n\n # execute calculations\n # execute calculations\n\n tmp_derivatives = self.shared.getDerivatives(transactionList)\n tmp_derivatives_av = self.shared.getAveragedList(tmp_derivatives, self._transactions_av_window_size)\n tmp_exceptions = self.shared.getExceptionsLocal(tmp_derivatives, tmp_derivatives_av,\n self._transactions_av_std_dev)\n\n # add exceptions to cumulative and classified dictionary\n # add exceptions to cumulative and classified dictionary\n\n for index in tmp_exceptions:\n self.exceptionsClassifiedDict[classificator][index] = self.exceptionsClassifiedDict[classificator][\n index] + 1\n self.exceptionsCumulatedList[index] = self.exceptionsCumulatedList[index] + 1\n\n # associate companies to exceptions classification\n # associate companies to exceptions classification\n\n if companyIdAny not in self.exceptionsClassifiedCompaniesDict[classificator][index]:\n self.exceptionsClassifiedCompaniesDict[classificator][index][companyIdAny] = 1\n else:\n 
self.exceptionsClassifiedCompaniesDict[classificator][index][companyIdAny] = self.exceptionsClassifiedCompaniesDict[classificator][index][companyIdAny] + 1\n if companyIdAny not in self.exceptionsCumulatedCompaniesList[index]:\n self.exceptionsCumulatedCompaniesList[index][companyIdAny] = 1\n else:\n self.exceptionsCumulatedCompaniesList[index][companyIdAny] = self.exceptionsCumulatedCompaniesList[index][companyIdAny] + 1\n\n if (plotSingleDerivativesGraph):\n # self.conf.plt.plot(data_av, color='red')\n self.conf.plt.plot(tmp_derivatives_av, color='orange')\n self.conf.plt.plot(tmp_derivatives, 'k.')\n self.conf.plt.plot(transactionList, 'k.', color='green')\n # add anomalies\n anomalies_x = tmp_exceptions.keys()\n anomalies_y = tmp_exceptions.values()\n self.conf.plt.scatter(anomalies_x, anomalies_y)\n \n #for brk in breaks:\n # self.conf.plt.axvline(x=brk)\n\n self.conf.plt.show()\n\n # find maximums and associate company ids to it\n # find maximums and associate company ids to it\n\n maxList, self.anomaliesList = self.shared.extractDataFromListMaximums(self.exceptionsCumulatedList,\n self.exceptionsCumulatedCompaniesList,\n self._transactions_av_window_size,\n self._breaks_maximum_window_dev)\n for classificator in self.exceptionsClassifiedDict:\n self.anomaliesClassifiedDict[classificator] = []\n maxList, self.anomaliesClassifiedDict[classificator] = self.shared.extractDataFromListMaximums(\n self.exceptionsClassifiedDict[classificator], self.exceptionsClassifiedCompaniesDict[classificator],\n self._transactions_av_window_size, self._breaks_maximum_window_dev)\n\n "
return None
def create_transaction_time_frame_sums(self):
"""
Function takes transaction data transactionsData and creates a list of sums of self.transactionDataTimeFrameCluster consecutive transactions.
Result is stored in self.transactionsDataPartialSums
Function creates a list of full sums stored in self.transactionsDataSums.
:return: 0
"""
for classifier in self.transactionsData['data']:
if classifier not in self.transactionsDataSumsClassified:
self.transactionsDataSumsClassified[classifier] = {}
self.transactionsDataSumsPartialClassified[classifier] = {}
self.transactionsDataSumsPartialTransposedClassified[classifier] = []
self.weightsComparedTransposedClassified[classifier] = []
for company_ids in self.transactionsData['data'][classifier]:
tmp_sum = 0.0
tmp_sum_part = 0.0
tmp_frame_index = 0
tmp_sum_part_index = 0
if companyIds not in self.transactionsDataSumsPartial:
num_ofslots = int(len(self.transactionsData['data'][classifier][companyIds]) / self.transactionDataTimeFrameCluster) + 1
self.transactionsDataSumsPartial[companyIds] = [0.0] * numOfslots
self.transactionsDataSumsPartialClassified[classifier][companyIds] = [0.0] * numOfslots
if len(self.transactionsDataSumsPartialTransposedClassified[classifier]) == 0:
self.transactionsDataSumsPartialTransposedClassified[classifier] = [{} for _ in range(numOfslots)]
if len(self.transactionsDataSumsPartialTransposed) == 0:
self.transactionsDataSumsPartialTransposed = [{} for _ in range(numOfslots)]
if len(self.weightsComparedTransposedClassified[classifier]) == 0:
self.weightsComparedTransposedClassified[classifier] = [{} for _ in range(numOfslots)]
if len(self.weightsComparedTransposed) == 0:
self.weightsComparedTransposed = [{} for _ in range(numOfslots)]
for trans in self.transactionsData['data'][classifier][companyIds]:
tmp_sum = tmp_sum + float(trans)
tmp_sum_part = tmp_sum_part + float(trans)
tmp_frame_index = tmp_frame_index + 1
if tmp_frame_index >= self.transactionDataTimeFrameCluster:
self.transactionsDataSumsPartial[companyIds][tmp_sum_part_index] = tmp_sum_part
self.transactionsDataSumsPartialClassified[classifier][companyIds][tmp_sum_part_index] = tmp_sum_part
self.transactionsDataSumsPartialTransposed[tmp_sum_part_index][companyIds] = tmp_sum_part
self.transactionsDataSumsPartialTransposedClassified[classifier][tmp_sum_part_index][companyIds] = tmp_sum_part
tmp_sum_part = 0.0
tmp_frame_index = 0
tmp_sum_part_index = tmp_sum_part_index + 1
if tmp_frame_index > 0 and tmp_frame_index < self.transactionDataTimeFrameCluster:
self.transactionsDataSumsPartial[companyIds][tmp_sum_part_index] = tmp_sum_part
self.transactionsDataSumsPartialClassified[classifier][companyIds][tmp_sum_part_index] = tmp_sum_part
self.transactionsDataSumsPartialTransposed[tmp_sum_part_index][companyIds] = tmp_sum_part
self.transactionsDataSumsPartialTransposedClassified[classifier][tmp_sum_part_index][companyIds] = tmp_sum_part
self.transactionsDataSums[companyIds] = tmp_sum
self.transactionsDataSumsClassified[classifier][companyIds] = tmp_sum
self.transactionsDataSumsWeights = self.convertTransactions2Weights(self.transactionsDataSums)
for classifier in self.transactionsDataSumsClassified:
self.transactionsDataSumsWeightsClassified[classifier] = self.convertTransactions2Weights(self.transactionsDataSumsClassified[classifier])
self.exceptionsDict = self.identifyAnomalies(self.transactionsDataSumsPartialTransposed, self.transactionsDataSumsWeights)
for classifier in self.transactionsDataSumsPartialTransposedClassified:
self.exceptionsClassifiedDict[classifier] = self.identifyAnomalies(self.transactionsDataSumsPartialTransposedClassified[classifier], self.transactionsDataSumsWeightsClassified[classifier])
return None
def convert_transactions2_weights(self, dataDict):
    """Normalize transaction sums into weights that sum to 1.

    Args:
        dataDict: mapping of id -> transaction sum (numeric).

    Returns:
        dict mapping id -> value / total.  Empty dict when the total is
        0.0 (avoids division by zero) or when the input is empty.
    """
    # Bug fix: the previous version returned/assigned the undefined name
    # `weightsList` (NameError on every call); use one consistent local.
    weights = {}
    data_sum = sum(dataDict.values())
    if data_sum == 0.0:
        return weights
    for ids, value in dataDict.items():
        weights[ids] = value / data_sum
    return weights
def identify_anomalies(self, partialTransactionSumsTransposed, transactionSumWeights):
    """Score companies by how far their per-period transaction weights
    deviate from their overall weights.

    Args:
        partialTransactionSumsTransposed: iterable of dicts, one per time
            frame, mapping company id -> partial transaction sum.
        transactionSumWeights: dict mapping company id -> overall weight.

    Returns:
        list of (maticna, score) tuples sorted by score, descending.
    """
    # Bug fix: the previous version mixed snake_case and camelCase
    # spellings of the same locals (partialDataSet/partial_data_set,
    # tmp_partialWeights/..., anomalyDict/...), raising NameError.
    # Dead plotting code left behind as no-op string literals was removed.
    anomaly_dict = {}
    for partial_data_set in partialTransactionSumsTransposed:
        tmp_partial_weights = self.convertTransactions2Weights(partial_data_set)
        tmp_compared_weights = {}
        for ids in tmp_partial_weights:
            if transactionSumWeights[ids] > 0.0 and tmp_partial_weights[ids] > 0.0:
                # Log-ratio of the period weight vs the overall weight.
                tmp_compared_weights[ids] = self.conf.numpy.log10(tmp_partial_weights[ids] / transactionSumWeights[ids])
        # Too few points for a meaningful moving average -> skip this frame.
        if len(tmp_compared_weights) < self._transactions_av_window_size:
            continue
        tmp_compared_weights_av = self.shared.getAveragedList(list(tmp_compared_weights.values()), self._transactions_av_window_size)
        anomalies_detected = self.shared.getExceptionsLocal(list(tmp_compared_weights.values()), tmp_compared_weights_av, self._transactions_av_std_dev)
        ids_list = list(tmp_compared_weights.keys())
        # NOTE(review): assumes enumerate() pairs each detected anomaly with
        # the id at the same position -- confirm getExceptionsLocal's return
        # type (sequence vs dict) against its implementation.
        for index, value in enumerate(anomalies_detected):
            maticna_list = ids_list[index].split('-')
            key = maticna_list[1]
            # Bug fix: the old membership test checked `index` (an int never
            # used as a key), so each frame overwrote the score instead of
            # accumulating it; accumulate per maticna instead.
            anomaly_dict[key] = anomaly_dict.get(key, 0.0) + float(value)
    return sorted(anomaly_dict.items(), key=lambda kv: kv[1], reverse=True)
def save_anomalies2_file(self):
    """Save the detected anomalies to output files.

    Writes one file for the overall anomaly list and one per classifier,
    each with columns (maticna, score) and company names appended.

    Returns:
        None
    """
    def write_anomalies(rows, classificator_label):
        # Build the output dict for one anomaly list, enrich it with company
        # names, and send it to the output file named for the given label
        # ('' = overall list; otherwise the classifier name replaces 'CAT').
        final_data_dict = {'head': ['maticna', 'score'], 'data': []}
        for row in rows:
            final_data_dict['data'].append([str(row[0]), str(row[1])])
        fields_dict = {'maticna': 'company_name'}
        final_data_dict = self.sharedAnalysis.appendAjpesOrganizationNames2Dict(final_data_dict, fields_dict)
        file_name = self.saveAnomaliesFileName.replace('CAT', classificator_label)
        full_file_name = self.saveAnomaliesFilePath + file_name
        self.conf.sharedCommon.sendDict2Output(final_data_dict, full_file_name)

    # Bug fix: the previous version mixed snake_case and camelCase spellings
    # of the same locals (final_data_dict/finalDataDict, fieldsDict,
    # fileName, fullFileName -> NameError) and duplicated the whole write
    # sequence; both paths now share one helper.
    write_anomalies(self.exceptionsDict, '')
    for classificator in self.exceptionsClassifiedDict:
        write_anomalies(self.exceptionsClassifiedDict[classificator], classificator)
    return None
'''
You are given an integer array nums sorted in non-decreasing order.
Build and return an integer array result with the same length as
nums such that result[i] is equal to the summation of absolute
differences between nums[i] and all the other elements in the
array.
In other words, result[i] is equal to sum(|nums[i]-nums[j]|)
where 0 <= j < nums.length and j != i (0-indexed).
Example:
Input: nums = [2,3,5]
Output: [4,3,5]
Explanation: Assuming the arrays are 0-indexed, then
result[0] = |2-2| + |2-3| + |2-5| = 0 + 1 + 3 = 4,
result[1] = |3-2| + |3-3| + |3-5| = 1 + 0 + 2 = 3,
result[2] = |5-2| + |5-3| + |5-5| = 3 + 2 + 0 = 5.
Example:
Input: nums = [1,4,6,8,10]
Output: [24,15,13,15,21]
Constraints:
- 2 <= nums.length <= 10^5
- 1 <= nums[i] <= nums[i + 1] <= 10^4
'''
#Difficulty:Medium
#59 / 59 test cases passed.
#Runtime: 956 ms
#Memory Usage: 29.7 MB
#Runtime: 956 ms, faster than 25.00% of Python3 online submissions for Sum of Absolute Differences in a Sorted Array.
#Memory Usage: 29.7 MB, less than 50.00% of Python3 online submissions for Sum of Absolute Differences in a Sorted Array.
class Solution:
    def getSumAbsoluteDifferences(self, nums: List[int]) -> List[int]:
        """Return result where result[i] = sum(|nums[i] - nums[j]|) over all j.

        Relies on nums being sorted: a single pass keeps the sum of the
        elements already seen (prefix) and still ahead (suffix), so each
        answer costs O(1) — O(n) total time, no nested loop.
        """
        count = len(nums)
        prefix = 0          # sum of nums[0:i]
        suffix = sum(nums)  # sum of nums[i:] after the update below
        answers = [0] * count
        for i, current in enumerate(nums):
            suffix -= current
            left_part = abs(current * i - prefix)
            right_part = abs(current * (count - i - 1) - suffix)
            answers[i] = left_part + right_part
            prefix += current
        return answers
| """
You are given an integer array nums sorted in non-decreasing order.
Build and return an integer array result with the same length as
nums such that result[i] is equal to the summation of absolute
differences between nums[i] and all the other elements in the
array.
In other words, result[i] is equal to sum(|nums[i]-nums[j]|)
where 0 <= j < nums.length and j != i (0-indexed).
Example:
Input: nums = [2,3,5]
Output: [4,3,5]
Explanation: Assuming the arrays are 0-indexed, then
result[0] = |2-2| + |2-3| + |2-5| = 0 + 1 + 3 = 4,
result[1] = |3-2| + |3-3| + |3-5| = 1 + 0 + 2 = 3,
result[2] = |5-2| + |5-3| + |5-5| = 3 + 2 + 0 = 5.
Example:
Input: nums = [1,4,6,8,10]
Output: [24,15,13,15,21]
Constraints:
- 2 <= nums.length <= 10^5
- 1 <= nums[i] <= nums[i + 1] <= 10^4
"""
class Solution:
    def get_sum_absolute_differences(self, nums: List[int]) -> List[int]:
        """Return, for each i, the sum of |nums[i] - nums[j]| over all j.

        Single pass over the sorted input, maintaining running totals of
        the elements before and after the current position.
        """
        length = len(nums)
        before = 0            # total of elements to the left of position
        after = sum(nums)     # total of elements from position onward
        result = []
        for position, current in enumerate(nums):
            after -= current
            # abs() keeps the formula valid even at the array's edges.
            distance = abs(current * position - before) + abs(current * (length - position - 1) - after)
            result.append(distance)
            before += current
        return result
#Servo test
# Fragment of a Tkinter + PWM servo controller.  Depends on names defined
# elsewhere in the original program: Entry, root, p, deg1 -- presumably a
# Tkinter window and an RPi.GPIO PWM object; TODO confirm against the
# full script.
e1 = Entry(root)
e1.grid(row=0, column=1)
def cal():
    # Convert the requested angle (degrees) into a PWM duty cycle using
    # the linear mapping 0.056 * deg + 2.5, then apply it to the servo.
    global dc
    # NOTE(review): deg1 is read as a value here; if deg1 is a Tkinter
    # Entry widget this likely should be deg1.get() -- verify.
    deg = abs(float(deg1))
    dc = 0.056*deg + 2.5
    p.ChangeDutyCycle(dc)
    # NOTE(review): the trailing "| e1 = entry(root)" appears to be
    # dataset-join residue, not part of the original program.
    print(deg, dc) | e1 = entry(root)
e1.grid(row=0, column=1)
def cal():
global dc
deg = abs(float(deg1))
dc = 0.056 * deg + 2.5
p.ChangeDutyCycle(dc)
print(deg, dc) |
# Weight converter
# Interactive converter between kilograms and pounds: prompts for a
# numeric weight and a unit letter, then prints the converted value.
weight = float(input("Weight?"))
unit = input("(L)bs or (K)g?")
if unit.upper() == "K":
    # kilograms -> pounds (1 kg = 2.2 lbs)
    print(weight*2.2)
elif unit.upper() == "L":
    # pounds -> kilograms (1 lb = 0.45 kg)
    print(weight*0.45)
else:
    # Unrecognized unit letter.
    # NOTE(review): the trailing "| weight = ..." appears to be
    # dataset-join residue, not part of the original program.
    print("Error, please verify your input") | weight = float(input('Weight?'))
unit = input('(L)bs or (K)g?')
if unit.upper() == 'K':
print(weight * 2.2)
elif unit.upper() == 'L':
print(weight * 0.45)
else:
print('Error, please verify your input') |
def stair_ways(n):
    """Count the distinct ways to climb a staircase of n steps, taking
    1, 2 or 3 steps at a time (tribonacci recurrence).

    Args:
        n: number of steps.

    Returns:
        Number of distinct step sequences.  Returns 1 for n == 0 (the
        empty climb) and 0 for negative n; the previous version raised
        IndexError for n <= 0.
    """
    if n < 0:
        return 0
    # Rolling window of the last three values of
    # f(k) = f(k-1) + f(k-2) + f(k-3), seeded with f(0)=1, f(-1)=f(-2)=0.
    # O(1) space instead of the previous O(n) table.
    two_back, one_back, current = 0, 0, 1
    for _ in range(n):
        two_back, one_back, current = one_back, current, two_back + one_back + current
    return current
def test_stair_ways():
    """Spot-check stair_ways against hand-computed values."""
    assert 1 == stair_ways(1)
    assert 2 == stair_ways(2)
    assert 4 == stair_ways(3)
    # Added coverage: 4 steps -> {1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2,
    # 1+3, 3+1} = 7 ways; 5 steps continues the tribonacci sum 7+4+2.
    assert 7 == stair_ways(4)
    assert 13 == stair_ways(5)
# Script entry point: print the number of ways to climb 30 stairs.
if __name__ == "__main__":
    print(stair_ways(30))
| def stair_ways(n):
ways = [0] * n
ways[0] = 1
if n > 1:
ways[1] = 1
if n > 2:
ways[2] = 1
for p in range(0, n):
if p + 1 < n:
ways[p + 1] += ways[p]
if p + 2 < n:
ways[p + 2] += ways[p]
if p + 3 < n:
ways[p + 3] += ways[p]
return ways[n - 1]
def test_stair_ways():
assert 1 == stair_ways(1)
assert 2 == stair_ways(2)
assert 4 == stair_ways(3)
if __name__ == '__main__':
print(stair_ways(30)) |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 00:08:24 2017
@author: soumi
"""
## scale canvas pixels to android screen pixel
class SketchDipCalculator:
    """Scales canvas pixel coordinates to Android screen dip units.

    Conversion ratios are derived from a fixed reference screen
    (410 x 730) assumed by the generated Android layout code.
    """

    def __init__(self, width, height):
        # Reference screen size considered for Android code generation.
        base_width, base_height = 410, 730
        self.mWidthPx = width
        self.mHeightPx = height
        # Pixel-per-dip ratio along each axis.
        self.mWidthDpr = width / base_width
        self.mHeightDpr = height / base_height

    def pxToWidthDip(self, px):
        """Convert a horizontal pixel measure to dip."""
        return int(px / self.mWidthDpr)

    def pxToHeightDip(self, px):
        """Convert a vertical pixel measure to dip."""
        return int(px / self.mHeightDpr)

    def dipToHeightPx(self, height):
        """Convert a vertical dip measure to pixels."""
        return int(height * self.mHeightDpr)

    def dipToWidthPx(self, dip):
        """Convert a horizontal dip measure to pixels."""
        return int(dip * self.mWidthDpr)
| """
Created on Tue Nov 28 00:08:24 2017
@author: soumi
"""
class Sketchdipcalculator:
def __init__(self, width, height):
self.mHeightPx = height
self.mWidthPx = width
standard_screen_width_dpi = 410
standard_screen_height_dpi = 730
self.mWidthDpr = self.mWidthPx / standardScreenWidthDpi
self.mHeightDpr = self.mHeightPx / standardScreenHeightDpi
def px_to_width_dip(self, px):
return int(px / self.mWidthDpr)
def px_to_height_dip(self, px):
return int(px / self.mHeightDpr)
def dip_to_height_px(self, height):
return int(height * self.mHeightDpr)
def dip_to_width_px(self, dip):
return int(dip * self.mWidthDpr) |
#!/usr/bin/env python
# Print the Fibonacci numbers below 100 (1, 1, 2, 3, 5, 8, ...).
a = 0
b = 1
while b < 100:
    print(b)
    # Advance the pair: b becomes the next term, a the previous one.
    a,b = b,a+b
| a = 0
b = 1
while b < 100:
print(b)
(a, b) = (b, a + b) |
"""
``pysiml`` is a python library for similarity measures
"""
__version__ = '0.1.0'
| """
``pysiml`` is a python library for similarity measures
"""
__version__ = '0.1.0' |
class state:
    """A state/region with basic demographic data.

    (Class name kept lowercase for compatibility with existing callers,
    although PEP 8 would prefer `State`.)
    """

    def __init__(self, name, population, area, capital):
        self.name = name
        self.population = population
        self.area = area
        self.capital = capital

    def calc_density(self):
        """Return the population density (people per unit area).

        Bug fix: the previous version returned area / population — the
        inverse (area per person) — despite the method's name.
        """
        return self.population / self.area
# Demo: build two sample states and show attribute access and density.
state_1 = state('guj', 50000000, 40000000, 'gandhinagar')
state_2 = state('maharashtra', 80000000, 100000000, 'mumbai')
print(state_1.name)
print(state_2.calc_density())
| class State:
def __init__(self, name, population, area, capital):
self.name = name
self.population = population
self.area = area
self.capital = capital
def calc_density(self):
return self.area / self.population
state_1 = state('guj', 50000000, 40000000, 'gandhinagar')
state_2 = state('maharashtra', 80000000, 100000000, 'mumbai')
print(state_1.name)
print(state_2.calc_density()) |
puffRstring = '''
impute_zeros <- function(x, y, bw){
k <- ksmooth(x=x, y=y, bandwidth=bw)
y[y == 0] <- k$y[y == 0]
return(y)
}
mednorm <- function(x){x/median(x)}
mednorm.ksmooth <-function(x,y,bw){mednorm(ksmooth(x=x,y=y,bandwidth = bw)$y)}
mednorm.ksmooth.norm <-function(x,y,bw,norm.y){mednorm.ksmooth(x,y,bw)/mednorm.ksmooth(x,norm.y,bw)}
inner.quant.mean.norm <- function(x, inner=c(0.4,0.6)){
innerq <- quantile(x=x, probs = inner)
x/mean(x[x >= innerq[1] & x <= innerq[2]])
}
slope <- function(x1,x2,y1,y2){
(y2-y1)/(x2-x1)
}
prob.data <- function(Forward){
num.emits <- dim(Forward)[2]
return(sum(Forward[,num.emits]))
}
# HMM Functions
viterbi.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal", logprobs=FALSE){
## logprobs = whether or not transition and initial matrices are logged yet, assumes false
if(!(logprobs)){
initial = log(initial)
transitions = log(transitions)
}
num.states <- length(states)
num.emits <- length(emitted.data)
## need to add log_probs instead of multiply probs to prevent underflow
## write.table(emissions, file="del1")
## write.table(transitions, file="del2")
## write.table(initial,file="del3")
## write.table(num.states, file="del4")
## write.table(num.emits, file="del5")
if (emodel == "normal"){
V=viterbi.normal(emissions, transitions, initial, emitted.data, num.states, num.emits)
} else if (emodel == "poisson"){
V=viterbi.poisson(emissions, transitions, initial, emitted.data, num.states, num.emits)
} else if (emodel == "exponential"){
## NOTE: emission_prob_means that would otherwise be used for normal or poissoin are inversed (1/mu) for exponential and geometric
emissions[1,] <- 1/emissions[1,]
V=viterbi.exponential(emissions, transitions, initial, emitted.data, num.states, num.emits)
} else if (emodel == "geometric"){
## NOTE: emission_prob_means that would otherwise be used for normal or poissoin are inversed (1/mu) for exponential and geometric
emissions[1,] <- 1/emissions[1,]
V=viterbi.geometric(emissions, transitions, initial, emitted.data, num.states, num.emits)
} else if (emodel == "gamma"){
params <- emissions
## Estimate shape
params[1,] <- emissions[1,]^2 / emissions[2,]^2
## Estimate scale
params[2,] <- emissions[2,]^2 / emissions[1,]
## Stay calm and carry on
V=viterbi.gamma(params, transitions, initial, emitted.data, num.states, num.emits)
} else if (emodel == "discrete"){
V=viterbi.discrete(emissions, transitions, initial, emitted.data, num.states, num.emits)
}
viterbi_path <- matrix(data = rep(0, num.emits), nrow = 1)
viterbi_path[num.emits] <- which.max(V$Viterbi[,num.emits]);
viterbi_prob <- V$Viterbi[viterbi_path[num.emits], num.emits]
for (j in num.emits:2){
viterbi_path[j-1] <- V$pointer[j,viterbi_path[j]]
}
return(list(viterbi_path=viterbi_path, viterbi_prob=viterbi_prob))
}
##NORMAL
viterbi.normal <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){
pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)
Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states)
Viterbi[ ,1] <- initial + dnorm(emitted.data[1], mean = emissions[1, ], sd = emissions[2, ], log = TRUE)
pointer[1, ] <- 1
f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}
for (j in 2:num.emits){
selection <- Viterbi[,j-1] + transitions
for (i in 1:num.states){
maxstate <- which.max(selection[,i])
Viterbi[i,j] <- dnorm(emitted.data[j], mean = emissions[1, i], sd = emissions[2, i], log = TRUE) + selection[maxstate,i]
pointer[j,i] <- maxstate
}
}
return(list(Viterbi=Viterbi, pointer=pointer))
}
##POISSON
viterbi.poisson <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){
## rounds floats to integers
pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)
Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states)
Viterbi[ ,1] <- initial + dpois(round(emitted.data[1]), lambda = emissions[1, ], log=TRUE)
pointer[1, ] <- 1
f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}
for (j in 2:num.emits){
selection <- Viterbi[,j-1] + transitions
for (i in 1:num.states){
maxstate <- which.max(selection[,i])
Viterbi[i,j] <- dpois(round(emitted.data[j]), lambda = emissions[1, i], log = TRUE) + selection[maxstate,i]
pointer[j,i] <- maxstate
}
}
return(list(Viterbi=Viterbi, pointer=pointer))
}
## EXPONENTIAL
viterbi.exponential <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){
pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)
Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states)
Viterbi[ ,1] <- initial + dexp(emitted.data[1], rate = emissions[1, ], log=TRUE)
pointer[1, ] <- 1
f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}
for (j in 2:num.emits){
selection <- Viterbi[,j-1] + transitions
for (i in 1:num.states){
maxstate <- which.max(selection[,i])
Viterbi[i,j] <- dexp(emitted.data[j], rate = emissions[1, i], log = TRUE) + selection[maxstate,i]
pointer[j,i] <- maxstate
}
}
return(list(Viterbi=Viterbi, pointer=pointer))
}
## GEOMETRIC
viterbi.geometric <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){
## rounds floats to integers
pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)
Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states)
Viterbi[ ,1] <- initial + dgeom(round(emitted.data[1]), prob = emissions[1, ], log=TRUE)
pointer[1, ] <- 1
f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}
for (j in 2:num.emits){
selection <- Viterbi[,j-1] + transitions
for (i in 1:num.states){
maxstate <- which.max(selection[,i])
Viterbi[i,j] <- dgeom(round(emitted.data[j]), prob = emissions[1, i], log = TRUE) + selection[maxstate,i]
pointer[j,i] <- maxstate
}
}
return(list(Viterbi=Viterbi, pointer=pointer))
}
## GAMMA
viterbi.gamma <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){
pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)
Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states)
Viterbi[ ,1] <- initial + dgamma(emitted.data[1], shape = emissions[1, ], scale =emissions[2, ], log=TRUE)
pointer[1, ] <- 1
f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}
for (j in 2:num.emits){
selection <- Viterbi[,j-1] + transitions
for (i in 1:num.states){
maxstate <- which.max(selection[,i])
Viterbi[i,j] <- dgamma(emitted.data[j], shape = emissions[1, i], scale = emissions[2, i], log = TRUE) + selection[maxstate,i]
pointer[j,i] <- maxstate
}
}
return(list(Viterbi=Viterbi, pointer=pointer))
}
##DISCRETE
viterbi.discrete <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){
## "emissions" = numstates x numsymbols matrix (rows sum to 1)
## rounds floats to integers
pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)
Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states)
## INITIALIZE AND GO
Viterbi[ ,1] <- initial + log( emissions[ , emitted.data[1]] )
pointer[1, ] <- 1
f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}
for (j in 2:num.emits){
selection <- Viterbi[,j-1] + transitions
for (i in 1:num.states){
maxstate <- which.max(selection[,i])
Viterbi[i,j] <- log( emissions[ i, emitted.data[j]] ) + selection[maxstate,i]
pointer[j,i] <- maxstate
}
}
return(list(Viterbi=Viterbi, pointer=pointer))
}
#### VECTORIZED FORWARDS ########
forward.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal"){
num.states <- length(states)
num.emits <- length(emitted.data)
Forward <- matrix(data = rep(0, num.states*num.emits), nrow = num.states)
scalefactors <- matrix(data = rep(0, num.emits*2), nrow = 2)
#model
if (emodel == "normal"){emodel.fxn <- puff.normal}
else if (emodel == "exponential"){
emissions[1,] <- 1/emissions[1,]
emodel.fxn <- puff.exponential
} else if (emodel == "poisson"){emodel.fxn <- puff.poisson}
else if (emodel == "geometric"){
emissions[1,] <- 1/emissions[1,]
emodel.fxn <- puff.geometric
} else if (emodel == "gamma") {
params <- emissions
## Estimate shape
params[1,] <- emissions[1,]^2 / emissions[2,]^2
## Estimate scale
params[2,] <- emissions[2,]^2 / emissions[1,]
## Stay calm and carry on
emissions <- params
emodel.fxn <- puff.gamma
}
## initial
Forward[, 1] <- initial*emodel.fxn(emitted.data[1], emissions)
## scale to prevent underflow -- keep track of scaling
scalefactors[1,1] <- sum(Forward[, 1])
scalefactors[2,1] <- log(scalefactors[1,1])
Forward[,1] <- Forward[,1]/scalefactors[1,1]
## iterate
for(k in 2:num.emits){
emit <- emodel.fxn(emitted.data[k], emissions)
Forward[, k] <- emit* Forward[,k-1] %*% transitions ## same as emit* Forward[,k-1] * colSums(transitions)
scalefactors[1,k] <- sum(Forward[, k])
scalefactors[2,k] <- log(scalefactors[1,k]) + scalefactors[2,k-1]
Forward[,k] <- Forward[,k]/scalefactors[1,k]
}
return(list(forward=Forward, scales=scalefactors))
## mutiply forward column by row2,samecol in scale factors OR by exp(row3,samecol in scalfactors) to get actual value for forward
## update: OR actually I think multiply fwd column by product of [row1,1:samecol] in scale factors
## I must have at one point taken out row2 of scale factors (when row3 was logs of row2)
}
#### VECTORIZED BACKWARDS ########
backward.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal"){
num.states <- length(states)
num.emits <- length(emitted.data)
Backward = matrix(data = rep(0, num.states*num.emits), nrow = num.states)
scalefactors <- matrix(data = rep(0, num.emits*2), nrow = 2)
#model
if (emodel == "normal"){emodel.fxn <- puff.normal}
else if (emodel == "exponential"){
emissions[1,] <- 1/emissions[1,]
emodel.fxn <- puff.exponential
}
else if (emodel == "poisson"){emodel.fxn <- puff.poisson}
else if (emodel == "geometric"){
emissions[1,] <- 1/emissions[1,]
emodel.fxn <- puff.geometric
}
else if (emodel == "gamma"){
params <- emissions
## Estimate shape
params[1,] <- emissions[1,]^2 / emissions[2,]^2
## Estimate scale
params[2,] <- emissions[2,]^2 / emissions[1,]
## Stay calm and carry on
emissions <- params
emodel.fxn <- puff.gamma
}
## initial
Backward[ , num.emits] <- 1
## scale to prevent underflow -- keep track of scaling
scalefactors[1,num.emits] <- sum(Backward[, num.emits])
scalefactors[2,num.emits] <- log(scalefactors[1,num.emits])
Backward[,num.emits] <- Backward[,num.emits]/scalefactors[1,num.emits]
## iterate
for(k in (num.emits-1):1){
# emit <- matrix(dnorm(emitted.data[k+1], mean = emissions[1, ], sd = emissions[2, ]))
emit <- matrix(emodel.fxn(emitted.data[k+1], emissions))
# print(Backward[, k+1] * emit)
Backward [, k] <- transitions %*% (Backward[, k+1] * emit)
scalefactors[1,k] <- sum(Backward[, k])
scalefactors[2,k] <- log(scalefactors[1,k]) + scalefactors[2,k+1]
Backward[,k] <- Backward[,k]/scalefactors[1,k]
}
return(list(backward=Backward, scales=scalefactors))
}
puff.normal <- function(x, emissions){
dnorm(x, mean = emissions[1, ], sd = emissions[2, ], log=FALSE)
}
puff.exponential <- function(x, emissions){
dexp(x = x, rate = emissions[1, ], log = FALSE)
}
puff.poisson <- function(x, emissions){
dpois(x = round(x), lambda = emissions[1, ], log = FALSE)
}
puff.geometric <- function(x, emissions){
dgeom(x = round(x), prob = emissions[1, ], log = FALSE)
}
puff.gamma <- function(x, emissions){
dgamma(x = x, shape = emissions[1, ], scale = emissions[2, ], log=FALSE)
}
###
posterior <- function(Forward, Backward, states){
## F and B matrices are from small sequence fxns -- not scaled
num.states <- length(states)
num.emits <- dim(Forward)[2]
posterior.path <- matrix(data = rep(0, num.emits), nrow = 1)
probs <- matrix(data = rep(0, num.emits), nrow = 1)
pd <- prob.data(Forward = Forward)
for (i in 1:num.emits){
fb <- Forward[,i]*Backward[,i]
max.state <- which.max(fb)
posterior.path[i] <- max.state
# probs[i] <- max(fb)/pd ## should be divided by prob.data...?
}
return(list(posterior.path=posterior.path, probs=probs))
}
compare.statepath <- function(sp1, sp2){
# where sp is a numeric vector or char vector with each state its own element -- not seq format
total <- length(sp1)
ident <- sum(sp1 == sp2)
edit.dist <- total - ident
return(list(edit.dist=edit.dist, identical.count=ident, pct.id=100*ident/total))
}
baum.welch.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal"){
#model
if (emodel == "normal"){emodel.fxn <- puff.normal}
else if (emodel == "exponential"){emodel.fxn <- puff.exponential}
else if (emodel == "poisson"){emodel.fxn <- puff.poisson}
else if (emodel == "geometric"){emodel.fxn <- puff.geometric}
# emissions, transitions, and initial probs given are "best guesses" (or randomly chosen)
# calculate log-likelihood of model
n.states <- length(states)
n.emits <- length(emitted.data)
c <- 0.00001
new.L <- 0
old.L <- 100 #arbitrary number producing difference > c
while (abs(new.L - old.L > c)){
old.L <- new.L
# emissions, transitions, and initial probs given are "best guesses" (or randomly chosen)
# calculate log-likelihood of model
# Get fwd, backward, and prob(data)
fwd <- forward.puff(emissions, transitions, initial, states, emitted.data, emodel)
bck <- backward.puff(emissions, transitions, initial, states, emitted.data, emodel)
p <- prob.data(Forward = fwd$forward)
new.L <- log10(p)
#update initial, transition, emissions
# calc new log likelihood of model
# calc difference between new and old log likelihood
# if diff > cutoff, return to fwd/bck/p step; else done
TRANS <- update.transitions(n.states, n.emits, fwd, bck, transitions, emissions)
EMIS <- update.emissions()
}
}
# update.transitions <- function(n.states, n.emits, fwd, bck, transitions, emissions){
# TRANS <- matrix(rep(0, n.states*n.states), nrow=n.states)
# for (i in 1:n.states){
# for (k in 1:n.states){
# for (m in 1:(n.emits-1)){
# TRANS[i,k] <- TRANS[i,k] + fwd[i,m] * transitions[i,k] * ##TODO#emissions[k,emitted.data[1,m+1]## * bck[k,m+1]
# }
# }
# }
# return(TRANS)
# }
# update.emissions <- function(n.states, n.emits){
# EMIS <- matrix(rep(0, n.states*n.emits), nrow=n.states)
# for (i in 1:n.states){
# for (k in 1:)
# }
# }
## if took N=10 backsamples -- ie. N state paths
## could get mean and std dev (emission dist) for each state by simple count stats
## could also get transition probs by simple count stats
backsample.puff <- function(Forward, transitions, n.states=NA, states=NA, n.emits=NA){
## TODO vectorize -- e.g. eliminate for loop
if(is.na(n.states) || is.na(states) || is.na(n.emits)){
dim.fwd <- dim(Forward)
n.states <- dim.fwd[1]
states <- 1:n.states
n.emits <- dim.fwd[2]
}
#initialization
b.sample <- rep(0, n.emits)
# Randomly sample a state for Sn according to: P(Sn=sn|X1:n) = P(Sn=sn,X1:n)/P(X1:n)
## p(data) not nec since it scales all by same number and all proportionally the same
b.sample[n.emits] <- sample(x = states, size = 1, prob = Forward[,n.emits]) #/p)
## Iterate for k in n.emits-1 to 1
## Randomly sample a state for Sk according to: P(Sk=sk|Sk+1=sk+1, X1:n)
## = P(Sk=sk, X1:k)*P(Sk+1=sk+1|Sk=sk)/ [sum(Sk) P(Sk, X1:k)*P(Sk+1=sk+1|Sk)]
## = fwd_k(Sk=sk) * trans(Sk+1=sk+1 | Sk=sk) / sum(all states using numerator terms)
for (k in (n.emits-1):1){
b.sample[k] <- sample(x = states, size = 1, prob = Forward[,k]*transitions[,b.sample[k+1]]) # no need to re-scale fwd values since they would still be proportionally same
}
return(b.sample)
}
n.backsamples <- function(Forward, transitions, N=1000){
#Forward is Forward matrix (not list object with F and scales)
# transitions is trans prob matrix
## TODO vectorize -- e.g. can pick N states for each at once
dim.fwd <- dim(Forward)
n.states <- dim.fwd[1]
states <- 1:n.states
n.emits <- dim.fwd[2]
b.samples <- matrix(rep(0, N*n.emits), nrow = N)
for(i in 1:N){
b.samples[i,] <- backsample.puff(Forward, transitions, n.states=n.states, states=states, n.emits=n.emits)
}
return(b.samples)
}
backsample.state.freq <- function(b.samples, n.states, states, N=NA, n.emits=NA){
#b.samples is a n.bsamples x n.emits matrix output from n.backsamples
if(is.na(N) || is.na(n.emits)){
d <- dim(b.samples)
N <- d[1]
n.emits <- d[2]
}
freq <- matrix(rep(0, n.states*n.emits), nrow = n.states)
for(i in 1:n.emits){
freq[,i] <- table(c(states,b.samples[,i]))-1 #adding all states in to ensure all levels are represented followed by subtracting 1 from all counts
}
return(freq)
}
backsample.max.freq.path <- function(freq){
apply(X = freq, MARGIN = 2, FUN = which.max)
}
##params for 3state
##initial <- matrix(c(1,1,1)/3, nrow=1)
emissions <- matrix(rep(0, 6), nrow=2)
emissions[1,] <- c(-0.5,0,0.5)
emissions[2,] <- c(0.5,0.5,0.5)
##transitions <- matrix(rep(0,9),nrow=3)
##transitions[1,] <- c(0.99, 0.005, 0.005)
##transitions[2,] <- c(0.005,0.99,0.005)
##transitions[3,] <- c(0.005,0.005,0.99)
#
initial <- matrix(c(0.006,0.988,0.006), nrow=1)
transitions <- matrix(rep(0,9),nrow=3)
transitions[1,] <- c(0.99998, 0.00001, 0.00001)
transitions[2,] <- c(0.000000125,0.99999975,0.000000125)
transitions[3,] <- c(0.00001,0.00001,0.99998)
## params for 7state
##initial7 <- matrix(rep(1,7)/7, nrow=1)
initial7 <- matrix(c(0.997,rep(0.0005,6)), nrow=1)
s <- c(1,sqrt(2),2,sqrt(8),4,sqrt(32),8)
m <- c(1,2,4,8,16,32,64)
##For exponential and geometric
###m <- 1/m
emissions7 <- matrix(rep(0, 14), nrow=2)
emissions7[1,] <- m
emissions7[2,] <- s
transitions7 <- matrix(rep(0,49),nrow=7)
for(i in 1:7){
transitions7[i,1:7] <- 0.001 #0.000001
transitions7[i,i] <- 0.999 #0.999999
## transitions7[i,1:7] <- 0.000001
## transitions7[i,i] <- 0.999999
# if(i>1){transitions7[i,i-1] <- 0.000005}
# if(i<7){transitions7[i,i+1] <- 0.000005}
transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])
}
##transitions7 <- matrix(rep(0,49),nrow=7)
##for(i in 1:7){
## transitions7[i,1:7] <- 0.0001
## transitions7[i,i] <- 0.9999
## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])
##}
##transitions7 <- matrix(rep(0,49),nrow=7)
##for(i in 1:7){
## if(i>1){
## for (j in 1:(i-1)){transitions7[i,(i-j)] <- 0.0001/j}
## }
## if (i<7){
## for (j in 1:(7-i)){transitions7[i,(i+j)] <- 0.0001/j}
## }
## transitions7[i,i] <- 0.9999
## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])
##}
##transitions7 <- matrix(rep(0,49),nrow=7)
##for(i in 1:7){
## if(i>1){
## for (j in 1:(i-1)){transitions7[i,(i-j)] <- 0.0001/j^3}
## }
## if (i<7){
## for (j in 1:(7-i)){transitions7[i,(i+j)] <- 0.0001/j^3}
## }
## transitions7[i,i] <- 0.9999
## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])
##}
##transitions7 <- matrix(rep(0,49),nrow=7)
##for(i in 1:7){
## if(i>1){
## for (j in 1:(i-1)){transitions7[i,(i-j)] <- 0.001/j^3}
## }
## if (i<7){
## for (j in 1:(7-i)){transitions7[i,(i+j)] <- 0.001/j^3}
## }
## transitions7[i,i] <- 0.999
## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])
##}
generate.normal <- function(n, mu, sig){
rnorm(n, mean = mu, sd = sig)
}
generate.exponential <- function(n, mu, sig){
## assumes mu already 1/mu_given
## sig is just dummy var
rexp(n, rate=mu)
}
generate.poisson <- function(n, mu, sig){
## mu is rounded
rpois(n, lambda = round(mu))
}
generate.geometric <- function(n, mu, sig){
## assumes mu is 1/mu_given
## sig is just dummy var
rgeom(n, prob = mu)
}
##generate_statepath <- function(transitions, initial, states, len=10){
## statenums <- 1:length(states)
## statepath <- vector(mode="integer", length=len)
## # INITIAL
## statepath[1] <- sample(statenums, size = 1, prob = initial)
## ## TRANSITIONS
## for (i in 2:len){
## statepath[i] <- sample(statenums, size=1, prob = transitions[statepath[i-1], ])
## }
## return(statepath)
##}
##
##generate_emitted_data <- function(emissions, statepath, emodel = 'normal'){
## #model
## if (emodel == "normal"){emodel.fxn <- generate.normal}
## else if (emodel == "exponential"){emodel.fxn <- generate.exponential}
## else if (emodel == "poisson"){emodel.fxn <- generate.poisson}
## else if (emodel == "geometric"){emodel.fxn <- generate.geometric}
##
## statepathlen = length(statepath)
## emitted_data <- vector(mode='numeric', length=statepathlen)
## for (i in 1:statepathlen){
## emitted_data[i] <- emodel.fxn(n=1, mu=emissions[1, statepath[i]], sig=emissions[2, statepath[i]])
## }
## return(emitted_data)
##}
generate <- function(emissions, transitions, initial, states, statepathlen=10, emodel="normal"){
#model
if (emodel == "normal"){emodel.fxn <- generate.normal}
else if (emodel == "exponential"){emodel.fxn <- generate.exponential}
else if (emodel == "poisson"){emodel.fxn <- generate.poisson}
else if (emodel == "geometric"){emodel.fxn <- generate.geometric}
## Ensure states are indexes
statenums <- 1:length(states)
statepath <- vector(mode="integer", length=statepathlen)
emitted_data <- vector(mode='numeric', length=statepathlen)
# INITIAL
statepath[1] <- sample(statenums, size = 1, prob = initial)
emitted_data[1] <- emodel.fxn(n=1, mu=emissions[1, statepath[1]], sig=emissions[2, statepath[1]])
## TRANSITIONS
for (i in 2:statepathlen){
statepath[i] <- sample(statenums, size=1, prob = transitions[statepath[i-1], ])
emitted_data[i] <- emodel.fxn(n=1, mu=emissions[1, statepath[i]], sig=emissions[2, statepath[i]])
}
return(list(statepath, emitted_data))
}
'''
| puff_rstring = '\nimpute_zeros <- function(x, y, bw){\n k <- ksmooth(x=x, y=y, bandwidth=bw)\n y[y == 0] <- k$y[y == 0]\n return(y)\n}\nmednorm <- function(x){x/median(x)}\nmednorm.ksmooth <-function(x,y,bw){mednorm(ksmooth(x=x,y=y,bandwidth = bw)$y)}\nmednorm.ksmooth.norm <-function(x,y,bw,norm.y){mednorm.ksmooth(x,y,bw)/mednorm.ksmooth(x,norm.y,bw)}\ninner.quant.mean.norm <- function(x, inner=c(0.4,0.6)){\n innerq <- quantile(x=x, probs = inner)\n x/mean(x[x >= innerq[1] & x <= innerq[2]])\n}\n\nslope <- function(x1,x2,y1,y2){\n (y2-y1)/(x2-x1)\n}\n\n\n\nprob.data <- function(Forward){\n num.emits <- dim(Forward)[2]\n return(sum(Forward[,num.emits]))\n}\n\n\n\n\n# HMM Functions\nviterbi.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal", logprobs=FALSE){\n ## logprobs = whether or not transition and initial matrices are logged yet, assumes false\n if(!(logprobs)){\n initial = log(initial)\n transitions = log(transitions)\n }\n num.states <- length(states)\n num.emits <- length(emitted.data) \n ## need to add log_probs instead of multiply probs to prevent underflow\n## write.table(emissions, file="del1")\n## write.table(transitions, file="del2")\n## write.table(initial,file="del3")\n## write.table(num.states, file="del4")\n## write.table(num.emits, file="del5")\n if (emodel == "normal"){\n V=viterbi.normal(emissions, transitions, initial, emitted.data, num.states, num.emits)\n } else if (emodel == "poisson"){\n V=viterbi.poisson(emissions, transitions, initial, emitted.data, num.states, num.emits)\n } else if (emodel == "exponential"){\n ## NOTE: emission_prob_means that would otherwise be used for normal or poissoin are inversed (1/mu) for exponential and geometric\n emissions[1,] <- 1/emissions[1,]\n V=viterbi.exponential(emissions, transitions, initial, emitted.data, num.states, num.emits)\n } else if (emodel == "geometric"){\n ## NOTE: emission_prob_means that would otherwise be used for normal or poissoin are inversed 
(1/mu) for exponential and geometric\n emissions[1,] <- 1/emissions[1,]\n V=viterbi.geometric(emissions, transitions, initial, emitted.data, num.states, num.emits)\n } else if (emodel == "gamma"){\n params <- emissions\n ## Estimate shape\n params[1,] <- emissions[1,]^2 / emissions[2,]^2\n ## Estimate scale\n params[2,] <- emissions[2,]^2 / emissions[1,]\n ## Stay calm and carry on\n V=viterbi.gamma(params, transitions, initial, emitted.data, num.states, num.emits)\n } else if (emodel == "discrete"){\n V=viterbi.discrete(emissions, transitions, initial, emitted.data, num.states, num.emits)\n }\n \n viterbi_path <- matrix(data = rep(0, num.emits), nrow = 1)\n viterbi_path[num.emits] <- which.max(V$Viterbi[,num.emits]); \n viterbi_prob <- V$Viterbi[viterbi_path[num.emits], num.emits]\n \n for (j in num.emits:2){\n viterbi_path[j-1] <- V$pointer[j,viterbi_path[j]]\n }\n\n return(list(viterbi_path=viterbi_path, viterbi_prob=viterbi_prob))\n}\n\n##NORMAL\nviterbi.normal <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){\n pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)\n Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states) \n Viterbi[ ,1] <- initial + dnorm(emitted.data[1], mean = emissions[1, ], sd = emissions[2, ], log = TRUE)\n pointer[1, ] <- 1\n f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}\n for (j in 2:num.emits){\n selection <- Viterbi[,j-1] + transitions\n for (i in 1:num.states){\n maxstate <- which.max(selection[,i])\n Viterbi[i,j] <- dnorm(emitted.data[j], mean = emissions[1, i], sd = emissions[2, i], log = TRUE) + selection[maxstate,i]\n pointer[j,i] <- maxstate \n }\n } \n return(list(Viterbi=Viterbi, pointer=pointer))\n}\n\n##POISSON\nviterbi.poisson <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){\n ## rounds floats to integers\n pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)\n Viterbi <- matrix(rep(0, 
num.states*num.emits), nrow = num.states) \n Viterbi[ ,1] <- initial + dpois(round(emitted.data[1]), lambda = emissions[1, ], log=TRUE)\n pointer[1, ] <- 1\n f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}\n for (j in 2:num.emits){\n selection <- Viterbi[,j-1] + transitions\n for (i in 1:num.states){\n maxstate <- which.max(selection[,i])\n Viterbi[i,j] <- dpois(round(emitted.data[j]), lambda = emissions[1, i], log = TRUE) + selection[maxstate,i]\n pointer[j,i] <- maxstate \n }\n } \n return(list(Viterbi=Viterbi, pointer=pointer))\n}\n\n## EXPONENTIAL\nviterbi.exponential <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){\n pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)\n Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states) \n Viterbi[ ,1] <- initial + dexp(emitted.data[1], rate = emissions[1, ], log=TRUE)\n pointer[1, ] <- 1\n f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}\n for (j in 2:num.emits){\n selection <- Viterbi[,j-1] + transitions\n for (i in 1:num.states){\n maxstate <- which.max(selection[,i])\n Viterbi[i,j] <- dexp(emitted.data[j], rate = emissions[1, i], log = TRUE) + selection[maxstate,i]\n pointer[j,i] <- maxstate \n }\n } \n return(list(Viterbi=Viterbi, pointer=pointer))\n}\n\n\n## GEOMETRIC\nviterbi.geometric <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){\n ## rounds floats to integers\n pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)\n Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states) \n Viterbi[ ,1] <- initial + dgeom(round(emitted.data[1]), prob = emissions[1, ], log=TRUE)\n pointer[1, ] <- 1\n f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}\n for (j in 2:num.emits){\n selection <- Viterbi[,j-1] + transitions\n for (i in 1:num.states){\n maxstate <- which.max(selection[,i])\n Viterbi[i,j] <- dgeom(round(emitted.data[j]), prob = emissions[1, i], log = TRUE) + 
selection[maxstate,i]\n pointer[j,i] <- maxstate \n }\n } \n return(list(Viterbi=Viterbi, pointer=pointer))\n}\n\n## GAMMA\nviterbi.gamma <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){\n pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)\n Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states) \n Viterbi[ ,1] <- initial + dgamma(emitted.data[1], shape = emissions[1, ], scale =emissions[2, ], log=TRUE)\n pointer[1, ] <- 1\n f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}\n for (j in 2:num.emits){\n selection <- Viterbi[,j-1] + transitions\n for (i in 1:num.states){\n maxstate <- which.max(selection[,i])\n Viterbi[i,j] <- dgamma(emitted.data[j], shape = emissions[1, i], scale = emissions[2, i], log = TRUE) + selection[maxstate,i]\n pointer[j,i] <- maxstate \n }\n } \n return(list(Viterbi=Viterbi, pointer=pointer))\n}\n\n\n##DISCRETE\nviterbi.discrete <- function(emissions, transitions, initial, emitted.data, num.states, num.emits){\n ## "emissions" = numstates x numsymbols matrix (rows sum to 1)\n ## rounds floats to integers\n pointer <- matrix(rep(0, num.emits*num.states), nrow = num.emits)\n Viterbi <- matrix(rep(0, num.states*num.emits), nrow = num.states)\n\n ## INITIALIZE AND GO\n Viterbi[ ,1] <- initial + log( emissions[ , emitted.data[1]] )\n pointer[1, ] <- 1\n f <- function(x){i <- which.max(x); y <- x[i]; return(c(i,y))}\n for (j in 2:num.emits){\n selection <- Viterbi[,j-1] + transitions\n for (i in 1:num.states){\n maxstate <- which.max(selection[,i])\n Viterbi[i,j] <- log( emissions[ i, emitted.data[j]] ) + selection[maxstate,i]\n pointer[j,i] <- maxstate \n }\n } \n return(list(Viterbi=Viterbi, pointer=pointer))\n}\n\n\n#### VECTORIZED FORWARDS ########\nforward.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal"){\n num.states <- length(states)\n num.emits <- length(emitted.data)\n Forward <- matrix(data = rep(0, num.states*num.emits), 
nrow = num.states)\n scalefactors <- matrix(data = rep(0, num.emits*2), nrow = 2)\n \n #model\n if (emodel == "normal"){emodel.fxn <- puff.normal}\n else if (emodel == "exponential"){\n emissions[1,] <- 1/emissions[1,]\n emodel.fxn <- puff.exponential\n } else if (emodel == "poisson"){emodel.fxn <- puff.poisson}\n else if (emodel == "geometric"){\n emissions[1,] <- 1/emissions[1,]\n emodel.fxn <- puff.geometric\n } else if (emodel == "gamma") {\n params <- emissions\n ## Estimate shape\n params[1,] <- emissions[1,]^2 / emissions[2,]^2\n ## Estimate scale\n params[2,] <- emissions[2,]^2 / emissions[1,]\n ## Stay calm and carry on\n emissions <- params\n emodel.fxn <- puff.gamma\n }\n \n ## initial\n Forward[, 1] <- initial*emodel.fxn(emitted.data[1], emissions)\n ## scale to prevent underflow -- keep track of scaling\n scalefactors[1,1] <- sum(Forward[, 1])\n scalefactors[2,1] <- log(scalefactors[1,1])\n Forward[,1] <- Forward[,1]/scalefactors[1,1]\n \n ## iterate\n for(k in 2:num.emits){\n emit <- emodel.fxn(emitted.data[k], emissions)\n Forward[, k] <- emit* Forward[,k-1] %*% transitions ## same as emit* Forward[,k-1] * colSums(transitions)\n scalefactors[1,k] <- sum(Forward[, k])\n scalefactors[2,k] <- log(scalefactors[1,k]) + scalefactors[2,k-1]\n Forward[,k] <- Forward[,k]/scalefactors[1,k]\n }\n \n return(list(forward=Forward, scales=scalefactors))\n ## mutiply forward column by row2,samecol in scale factors OR by exp(row3,samecol in scalfactors) to get actual value for forward\n ## update: OR actually I think multiply fwd column by product of [row1,1:samecol] in scale factors\n ## I must have at one point taken out row2 of scale factors (when row3 was logs of row2)\n}\n\n\n\n\n#### VECTORIZED BACKWARDS ########\nbackward.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal"){\n num.states <- length(states)\n num.emits <- length(emitted.data)\n Backward = matrix(data = rep(0, num.states*num.emits), nrow = num.states)\n 
scalefactors <- matrix(data = rep(0, num.emits*2), nrow = 2)\n \n #model\n if (emodel == "normal"){emodel.fxn <- puff.normal}\n else if (emodel == "exponential"){\n emissions[1,] <- 1/emissions[1,]\n emodel.fxn <- puff.exponential\n }\n else if (emodel == "poisson"){emodel.fxn <- puff.poisson}\n else if (emodel == "geometric"){\n emissions[1,] <- 1/emissions[1,]\n emodel.fxn <- puff.geometric\n }\n else if (emodel == "gamma"){\n params <- emissions\n ## Estimate shape\n params[1,] <- emissions[1,]^2 / emissions[2,]^2\n ## Estimate scale\n params[2,] <- emissions[2,]^2 / emissions[1,]\n ## Stay calm and carry on\n emissions <- params\n emodel.fxn <- puff.gamma\n }\n \n ## initial\n Backward[ , num.emits] <- 1\n \n ## scale to prevent underflow -- keep track of scaling\n scalefactors[1,num.emits] <- sum(Backward[, num.emits])\n scalefactors[2,num.emits] <- log(scalefactors[1,num.emits])\n Backward[,num.emits] <- Backward[,num.emits]/scalefactors[1,num.emits]\n \n ## iterate\n for(k in (num.emits-1):1){\n # emit <- matrix(dnorm(emitted.data[k+1], mean = emissions[1, ], sd = emissions[2, ]))\n emit <- matrix(emodel.fxn(emitted.data[k+1], emissions))\n # print(Backward[, k+1] * emit)\n Backward [, k] <- transitions %*% (Backward[, k+1] * emit)\n scalefactors[1,k] <- sum(Backward[, k])\n scalefactors[2,k] <- log(scalefactors[1,k]) + scalefactors[2,k+1]\n Backward[,k] <- Backward[,k]/scalefactors[1,k]\n }\n return(list(backward=Backward, scales=scalefactors))\n}\n\n\npuff.normal <- function(x, emissions){\n dnorm(x, mean = emissions[1, ], sd = emissions[2, ], log=FALSE)\n}\n\npuff.exponential <- function(x, emissions){\n dexp(x = x, rate = emissions[1, ], log = FALSE)\n}\n\npuff.poisson <- function(x, emissions){\n dpois(x = round(x), lambda = emissions[1, ], log = FALSE)\n}\n\npuff.geometric <- function(x, emissions){\n dgeom(x = round(x), prob = emissions[1, ], log = FALSE)\n}\n\n\npuff.gamma <- function(x, emissions){\n dgamma(x = x, shape = emissions[1, ], scale = 
emissions[2, ], log=FALSE)\n}\n\n###\nposterior <- function(Forward, Backward, states){\n ## F and B matrices are from small sequence fxns -- not scaled\n num.states <- length(states)\n num.emits <- dim(Forward)[2]\n posterior.path <- matrix(data = rep(0, num.emits), nrow = 1)\n probs <- matrix(data = rep(0, num.emits), nrow = 1)\n pd <- prob.data(Forward = Forward)\n for (i in 1:num.emits){\n fb <- Forward[,i]*Backward[,i]\n max.state <- which.max(fb)\n posterior.path[i] <- max.state\n # probs[i] <- max(fb)/pd ## should be divided by prob.data...?\n }\n return(list(posterior.path=posterior.path, probs=probs)) \n}\n\ncompare.statepath <- function(sp1, sp2){\n # where sp is a numeric vector or char vector with each state its own element -- not seq format\n total <- length(sp1)\n ident <- sum(sp1 == sp2)\n edit.dist <- total - ident\n return(list(edit.dist=edit.dist, identical.count=ident, pct.id=100*ident/total))\n}\n\n\nbaum.welch.puff <- function(emissions, transitions, initial, states, emitted.data, emodel="normal"){\n #model\n if (emodel == "normal"){emodel.fxn <- puff.normal}\n else if (emodel == "exponential"){emodel.fxn <- puff.exponential}\n else if (emodel == "poisson"){emodel.fxn <- puff.poisson}\n else if (emodel == "geometric"){emodel.fxn <- puff.geometric}\n # emissions, transitions, and initial probs given are "best guesses" (or randomly chosen)\n # calculate log-likelihood of model\n n.states <- length(states)\n n.emits <- length(emitted.data)\n c <- 0.00001\n new.L <- 0\n old.L <- 100 #arbitrary number producing difference > c\n while (abs(new.L - old.L > c)){\n old.L <- new.L\n # emissions, transitions, and initial probs given are "best guesses" (or randomly chosen)\n # calculate log-likelihood of model\n # Get fwd, backward, and prob(data)\n fwd <- forward.puff(emissions, transitions, initial, states, emitted.data, emodel)\n bck <- backward.puff(emissions, transitions, initial, states, emitted.data, emodel)\n p <- prob.data(Forward = fwd$forward)\n 
new.L <- log10(p)\n #update initial, transition, emissions\n # calc new log likelihood of model\n # calc difference between new and old log likelihood\n # if diff > cutoff, return to fwd/bck/p step; else done \n TRANS <- update.transitions(n.states, n.emits, fwd, bck, transitions, emissions)\n EMIS <- update.emissions()\n }\n}\n\n\n# update.transitions <- function(n.states, n.emits, fwd, bck, transitions, emissions){\n# TRANS <- matrix(rep(0, n.states*n.states), nrow=n.states)\n# for (i in 1:n.states){\n# for (k in 1:n.states){\n# for (m in 1:(n.emits-1)){\n# TRANS[i,k] <- TRANS[i,k] + fwd[i,m] * transitions[i,k] * ##TODO#emissions[k,emitted.data[1,m+1]## * bck[k,m+1]\n# }\n# }\n# }\n# return(TRANS)\n# }\n\n# update.emissions <- function(n.states, n.emits){\n# EMIS <- matrix(rep(0, n.states*n.emits), nrow=n.states)\n# for (i in 1:n.states){\n# for (k in 1:)\n# }\n# }\n## if took N=10 backsamples -- ie. N state paths\n## could get mean and std dev (emission dist) for each state by simple count stats\n## could also get transition probs by simple count stats\n\nbacksample.puff <- function(Forward, transitions, n.states=NA, states=NA, n.emits=NA){\n ## TODO vectorize -- e.g. 
eliminate for loop\n if(is.na(n.states) || is.na(states) || is.na(n.emits)){\n dim.fwd <- dim(Forward)\n n.states <- dim.fwd[1]\n states <- 1:n.states\n n.emits <- dim.fwd[2] \n }\n #initialization\n b.sample <- rep(0, n.emits)\n # Randomly sample a state for Sn according to: P(Sn=sn|X1:n) = P(Sn=sn,X1:n)/P(X1:n)\n ## p(data) not nec since it scales all by same number and all proportionally the same\n b.sample[n.emits] <- sample(x = states, size = 1, prob = Forward[,n.emits]) #/p)\n \n ## Iterate for k in n.emits-1 to 1\n ## Randomly sample a state for Sk according to: P(Sk=sk|Sk+1=sk+1, X1:n) \n ## = P(Sk=sk, X1:k)*P(Sk+1=sk+1|Sk=sk)/ [sum(Sk) P(Sk, X1:k)*P(Sk+1=sk+1|Sk)]\n ## = fwd_k(Sk=sk) * trans(Sk+1=sk+1 | Sk=sk) / sum(all states using numerator terms)\n for (k in (n.emits-1):1){\n b.sample[k] <- sample(x = states, size = 1, prob = Forward[,k]*transitions[,b.sample[k+1]]) # no need to re-scale fwd values since they would still be proportionally same\n }\n \n return(b.sample)\n}\n\nn.backsamples <- function(Forward, transitions, N=1000){\n #Forward is Forward matrix (not list object with F and scales)\n # transitions is trans prob matrix\n ## TODO vectorize -- e.g. 
can pick N states for each at once\n dim.fwd <- dim(Forward)\n n.states <- dim.fwd[1]\n states <- 1:n.states\n n.emits <- dim.fwd[2]\n b.samples <- matrix(rep(0, N*n.emits), nrow = N)\n for(i in 1:N){\n b.samples[i,] <- backsample.puff(Forward, transitions, n.states=n.states, states=states, n.emits=n.emits)\n }\n return(b.samples)\n}\n\nbacksample.state.freq <- function(b.samples, n.states, states, N=NA, n.emits=NA){\n #b.samples is a n.bsamples x n.emits matrix output from n.backsamples\n if(is.na(N) || is.na(n.emits)){\n d <- dim(b.samples)\n N <- d[1]\n n.emits <- d[2]\n }\n freq <- matrix(rep(0, n.states*n.emits), nrow = n.states)\n for(i in 1:n.emits){\n freq[,i] <- table(c(states,b.samples[,i]))-1 #adding all states in to ensure all levels are represented followed by subtracting 1 from all counts\n }\n return(freq)\n}\n\n\nbacksample.max.freq.path <- function(freq){\n apply(X = freq, MARGIN = 2, FUN = which.max)\n}\n\n\n##params for 3state\n##initial <- matrix(c(1,1,1)/3, nrow=1)\nemissions <- matrix(rep(0, 6), nrow=2)\nemissions[1,] <- c(-0.5,0,0.5)\nemissions[2,] <- c(0.5,0.5,0.5)\n\n\n##transitions <- matrix(rep(0,9),nrow=3)\n##transitions[1,] <- c(0.99, 0.005, 0.005)\n##transitions[2,] <- c(0.005,0.99,0.005)\n##transitions[3,] <- c(0.005,0.005,0.99)\n#\ninitial <- matrix(c(0.006,0.988,0.006), nrow=1)\ntransitions <- matrix(rep(0,9),nrow=3)\ntransitions[1,] <- c(0.99998, 0.00001, 0.00001)\ntransitions[2,] <- c(0.000000125,0.99999975,0.000000125)\ntransitions[3,] <- c(0.00001,0.00001,0.99998)\n\n\n## params for 7state\n##initial7 <- matrix(rep(1,7)/7, nrow=1)\ninitial7 <- matrix(c(0.997,rep(0.0005,6)), nrow=1)\n\ns <- c(1,sqrt(2),2,sqrt(8),4,sqrt(32),8) \nm <- c(1,2,4,8,16,32,64) \n\n##For exponential and geometric\n###m <- 1/m\n\n\nemissions7 <- matrix(rep(0, 14), nrow=2)\nemissions7[1,] <- m\nemissions7[2,] <- s\n\n\n\ntransitions7 <- matrix(rep(0,49),nrow=7)\nfor(i in 1:7){\n transitions7[i,1:7] <- 0.001 #0.000001\n transitions7[i,i] <- 0.999 
#0.999999\n## transitions7[i,1:7] <- 0.000001\n## transitions7[i,i] <- 0.999999\n # if(i>1){transitions7[i,i-1] <- 0.000005}\n # if(i<7){transitions7[i,i+1] <- 0.000005}\n transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])\n}\n\n##transitions7 <- matrix(rep(0,49),nrow=7)\n##for(i in 1:7){\n## transitions7[i,1:7] <- 0.0001 \n## transitions7[i,i] <- 0.9999 \n## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])\n##}\n\n##transitions7 <- matrix(rep(0,49),nrow=7)\n##for(i in 1:7){\n## if(i>1){\n## for (j in 1:(i-1)){transitions7[i,(i-j)] <- 0.0001/j}\n## }\n## if (i<7){\n## for (j in 1:(7-i)){transitions7[i,(i+j)] <- 0.0001/j}\n## }\n## transitions7[i,i] <- 0.9999 \n## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])\n##}\n\n##transitions7 <- matrix(rep(0,49),nrow=7)\n##for(i in 1:7){\n## if(i>1){\n## for (j in 1:(i-1)){transitions7[i,(i-j)] <- 0.0001/j^3}\n## }\n## if (i<7){\n## for (j in 1:(7-i)){transitions7[i,(i+j)] <- 0.0001/j^3}\n## }\n## transitions7[i,i] <- 0.9999 \n## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])\n##}\n\n##transitions7 <- matrix(rep(0,49),nrow=7)\n##for(i in 1:7){\n## if(i>1){\n## for (j in 1:(i-1)){transitions7[i,(i-j)] <- 0.001/j^3}\n## }\n## if (i<7){\n## for (j in 1:(7-i)){transitions7[i,(i+j)] <- 0.001/j^3}\n## }\n## transitions7[i,i] <- 0.999\n## transitions7[i,] <- transitions7[i,]/sum(transitions7[i,])\n##}\n\n\ngenerate.normal <- function(n, mu, sig){\n rnorm(n, mean = mu, sd = sig)\n}\n\ngenerate.exponential <- function(n, mu, sig){\n ## assumes mu already 1/mu_given\n ## sig is just dummy var\n rexp(n, rate=mu)\n}\n\ngenerate.poisson <- function(n, mu, sig){\n ## mu is rounded\n rpois(n, lambda = round(mu))\n}\n\ngenerate.geometric <- function(n, mu, sig){\n ## assumes mu is 1/mu_given\n ## sig is just dummy var\n rgeom(n, prob = mu)\n}\n\n##generate_statepath <- function(transitions, initial, states, len=10){\n## statenums <- 1:length(states)\n## statepath <- vector(mode="integer", 
length=len)\n## # INITIAL\n## statepath[1] <- sample(statenums, size = 1, prob = initial)\n## ## TRANSITIONS\n## for (i in 2:len){\n## statepath[i] <- sample(statenums, size=1, prob = transitions[statepath[i-1], ])\n## }\n## return(statepath)\n##}\n##\n##generate_emitted_data <- function(emissions, statepath, emodel = \'normal\'){\n## #model\n## if (emodel == "normal"){emodel.fxn <- generate.normal}\n## else if (emodel == "exponential"){emodel.fxn <- generate.exponential}\n## else if (emodel == "poisson"){emodel.fxn <- generate.poisson}\n## else if (emodel == "geometric"){emodel.fxn <- generate.geometric}\n##\n## statepathlen = length(statepath)\n## emitted_data <- vector(mode=\'numeric\', length=statepathlen)\n## for (i in 1:statepathlen){\n## emitted_data[i] <- emodel.fxn(n=1, mu=emissions[1, statepath[i]], sig=emissions[2, statepath[i]])\n## }\n## return(emitted_data)\n##}\n\n\ngenerate <- function(emissions, transitions, initial, states, statepathlen=10, emodel="normal"){\n #model\n if (emodel == "normal"){emodel.fxn <- generate.normal}\n else if (emodel == "exponential"){emodel.fxn <- generate.exponential}\n else if (emodel == "poisson"){emodel.fxn <- generate.poisson}\n else if (emodel == "geometric"){emodel.fxn <- generate.geometric}\n\n ## Ensure states are indexes\n statenums <- 1:length(states)\n statepath <- vector(mode="integer", length=statepathlen)\n emitted_data <- vector(mode=\'numeric\', length=statepathlen)\n \n # INITIAL\n statepath[1] <- sample(statenums, size = 1, prob = initial)\n emitted_data[1] <- emodel.fxn(n=1, mu=emissions[1, statepath[1]], sig=emissions[2, statepath[1]])\n \n ## TRANSITIONS\n for (i in 2:statepathlen){\n statepath[i] <- sample(statenums, size=1, prob = transitions[statepath[i-1], ])\n emitted_data[i] <- emodel.fxn(n=1, mu=emissions[1, statepath[i]], sig=emissions[2, statepath[i]])\n }\n return(list(statepath, emitted_data))\n}\n' |
# -----------------
# User Instructions
#
# In this problem, you will generalize the bridge problem
# by writing a function bridge_problem3, that makes a call
# to lowest_cost_search.
def bridge_problem3(here):
    """Find the fastest (least elapsed time) path to the goal in the
    bridge problem, starting with everyone (and the light) on the
    `here` side.

    here: iterable of people, each represented by their crossing time.
    Returns the lowest-cost path found by lowest_cost_search (a list
    alternating states and (action, total_cost) pairs), or [] on failure.
    """
    # Initial state: all people plus the light on the near side, far side empty.
    start = (frozenset(here) | frozenset(['light']), frozenset())
    def is_goal(state):
        # Goal: nobody (or only the light) left on the near side.
        # BUG FIX: the original compared the outer list `here` to a set
        # (`here == set(['light'])`), which is always False; the check must
        # inspect the state's own near side.
        state_here, _ = state
        return state_here == frozenset() or state_here == frozenset(['light'])
    return lowest_cost_search(start, bsuccessors2, is_goal, bcost)
def lowest_cost_search(start, successors, is_goal, action_cost):
    """Return the lowest cost path, starting from start state,
    and considering successors(state) => {state:action,...},
    that ends in a state for which is_goal(state) is true,
    where the cost of a path is the sum of action costs,
    which are given by action_cost(action)."""
    failure = []                 # returned when the frontier is exhausted
    visited = set()              # states already expanded
    frontier = [[start]]         # candidate paths, kept sorted cheapest-first
    while frontier:
        candidate = frontier.pop(0)
        last = final_state(candidate)
        if is_goal(last):
            return candidate
        visited.add(last)
        cost_so_far = path_cost(candidate)
        for next_state, action in successors(last).items():
            if next_state in visited:
                continue
            total = cost_so_far + action_cost(action)
            extended = candidate + [(action, total), next_state]
            add_to_frontier(frontier, extended)
    return failure
def final_state(path):
    """The state at the end of a path (paths alternate states and (action, cost) pairs)."""
    return path[-1]
def path_cost(path):
    """Total cost of a path; it is stored alongside the final action in path[-2]."""
    if len(path) >= 3:
        return path[-2][1]
    return 0
def add_to_frontier(frontier, path):
    """Add path to frontier, keeping at most one (the cheapest) path per end state."""
    # Locate an existing path that ends in the same state, if any.
    target = final_state(path)
    old = next((i for i, p in enumerate(frontier) if final_state(p) == target), None)
    if old is not None:
        if path_cost(frontier[old]) < path_cost(path):
            return               # existing path is strictly cheaper; keep it
        del frontier[old]        # new path is at least as cheap; drop the old one
    frontier.append(path)
    frontier.sort(key=path_cost)
def bsuccessors2(state):
    """Return a dict of {state:action} pairs. A state is a (here, there) tuple,
    where here and there are frozensets of people (indicated by their times)
    and/or the light.

    BUG FIX: the original filtered with `a is not 'light'` -- an identity test
    against a string literal, which is implementation-dependent (and raises a
    SyntaxWarning on modern CPython). Use inequality instead.
    """
    here, there = state
    if 'light' in here:
        # One or two people (a == b allowed) carry the light across.
        return dict(((here - frozenset([a, b, 'light']),
                      there | frozenset([a, b, 'light'])),
                     (a, b, '->'))
                    for a in here if a != 'light'
                    for b in here if b != 'light')
    else:
        # One or two people bring the light back.
        return dict(((here | frozenset([a, b, 'light']),
                      there - frozenset([a, b, 'light'])),
                     (a, b, '<-'))
                    for a in there if a != 'light'
                    for b in there if b != 'light')
def bcost(action):
    """Cost of a bridge action: the pair crosses at the slower person's speed."""
    # An action is an (a, b, arrow) tuple; a and b are times; arrow is a string.
    a, b, _arrow = action
    return b if b > a else a
def test():
    """Regression test: the classic 1/2/5/10 instance has a known optimal path."""
    here = [1, 2, 5, 10]
    expected = [
        (frozenset([1, 2, 'light', 10, 5]), frozenset([])),
        ((2, 1, '->'), 2),
        (frozenset([10, 5]), frozenset([1, 2, 'light'])),
        ((2, 2, '<-'), 4),
        (frozenset(['light', 10, 2, 5]), frozenset([1])),
        ((5, 10, '->'), 14),
        (frozenset([2]), frozenset([1, 10, 5, 'light'])),
        ((1, 1, '<-'), 15),
        (frozenset([1, 2, 'light']), frozenset([10, 5])),
        ((2, 1, '->'), 17),
        (frozenset([]), frozenset([1, 10, 2, 5, 'light']))]
    assert bridge_problem3(here) == expected
    return 'test passes'
print(test())
| def bridge_problem3(here):
"""Find the fastest (least elapsed time) path to
the goal in the bridge problem."""
start = (frozenset(here) | frozenset(['light']), frozenset())
def is_goal(state):
(state_here, _) = state
return state_here == frozenset() or here == set(['light'])
return lowest_cost_search(start, bsuccessors2, is_goal, bcost)
def lowest_cost_search(start, successors, is_goal, action_cost):
    """Return the lowest cost path, starting from start state,
    and considering successors(state) => {state:action,...},
    that ends in a state for which is_goal(state) is true,
    where the cost of a path is the sum of action costs,
    which are given by action_cost(action)."""
    fail = []            # failure sentinel returned when the frontier empties
    explored = set()     # states already expanded
    frontier = [[start]] # candidate paths, kept sorted cheapest-first
    while frontier:
        path = frontier.pop(0)
        state1 = final_state(path)
        if is_goal(state1):
            return path
        explored.add(state1)
        pcost = path_cost(path)
        for (state, action) in successors(state1).items():
            if state not in explored:
                total_cost = pcost + action_cost(action)
                path2 = path + [(action, total_cost), state]
                add_to_frontier(frontier, path2)
    # BUG FIX: original did `return Fail` (capital F), a NameError whenever
    # the search exhausts the frontier; the local is named `fail`.
    return fail
def final_state(path):
    """The current (last) state of a path, which alternates states and (action, cost) pairs."""
    *_, last = path
    return last
def path_cost(path):
    """Total cost of a path, stored as the second element of the final (action, cost) pair."""
    return path[-2][1] if len(path) >= 3 else 0
def add_to_frontier(frontier, path):
    """Insert path into frontier, replacing a costlier path to the same end state."""
    end = final_state(path)
    duplicate = None
    for idx in range(len(frontier)):
        if final_state(frontier[idx]) == end:
            duplicate = idx
            break
    if duplicate is not None:
        if path_cost(frontier[duplicate]) < path_cost(path):
            return  # existing path is strictly cheaper; discard the new one
        del frontier[duplicate]
    frontier.append(path)
    frontier.sort(key=path_cost)
def bsuccessors2(state):
    """Return a dict of {state:action} pairs. A state is a (here, there) tuple,
    where here and there are frozensets of people (indicated by their times) and/or
    the light.

    BUG FIX: filtering used `a is not 'light'` (identity comparison against a
    string literal, which is implementation-defined and a SyntaxWarning on
    modern CPython); replaced with `!=`.
    """
    (here, there) = state
    if 'light' in here:
        # One or two people (a == b allowed) carry the light across.
        return dict((((here - frozenset([a, b, 'light']), there | frozenset([a, b, 'light'])), (a, b, '->'))
                     for a in here if a != 'light' for b in here if b != 'light'))
    else:
        # One or two people bring the light back.
        return dict((((here | frozenset([a, b, 'light']), there - frozenset([a, b, 'light'])), (a, b, '<-'))
                     for a in there if a != 'light' for b in there if b != 'light'))
def bcost(action):
    """Returns the cost (a number) of an action in the bridge problem.

    An action is an (a, b, arrow) triple; the pair crosses at the slower
    person's speed, so the cost is the larger time.
    """
    return max(action[0], action[1])
def test():
    """Regression test for bridge_problem3 (defined elsewhere in this file):
    verifies the optimal 17-minute crossing schedule for people with
    crossing times 1, 2, 5 and 10."""
    here = [1, 2, 5, 10]
    # The expected value alternates (here, there) states with (action, total_cost)
    # pairs; the final cumulative cost is 17.
    assert bridge_problem3(here) == [(frozenset([1, 2, 'light', 10, 5]), frozenset([])), ((2, 1, '->'), 2), (frozenset([10, 5]), frozenset([1, 2, 'light'])), ((2, 2, '<-'), 4), (frozenset(['light', 10, 2, 5]), frozenset([1])), ((5, 10, '->'), 14), (frozenset([2]), frozenset([1, 10, 5, 'light'])), ((1, 1, '<-'), 15), (frozenset([1, 2, 'light']), frozenset([10, 5])), ((2, 1, '->'), 17), (frozenset([]), frozenset([1, 10, 2, 5, 'light']))]
    return 'test passes'
print(test()) |
#
# PySNMP MIB module Unisphere-Data-IP-PROFILE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Unisphere-Data-IP-PROFILE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:31:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Unsigned32, iso, Counter32, ObjectIdentity, ModuleIdentity, Bits, IpAddress, Counter64, MibIdentifier, TimeTicks, Integer32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Unsigned32", "iso", "Counter32", "ObjectIdentity", "ModuleIdentity", "Bits", "IpAddress", "Counter64", "MibIdentifier", "TimeTicks", "Integer32", "NotificationType")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
usDataMibs, = mibBuilder.importSymbols("Unisphere-Data-MIBs", "usDataMibs")
UsdEnable, UsdSetMap, UsdName = mibBuilder.importSymbols("Unisphere-Data-TC", "UsdEnable", "UsdSetMap", "UsdName")
usdIpProfileMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26))
usdIpProfileMIB.setRevisions(('2001-01-24 20:06', '2000-05-08 00:00', '1999-08-25 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: usdIpProfileMIB.setRevisionsDescriptions(('Deprecated usdIpProfileRowStatus; the table is now dense and populated as a side-effect of creation of an entry in the usdProfileNameTable in Unisphere-Data-PROFILE-MIB. Also, added usdIpProfileSetMap and usdIpProfileSrcAddrValidEnable.', 'Obsoleted usdIpProfileLoopbackIfIndex, replacing it with usdIpProfileLoopback.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: usdIpProfileMIB.setLastUpdated('200101242006Z')
if mibBuilder.loadTexts: usdIpProfileMIB.setOrganization('Unisphere Networks Inc.')
if mibBuilder.loadTexts: usdIpProfileMIB.setContactInfo(' Unisphere Networks, Inc. Postal: 10 Technology Park Drive Westford MA 01886 USA Tel: +1 978 589 5800 Email: mib@UnisphereNetworks.com')
if mibBuilder.loadTexts: usdIpProfileMIB.setDescription('The IP Profile MIB for the Unisphere Networks Inc. enterprise.')
usdIpProfileObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1))
usdIpProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1))
usdIpProfileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1), )
if mibBuilder.loadTexts: usdIpProfileTable.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileTable.setDescription('The entries in this table describe profiles for configuring IP interfaces. Entries in this table are created/deleted as a side-effect of corresponding operations to the usdProfileNameTable in the Unisphere-Data-PROFILE-MIB.')
usdIpProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1), ).setIndexNames((0, "Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileId"))
if mibBuilder.loadTexts: usdIpProfileEntry.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileEntry.setDescription('A profile describing configuration of an IP interface.')
usdIpProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: usdIpProfileId.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileId.setDescription('The integer identifier associated with this profile. A value for this identifier is determined by locating or creating a profile name in the usdProfileNameTable.')
usdIpProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileRowStatus.setStatus('deprecated')
if mibBuilder.loadTexts: usdIpProfileRowStatus.setDescription("Controls creation/deletion of entries in this table. Only the values 'createAndGo' and 'destroy' may be SET. The value of usdIpProfileId must match that of a profile name configured in usdProfileNameTable.")
usdIpProfileRouterName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 3), UsdName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileRouterName.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileRouterName.setDescription('The virtual router to which an IP interface configured by this profile will be assigned, if other mechanisms do not otherwise specify a virtual router assignment.')
usdIpProfileIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 4), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileIpAddr.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileIpAddr.setDescription('An IP address to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
usdIpProfileIpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileIpMask.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileIpMask.setDescription('An IP address mask to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
usdIpProfileDirectedBcastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 6), UsdEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileDirectedBcastEnable.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileDirectedBcastEnable.setDescription('Enable/disable forwarding of directed broadcasts on this IP network interface.')
usdIpProfileIcmpRedirectEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 7), UsdEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileIcmpRedirectEnable.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileIcmpRedirectEnable.setDescription('Enable/disable transmission of ICMP Redirect messages on this IP network interface.')
usdIpProfileAccessRoute = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 8), UsdEnable().clone('enable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileAccessRoute.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileAccessRoute.setDescription('Enable/disable whether a host route is automatically created for a remote host attached to an IP interface that is configured using this profile.')
usdIpProfileMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(512, 10240), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileMtu.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileMtu.setDescription('The configured MTU size for this IP network interface. If set to zero, the default MTU size, as determined by the underlying network media, is used.')
usdIpProfileLoopbackIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 10), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileLoopbackIfIndex.setStatus('obsolete')
if mibBuilder.loadTexts: usdIpProfileLoopbackIfIndex.setDescription('For unnumbered interfaces, the IfIndex of the IP loopback interface whose IP address is used as the source address for transmitted IP packets. A value of zero means the loopback interface is unspecified (e.g., when the interface is numbered).')
usdIpProfileLoopback = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileLoopback.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileLoopback.setDescription("The number of the loopback interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. For example, if the loopback interface for the associated router was configured via the console as 'loopback 2', this object would contain the integer value 2. A value of -1 indicates the loopback interface is unspecified, e.g., when the IP interface is numbered.")
usdIpProfileSetMap = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 12), UsdSetMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileSetMap.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileSetMap.setDescription("A bitmap representing which objects in this entry have been explicitly configured. See the definition of the UsdSetMap TEXTUAL-CONVENTION for details of use. The INDEX object(s) and this object are excluded from representation (i.e. their bits are never set). When a SET request does not explicitly configure UsdSetMap, bits in UsdSetMap are set as a side-effect of configuring other profile attributes in the same entry. If, however, a SET request explicitly configures UsdSetMap, the explicitly configured value overrides 1) any previous bit settings, and 2) any simultaneous 'side-effect' settings that would otherwise occur. Once set, bits can only be cleared by explicitly configuring UsdSetMap.")
usdIpProfileSrcAddrValidEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 13), UsdEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdIpProfileSrcAddrValidEnable.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileSrcAddrValidEnable.setDescription('Enable/disable whether source addresses in received IP packets are validated. Validation is performed by looking up the source IP address in the routing database and determining whether the packet arrived on the expected interface; if not, the packet is discarded.')
usdIpProfileMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4))
usdIpProfileMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1))
usdIpProfileMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2))
usdIpProfileCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 1)).setObjects(("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdIpProfileCompliance = usdIpProfileCompliance.setStatus('obsolete')
if mibBuilder.loadTexts: usdIpProfileCompliance.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when usdIpProfileLoopback replaced usdIpProfileLoopbackIfIndex.')
usdIpProfileCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 2)).setObjects(("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdIpProfileCompliance1 = usdIpProfileCompliance1.setStatus('obsolete')
if mibBuilder.loadTexts: usdIpProfileCompliance1.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when usdIpProfileRowStatus was deprecate and the usdIpProfileSetMap and usdIpProfileSrcAddrValidEnable objects were added.')
usdIpProfileCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 3)).setObjects(("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileGroup2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdIpProfileCompliance2 = usdIpProfileCompliance2.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileCompliance2.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating UsdSetMap.')
usdIpProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 1)).setObjects(("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileRowStatus"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileRouterName"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIpAddr"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIpMask"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileDirectedBcastEnable"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIcmpRedirectEnable"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileAccessRoute"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileMtu"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileLoopbackIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdIpProfileGroup = usdIpProfileGroup.setStatus('obsolete')
if mibBuilder.loadTexts: usdIpProfileGroup.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Unisphere product. This group became obsolete when usdIpProfileLoopback replaced usdIpProfileLoopbackIfIndex.')
usdIpProfileGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 2)).setObjects(("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileRowStatus"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileRouterName"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIpAddr"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIpMask"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileDirectedBcastEnable"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIcmpRedirectEnable"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileAccessRoute"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileMtu"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileLoopback"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdIpProfileGroup1 = usdIpProfileGroup1.setStatus('obsolete')
if mibBuilder.loadTexts: usdIpProfileGroup1.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Unisphere product. This group became obsolete when usdIpProfileRowStatus was deprecate and the usdIpProfileSetMap and usdIpProfileSrcAddrValidEnable objects were added.')
usdIpProfileGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 3)).setObjects(("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileRouterName"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIpAddr"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIpMask"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileDirectedBcastEnable"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileIcmpRedirectEnable"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileAccessRoute"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileMtu"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileLoopback"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileSetMap"), ("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileSrcAddrValidEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdIpProfileGroup2 = usdIpProfileGroup2.setStatus('current')
if mibBuilder.loadTexts: usdIpProfileGroup2.setDescription('The basic collection of objects providing management of IP Profile functionality in a Unisphere product.')
usdIpProfileDeprecatedGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 4)).setObjects(("Unisphere-Data-IP-PROFILE-MIB", "usdIpProfileRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdIpProfileDeprecatedGroup = usdIpProfileDeprecatedGroup.setStatus('deprecated')
if mibBuilder.loadTexts: usdIpProfileDeprecatedGroup.setDescription('Deprecated object providing management of IP Profile functionality in a Juniper product. This group has been deprecated but may still be supported on some implementations.')
mibBuilder.exportSymbols("Unisphere-Data-IP-PROFILE-MIB", usdIpProfileLoopback=usdIpProfileLoopback, usdIpProfileMtu=usdIpProfileMtu, usdIpProfile=usdIpProfile, usdIpProfileMIBConformance=usdIpProfileMIBConformance, usdIpProfileGroup=usdIpProfileGroup, usdIpProfileCompliance=usdIpProfileCompliance, usdIpProfileRowStatus=usdIpProfileRowStatus, usdIpProfileIcmpRedirectEnable=usdIpProfileIcmpRedirectEnable, usdIpProfileGroup1=usdIpProfileGroup1, usdIpProfileEntry=usdIpProfileEntry, usdIpProfileMIBCompliances=usdIpProfileMIBCompliances, usdIpProfileMIBGroups=usdIpProfileMIBGroups, usdIpProfileId=usdIpProfileId, usdIpProfileAccessRoute=usdIpProfileAccessRoute, usdIpProfileSrcAddrValidEnable=usdIpProfileSrcAddrValidEnable, usdIpProfileIpAddr=usdIpProfileIpAddr, usdIpProfileSetMap=usdIpProfileSetMap, usdIpProfileTable=usdIpProfileTable, usdIpProfileGroup2=usdIpProfileGroup2, usdIpProfileDeprecatedGroup=usdIpProfileDeprecatedGroup, usdIpProfileIpMask=usdIpProfileIpMask, usdIpProfileLoopbackIfIndex=usdIpProfileLoopbackIfIndex, usdIpProfileCompliance2=usdIpProfileCompliance2, usdIpProfileMIB=usdIpProfileMIB, usdIpProfileRouterName=usdIpProfileRouterName, usdIpProfileObjects=usdIpProfileObjects, usdIpProfileCompliance1=usdIpProfileCompliance1, usdIpProfileDirectedBcastEnable=usdIpProfileDirectedBcastEnable, PYSNMP_MODULE_ID=usdIpProfileMIB)
| (object_identifier, integer, octet_string) = mibBuilder.importSymbols('ASN1', 'ObjectIdentifier', 'Integer', 'OctetString')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_intersection, constraints_union, value_size_constraint, single_value_constraint, value_range_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsIntersection', 'ConstraintsUnion', 'ValueSizeConstraint', 'SingleValueConstraint', 'ValueRangeConstraint')
(interface_index_or_zero,) = mibBuilder.importSymbols('IF-MIB', 'InterfaceIndexOrZero')
(module_compliance, notification_group, object_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup', 'ObjectGroup')
(mib_scalar, mib_table, mib_table_row, mib_table_column, gauge32, unsigned32, iso, counter32, object_identity, module_identity, bits, ip_address, counter64, mib_identifier, time_ticks, integer32, notification_type) = mibBuilder.importSymbols('SNMPv2-SMI', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Gauge32', 'Unsigned32', 'iso', 'Counter32', 'ObjectIdentity', 'ModuleIdentity', 'Bits', 'IpAddress', 'Counter64', 'MibIdentifier', 'TimeTicks', 'Integer32', 'NotificationType')
(textual_convention, display_string, row_status) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString', 'RowStatus')
(us_data_mibs,) = mibBuilder.importSymbols('Unisphere-Data-MIBs', 'usDataMibs')
(usd_enable, usd_set_map, usd_name) = mibBuilder.importSymbols('Unisphere-Data-TC', 'UsdEnable', 'UsdSetMap', 'UsdName')
usd_ip_profile_mib = module_identity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26))
usdIpProfileMIB.setRevisions(('2001-01-24 20:06', '2000-05-08 00:00', '1999-08-25 00:00'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts:
usdIpProfileMIB.setRevisionsDescriptions(('Deprecated usdIpProfileRowStatus; the table is now dense and populated as a side-effect of creation of an entry in the usdProfileNameTable in Unisphere-Data-PROFILE-MIB. Also, added usdIpProfileSetMap and usdIpProfileSrcAddrValidEnable.', 'Obsoleted usdIpProfileLoopbackIfIndex, replacing it with usdIpProfileLoopback.', 'Initial version of this MIB module.'))
if mibBuilder.loadTexts:
usdIpProfileMIB.setLastUpdated('200101242006Z')
if mibBuilder.loadTexts:
usdIpProfileMIB.setOrganization('Unisphere Networks Inc.')
if mibBuilder.loadTexts:
usdIpProfileMIB.setContactInfo(' Unisphere Networks, Inc. Postal: 10 Technology Park Drive Westford MA 01886 USA Tel: +1 978 589 5800 Email: mib@UnisphereNetworks.com')
if mibBuilder.loadTexts:
usdIpProfileMIB.setDescription('The IP Profile MIB for the Unisphere Networks Inc. enterprise.')
usd_ip_profile_objects = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1))
usd_ip_profile = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1))
usd_ip_profile_table = mib_table((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1))
if mibBuilder.loadTexts:
usdIpProfileTable.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileTable.setDescription('The entries in this table describe profiles for configuring IP interfaces. Entries in this table are created/deleted as a side-effect of corresponding operations to the usdProfileNameTable in the Unisphere-Data-PROFILE-MIB.')
usd_ip_profile_entry = mib_table_row((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1)).setIndexNames((0, 'Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileId'))
if mibBuilder.loadTexts:
usdIpProfileEntry.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileEntry.setDescription('A profile describing configuration of an IP interface.')
usd_ip_profile_id = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 1), unsigned32())
if mibBuilder.loadTexts:
usdIpProfileId.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileId.setDescription('The integer identifier associated with this profile. A value for this identifier is determined by locating or creating a profile name in the usdProfileNameTable.')
usd_ip_profile_row_status = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 2), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileRowStatus.setStatus('deprecated')
if mibBuilder.loadTexts:
usdIpProfileRowStatus.setDescription("Controls creation/deletion of entries in this table. Only the values 'createAndGo' and 'destroy' may be SET. The value of usdIpProfileId must match that of a profile name configured in usdProfileNameTable.")
usd_ip_profile_router_name = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 3), usd_name()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileRouterName.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileRouterName.setDescription('The virtual router to which an IP interface configured by this profile will be assigned, if other mechanisms do not otherwise specify a virtual router assignment.')
usd_ip_profile_ip_addr = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 4), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileIpAddr.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileIpAddr.setDescription('An IP address to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
usd_ip_profile_ip_mask = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 5), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileIpMask.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileIpMask.setDescription('An IP address mask to be used by an IP interface configured by this profile. This object will have a value of 0.0.0.0 for an unnumbered interface.')
usd_ip_profile_directed_bcast_enable = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 6), usd_enable().clone('disable')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileDirectedBcastEnable.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileDirectedBcastEnable.setDescription('Enable/disable forwarding of directed broadcasts on this IP network interface.')
usd_ip_profile_icmp_redirect_enable = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 7), usd_enable().clone('disable')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileIcmpRedirectEnable.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileIcmpRedirectEnable.setDescription('Enable/disable transmission of ICMP Redirect messages on this IP network interface.')
usd_ip_profile_access_route = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 8), usd_enable().clone('enable')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileAccessRoute.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileAccessRoute.setDescription('Enable/disable whether a host route is automatically created for a remote host attached to an IP interface that is configured using this profile.')
usd_ip_profile_mtu = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 9), integer32().subtype(subtypeSpec=constraints_union(value_range_constraint(0, 0), value_range_constraint(512, 10240)))).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileMtu.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileMtu.setDescription('The configured MTU size for this IP network interface. If set to zero, the default MTU size, as determined by the underlying network media, is used.')
usd_ip_profile_loopback_if_index = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 10), interface_index_or_zero()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileLoopbackIfIndex.setStatus('obsolete')
if mibBuilder.loadTexts:
usdIpProfileLoopbackIfIndex.setDescription('For unnumbered interfaces, the IfIndex of the IP loopback interface whose IP address is used as the source address for transmitted IP packets. A value of zero means the loopback interface is unspecified (e.g., when the interface is numbered).')
usd_ip_profile_loopback = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 11), integer32().subtype(subtypeSpec=value_range_constraint(-1, 2147483647)).clone(-1)).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileLoopback.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileLoopback.setDescription("The number of the loopback interface, associated with the specified virtual router, whose IP address is used as the source address when transmitting IP packets on unnumbered remote access user links. For example, if the loopback interface for the associated router was configured via the console as 'loopback 2', this object would contain the integer value 2. A value of -1 indicates the loopback interface is unspecified, e.g., when the IP interface is numbered.")
usd_ip_profile_set_map = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 12), usd_set_map()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileSetMap.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileSetMap.setDescription("A bitmap representing which objects in this entry have been explicitly configured. See the definition of the UsdSetMap TEXTUAL-CONVENTION for details of use. The INDEX object(s) and this object are excluded from representation (i.e. their bits are never set). When a SET request does not explicitly configure UsdSetMap, bits in UsdSetMap are set as a side-effect of configuring other profile attributes in the same entry. If, however, a SET request explicitly configures UsdSetMap, the explicitly configured value overrides 1) any previous bit settings, and 2) any simultaneous 'side-effect' settings that would otherwise occur. Once set, bits can only be cleared by explicitly configuring UsdSetMap.")
usd_ip_profile_src_addr_valid_enable = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 1, 1, 1, 1, 13), usd_enable().clone('disable')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
usdIpProfileSrcAddrValidEnable.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileSrcAddrValidEnable.setDescription('Enable/disable whether source addresses in received IP packets are validated. Validation is performed by looking up the source IP address in the routing database and determining whether the packet arrived on the expected interface; if not, the packet is discarded.')
usd_ip_profile_mib_conformance = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4))
usd_ip_profile_mib_compliances = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1))
usd_ip_profile_mib_groups = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2))
usd_ip_profile_compliance = module_compliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 1)).setObjects(('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ip_profile_compliance = usdIpProfileCompliance.setStatus('obsolete')
if mibBuilder.loadTexts:
usdIpProfileCompliance.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when usdIpProfileLoopback replaced usdIpProfileLoopbackIfIndex.')
usd_ip_profile_compliance1 = module_compliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 2)).setObjects(('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileGroup1'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ip_profile_compliance1 = usdIpProfileCompliance1.setStatus('obsolete')
if mibBuilder.loadTexts:
usdIpProfileCompliance1.setDescription('Obsolete compliance statement for systems supporting IP configuration profiles. This statement became obsolete when usdIpProfileRowStatus was deprecate and the usdIpProfileSetMap and usdIpProfileSrcAddrValidEnable objects were added.')
usd_ip_profile_compliance2 = module_compliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 1, 3)).setObjects(('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileGroup2'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ip_profile_compliance2 = usdIpProfileCompliance2.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileCompliance2.setDescription('The compliance statement for systems supporting IP configuration profiles, incorporating UsdSetMap.')
usd_ip_profile_group = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 1)).setObjects(('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileRowStatus'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileRouterName'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIpAddr'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIpMask'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileDirectedBcastEnable'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIcmpRedirectEnable'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileAccessRoute'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileMtu'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileLoopbackIfIndex'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ip_profile_group = usdIpProfileGroup.setStatus('obsolete')
if mibBuilder.loadTexts:
usdIpProfileGroup.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Unisphere product. This group became obsolete when usdIpProfileLoopback replaced usdIpProfileLoopbackIfIndex.')
usd_ip_profile_group1 = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 2)).setObjects(('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileRowStatus'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileRouterName'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIpAddr'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIpMask'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileDirectedBcastEnable'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIcmpRedirectEnable'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileAccessRoute'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileMtu'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileLoopback'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ip_profile_group1 = usdIpProfileGroup1.setStatus('obsolete')
if mibBuilder.loadTexts:
usdIpProfileGroup1.setDescription('An obsolete collection of objects providing management of IP Profile functionality in a Unisphere product. This group became obsolete when usdIpProfileRowStatus was deprecate and the usdIpProfileSetMap and usdIpProfileSrcAddrValidEnable objects were added.')
usd_ip_profile_group2 = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 3)).setObjects(('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileRouterName'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIpAddr'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIpMask'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileDirectedBcastEnable'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileIcmpRedirectEnable'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileAccessRoute'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileMtu'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileLoopback'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileSetMap'), ('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileSrcAddrValidEnable'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ip_profile_group2 = usdIpProfileGroup2.setStatus('current')
if mibBuilder.loadTexts:
usdIpProfileGroup2.setDescription('The basic collection of objects providing management of IP Profile functionality in a Unisphere product.')
usd_ip_profile_deprecated_group = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 26, 4, 2, 4)).setObjects(('Unisphere-Data-IP-PROFILE-MIB', 'usdIpProfileRowStatus'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ip_profile_deprecated_group = usdIpProfileDeprecatedGroup.setStatus('deprecated')
if mibBuilder.loadTexts:
usdIpProfileDeprecatedGroup.setDescription('Deprecated object providing management of IP Profile functionality in a Juniper product. This group has been deprecated but may still be supported on some implementations.')
mibBuilder.exportSymbols('Unisphere-Data-IP-PROFILE-MIB', usdIpProfileLoopback=usdIpProfileLoopback, usdIpProfileMtu=usdIpProfileMtu, usdIpProfile=usdIpProfile, usdIpProfileMIBConformance=usdIpProfileMIBConformance, usdIpProfileGroup=usdIpProfileGroup, usdIpProfileCompliance=usdIpProfileCompliance, usdIpProfileRowStatus=usdIpProfileRowStatus, usdIpProfileIcmpRedirectEnable=usdIpProfileIcmpRedirectEnable, usdIpProfileGroup1=usdIpProfileGroup1, usdIpProfileEntry=usdIpProfileEntry, usdIpProfileMIBCompliances=usdIpProfileMIBCompliances, usdIpProfileMIBGroups=usdIpProfileMIBGroups, usdIpProfileId=usdIpProfileId, usdIpProfileAccessRoute=usdIpProfileAccessRoute, usdIpProfileSrcAddrValidEnable=usdIpProfileSrcAddrValidEnable, usdIpProfileIpAddr=usdIpProfileIpAddr, usdIpProfileSetMap=usdIpProfileSetMap, usdIpProfileTable=usdIpProfileTable, usdIpProfileGroup2=usdIpProfileGroup2, usdIpProfileDeprecatedGroup=usdIpProfileDeprecatedGroup, usdIpProfileIpMask=usdIpProfileIpMask, usdIpProfileLoopbackIfIndex=usdIpProfileLoopbackIfIndex, usdIpProfileCompliance2=usdIpProfileCompliance2, usdIpProfileMIB=usdIpProfileMIB, usdIpProfileRouterName=usdIpProfileRouterName, usdIpProfileObjects=usdIpProfileObjects, usdIpProfileCompliance1=usdIpProfileCompliance1, usdIpProfileDirectedBcastEnable=usdIpProfileDirectedBcastEnable, PYSNMP_MODULE_ID=usdIpProfileMIB) |
def checkInline(s):
    """Return the index of the first '$' in *s*, or -1 if none is present.

    Replaces the original manual scan that probed successive indices until
    an IndexError; str.find has exactly those semantics.
    """
    return s.find('$')
def checkOutline(s):
    """Return True when *s* is exactly the display-math opening line."""
    marker = "<p class='md-math-block'>$"
    return s == marker
def main():
    """Convert $...$ / block-$ math markers to MathJax \\(...\\) and \\[...\\].

    Reads lines from stdin until the sentinel ':qa!' and writes the converted
    text to ./output.txt.
    """
    # `with` guarantees the file is closed even if input() raises (EOFError);
    # the original only closed it on the ':qa!' path.
    with open("./output.txt", "w") as f:
        while True:
            s = input()
            if s == ':qa!':
                break
            if checkOutline(s):
                # Display-math block: copy lines verbatim until '$</p>'.
                # Backslashes are escaped explicitly ('\\[' == the old '\[' bytes)
                # to avoid invalid-escape SyntaxWarnings on modern Python.
                f.write('<p>\\[\n')
                while True:
                    t = input()
                    if t == '$</p>':
                        f.write('\\]</p>\n')
                        break
                    f.write(t + '\n')
            elif checkInline(s) >= 0:
                # Inline math: rewrite each $...$ pair as \( ... \).
                parts = []
                while checkInline(s) >= 0:
                    i = checkInline(s)
                    parts.append(s[:i] + '\\(')
                    s = s[i + 1:]
                    j = checkInline(s)
                    parts.append(s[:j] + '\\)')
                    s = s[j + 1:]
                parts.append(s)
                f.write(''.join(parts) + '\n')
            else:
                f.write(s + '\n')
main() | def check_inline(s):
i = 0
while True:
try:
if s[i] == '$':
return i
except IndexError:
return -1
i += 1
def check_outline(s):
return s == "<p class='md-math-block'>$"
def main():
f = open('./output.txt', 'w')
while True:
s = input()
if s == ':qa!':
f.close()
break
elif check_outline(s):
f.write('<p>\\[\n')
while True:
t = input()
if t == '$</p>':
f.write('\\]</p>\n')
break
else:
f.write(t + '\n')
elif check_inline(s) >= 0:
converted_string = str()
while check_inline(s) >= 0:
i = check_inline(s)
converted_string += s[:i] + '\\('
s = s[i + 1:]
j = check_inline(s)
converted_string += s[:j] + '\\)'
s = s[j + 1:]
converted_string += s
f.write(converted_string + '\n')
else:
f.write(s + '\n')
main() |
## Read input as specified in the question.
## Print output as specified in the question.
N = int(input())
for i in range(1, N + 1):
    value = i - 1  # constant for the whole row, so hoist it out of the inner loop
    for j in range(i):
        if value == 0:
            print("1", end='')
        elif j == 0 or j == value:
            print(value, end='')
        else:
            print(0, end='')
    print()
| n = int(input())
for i in range(1, N + 1):
for j in range(0, i):
x = i - 1
if x == 0:
print('1', end='')
elif x == j or j == 0:
print(x, end='')
else:
print(0, end='')
print() |
def solution(n):
    """If *n* is a perfect square, return the next perfect square, else -1.

    The original tested ``str(n ** 0.5)[-2] == '.'``, which fails for large n
    (float repr switches to scientific notation) and loses precision; integer
    math.isqrt is exact for all non-negative ints. Raises ValueError for n < 0.
    """
    from math import isqrt  # local import keeps this snippet self-contained
    root = isqrt(n)
    return (root + 1) ** 2 if root * root == n else -1
print(solution(121))
print(solution(3)) | def solution(n):
if str(n ** (1 / 2))[-2] == '.':
return int((n ** (1 / 2) + 1) ** 2)
else:
return -1
print(solution(121))
print(solution(3)) |
# Find the thirteen adjacent digits in the 1000-digit number that have the
# greatest product. What is the value of this product?
# Problem taken from https://projecteuler.net/problem=8
number = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
WINDOW = 13
products = set()
# Bound is len(number) - WINDOW + 1 so the FINAL 13-digit window is included;
# the original `range(len(number) - 13)` was off by one and skipped it.
for i in range(len(number) - WINDOW + 1):
    p = 1
    for digit in number[i:i + WINDOW]:
        p *= int(digit)
    products.add(p)
print(max(products))
| number = '7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
products = set()
for i in range(len(number) - 13):
p = 1
for j in number[i:i + 13]:
p *= int(j)
products.add(p)
print(max(products)) |
# Operation-level fields that may be omitted from a spec; the names match
# Swagger/OpenAPI 2.0 Operation Object fields -- confirm against consumers.
OPTIONAL_FIELDS = [
    'tags', 'consumes', 'produces', 'schemes', 'security',
    'deprecated', 'operationId', 'externalDocs'
]
# Additional top-level sections that only exist in OpenAPI 3.x documents.
OPTIONAL_OAS3_FIELDS = [
    'components', 'servers'
]
| optional_fields = ['tags', 'consumes', 'produces', 'schemes', 'security', 'deprecated', 'operationId', 'externalDocs']
optional_oas3_fields = ['components', 'servers'] |
# Print a number triangle: row `num` prints the value `num`, `num` times,
# so the num == 0 iteration emits only a blank line.
rows=6
for num in range(rows):
    for i in range(num):
        print(num, end=" ")
    print()
'''
Step for num in range(6):
step1:num=0
for i in range(0): 0,0
--------------------- skip
step2:num=1
for i in range(1): 0,1
1
step3:num=2
for i in range(2): 0,1,2
2 2
step4:num=3
for i in range(3): 0,1,2,3
3 3 3
step5:num=4
for i in range(4):
4 4 4 4
step6:num=5
for i in range(5):
5 5 5 5 5
1
2 2
3 3 3
4 4 4 4
5 5 5 5 5
'''
| rows = 6
for num in range(rows):
for i in range(num):
print(num, end=' ')
print()
'\nStep for num in range(6):\n step1:num=0\n for i in range(0): 0,0\n --------------------- skip\n step2:num=1\n for i in range(1): 0,1\n 1\n step3:num=2\n for i in range(2): 0,1,2\n 2 2 \n step4:num=3\n for i in range(3): 0,1,2,3\n 3 3 3\n step5:num=4\n for i in range(4):\n 4 4 4 4\n step6:num=5\n for i in range(5):\n 5 5 5 5 5\n\n1\n2 2\n3 3 3\n4 4 4 4\n5 5 5 5 5\n\n' |
class NotImplementedException(Exception):
    """Raised when a backend method has not been overridden by a subclass."""
class GeolocationBackend(object):
    """Base class for IP-geolocation backends.

    Subclasses implement geolocate() (populate the ``_*`` fields from a
    lookup of ``self._ip``) and _parse() (post-process ``_raw_data``).
    """
    def __init__(self, ip):
        self._ip = ip
        self._continent = None
        self._country = None
        self._geo_data = None
        self._raw_data = None
    def geolocate(self):
        """Perform the lookup. Must be overridden by a concrete backend."""
        raise NotImplementedException()
    def _parse(self):
        """Fill the result fields from the raw response. Must be overridden."""
        raise NotImplementedException()
    def data(self):
        """Return the geolocation result as a dict.

        Bug fix: the keys were bare names (``ip``, ``continent``, ...) which
        raised NameError at runtime; they are now string literals, and the
        ``county`` typo is corrected to ``'country'``.
        """
        self._parse()
        return {
            'ip': self._ip,
            'continent': self._continent,
            'country': self._country,
            'geo': self._geo_data,
            'raw_data': self._raw_data
        }
| class Notimplementedexception(Exception):
pass
class Geolocationbackend(object):
def __init__(self, ip):
self._ip = ip
self._continent = None
self._country = None
self._geo_data = None
self._raw_data = None
def geolocate(self):
raise not_implemented_exception()
def _parse(self):
raise not_implemented_exception()
def data(self):
self._parse()
return {ip: self._ip, continent: self._continent, county: self._country, geo: self._geo_data, raw_data: self._raw_data} |
class Solution:
    def missingNumber(self, arr: List[int]) -> int:
        """Return the missing term of the arithmetic progression *arr*.

        The full series sum is (count + 1) / 2 * (first + last); subtracting
        the actual sum leaves exactly the absent term.
        """
        expected_total = int(((len(arr) + 1) / 2) * (arr[0] + arr[-1]))
        return expected_total - sum(arr)
| class Solution:
def missing_number(self, arr: List[int]) -> int:
x = int((len(arr) + 1) / 2 * (arr[0] + arr[-1]))
return x - sum(arr) |
def deep_merge_dicts(dict1, dict2):
    """Recursively merge two dicts into a new dict.

    Keys present in only one input are copied through. For keys present in
    both, dict1's value wins unless it is itself a dict, in which case the
    two values are merged recursively.
    """
    merged = {}
    # {**dict2, **dict1} fixes both the key set and dict1-wins precedence.
    for key in {**dict2, **dict1}:
        if key not in dict1:
            merged[key] = dict2[key]
        elif key not in dict2:
            merged[key] = dict1[key]
        else:
            first = dict1[key]
            if isinstance(first, dict):
                merged[key] = deep_merge_dicts(first, dict2[key])
            else:
                merged[key] = first
    return merged
| def deep_merge_dicts(dict1, dict2):
output = {}
intersection = {**dict2, **dict1}
for (k_intersect, v_intersect) in intersection.items():
if k_intersect not in dict1:
v_dict2 = dict2[k_intersect]
output[k_intersect] = v_dict2
elif k_intersect not in dict2:
output[k_intersect] = v_intersect
elif isinstance(v_intersect, dict):
v_dict2 = dict2[k_intersect]
output[k_intersect] = deep_merge_dicts(v_intersect, v_dict2)
else:
output[k_intersect] = v_intersect
return output |
def sayHello(name):
    """Print a greeting addressed to *name*."""
    greeting = f'Hello, {name}'
    print(greeting)
| def say_hello(name):
"""say hello to given name"""
print(f'Hello, {name}') |
# Prepare the data (paired samples of equal length, as stats.wilcoxon requires)
piedpiper=np.array([4.57, 4.55, 5.47, 4.67, 5.41, 5.55, 5.53, 5.63, 3.86, 3.97, 5.44, 3.93, 5.31, 5.17, 4.39, 4.28, 5.25])
endframe = np.array([4.27, 3.93, 4.01, 4.07, 3.87, 4. , 4. , 3.72, 4.16, 4.1 , 3.9 , 3.97, 4.08, 3.96, 3.96, 3.77, 4.09])
# Assumption check -- check_normality is defined elsewhere; presumably it
# runs/prints a normality test on the sample (TODO confirm its definition)
check_normality(piedpiper)
check_normality(endframe)
# Select the proper test: Wilcoxon signed-rank, the non-parametric paired test
test, pvalue = stats.wilcoxon(endframe, piedpiper) # 'alternative' defaults to 'two-sided'
print('p-value:%.6f' % pvalue,'>> one_tailed_pval:%.6f'%(pvalue/2))
# One-sided variant: H1 is that endframe values tend to be less than piedpiper
test,one_sided_pvalue = stats.wilcoxon(endframe, piedpiper, alternative = 'less')
print('one sided pvalue:%.6f' % (one_sided_pvalue))
# Decision at the conventional 5% level, using the two-sided p-value
if pvalue < 0.05:
    print('Reject null hypothesis')
else:
    print('Fail to reject null hypothesis')
| piedpiper = np.array([4.57, 4.55, 5.47, 4.67, 5.41, 5.55, 5.53, 5.63, 3.86, 3.97, 5.44, 3.93, 5.31, 5.17, 4.39, 4.28, 5.25])
endframe = np.array([4.27, 3.93, 4.01, 4.07, 3.87, 4.0, 4.0, 3.72, 4.16, 4.1, 3.9, 3.97, 4.08, 3.96, 3.96, 3.77, 4.09])
check_normality(piedpiper)
check_normality(endframe)
(test, pvalue) = stats.wilcoxon(endframe, piedpiper)
print('p-value:%.6f' % pvalue, '>> one_tailed_pval:%.6f' % (pvalue / 2))
(test, one_sided_pvalue) = stats.wilcoxon(endframe, piedpiper, alternative='less')
print('one sided pvalue:%.6f' % one_sided_pvalue)
if pvalue < 0.05:
print('Reject null hypothesis')
else:
print('Fail to reject null hypothesis') |
"""
segmentation.py
SCTE35 Segmentation Descriptor tables.
"""
"""
Table 20 from page 58 of
https://www.scte.org/SCTEDocs/Standards/ANSI_SCTE%2035%202019r1.pdf
"""
# Keyed by the delivery-restriction value; messages per the Table 20
# reference cited in the docstring above.
table20 = {
    0x00: "Restrict Group 0",
    0x01: "Restrict Group 1",
    0x02: "Restrict Group 2",
    0x03: "No Restrictions",
}
"""
table 22 from page 62 of
https://www.scte.org/SCTEDocs/Standards/ANSI_SCTE%2035%202019r1.pdf
I am using the segmentation_type_id as a key.
Segmentation_type_id : segmentation_message
"""
# Keyed by segmentation_type_id (hex); values are the spec's human-readable
# segmentation messages. Grouping comments added for readability.
table22 = {
    0x00: "Not Indicated",
    0x01: "Content Identification",
    # 0x1X: program events
    0x10: "Program Start",
    0x11: "Program End",
    0x12: "Program Early Termination",
    0x13: "Program Breakaway",
    0x14: "Program Resumption",
    0x15: "Program Runover Planned",
    0x16: "Program RunoverUnplanned",
    0x17: "Program Overlap Start",
    0x18: "Program Blackout Override",
    # NOTE(review): '???' below looks like a mangled dash from the source PDF
    # ("Program Start - In Progress") -- confirm against the spec text.
    0x19: "Program Start ??? In Progress",
    # 0x2X: chapter / break / credit events
    0x20: "Chapter Start",
    0x21: "Chapter End",
    0x22: "Break Start",
    0x23: "Break End",
    0x24: "Opening Credit Start",
    0x25: "Opening Credit End",
    0x26: "Closing Credit Start",
    0x27: "Closing Credit End",
    # 0x3X: advertisement / placement-opportunity events
    0x30: "Provider Advertisement Start",
    0x31: "Provider Advertisement End",
    0x32: "Distributor Advertisement Start",
    0x33: "Distributor Advertisement End",
    0x34: "Provider Placement Opportunity Start",
    0x35: "Provider Placement Opportunity End",
    0x36: "Distributor Placement Opportunity Start",
    0x37: "Distributor Placement Opportunity End",
    0x38: "Provider Overlay Placement Opportunity Start",
    0x39: "Provider Overlay Placement Opportunity End",
    0x3A: "Distributor Overlay Placement Opportunity Start",
    0x3B: "Distributor Overlay Placement Opportunity End",
    # 0x4X / 0x5X: unscheduled and network events
    0x40: "Unscheduled Event Start",
    0x41: "Unscheduled Event End",
    0x50: "Network Start",
    0x51: "Network End",
}
| """
segmentation.py
SCTE35 Segmentation Descriptor tables.
"""
'\nTable 20 from page 58 of\nhttps://www.scte.org/SCTEDocs/Standards/ANSI_SCTE%2035%202019r1.pdf\n'
table20 = {0: 'Restrict Group 0', 1: 'Restrict Group 1', 2: 'Restrict Group 2', 3: 'No Restrictions'}
'\ntable 22 from page 62 of\nhttps://www.scte.org/SCTEDocs/Standards/ANSI_SCTE%2035%202019r1.pdf\nI am using the segmentation_type_id as a key.\nSegmentation_type_id : segmentation_message\n'
table22 = {0: 'Not Indicated', 1: 'Content Identification', 16: 'Program Start', 17: 'Program End', 18: 'Program Early Termination', 19: 'Program Breakaway', 20: 'Program Resumption', 21: 'Program Runover Planned', 22: 'Program RunoverUnplanned', 23: 'Program Overlap Start', 24: 'Program Blackout Override', 25: 'Program Start ??? In Progress', 32: 'Chapter Start', 33: 'Chapter End', 34: 'Break Start', 35: 'Break End', 36: 'Opening Credit Start', 37: 'Opening Credit End', 38: 'Closing Credit Start', 39: 'Closing Credit End', 48: 'Provider Advertisement Start', 49: 'Provider Advertisement End', 50: 'Distributor Advertisement Start', 51: 'Distributor Advertisement End', 52: 'Provider Placement Opportunity Start', 53: 'Provider Placement Opportunity End', 54: 'Distributor Placement Opportunity Start', 55: 'Distributor Placement Opportunity End', 56: 'Provider Overlay Placement Opportunity Start', 57: 'Provider Overlay Placement Opportunity End', 58: 'Distributor Overlay Placement Opportunity Start', 59: 'Distributor Overlay Placement Opportunity End', 64: 'Unscheduled Event Start', 65: 'Unscheduled Event End', 80: 'Network Start', 81: 'Network End'} |
# Build-template metadata (cports-style; evaluated by the build system,
# not imported as an ordinary Python module).
pkgname = "pcre"
pkgver = "8.45"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
    "--with-pic",
    "--enable-utf8",
    "--enable-unicode-properties",
    "--enable-pcretest-libedit",
    "--enable-pcregrep-libz",
    "--enable-pcregrep-libbz2",
    "--enable-newline-is-anycrlf",
    "--enable-jit",
    "--enable-static",
    "--disable-stack-for-recursion",
]
hostmakedepends = ["pkgconf"]
makedepends = ["zlib-devel", "libbz2-devel", "libedit-devel"]
pkgdesc = "Perl Compatible Regular Expressions"
maintainer = "q66 <q66@chimera-linux.org>"
license = "BSD-3-Clause"
url = "http://www.pcre.org"
# The f-string expands pkgname/pkgver; $(SOURCEFORGE_SITE) is left literal
# for the build tool to substitute.
source = f"$(SOURCEFORGE_SITE)/{pkgname}/{pkgname}/{pkgver}/{pkgname}-{pkgver}.tar.bz2"
sha256 = "4dae6fdcd2bb0bb6c37b5f97c33c2be954da743985369cddac3546e3218bffb8"
# Cross builds disabled -- presumably because of the JIT/tests; confirm.
options = ["!cross"]
def post_install(self):
    # Ship the PCRE licence file with the package.
    self.install_license("LICENCE")
# The @subpackage decorator and self.* helpers below are provided by the
# build system; each hook returns the file globs owned by its subpackage.
@subpackage("libpcrecpp")
def _libpcrecpp(self):
    self.pkgdesc = f"{pkgdesc} (C++ shared libraries)"
    return ["usr/lib/libpcrecpp.so.*"]
@subpackage("libpcre")
def _libpcre(self):
    self.pkgdesc = f"{pkgdesc} (shared libraries)"
    return self.default_libs()
@subpackage("pcre-devel")
def _devel(self):
    # The development package pulls in the libs its headers reference.
    self.depends += ["zlib-devel", "libbz2-devel"]
    return self.default_devel(man = True, extra = ["usr/share/doc"])
| pkgname = 'pcre'
pkgver = '8.45'
pkgrel = 0
build_style = 'gnu_configure'
configure_args = ['--with-pic', '--enable-utf8', '--enable-unicode-properties', '--enable-pcretest-libedit', '--enable-pcregrep-libz', '--enable-pcregrep-libbz2', '--enable-newline-is-anycrlf', '--enable-jit', '--enable-static', '--disable-stack-for-recursion']
hostmakedepends = ['pkgconf']
makedepends = ['zlib-devel', 'libbz2-devel', 'libedit-devel']
pkgdesc = 'Perl Compatible Regular Expressions'
maintainer = 'q66 <q66@chimera-linux.org>'
license = 'BSD-3-Clause'
url = 'http://www.pcre.org'
source = f'$(SOURCEFORGE_SITE)/{pkgname}/{pkgname}/{pkgver}/{pkgname}-{pkgver}.tar.bz2'
sha256 = '4dae6fdcd2bb0bb6c37b5f97c33c2be954da743985369cddac3546e3218bffb8'
options = ['!cross']
def post_install(self):
self.install_license('LICENCE')
@subpackage('libpcrecpp')
def _libpcrecpp(self):
self.pkgdesc = f'{pkgdesc} (C++ shared libraries)'
return ['usr/lib/libpcrecpp.so.*']
@subpackage('libpcre')
def _libpcre(self):
self.pkgdesc = f'{pkgdesc} (shared libraries)'
return self.default_libs()
@subpackage('pcre-devel')
def _devel(self):
self.depends += ['zlib-devel', 'libbz2-devel']
return self.default_devel(man=True, extra=['usr/share/doc']) |
"""@package gensolver_dumper
Abstract class for dumpers.
"""
class GenSolverDumper:
    def __init__(self, output_file):
        """ Abstract class for creating dumper objects.
        :param str output_file: Path to the file to store output, if output_file == '' then will not output to file.
        """
        self.dump_file = output_file
        if self.dump_file != '':
            # Truncate (or create) the file so later dumps start clean;
            # `with` guarantees the handle is closed even on error.
            with open(self.dump_file, 'w'):
                pass
    def dump(self, dump_info):
        """Write *dump_info* to the output. Subclasses must override.

        Raises NotImplementedError (the conventional abstract-method signal,
        still caught by any existing ``except Exception`` handlers).
        """
        raise NotImplementedError("not implemented in {0}".format(self))
| """@package gensolver_dumper
Abstract class for dumpers.
"""
class Gensolverdumper:
def __init__(self, output_file):
""" Abstract class for creating dumper objects.
:param str output_file: Path to the file to store output, if output_file == '' then will not output to file.
"""
self.dump_file = output_file
if self.dump_file != '':
f = open(self.dump_file, 'w')
f.close()
return
def dump(self, dump_info):
raise exception('not implemented in {0}'.format(self)) |
def m2f_e(s, st):
    """Move-to-front encode *s* against symbol table *st* (mutated in place).

    Returns the list of indices; each encoded symbol is moved to the front
    of *st*. Rewritten as an explicit loop: the original abused a list
    comprehension (`[[...][0] for ...]`) for its insert/pop side effects.
    """
    encoded = []
    for ch in s:
        idx = st.index(ch)
        encoded.append(idx)
        st.insert(0, st.pop(idx))
    return encoded
def m2f_d(sq, st):
    """Move-to-front decode index sequence *sq* against table *st* (mutated).

    Inverse of m2f_e: each index selects a symbol, which is then moved to
    the front of *st*. Rewritten as an explicit loop instead of the original
    side-effecting generator-expression trick.
    """
    chars = []
    for idx in sq:
        chars.append(st[idx])
        st.insert(0, st.pop(idx))
    return ''.join(chars)
ST = list('abcdefghijklmnopqrstuvwxyz')
# Round-trip demo: each word must decode back to itself.
for word in ['broood', 'bananaaa', 'hiphophiphop']:
    encoded = m2f_e(word, ST[:])
    print('%14r encodes to %r' % (word, encoded), end=', ')
    decoded = m2f_d(encoded, ST[:])
    print('decodes back to %r' % decoded)
    assert word == decoded, 'Whoops!'
| def m2f_e(s, st):
return [[st.index(ch), st.insert(0, st.pop(st.index(ch)))][0] for ch in s]
def m2f_d(sq, st):
return ''.join(([st[i], st.insert(0, st.pop(i))][0] for i in sq))
st = list('abcdefghijklmnopqrstuvwxyz')
for s in ['broood', 'bananaaa', 'hiphophiphop']:
encode = m2f_e(s, ST[:])
print('%14r encodes to %r' % (s, encode), end=', ')
decode = m2f_d(encode, ST[:])
print('decodes back to %r' % decode)
assert s == decode, 'Whoops!' |
# Fixture values -- presumably consumed by an infrastructure test suite to
# verify a system-wide pyenv installation; confirm against the callers.
def get_pyenv_root():
    # Directory pyenv is expected to be installed under.
    return "/usr/local/pyenv"
def get_user():
    # Expected owner of the pyenv installation.
    return "root"
def get_group():
    # Expected group of the pyenv installation.
    return "root"
def get_rc_file():
    # Shell profile snippet that should initialise pyenv.
    return "/etc/profile.d/pyenv.sh"
def get_python_test_case():
    # (python_version, expected_installed) pair to check.
    return "3.9.0", True
def get_venv_test_case():
    # (virtualenv_name, expected_present) pair to check.
    return "neovim", True
| def get_pyenv_root():
return '/usr/local/pyenv'
def get_user():
return 'root'
def get_group():
return 'root'
def get_rc_file():
return '/etc/profile.d/pyenv.sh'
def get_python_test_case():
return ('3.9.0', True)
def get_venv_test_case():
return ('neovim', True) |
# https://www.hackerrank.com/challenges/30-testing/forum/comments/138775
# Emit the test-case count followed by each case, one print per case.
cases = [
    "5",
    "5 3\n-1 90 999 100 0",
    "4 2\n0 -1 2 1",
    "3 3\n-1 0 1",
    "6 1\n-1 0 1 -1 2 3",
    "7 3\n-1 0 1 2 3 4 5",
]
for case in cases:
    print(case)
| print('5')
print('5 3\n-1 90 999 100 0')
print('4 2\n0 -1 2 1')
print('3 3\n-1 0 1')
print('6 1\n-1 0 1 -1 2 3')
print('7 3\n-1 0 1 2 3 4 5') |
def inorde_tree_walk(self, vertice = None):
if(self.raiz == None): #arvore vazia
return
if(verice == None): #Por padrao comeca pela raiz
vertice = self.raiz
if(vertice.left != None): #Decomposicao
self.inorde_tree_walk(vertice = vertice.left)
print(vertice)
if(vertice.right != None): #Decomposicao
self.inorde_tree_walk(vertice = vertice.right) | def inorde_tree_walk(self, vertice=None):
if self.raiz == None:
return
if verice == None:
vertice = self.raiz
if vertice.left != None:
self.inorde_tree_walk(vertice=vertice.left)
print(vertice)
if vertice.right != None:
self.inorde_tree_walk(vertice=vertice.right) |
#
# PySNMP MIB module HM2-DHCPS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HM2-DHCPS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:31:20 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint")
HmEnabledStatus, hm2ConfigurationMibs = mibBuilder.importSymbols("HM2-TC-MIB", "HmEnabledStatus", "hm2ConfigurationMibs")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, Bits, Unsigned32, ModuleIdentity, ObjectIdentity, Counter64, Integer32, iso, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, IpAddress, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Bits", "Unsigned32", "ModuleIdentity", "ObjectIdentity", "Counter64", "Integer32", "iso", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "IpAddress", "Gauge32", "NotificationType")
TextualConvention, RowStatus, DisplayString, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "DisplayString", "MacAddress")
hm2DhcpsMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 248, 11, 91))
hm2DhcpsMib.setRevisions(('2012-03-16 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: hm2DhcpsMib.setRevisionsDescriptions(('Initial version.',))
if mibBuilder.loadTexts: hm2DhcpsMib.setLastUpdated('201203160000Z')
if mibBuilder.loadTexts: hm2DhcpsMib.setOrganization('Hirschmann Automation and Control GmbH')
if mibBuilder.loadTexts: hm2DhcpsMib.setContactInfo('Postal: Stuttgarter Str. 45-51 72654 Neckartenzlingen Germany Phone: +49 7127 140 E-mail: hac.support@belden.com')
if mibBuilder.loadTexts: hm2DhcpsMib.setDescription('Hirschmann DHCP server MIB. Copyright (C) 2012. All Rights Reserved.')
hm2DHCPServerMibNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 0))
hm2DHCPServerMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1))
hm2DHCPServerSNMPExtensionGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 3))
hm2DHCPServerGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1))
hm2DHCPServerConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1))
hm2DHCPServerLeaseGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2))
hm2DHCPServerInterfaceGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3))
hm2DHCPServerCounterGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4))
hm2DHCPServerMode = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 1), HmEnabledStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2DHCPServerMode.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerMode.setDescription('Enable or disable DHCP server global.')
hm2DHCPServerMaxPoolEntries = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerMaxPoolEntries.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerMaxPoolEntries.setDescription('Maximum possible entries in hm2DHCPServerPoolTable.')
hm2DHCPServerMaxLeaseEntries = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerMaxLeaseEntries.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerMaxLeaseEntries.setDescription('Maximum possible entries in hm2DHCPServerLeaseTable.')
hm2DHCPServerPoolTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5), )
if mibBuilder.loadTexts: hm2DHCPServerPoolTable.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolTable.setDescription('A table containing the DHCP server pools.')
hm2DHCPServerPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1), ).setIndexNames((0, "HM2-DHCPS-MIB", "hm2DHCPServerPoolIndex"))
if mibBuilder.loadTexts: hm2DHCPServerPoolEntry.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolEntry.setDescription('A logical row in the hm2DHCPServerPoolTable.')
hm2DHCPServerPoolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerPoolIndex.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolIndex.setDescription('The index of hm2DHCPServerPoolTable.')
hm2DHCPServerPoolStartIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolStartIpAddress.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolStartIpAddress.setDescription('The IPv4 address of the first address in the range. The value of hm2DHCPServerPoolStartIpAddress MUST be less than or equal to the value of hm2DHCPServerPoolEndIpAddress.')
hm2DHCPServerPoolEndIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 3), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolEndIpAddress.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolEndIpAddress.setDescription('The IPv4 address of the last address in the range. The value of hm2DHCPServerPoolEndIpAddress MUST be greater than or equal to the value of hm2DHCPServerPoolStartIpAddress.')
hm2DHCPServerPoolLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 4), Unsigned32().clone(86400)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolLeaseTime.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolLeaseTime.setDescription("The pools lease time in number of seconds. A value of 4294967295 SHOULD be used for leases that have a lease time which is 'infinite' and for BOOTP leases.")
hm2DHCPServerPoolFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 5), Bits().clone(namedValues=NamedValues(("interface", 0), ("mac", 1), ("gateway", 2), ("clientid", 3), ("remoteid", 4), ("circuitid", 5), ("dynamic", 6), ("vlanid", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolFlags.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolFlags.setDescription('This object shows the parameters that are used to lease the IP Address.')
hm2DHCPServerPoolIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 6), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolIfIndex.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolIfIndex.setDescription('The index of the interface.')
hm2DHCPServerPoolMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 7), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolMacAddress.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolMacAddress.setDescription('The MAC Address of the entry that is used to lease the IP Address.')
hm2DHCPServerPoolGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 8), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolGateway.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolGateway.setDescription('The IPv4 address of the Gatewayinterface that is used to lease the IP Address.')
hm2DHCPServerPoolClientId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 9), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolClientId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolClientId.setDescription('The Client Identifier of the entry that is used to lease the IP Address.')
hm2DHCPServerPoolRemoteId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 10), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolRemoteId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolRemoteId.setDescription('The Remote Identifier of the entry that is used to lease the IP Address. The Remote Identifier must be send in Option 82 as defined in RFC 3046.')
hm2DHCPServerPoolCircuitId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 11), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolCircuitId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolCircuitId.setDescription('The Cicuit Identifier of the entry that is used to lease the IP Address. The Circuit Identifier must be send in Option 82 as defined in RFC 3046.')
hm2DHCPServerPoolHirschmannClient = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 12), HmEnabledStatus().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolHirschmannClient.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolHirschmannClient.setDescription('Enable or disable Hirschmann Multicast.')
hm2DHCPServerPoolVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 13), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolVlanId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolVlanId.setDescription('The Vlan ID of the entry that is used to lease the IP Address. A value of -1 corresponds to management vlan (the default), any other value (1-4042) represents a specific VLAN')
hm2DHCPServerPoolOptionConfFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 30), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 70))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionConfFileName.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionConfFileName.setDescription('Full specified name of the configuration file e.g. tftp://192.9.200.1/cfg/config1.sav. An empty string zeros the SNAME and the FILE field in the DHCP header.')
hm2DHCPServerPoolOptionGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 31), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionGateway.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionGateway.setDescription('The IPv4 address of the Gateway. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2DHCPServerPoolOptionNetmask = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 32), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionNetmask.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionNetmask.setDescription('The subnet mask. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2DHCPServerPoolOptionWINS = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 33), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionWINS.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionWINS.setDescription('The IPv4 address of the WINS Server. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2DHCPServerPoolOptionDNS = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 34), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionDNS.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionDNS.setDescription('The IPv4 address of the DNS Server. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2DHCPServerPoolOptionHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 35), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionHostname.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolOptionHostname.setDescription('The name of the client (Option 12). An empty string disables the attachment of the option field in the DHCP message.')
hm2DHCPServerPoolMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("config", 2), ("ttdp", 3))).clone('none')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2DHCPServerPoolMethod.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolMethod.setDescription('The source of the DHCP Server Pool. User can set the object to none(1), config(2), ttdp(3).')
hm2DHCPServerPoolErrorStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 99), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerPoolErrorStatus.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolErrorStatus.setDescription('The error Code by create a new Pool.')
hm2DHCPServerPoolRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 100), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2DHCPServerPoolRowStatus.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerPoolRowStatus.setDescription('This object indicates the status of this entry.')
hm2DHCPServerLeaseTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1), )
if mibBuilder.loadTexts: hm2DHCPServerLeaseTable.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseTable.setDescription('A table containing the DHCP server leases.')
hm2DHCPServerLeaseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1), ).setIndexNames((0, "HM2-DHCPS-MIB", "hm2DHCPServerLeasePoolIndex"), (0, "HM2-DHCPS-MIB", "hm2DHCPServerLeaseIpAddress"))
if mibBuilder.loadTexts: hm2DHCPServerLeaseEntry.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseEntry.setDescription('A logical row in the hm2DHCPServerLeaseTable.')
hm2DHCPServerLeasePoolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeasePoolIndex.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeasePoolIndex.setDescription('The index of the hm2DHCPServerPoolTable above.')
hm2DHCPServerLeaseIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseIpAddress.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseIpAddress.setDescription('This is an IP address from the pool with index hm2DHCPServerLeasePoolIndex.')
hm2DHCPServerLeaseState = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("bootp", 1), ("offering", 2), ("requesting", 3), ("bound", 4), ("renewing", 5), ("rebinding", 6), ("declined", 7), ("released", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseState.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseState.setDescription('The state of the lease.')
hm2DHCPServerLeaseTimeRemaining = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseTimeRemaining.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseTimeRemaining.setDescription('The remaining time of the lease configured in hm2DHCPServerPoolLeaseTime.')
hm2DHCPServerLeaseIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseIfIndex.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseIfIndex.setDescription('The interface index where the lease is currently active.')
hm2DHCPServerLeaseClientMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 6), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseClientMacAddress.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseClientMacAddress.setDescription('The MAC Address of the entry that has leased the IP Address.')
hm2DHCPServerLeaseGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 7), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseGateway.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseGateway.setDescription('The IPv4 address of the Gatewayinterface that was used to lease the IP Address.')
hm2DHCPServerLeaseClientId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseClientId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseClientId.setDescription('The Client Identifier of the entry that was used to lease the IP Address.')
hm2DHCPServerLeaseRemoteId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseRemoteId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseRemoteId.setDescription('The Remote Identifier of the entry that was used to lease the IP Address.')
hm2DHCPServerLeaseCircuitId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseCircuitId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseCircuitId.setDescription('The Cicuit Identifier of the entry that was used to lease the IP Address.')
hm2DHCPServerLeaseStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseStartTime.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseStartTime.setDescription('Lease start Time.')
hm2DHCPServerLeaseAction = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("release", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2DHCPServerLeaseAction.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseAction.setDescription('Manually release this ip address for new assignment.')
hm2DHCPServerLeaseVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerLeaseVlanId.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerLeaseVlanId.setDescription('The Vlan ID of the entry that is used to lease the IP Address. A value of -1 corresponds to management vlan (the default), any other value (1-4042) represents a specific VLAN')
hm2DHCPServerIfConfigTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1), )
if mibBuilder.loadTexts: hm2DHCPServerIfConfigTable.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerIfConfigTable.setDescription('A table containing current configuration information for each interface.')
hm2DHCPServerIfConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1, 1), ).setIndexNames((0, "HM2-DHCPS-MIB", "hm2DHCPServerIfConfigIndex"))
if mibBuilder.loadTexts: hm2DHCPServerIfConfigEntry.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerIfConfigEntry.setDescription('A logical row in the hm2DHCPServerIfConfigTable.')
hm2DHCPServerIfConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerIfConfigIndex.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerIfConfigIndex.setDescription('The index of the interface.')
hm2DHCPServerIfConfigMode = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1, 1, 2), HmEnabledStatus().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2DHCPServerIfConfigMode.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerIfConfigMode.setDescription('Enable or disable DHCP server on this interface.')
hm2DHCPServerCounterIfTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2), )
if mibBuilder.loadTexts: hm2DHCPServerCounterIfTable.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterIfTable.setDescription('A table containing current configuration information for each interface.')
hm2DHCPServerCounterIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1), ).setIndexNames((0, "HM2-DHCPS-MIB", "hm2DHCPServerCounterIfIndex"))
if mibBuilder.loadTexts: hm2DHCPServerCounterIfEntry.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterIfEntry.setDescription('A logical row in the hm2DHCPServerCounterIfTable.')
hm2DHCPServerCounterIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterIfIndex.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterIfIndex.setDescription('The index of the interface.')
hm2DHCPServerCounterBootpRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpRequests.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpRequests.setDescription('The number of packets received that contain a Message Type of 1 (BOOTREQUEST) in the first octet and do not contain option number 53 (DHCP Message Type) in the options.')
hm2DHCPServerCounterBootpInvalids = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpInvalids.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpInvalids.setDescription('The number of packets received that do not contain a Message Type of 1 (BOOTREQUEST) in the first octet or are not valid BOOTP packets (e.g., too short, invalid field in packet header).')
hm2DHCPServerCounterBootpReplies = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpReplies.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpReplies.setDescription('The number of packets sent that contain a Message Type of 2 (BOOTREPLY) in the first octet and do not contain option number 53 (DHCP Message Type) in the options.')
hm2DHCPServerCounterBootpDroppedUnknownClients = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpDroppedUnknownClients.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpDroppedUnknownClients.setDescription('The number of BOOTP packets dropped due to the server not recognizing or not providing service to the hardware address received in the incoming packet.')
hm2DHCPServerCounterBootpDroppedNotServingSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpDroppedNotServingSubnet.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterBootpDroppedNotServingSubnet.setDescription('The number of BOOTP packets dropped due to the server not being configured or not otherwise able to serve addresses on the subnet from which this message was received.')
hm2DHCPServerCounterDhcpv4Discovers = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Discovers.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Discovers.setDescription('The number of DHCPDISCOVER (option 53 with value 1) packets received.')
hm2DHCPServerCounterDhcpv4Offers = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Offers.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Offers.setDescription('The number of DHCPOFFER (option 53 with value 2) packets sent.')
hm2DHCPServerCounterDhcpv4Requests = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Requests.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Requests.setDescription('The number of DHCPREQUEST (option 53 with value 3) packets received.')
hm2DHCPServerCounterDhcpv4Declines = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Declines.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Declines.setDescription('The number of DHCPDECLINE (option 53 with value 4) packets received.')
hm2DHCPServerCounterDhcpv4Acks = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Acks.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Acks.setDescription('The number of DHCPACK (option 53 with value 5) packets sent.')
hm2DHCPServerCounterDhcpv4Naks = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Naks.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Naks.setDescription('The number of DHCPNACK (option 53 with value 6) packets sent.')
hm2DHCPServerCounterDhcpv4Releases = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Releases.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Releases.setDescription('The number of DHCPRELEASE (option 53 with value 7) packets received.')
hm2DHCPServerCounterDhcpv4Informs = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Informs.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Informs.setDescription('The number of DHCPINFORM (option 53 with value 8) packets received.')
hm2DHCPServerCounterDhcpv4ForcedRenews = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4ForcedRenews.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4ForcedRenews.setDescription('The number of DHCPFORCERENEW (option 53 with value 9) packets sent.')
hm2DHCPServerCounterDhcpv4Invalids = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Invalids.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4Invalids.setDescription('The number of DHCP packets received whose DHCP message type (i.e., option number 53) is not understood or handled by the server.')
hm2DHCPServerCounterDhcpv4DroppedUnknownClient = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4DroppedUnknownClient.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4DroppedUnknownClient.setDescription('The number of DHCP packets dropped due to the server not recognizing or not providing service to the client-id and/or hardware address received in the incoming packet.')
hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet.setDescription('The number of DHCP packets dropped due to the server not being configured or not otherwise able to serve addresses on the subnet from which this message was received.')
hm2DHCPServerCounterMiscOtherDhcpServer = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 40), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2DHCPServerCounterMiscOtherDhcpServer.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerCounterMiscOtherDhcpServer.setDescription('The number of BOOTP and DHCP packets from another DHCP server seen on this interface.')
hm2DHCPServerRowStatusInvalidConfigurationErrorReturn = ObjectIdentity((1, 3, 6, 1, 4, 1, 248, 11, 91, 3, 1))
if mibBuilder.loadTexts: hm2DHCPServerRowStatusInvalidConfigurationErrorReturn.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerRowStatusInvalidConfigurationErrorReturn.setDescription('DHCP Server pool with index {0} cannot be enabled, errors in data entry.')
hm2DHCPServerConflictDHCPRrelayErrorReturn = ObjectIdentity((1, 3, 6, 1, 4, 1, 248, 11, 91, 3, 2))
if mibBuilder.loadTexts: hm2DHCPServerConflictDHCPRrelayErrorReturn.setStatus('current')
if mibBuilder.loadTexts: hm2DHCPServerConflictDHCPRrelayErrorReturn.setDescription('{0} and {1} cannot be active at the same time.')
mibBuilder.exportSymbols("HM2-DHCPS-MIB", hm2DHCPServerPoolMacAddress=hm2DHCPServerPoolMacAddress, hm2DHCPServerLeaseAction=hm2DHCPServerLeaseAction, hm2DHCPServerIfConfigIndex=hm2DHCPServerIfConfigIndex, hm2DHCPServerMibNotifications=hm2DHCPServerMibNotifications, hm2DHCPServerConfigGroup=hm2DHCPServerConfigGroup, hm2DHCPServerCounterMiscOtherDhcpServer=hm2DHCPServerCounterMiscOtherDhcpServer, hm2DHCPServerCounterDhcpv4Discovers=hm2DHCPServerCounterDhcpv4Discovers, hm2DHCPServerLeaseStartTime=hm2DHCPServerLeaseStartTime, hm2DHCPServerPoolLeaseTime=hm2DHCPServerPoolLeaseTime, hm2DHCPServerPoolErrorStatus=hm2DHCPServerPoolErrorStatus, hm2DHCPServerCounterIfTable=hm2DHCPServerCounterIfTable, hm2DHCPServerLeaseEntry=hm2DHCPServerLeaseEntry, hm2DHCPServerCounterBootpDroppedUnknownClients=hm2DHCPServerCounterBootpDroppedUnknownClients, hm2DHCPServerPoolEntry=hm2DHCPServerPoolEntry, hm2DHCPServerIfConfigMode=hm2DHCPServerIfConfigMode, hm2DHCPServerCounterBootpReplies=hm2DHCPServerCounterBootpReplies, hm2DHCPServerLeaseGroup=hm2DHCPServerLeaseGroup, hm2DHCPServerLeaseClientId=hm2DHCPServerLeaseClientId, hm2DHCPServerCounterIfIndex=hm2DHCPServerCounterIfIndex, hm2DHCPServerCounterDhcpv4Naks=hm2DHCPServerCounterDhcpv4Naks, hm2DHCPServerPoolVlanId=hm2DHCPServerPoolVlanId, hm2DHCPServerIfConfigEntry=hm2DHCPServerIfConfigEntry, hm2DHCPServerCounterDhcpv4ForcedRenews=hm2DHCPServerCounterDhcpv4ForcedRenews, hm2DHCPServerPoolOptionWINS=hm2DHCPServerPoolOptionWINS, hm2DHCPServerInterfaceGroup=hm2DHCPServerInterfaceGroup, hm2DHCPServerPoolOptionNetmask=hm2DHCPServerPoolOptionNetmask, hm2DHCPServerCounterDhcpv4Acks=hm2DHCPServerCounterDhcpv4Acks, hm2DHCPServerCounterDhcpv4Releases=hm2DHCPServerCounterDhcpv4Releases, PYSNMP_MODULE_ID=hm2DhcpsMib, hm2DHCPServerPoolRemoteId=hm2DHCPServerPoolRemoteId, hm2DHCPServerPoolTable=hm2DHCPServerPoolTable, hm2DHCPServerSNMPExtensionGroup=hm2DHCPServerSNMPExtensionGroup, hm2DHCPServerPoolEndIpAddress=hm2DHCPServerPoolEndIpAddress, 
hm2DHCPServerLeaseTable=hm2DHCPServerLeaseTable, hm2DHCPServerLeaseState=hm2DHCPServerLeaseState, hm2DHCPServerMibObjects=hm2DHCPServerMibObjects, hm2DHCPServerPoolRowStatus=hm2DHCPServerPoolRowStatus, hm2DHCPServerCounterDhcpv4Declines=hm2DHCPServerCounterDhcpv4Declines, hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet=hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet, hm2DHCPServerLeasePoolIndex=hm2DHCPServerLeasePoolIndex, hm2DHCPServerPoolMethod=hm2DHCPServerPoolMethod, hm2DHCPServerIfConfigTable=hm2DHCPServerIfConfigTable, hm2DHCPServerCounterDhcpv4Requests=hm2DHCPServerCounterDhcpv4Requests, hm2DhcpsMib=hm2DhcpsMib, hm2DHCPServerRowStatusInvalidConfigurationErrorReturn=hm2DHCPServerRowStatusInvalidConfigurationErrorReturn, hm2DHCPServerCounterIfEntry=hm2DHCPServerCounterIfEntry, hm2DHCPServerCounterBootpDroppedNotServingSubnet=hm2DHCPServerCounterBootpDroppedNotServingSubnet, hm2DHCPServerMaxLeaseEntries=hm2DHCPServerMaxLeaseEntries, hm2DHCPServerLeaseIfIndex=hm2DHCPServerLeaseIfIndex, hm2DHCPServerPoolIndex=hm2DHCPServerPoolIndex, hm2DHCPServerCounterBootpInvalids=hm2DHCPServerCounterBootpInvalids, hm2DHCPServerPoolStartIpAddress=hm2DHCPServerPoolStartIpAddress, hm2DHCPServerPoolFlags=hm2DHCPServerPoolFlags, hm2DHCPServerCounterDhcpv4Informs=hm2DHCPServerCounterDhcpv4Informs, hm2DHCPServerGroup=hm2DHCPServerGroup, hm2DHCPServerCounterGroup=hm2DHCPServerCounterGroup, hm2DHCPServerPoolOptionGateway=hm2DHCPServerPoolOptionGateway, hm2DHCPServerLeaseGateway=hm2DHCPServerLeaseGateway, hm2DHCPServerPoolHirschmannClient=hm2DHCPServerPoolHirschmannClient, hm2DHCPServerLeaseClientMacAddress=hm2DHCPServerLeaseClientMacAddress, hm2DHCPServerCounterDhcpv4Invalids=hm2DHCPServerCounterDhcpv4Invalids, hm2DHCPServerMode=hm2DHCPServerMode, hm2DHCPServerPoolClientId=hm2DHCPServerPoolClientId, hm2DHCPServerLeaseIpAddress=hm2DHCPServerLeaseIpAddress, hm2DHCPServerCounterDhcpv4DroppedUnknownClient=hm2DHCPServerCounterDhcpv4DroppedUnknownClient, 
hm2DHCPServerPoolIfIndex=hm2DHCPServerPoolIfIndex, hm2DHCPServerLeaseRemoteId=hm2DHCPServerLeaseRemoteId, hm2DHCPServerConflictDHCPRrelayErrorReturn=hm2DHCPServerConflictDHCPRrelayErrorReturn, hm2DHCPServerLeaseTimeRemaining=hm2DHCPServerLeaseTimeRemaining, hm2DHCPServerPoolOptionDNS=hm2DHCPServerPoolOptionDNS, hm2DHCPServerCounterBootpRequests=hm2DHCPServerCounterBootpRequests, hm2DHCPServerLeaseVlanId=hm2DHCPServerLeaseVlanId, hm2DHCPServerPoolCircuitId=hm2DHCPServerPoolCircuitId, hm2DHCPServerPoolGateway=hm2DHCPServerPoolGateway, hm2DHCPServerPoolOptionHostname=hm2DHCPServerPoolOptionHostname, hm2DHCPServerLeaseCircuitId=hm2DHCPServerLeaseCircuitId, hm2DHCPServerCounterDhcpv4Offers=hm2DHCPServerCounterDhcpv4Offers, hm2DHCPServerMaxPoolEntries=hm2DHCPServerMaxPoolEntries, hm2DHCPServerPoolOptionConfFileName=hm2DHCPServerPoolOptionConfFileName)
| (octet_string, object_identifier, integer) = mibBuilder.importSymbols('ASN1', 'OctetString', 'ObjectIdentifier', 'Integer')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_union, constraints_intersection, value_size_constraint, value_range_constraint, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsUnion', 'ConstraintsIntersection', 'ValueSizeConstraint', 'ValueRangeConstraint', 'SingleValueConstraint')
(hm_enabled_status, hm2_configuration_mibs) = mibBuilder.importSymbols('HM2-TC-MIB', 'HmEnabledStatus', 'hm2ConfigurationMibs')
(interface_index,) = mibBuilder.importSymbols('IF-MIB', 'InterfaceIndex')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(counter32, bits, unsigned32, module_identity, object_identity, counter64, integer32, iso, mib_identifier, mib_scalar, mib_table, mib_table_row, mib_table_column, time_ticks, ip_address, gauge32, notification_type) = mibBuilder.importSymbols('SNMPv2-SMI', 'Counter32', 'Bits', 'Unsigned32', 'ModuleIdentity', 'ObjectIdentity', 'Counter64', 'Integer32', 'iso', 'MibIdentifier', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'TimeTicks', 'IpAddress', 'Gauge32', 'NotificationType')
(textual_convention, row_status, display_string, mac_address) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'RowStatus', 'DisplayString', 'MacAddress')
hm2_dhcps_mib = module_identity((1, 3, 6, 1, 4, 1, 248, 11, 91))
hm2DhcpsMib.setRevisions(('2012-03-16 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts:
hm2DhcpsMib.setRevisionsDescriptions(('Initial version.',))
if mibBuilder.loadTexts:
hm2DhcpsMib.setLastUpdated('201203160000Z')
if mibBuilder.loadTexts:
hm2DhcpsMib.setOrganization('Hirschmann Automation and Control GmbH')
if mibBuilder.loadTexts:
hm2DhcpsMib.setContactInfo('Postal: Stuttgarter Str. 45-51 72654 Neckartenzlingen Germany Phone: +49 7127 140 E-mail: hac.support@belden.com')
if mibBuilder.loadTexts:
hm2DhcpsMib.setDescription('Hirschmann DHCP server MIB. Copyright (C) 2012. All Rights Reserved.')
hm2_dhcp_server_mib_notifications = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 0))
hm2_dhcp_server_mib_objects = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1))
hm2_dhcp_server_snmp_extension_group = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 3))
hm2_dhcp_server_group = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1))
hm2_dhcp_server_config_group = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1))
hm2_dhcp_server_lease_group = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2))
hm2_dhcp_server_interface_group = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3))
hm2_dhcp_server_counter_group = mib_identifier((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4))
hm2_dhcp_server_mode = mib_scalar((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 1), hm_enabled_status().clone('disable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hm2DHCPServerMode.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerMode.setDescription('Enable or disable DHCP server global.')
hm2_dhcp_server_max_pool_entries = mib_scalar((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 2), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerMaxPoolEntries.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerMaxPoolEntries.setDescription('Maximum possible entries in hm2DHCPServerPoolTable.')
hm2_dhcp_server_max_lease_entries = mib_scalar((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 3), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerMaxLeaseEntries.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerMaxLeaseEntries.setDescription('Maximum possible entries in hm2DHCPServerLeaseTable.')
hm2_dhcp_server_pool_table = mib_table((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5))
if mibBuilder.loadTexts:
hm2DHCPServerPoolTable.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolTable.setDescription('A table containing the DHCP server pools.')
hm2_dhcp_server_pool_entry = mib_table_row((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1)).setIndexNames((0, 'HM2-DHCPS-MIB', 'hm2DHCPServerPoolIndex'))
if mibBuilder.loadTexts:
hm2DHCPServerPoolEntry.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolEntry.setDescription('A logical row in the hm2DHCPServerPoolTable.')
hm2_dhcp_server_pool_index = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 1), unsigned32().subtype(subtypeSpec=value_range_constraint(1, 128))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerPoolIndex.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolIndex.setDescription('The index of hm2DHCPServerPoolTable.')
hm2_dhcp_server_pool_start_ip_address = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 2), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolStartIpAddress.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolStartIpAddress.setDescription('The IPv4 address of the first address in the range. The value of hm2DHCPServerPoolStartIpAddress MUST be less than or equal to the value of hm2DHCPServerPoolEndIpAddress.')
hm2_dhcp_server_pool_end_ip_address = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 3), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolEndIpAddress.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolEndIpAddress.setDescription('The IPv4 address of the last address in the range. The value of hm2DHCPServerPoolEndIpAddress MUST be greater than or equal to the value of hm2DHCPServerPoolStartIpAddress.')
hm2_dhcp_server_pool_lease_time = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 4), unsigned32().clone(86400)).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolLeaseTime.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolLeaseTime.setDescription("The pools lease time in number of seconds. A value of 4294967295 SHOULD be used for leases that have a lease time which is 'infinite' and for BOOTP leases.")
hm2_dhcp_server_pool_flags = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 5), bits().clone(namedValues=named_values(('interface', 0), ('mac', 1), ('gateway', 2), ('clientid', 3), ('remoteid', 4), ('circuitid', 5), ('dynamic', 6), ('vlanid', 7)))).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolFlags.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolFlags.setDescription('This object shows the parameters that are used to lease the IP Address.')
hm2_dhcp_server_pool_if_index = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 6), integer32()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolIfIndex.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolIfIndex.setDescription('The index of the interface.')
hm2_dhcp_server_pool_mac_address = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 7), mac_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolMacAddress.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolMacAddress.setDescription('The MAC Address of the entry that is used to lease the IP Address.')
hm2_dhcp_server_pool_gateway = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 8), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolGateway.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolGateway.setDescription('The IPv4 address of the Gatewayinterface that is used to lease the IP Address.')
hm2_dhcp_server_pool_client_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 9), octet_string()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolClientId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolClientId.setDescription('The Client Identifier of the entry that is used to lease the IP Address.')
hm2_dhcp_server_pool_remote_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 10), octet_string()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolRemoteId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolRemoteId.setDescription('The Remote Identifier of the entry that is used to lease the IP Address. The Remote Identifier must be send in Option 82 as defined in RFC 3046.')
hm2_dhcp_server_pool_circuit_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 11), octet_string()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolCircuitId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolCircuitId.setDescription('The Cicuit Identifier of the entry that is used to lease the IP Address. The Circuit Identifier must be send in Option 82 as defined in RFC 3046.')
hm2_dhcp_server_pool_hirschmann_client = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 12), hm_enabled_status().clone('disable')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolHirschmannClient.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolHirschmannClient.setDescription('Enable or disable Hirschmann Multicast.')
hm2_dhcp_server_pool_vlan_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 13), integer32()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolVlanId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolVlanId.setDescription('The Vlan ID of the entry that is used to lease the IP Address. A value of -1 corresponds to management vlan (the default), any other value (1-4042) represents a specific VLAN')
hm2_dhcp_server_pool_option_conf_file_name = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 30), display_string().subtype(subtypeSpec=value_size_constraint(0, 70))).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionConfFileName.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionConfFileName.setDescription('Full specified name of the configuration file e.g. tftp://192.9.200.1/cfg/config1.sav. An empty string zeros the SNAME and the FILE field in the DHCP header.')
hm2_dhcp_server_pool_option_gateway = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 31), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionGateway.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionGateway.setDescription('The IPv4 address of the Gateway. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2_dhcp_server_pool_option_netmask = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 32), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionNetmask.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionNetmask.setDescription('The subnet mask. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2_dhcp_server_pool_option_wins = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 33), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionWINS.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionWINS.setDescription('The IPv4 address of the WINS Server. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2_dhcp_server_pool_option_dns = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 34), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionDNS.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionDNS.setDescription('The IPv4 address of the DNS Server. A value of 0 disables the attachment of the option field in the DHCP message.')
hm2_dhcp_server_pool_option_hostname = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 35), display_string().subtype(subtypeSpec=value_size_constraint(0, 64))).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionHostname.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolOptionHostname.setDescription('The name of the client (Option 12). An empty string disables the attachment of the option field in the DHCP message.')
hm2_dhcp_server_pool_method = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 36), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3))).clone(namedValues=named_values(('none', 1), ('config', 2), ('ttdp', 3))).clone('none')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hm2DHCPServerPoolMethod.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolMethod.setDescription('The source of the DHCP Server Pool. User can set the object to none(1), config(2), ttdp(3).')
hm2_dhcp_server_pool_error_status = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 99), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerPoolErrorStatus.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolErrorStatus.setDescription('The error Code by create a new Pool.')
hm2_dhcp_server_pool_row_status = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 1, 5, 1, 100), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
hm2DHCPServerPoolRowStatus.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerPoolRowStatus.setDescription('This object indicates the status of this entry.')
hm2_dhcp_server_lease_table = mib_table((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1))
if mibBuilder.loadTexts:
hm2DHCPServerLeaseTable.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseTable.setDescription('A table containing the DHCP server leases.')
hm2_dhcp_server_lease_entry = mib_table_row((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1)).setIndexNames((0, 'HM2-DHCPS-MIB', 'hm2DHCPServerLeasePoolIndex'), (0, 'HM2-DHCPS-MIB', 'hm2DHCPServerLeaseIpAddress'))
if mibBuilder.loadTexts:
hm2DHCPServerLeaseEntry.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseEntry.setDescription('A logical row in the hm2DHCPServerLeaseTable.')
hm2_dhcp_server_lease_pool_index = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 1), unsigned32().subtype(subtypeSpec=value_range_constraint(1, 128))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeasePoolIndex.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeasePoolIndex.setDescription('The index of the hm2DHCPServerPoolTable above.')
hm2_dhcp_server_lease_ip_address = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 2), ip_address()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseIpAddress.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseIpAddress.setDescription('This is an IP address from the pool with index hm2DHCPServerLeasePoolIndex.')
hm2_dhcp_server_lease_state = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=named_values(('bootp', 1), ('offering', 2), ('requesting', 3), ('bound', 4), ('renewing', 5), ('rebinding', 6), ('declined', 7), ('released', 8)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseState.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseState.setDescription('The state of the lease.')
hm2_dhcp_server_lease_time_remaining = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 4), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseTimeRemaining.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseTimeRemaining.setDescription('The remaining time of the lease configured in hm2DHCPServerPoolLeaseTime.')
hm2_dhcp_server_lease_if_index = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 5), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseIfIndex.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseIfIndex.setDescription('The interface index where the lease is currently active.')
hm2_dhcp_server_lease_client_mac_address = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 6), mac_address()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseClientMacAddress.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseClientMacAddress.setDescription('The MAC Address of the entry that has leased the IP Address.')
hm2_dhcp_server_lease_gateway = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 7), ip_address()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseGateway.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseGateway.setDescription('The IPv4 address of the Gatewayinterface that was used to lease the IP Address.')
hm2_dhcp_server_lease_client_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 8), octet_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseClientId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseClientId.setDescription('The Client Identifier of the entry that was used to lease the IP Address.')
hm2_dhcp_server_lease_remote_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 9), octet_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseRemoteId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseRemoteId.setDescription('The Remote Identifier of the entry that was used to lease the IP Address.')
hm2_dhcp_server_lease_circuit_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 10), octet_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseCircuitId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseCircuitId.setDescription('The Cicuit Identifier of the entry that was used to lease the IP Address.')
hm2_dhcp_server_lease_start_time = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 11), unsigned32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseStartTime.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseStartTime.setDescription('Lease start Time.')
hm2_dhcp_server_lease_action = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 12), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('other', 1), ('release', 2)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseAction.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseAction.setDescription('Manually release this ip address for new assignment.')
hm2_dhcp_server_lease_vlan_id = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 2, 1, 1, 13), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseVlanId.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerLeaseVlanId.setDescription('The Vlan ID of the entry that is used to lease the IP Address. A value of -1 corresponds to management vlan (the default), any other value (1-4042) represents a specific VLAN')
hm2_dhcp_server_if_config_table = mib_table((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1))
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigTable.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigTable.setDescription('A table containing current configuration information for each interface.')
hm2_dhcp_server_if_config_entry = mib_table_row((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1, 1)).setIndexNames((0, 'HM2-DHCPS-MIB', 'hm2DHCPServerIfConfigIndex'))
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigEntry.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigEntry.setDescription('A logical row in the hm2DHCPServerIfConfigTable.')
hm2_dhcp_server_if_config_index = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1, 1, 1), interface_index()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigIndex.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigIndex.setDescription('The index of the interface.')
hm2_dhcp_server_if_config_mode = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 3, 1, 1, 2), hm_enabled_status().clone('enable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigMode.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerIfConfigMode.setDescription('Enable or disable DHCP server on this interface.')
hm2_dhcp_server_counter_if_table = mib_table((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2))
if mibBuilder.loadTexts:
hm2DHCPServerCounterIfTable.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterIfTable.setDescription('A table containing current configuration information for each interface.')
hm2_dhcp_server_counter_if_entry = mib_table_row((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1)).setIndexNames((0, 'HM2-DHCPS-MIB', 'hm2DHCPServerCounterIfIndex'))
if mibBuilder.loadTexts:
hm2DHCPServerCounterIfEntry.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterIfEntry.setDescription('A logical row in the hm2DHCPServerCounterIfTable.')
hm2_dhcp_server_counter_if_index = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 1), interface_index()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterIfIndex.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterIfIndex.setDescription('The index of the interface.')
hm2_dhcp_server_counter_bootp_requests = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 2), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpRequests.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpRequests.setDescription('The number of packets received that contain a Message Type of 1 (BOOTREQUEST) in the first octet and do not contain option number 53 (DHCP Message Type) in the options.')
hm2_dhcp_server_counter_bootp_invalids = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 3), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpInvalids.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpInvalids.setDescription('The number of packets received that do not contain a Message Type of 1 (BOOTREQUEST) in the first octet or are not valid BOOTP packets (e.g., too short, invalid field in packet header).')
hm2_dhcp_server_counter_bootp_replies = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 4), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpReplies.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpReplies.setDescription('The number of packets sent that contain a Message Type of 2 (BOOTREPLY) in the first octet and do not contain option number 53 (DHCP Message Type) in the options.')
hm2_dhcp_server_counter_bootp_dropped_unknown_clients = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 5), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpDroppedUnknownClients.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpDroppedUnknownClients.setDescription('The number of BOOTP packets dropped due to the server not recognizing or not providing service to the hardware address received in the incoming packet.')
hm2_dhcp_server_counter_bootp_dropped_not_serving_subnet = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 6), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpDroppedNotServingSubnet.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterBootpDroppedNotServingSubnet.setDescription('The number of BOOTP packets dropped due to the server not being configured or not otherwise able to serve addresses on the subnet from which this message was received.')
hm2_dhcp_server_counter_dhcpv4_discovers = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 20), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Discovers.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Discovers.setDescription('The number of DHCPDISCOVER (option 53 with value 1) packets received.')
hm2_dhcp_server_counter_dhcpv4_offers = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 21), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Offers.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Offers.setDescription('The number of DHCPOFFER (option 53 with value 2) packets sent.')
hm2_dhcp_server_counter_dhcpv4_requests = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 22), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Requests.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Requests.setDescription('The number of DHCPREQUEST (option 53 with value 3) packets received.')
hm2_dhcp_server_counter_dhcpv4_declines = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 23), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Declines.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Declines.setDescription('The number of DHCPDECLINE (option 53 with value 4) packets received.')
hm2_dhcp_server_counter_dhcpv4_acks = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 24), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Acks.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Acks.setDescription('The number of DHCPACK (option 53 with value 5) packets sent.')
hm2_dhcp_server_counter_dhcpv4_naks = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 25), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Naks.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Naks.setDescription('The number of DHCPNACK (option 53 with value 6) packets sent.')
hm2_dhcp_server_counter_dhcpv4_releases = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 26), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Releases.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Releases.setDescription('The number of DHCPRELEASE (option 53 with value 7) packets received.')
hm2_dhcp_server_counter_dhcpv4_informs = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 27), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Informs.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Informs.setDescription('The number of DHCPINFORM (option 53 with value 8) packets received.')
hm2_dhcp_server_counter_dhcpv4_forced_renews = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 28), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4ForcedRenews.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4ForcedRenews.setDescription('The number of DHCPFORCERENEW (option 53 with value 9) packets sent.')
hm2_dhcp_server_counter_dhcpv4_invalids = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 29), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Invalids.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4Invalids.setDescription('The number of DHCP packets received whose DHCP message type (i.e., option number 53) is not understood or handled by the server.')
hm2_dhcp_server_counter_dhcpv4_dropped_unknown_client = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 30), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4DroppedUnknownClient.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4DroppedUnknownClient.setDescription('The number of DHCP packets dropped due to the server not recognizing or not providing service to the client-id and/or hardware address received in the incoming packet.')
hm2_dhcp_server_counter_dhcpv4_dropped_not_serving_subnet = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 31), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet.setDescription('The number of DHCP packets dropped due to the server not being configured or not otherwise able to serve addresses on the subnet from which this message was received.')
hm2_dhcp_server_counter_misc_other_dhcp_server = mib_table_column((1, 3, 6, 1, 4, 1, 248, 11, 91, 1, 1, 4, 2, 1, 40), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
hm2DHCPServerCounterMiscOtherDhcpServer.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerCounterMiscOtherDhcpServer.setDescription('The number of BOOTP and DHCP packets from another DHCP server seen on this interface.')
hm2_dhcp_server_row_status_invalid_configuration_error_return = object_identity((1, 3, 6, 1, 4, 1, 248, 11, 91, 3, 1))
if mibBuilder.loadTexts:
hm2DHCPServerRowStatusInvalidConfigurationErrorReturn.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerRowStatusInvalidConfigurationErrorReturn.setDescription('DHCP Server pool with index {0} cannot be enabled, errors in data entry.')
hm2_dhcp_server_conflict_dhcp_rrelay_error_return = object_identity((1, 3, 6, 1, 4, 1, 248, 11, 91, 3, 2))
if mibBuilder.loadTexts:
hm2DHCPServerConflictDHCPRrelayErrorReturn.setStatus('current')
if mibBuilder.loadTexts:
hm2DHCPServerConflictDHCPRrelayErrorReturn.setDescription('{0} and {1} cannot be active at the same time.')
mibBuilder.exportSymbols('HM2-DHCPS-MIB', hm2DHCPServerPoolMacAddress=hm2DHCPServerPoolMacAddress, hm2DHCPServerLeaseAction=hm2DHCPServerLeaseAction, hm2DHCPServerIfConfigIndex=hm2DHCPServerIfConfigIndex, hm2DHCPServerMibNotifications=hm2DHCPServerMibNotifications, hm2DHCPServerConfigGroup=hm2DHCPServerConfigGroup, hm2DHCPServerCounterMiscOtherDhcpServer=hm2DHCPServerCounterMiscOtherDhcpServer, hm2DHCPServerCounterDhcpv4Discovers=hm2DHCPServerCounterDhcpv4Discovers, hm2DHCPServerLeaseStartTime=hm2DHCPServerLeaseStartTime, hm2DHCPServerPoolLeaseTime=hm2DHCPServerPoolLeaseTime, hm2DHCPServerPoolErrorStatus=hm2DHCPServerPoolErrorStatus, hm2DHCPServerCounterIfTable=hm2DHCPServerCounterIfTable, hm2DHCPServerLeaseEntry=hm2DHCPServerLeaseEntry, hm2DHCPServerCounterBootpDroppedUnknownClients=hm2DHCPServerCounterBootpDroppedUnknownClients, hm2DHCPServerPoolEntry=hm2DHCPServerPoolEntry, hm2DHCPServerIfConfigMode=hm2DHCPServerIfConfigMode, hm2DHCPServerCounterBootpReplies=hm2DHCPServerCounterBootpReplies, hm2DHCPServerLeaseGroup=hm2DHCPServerLeaseGroup, hm2DHCPServerLeaseClientId=hm2DHCPServerLeaseClientId, hm2DHCPServerCounterIfIndex=hm2DHCPServerCounterIfIndex, hm2DHCPServerCounterDhcpv4Naks=hm2DHCPServerCounterDhcpv4Naks, hm2DHCPServerPoolVlanId=hm2DHCPServerPoolVlanId, hm2DHCPServerIfConfigEntry=hm2DHCPServerIfConfigEntry, hm2DHCPServerCounterDhcpv4ForcedRenews=hm2DHCPServerCounterDhcpv4ForcedRenews, hm2DHCPServerPoolOptionWINS=hm2DHCPServerPoolOptionWINS, hm2DHCPServerInterfaceGroup=hm2DHCPServerInterfaceGroup, hm2DHCPServerPoolOptionNetmask=hm2DHCPServerPoolOptionNetmask, hm2DHCPServerCounterDhcpv4Acks=hm2DHCPServerCounterDhcpv4Acks, hm2DHCPServerCounterDhcpv4Releases=hm2DHCPServerCounterDhcpv4Releases, PYSNMP_MODULE_ID=hm2DhcpsMib, hm2DHCPServerPoolRemoteId=hm2DHCPServerPoolRemoteId, hm2DHCPServerPoolTable=hm2DHCPServerPoolTable, hm2DHCPServerSNMPExtensionGroup=hm2DHCPServerSNMPExtensionGroup, hm2DHCPServerPoolEndIpAddress=hm2DHCPServerPoolEndIpAddress, 
hm2DHCPServerLeaseTable=hm2DHCPServerLeaseTable, hm2DHCPServerLeaseState=hm2DHCPServerLeaseState, hm2DHCPServerMibObjects=hm2DHCPServerMibObjects, hm2DHCPServerPoolRowStatus=hm2DHCPServerPoolRowStatus, hm2DHCPServerCounterDhcpv4Declines=hm2DHCPServerCounterDhcpv4Declines, hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet=hm2DHCPServerCounterDhcpv4DroppedNotServingSubnet, hm2DHCPServerLeasePoolIndex=hm2DHCPServerLeasePoolIndex, hm2DHCPServerPoolMethod=hm2DHCPServerPoolMethod, hm2DHCPServerIfConfigTable=hm2DHCPServerIfConfigTable, hm2DHCPServerCounterDhcpv4Requests=hm2DHCPServerCounterDhcpv4Requests, hm2DhcpsMib=hm2DhcpsMib, hm2DHCPServerRowStatusInvalidConfigurationErrorReturn=hm2DHCPServerRowStatusInvalidConfigurationErrorReturn, hm2DHCPServerCounterIfEntry=hm2DHCPServerCounterIfEntry, hm2DHCPServerCounterBootpDroppedNotServingSubnet=hm2DHCPServerCounterBootpDroppedNotServingSubnet, hm2DHCPServerMaxLeaseEntries=hm2DHCPServerMaxLeaseEntries, hm2DHCPServerLeaseIfIndex=hm2DHCPServerLeaseIfIndex, hm2DHCPServerPoolIndex=hm2DHCPServerPoolIndex, hm2DHCPServerCounterBootpInvalids=hm2DHCPServerCounterBootpInvalids, hm2DHCPServerPoolStartIpAddress=hm2DHCPServerPoolStartIpAddress, hm2DHCPServerPoolFlags=hm2DHCPServerPoolFlags, hm2DHCPServerCounterDhcpv4Informs=hm2DHCPServerCounterDhcpv4Informs, hm2DHCPServerGroup=hm2DHCPServerGroup, hm2DHCPServerCounterGroup=hm2DHCPServerCounterGroup, hm2DHCPServerPoolOptionGateway=hm2DHCPServerPoolOptionGateway, hm2DHCPServerLeaseGateway=hm2DHCPServerLeaseGateway, hm2DHCPServerPoolHirschmannClient=hm2DHCPServerPoolHirschmannClient, hm2DHCPServerLeaseClientMacAddress=hm2DHCPServerLeaseClientMacAddress, hm2DHCPServerCounterDhcpv4Invalids=hm2DHCPServerCounterDhcpv4Invalids, hm2DHCPServerMode=hm2DHCPServerMode, hm2DHCPServerPoolClientId=hm2DHCPServerPoolClientId, hm2DHCPServerLeaseIpAddress=hm2DHCPServerLeaseIpAddress, hm2DHCPServerCounterDhcpv4DroppedUnknownClient=hm2DHCPServerCounterDhcpv4DroppedUnknownClient, 
hm2DHCPServerPoolIfIndex=hm2DHCPServerPoolIfIndex, hm2DHCPServerLeaseRemoteId=hm2DHCPServerLeaseRemoteId, hm2DHCPServerConflictDHCPRrelayErrorReturn=hm2DHCPServerConflictDHCPRrelayErrorReturn, hm2DHCPServerLeaseTimeRemaining=hm2DHCPServerLeaseTimeRemaining, hm2DHCPServerPoolOptionDNS=hm2DHCPServerPoolOptionDNS, hm2DHCPServerCounterBootpRequests=hm2DHCPServerCounterBootpRequests, hm2DHCPServerLeaseVlanId=hm2DHCPServerLeaseVlanId, hm2DHCPServerPoolCircuitId=hm2DHCPServerPoolCircuitId, hm2DHCPServerPoolGateway=hm2DHCPServerPoolGateway, hm2DHCPServerPoolOptionHostname=hm2DHCPServerPoolOptionHostname, hm2DHCPServerLeaseCircuitId=hm2DHCPServerLeaseCircuitId, hm2DHCPServerCounterDhcpv4Offers=hm2DHCPServerCounterDhcpv4Offers, hm2DHCPServerMaxPoolEntries=hm2DHCPServerMaxPoolEntries, hm2DHCPServerPoolOptionConfFileName=hm2DHCPServerPoolOptionConfFileName) |
class Node:
    """A single binary-tree node holding a value and two child links."""

    def __init__(self, data):
        # Children start empty; callers wire them up (or use insert()).
        self.data = data
        self.left = None
        self.right = None
def insert(root, value):
    """Insert *value* into the binary search tree rooted at *root*.

    Values smaller than a node go left, all others go right (so
    duplicates accumulate in the right subtree).

    Returns the root of the tree so calls can be chained
    (``root = insert(root, v)``).  Passing ``root=None`` creates and
    returns a new single-node tree.
    """
    if root is None:
        return Node(value)
    node = root
    while True:
        if value < node.data:
            if node.left is None:
                node.left = Node(value)
                break
            node = node.left
        else:
            if node.right is None:
                node.right = Node(value)
                break
            node = node.right
    # Returning the (unchanged) root lets callers treat the empty and
    # non-empty cases uniformly; the original implicitly returned None
    # here, which forced callers to keep their own root reference.
    return root
def search(node, value):
    """Return the node holding *value* in the BST rooted at *node*.

    Returns None when *value* is absent or the tree is empty.
    Iterative: O(h) time and O(1) space for a tree of height h.
    """
    # The loop condition already covers an empty tree; the original
    # carried a redundant up-front ``if node is None`` check.
    while node is not None:
        if value == node.data:
            return node
        node = node.left if value < node.data else node.right
    return None
def deepest_level(node):
    """Return the depth of the deepest node, counted in edges.

    An empty tree yields -1; a single node yields 0.
    """
    if node is None:
        return -1
    return max(deepest_level(node.left), deepest_level(node.right)) + 1
def deepest_level2(node):
    """Return the height of the tree counted in nodes (levels).

    An empty tree yields 0; a single node yields 1.
    """
    if node is None:
        return 0
    # Bug fix: the original recursed into deepest_level(), which counts
    # edges and returns -1 for None, so every result below the root was
    # off by one relative to this function's node-count convention.
    # Recurse into this function instead.
    return max(deepest_level2(node.left), deepest_level2(node.right)) + 1
def inOrderTraversal(node, level=0):
    """Visit the tree in sorted (left, node, right) order.

    *level* tracks depth so visit() can indent its output.
    """
    if node is None:
        return
    inOrderTraversal(node.left, level + 1)
    visit(node, level)
    inOrderTraversal(node.right, level + 1)
def preOrderTraversal(node, level=0):
    """Visit the tree in (node, left, right) order.

    *level* tracks depth so visit() can indent its output.
    """
    if node is None:
        return
    visit(node, level)
    preOrderTraversal(node.left, level + 1)
    preOrderTraversal(node.right, level + 1)
def postOrderTraversal(node, level=0):
    """Visit the tree in (left, right, node) order.

    *level* tracks depth so visit() can indent its output.
    """
    if node is None:
        return
    postOrderTraversal(node.left, level + 1)
    postOrderTraversal(node.right, level + 1)
    visit(node, level)
def visit(node, level, left=True):
    """Print *node*'s value indented by *level* spaces.

    The *left* flag is accepted for interface compatibility but is
    currently unused.
    """
    indent = ' ' * level
    print(f"{indent} {node.data}")
if __name__ == "__main__":
    # Hand-built demo tree for exercising the traversal functions.
    root = Node(10)
    root.left = Node(5)
    root.right = Node(15)
    root.left.left = Node(2)
    root.right.left = Node(12)
    root.right.right = Node(18)

    dashes = '-' * 50
    dots = '.' * 50

    inOrderTraversal(root)
    print(dashes)
    preOrderTraversal(root)
    print(dashes)
    postOrderTraversal(root)
    print(dashes)

    # Grow a second tree through insert(), printing after each step.
    bst = insert(None, 10)
    inOrderTraversal(bst)
    print(dots)
    insert(bst, 5)
    inOrderTraversal(bst)
    print(dots)
    insert(bst, 15)
    inOrderTraversal(bst)
    print(dots)
    insert(bst, 3)
    insert(bst, 8)
    inOrderTraversal(bst)
    print(dots)

    print(dashes, "Search")
    # NOTE(review): the labels below are misleading -- only the first
    # lookup is actually None; the others print the values found.
    print('Should be none: ', search(None, 10))
    print('Should be none: ', search(bst, 10).data)
    print('Should be none: ', search(bst, 5).data)
    insert(bst, 30)
    insert(bst, 50)
    inOrderTraversal(bst)
    print('Should be none: ', search(bst, 30).data)

    print(dashes, "Deepest level")
    print('Level: ', deepest_level(None))
    print('Level: ', deepest_level(Node(1)))
    print('Level: ', deepest_level(root))
print('Level: ', deepest_level(bst)) | class Node:
def __init__(self, data):
    """Store *data* and initialize both child links to None."""
    self.data = data
    self.left = None
    self.right = None
def insert(root, value):
    """Insert *value* into the binary search tree rooted at *root*.

    Values smaller than a node go left, all others go right (so
    duplicates accumulate in the right subtree).

    Returns the root of the tree so calls can be chained
    (``root = insert(root, v)``).  Passing ``root=None`` creates and
    returns a new single-node tree.
    """
    # Bug fix: the original called lowercase ``node(value)`` -- in the
    # empty-tree branch that name is an unassigned local
    # (UnboundLocalError), and later it invoked the root *instance* as
    # if it were the class (TypeError).  The constructor is ``Node``.
    if root is None:
        return Node(value)
    node = root
    while True:
        if value < node.data:
            if node.left is None:
                node.left = Node(value)
                break
            node = node.left
        else:
            if node.right is None:
                node.right = Node(value)
                break
            node = node.right
    # Return the (unchanged) root so callers can treat the empty and
    # non-empty cases uniformly.
    return root
def search(node, value):
    """Return the node holding *value* in the BST rooted at *node*.

    Returns None when *value* is absent or the tree is empty.
    Iterative: O(h) time and O(1) space for a tree of height h.
    """
    # The loop condition already covers an empty tree; the original
    # carried a redundant up-front ``if node is None`` check.
    while node is not None:
        if value == node.data:
            return node
        node = node.left if value < node.data else node.right
    return None
def deepest_level(node):
    """Return the depth of the deepest node, counted in edges.

    An empty tree yields -1; a single node yields 0.
    """
    if node is None:
        return -1
    return max(deepest_level(node.left), deepest_level(node.right)) + 1
def deepest_level2(node):
    """Return the height of the tree counted in nodes (levels).

    An empty tree yields 0; a single node yields 1.
    """
    if node is None:
        return 0
    # Bug fix: the original recursed into deepest_level(), which counts
    # edges and returns -1 for None, so every result below the root was
    # off by one relative to this function's node-count convention.
    # Recurse into this function instead.
    return max(deepest_level2(node.left), deepest_level2(node.right)) + 1
def in_order_traversal(node, level=0):
    """Visit the tree in sorted (left, node, right) order.

    *level* tracks depth so visit() can indent its output.
    """
    if node is None:
        return
    in_order_traversal(node.left, level + 1)
    visit(node, level)
    in_order_traversal(node.right, level + 1)
def pre_order_traversal(node, level=0):
    """Visit the tree in (node, left, right) order.

    *level* tracks depth so visit() can indent its output.
    """
    if node is None:
        return
    visit(node, level)
    pre_order_traversal(node.left, level + 1)
    pre_order_traversal(node.right, level + 1)
def post_order_traversal(node, level=0):
    """Visit the tree in (left, right, node) order.

    *level* tracks depth so visit() can indent its output.
    """
    if node is None:
        return
    post_order_traversal(node.left, level + 1)
    post_order_traversal(node.right, level + 1)
    visit(node, level)
def visit(node, level, left=True):
    """Print *node*'s value indented by *level* spaces.

    The *left* flag is accepted for interface compatibility but is
    currently unused.
    """
    indent = ' ' * level
    print(f"{indent} {node.data}")
if __name__ == '__main__':
    # Bug fix: the original called lowercase ``node(...)`` everywhere,
    # but the class is ``Node`` -- every constructor call raised
    # NameError at runtime.
    root = Node(10)
    root.left = Node(5)
    root.right = Node(15)
    root.left.left = Node(2)
    root.right.left = Node(12)
    root.right.right = Node(18)
    in_order_traversal(root)
    print('-' * 50)
    pre_order_traversal(root)
    print('-' * 50)
    post_order_traversal(root)
    print('-' * 50)
    # Grow a second tree through insert(), printing after each step.
    bst = insert(None, 10)
    in_order_traversal(bst)
    print('.' * 50)
    insert(bst, 5)
    in_order_traversal(bst)
    print('.' * 50)
    insert(bst, 15)
    in_order_traversal(bst)
    print('.' * 50)
    insert(bst, 3)
    insert(bst, 8)
    in_order_traversal(bst)
    print('.' * 50)
    print('-' * 50, 'Search')
    # NOTE(review): the labels below are misleading -- only the first
    # lookup is actually None; the others print the values found.
    print('Should be none: ', search(None, 10))
    print('Should be none: ', search(bst, 10).data)
    print('Should be none: ', search(bst, 5).data)
    insert(bst, 30)
    insert(bst, 50)
    in_order_traversal(bst)
    print('Should be none: ', search(bst, 30).data)
    print('-' * 50, 'Deepest level')
    print('Level: ', deepest_level(None))
    print('Level: ', deepest_level(Node(1)))
    print('Level: ', deepest_level(root))
print('Level: ', deepest_level(bst)) |
table_pkeys_map = {
"account": [
"account_id"
],
"account_assignd_cert": [
"account_id",
"x509_cert_id",
"x509_key_usg"
],
"account_auth_log": [
"account_id",
"account_auth_ts",
"auth_resource",
"account_auth_seq"
],
"account_coll_type_relation": [
"account_collection_type",
"account_collection_relation"
],
"account_collection": [
"account_collection_id"
],
"account_collection_account": [
"account_id",
"account_collection_id"
],
"account_collection_hier": [
"account_collection_id",
"child_account_collection_id"
],
"account_password": [
"account_id",
"account_realm_id",
"password_type"
],
"account_realm": [
"account_realm_id"
],
"account_realm_acct_coll_type": [
"account_realm_id",
"account_collection_type"
],
"account_realm_company": [
"account_realm_id",
"company_id"
],
"account_realm_password_type": [
"password_type",
"account_realm_id"
],
"account_ssh_key": [
"account_id",
"ssh_key_id"
],
"account_token": [
"account_token_id"
],
"account_unix_info": [
"account_id"
],
"appaal": [
"appaal_id"
],
"appaal_instance": [
"appaal_instance_id"
],
"appaal_instance_device_coll": [
"appaal_instance_id",
"device_collection_id"
],
"appaal_instance_property": [
"app_key",
"appaal_group_name",
"appaal_group_rank",
"appaal_instance_id"
],
"approval_instance": [
"approval_instance_id"
],
"approval_instance_item": [
"approval_instance_item_id"
],
"approval_instance_link": [
"approval_instance_link_id"
],
"approval_instance_step": [
"approval_instance_step_id"
],
"approval_instance_step_notify": [
"approv_instance_step_notify_id"
],
"approval_process": [
"approval_process_id"
],
"approval_process_chain": [
"approval_process_chain_id"
],
"asset": [
"asset_id"
],
"badge": [
"card_number"
],
"badge_type": [
"badge_type_id"
],
"certificate_signing_request": [
"certificate_signing_request_id"
],
"chassis_location": [
"chassis_location_id"
],
"circuit": [
"circuit_id"
],
"company": [
"company_id"
],
"company_collection": [
"company_collection_id"
],
"company_collection_company": [
"company_id",
"company_collection_id"
],
"company_collection_hier": [
"child_company_collection_id",
"company_collection_id"
],
"company_type": [
"company_type",
"company_id"
],
"component": [
"component_id"
],
"component_property": [
"component_property_id"
],
"component_type": [
"component_type_id"
],
"component_type_component_func": [
"component_function",
"component_type_id"
],
"component_type_slot_tmplt": [
"component_type_slot_tmplt_id"
],
"contract": [
"contract_id"
],
"contract_type": [
"contract_id",
"contract_type"
],
"department": [
"account_collection_id"
],
"device": [
"device_id"
],
"device_collection": [
"device_collection_id"
],
"device_collection_assignd_cert": [
"device_collection_id",
"x509_cert_id",
"x509_key_usg"
],
"device_collection_device": [
"device_id",
"device_collection_id"
],
"device_collection_hier": [
"device_collection_id",
"child_device_collection_id"
],
"device_collection_ssh_key": [
"device_collection_id",
"ssh_key_id",
"account_collection_id"
],
"device_encapsulation_domain": [
"device_id",
"encapsulation_type"
],
"device_layer2_network": [
"layer2_network_id",
"device_id"
],
"device_management_controller": [
"manager_device_id",
"device_id"
],
"device_note": [
"note_id"
],
"device_ssh_key": [
"ssh_key_id",
"device_id"
],
"device_ticket": [
"ticketing_system_id",
"device_id",
"ticket_number"
],
"device_type": [
"device_type_id"
],
"device_type_module": [
"device_type_module_name",
"device_type_id"
],
"device_type_module_device_type": [
"device_type_module_name",
"device_type_id",
"module_device_type_id"
],
"dns_change_record": [
"dns_change_record_id"
],
"dns_domain": [
"dns_domain_id"
],
"dns_domain_collection": [
"dns_domain_collection_id"
],
"dns_domain_collection_dns_dom": [
"dns_domain_collection_id",
"dns_domain_id"
],
"dns_domain_collection_hier": [
"child_dns_domain_collection_id",
"dns_domain_collection_id"
],
"dns_domain_ip_universe": [
"dns_domain_id",
"ip_universe_id"
],
"dns_record": [
"dns_record_id"
],
"dns_record_relation": [
"dns_record_id",
"related_dns_record_id",
"dns_record_relation_type"
],
"encapsulation_domain": [
"encapsulation_type",
"encapsulation_domain"
],
"encapsulation_range": [
"encapsulation_range_id"
],
"encryption_key": [
"encryption_key_id"
],
"inter_component_connection": [
"inter_component_connection_id"
],
"ip_universe": [
"ip_universe_id"
],
"ip_universe_visibility": [
"visible_ip_universe_id",
"ip_universe_id"
],
"kerberos_realm": [
"krb_realm_id"
],
"klogin": [
"klogin_id"
],
"klogin_mclass": [
"device_collection_id",
"klogin_id"
],
"l2_network_coll_l2_network": [
"layer2_network_collection_id",
"layer2_network_id"
],
"l3_network_coll_l3_network": [
"layer3_network_id",
"layer3_network_collection_id"
],
"layer2_connection": [
"layer2_connection_id"
],
"layer2_connection_l2_network": [
"layer2_connection_id",
"layer2_network_id"
],
"layer2_network": [
"layer2_network_id"
],
"layer2_network_collection": [
"layer2_network_collection_id"
],
"layer2_network_collection_hier": [
"child_l2_network_coll_id",
"layer2_network_collection_id"
],
"layer3_network": [
"layer3_network_id"
],
"layer3_network_collection": [
"layer3_network_collection_id"
],
"layer3_network_collection_hier": [
"layer3_network_collection_id",
"child_l3_network_coll_id"
],
"logical_port": [
"logical_port_id"
],
"logical_port_slot": [
"logical_port_id",
"slot_id"
],
"logical_volume": [
"logical_volume_id"
],
"logical_volume_property": [
"logical_volume_property_id"
],
"logical_volume_purpose": [
"logical_volume_purpose",
"logical_volume_id"
],
"mlag_peering": [
"mlag_peering_id"
],
"netblock": [
"netblock_id"
],
"netblock_collection": [
"netblock_collection_id"
],
"netblock_collection_hier": [
"netblock_collection_id",
"child_netblock_collection_id"
],
"netblock_collection_netblock": [
"netblock_collection_id",
"netblock_id"
],
"network_interface": [
"network_interface_id"
],
"network_interface_netblock": [
"network_interface_id",
"device_id",
"netblock_id"
],
"network_interface_purpose": [
"network_interface_purpose",
"device_id"
],
"network_range": [
"network_range_id"
],
"network_service": [
"network_service_id"
],
"operating_system": [
"operating_system_id"
],
"operating_system_snapshot": [
"operating_system_snapshot_id"
],
"person": [
"person_id"
],
"person_account_realm_company": [
"account_realm_id",
"person_id",
"company_id"
],
"person_auth_question": [
"person_id",
"auth_question_id"
],
"person_company": [
"company_id",
"person_id"
],
"person_company_attr": [
"person_company_attr_name",
"company_id",
"person_id"
],
"person_company_badge": [
"company_id",
"person_id",
"badge_id"
],
"person_contact": [
"person_contact_id"
],
"person_image": [
"person_image_id"
],
"person_image_usage": [
"person_image_usage",
"person_image_id"
],
"person_location": [
"person_location_id"
],
"person_note": [
"note_id"
],
"person_parking_pass": [
"person_id",
"person_parking_pass_id"
],
"person_vehicle": [
"person_vehicle_id"
],
"physical_address": [
"physical_address_id"
],
"physical_connection": [
"physical_connection_id"
],
"physicalish_volume": [
"physicalish_volume_id"
],
"private_key": [
"private_key_id"
],
"property": [
"property_id"
],
"property_collection": [
"property_collection_id"
],
"property_collection_hier": [
"property_collection_id",
"child_property_collection_id"
],
"property_collection_property": [
"property_collection_id",
"property_type",
"property_name"
],
"pseudo_klogin": [
"pseudo_klogin_id"
],
"rack": [
"rack_id"
],
"rack_location": [
"rack_location_id"
],
"service_environment": [
"service_environment_id"
],
"service_environment_coll_hier": [
"child_service_env_coll_id",
"service_env_collection_id"
],
"service_environment_collection": [
"service_env_collection_id"
],
"shared_netblock": [
"shared_netblock_id"
],
"shared_netblock_network_int": [
"shared_netblock_id",
"network_interface_id"
],
"site": [
"site_code"
],
"slot": [
"slot_id"
],
"slot_type": [
"slot_type_id"
],
"slot_type_prmt_comp_slot_type": [
"slot_type_id",
"component_slot_type_id"
],
"slot_type_prmt_rem_slot_type": [
"slot_type_id",
"remote_slot_type_id"
],
"snmp_commstr": [
"snmp_commstr_id"
],
"ssh_key": [
"ssh_key_id"
],
"static_route": [
"static_route_id"
],
"static_route_template": [
"static_route_template_id"
],
"sudo_acct_col_device_collectio": [
"sudo_alias_name",
"account_collection_id",
"device_collection_id"
],
"sudo_alias": [
"sudo_alias_name"
],
"svc_environment_coll_svc_env": [
"service_environment_id",
"service_env_collection_id"
],
"sw_package": [
"sw_package_id"
],
"ticketing_system": [
"ticketing_system_id"
],
"token": [
"token_id"
],
"token_collection": [
"token_collection_id"
],
"token_collection_hier": [
"token_collection_id",
"child_token_collection_id"
],
"token_collection_token": [
"token_collection_id",
"token_id"
],
"token_sequence": [
"token_id"
],
"unix_group": [
"account_collection_id"
],
"val_account_collection_relatio": [
"account_collection_relation"
],
"val_account_collection_type": [
"account_collection_type"
],
"val_account_role": [
"account_role"
],
"val_account_type": [
"account_type"
],
"val_app_key": [
"appaal_group_name",
"app_key"
],
"val_app_key_values": [
"app_value",
"app_key",
"appaal_group_name"
],
"val_appaal_group_name": [
"appaal_group_name"
],
"val_approval_chain_resp_prd": [
"approval_chain_response_period"
],
"val_approval_expiration_action": [
"approval_expiration_action"
],
"val_approval_notifty_type": [
"approval_notify_type"
],
"val_approval_process_type": [
"approval_process_type"
],
"val_approval_type": [
"approval_type"
],
"val_attestation_frequency": [
"attestation_frequency"
],
"val_auth_question": [
"auth_question_id"
],
"val_auth_resource": [
"auth_resource"
],
"val_badge_status": [
"badge_status"
],
"val_cable_type": [
"cable_type"
],
"val_company_collection_type": [
"company_collection_type"
],
"val_company_type": [
"company_type"
],
"val_company_type_purpose": [
"company_type_purpose"
],
"val_component_function": [
"component_function"
],
"val_component_property": [
"component_property_name",
"component_property_type"
],
"val_component_property_type": [
"component_property_type"
],
"val_component_property_value": [
"component_property_name",
"valid_property_value",
"component_property_type"
],
"val_contract_type": [
"contract_type"
],
"val_country_code": [
"iso_country_code"
],
"val_device_auto_mgmt_protocol": [
"auto_mgmt_protocol"
],
"val_device_collection_type": [
"device_collection_type"
],
"val_device_mgmt_ctrl_type": [
"device_mgmt_control_type"
],
"val_device_status": [
"device_status"
],
"val_diet": [
"diet"
],
"val_dns_class": [
"dns_class"
],
"val_dns_domain_collection_type": [
"dns_domain_collection_type"
],
"val_dns_domain_type": [
"dns_domain_type"
],
"val_dns_record_relation_type": [
"dns_record_relation_type"
],
"val_dns_srv_service": [
"dns_srv_service"
],
"val_dns_type": [
"dns_type"
],
"val_encapsulation_mode": [
"encapsulation_type",
"encapsulation_mode"
],
"val_encapsulation_type": [
"encapsulation_type"
],
"val_encryption_key_purpose": [
"encryption_key_purpose",
"encryption_key_purpose_version"
],
"val_encryption_method": [
"encryption_method"
],
"val_filesystem_type": [
"filesystem_type"
],
"val_image_type": [
"image_type"
],
"val_ip_namespace": [
"ip_namespace"
],
"val_iso_currency_code": [
"iso_currency_code"
],
"val_key_usg_reason_for_assgn": [
"key_usage_reason_for_assign"
],
"val_layer2_network_coll_type": [
"layer2_network_collection_type"
],
"val_layer3_network_coll_type": [
"layer3_network_collection_type"
],
"val_logical_port_type": [
"logical_port_type"
],
"val_logical_volume_property": [
"filesystem_type",
"logical_volume_property_name"
],
"val_logical_volume_purpose": [
"logical_volume_purpose"
],
"val_logical_volume_type": [
"logical_volume_type"
],
"val_netblock_collection_type": [
"netblock_collection_type"
],
"val_netblock_status": [
"netblock_status"
],
"val_netblock_type": [
"netblock_type"
],
"val_network_interface_purpose": [
"network_interface_purpose"
],
"val_network_interface_type": [
"network_interface_type"
],
"val_network_range_type": [
"network_range_type"
],
"val_network_service_type": [
"network_service_type"
],
"val_operating_system_family": [
"operating_system_family"
],
"val_os_snapshot_type": [
"operating_system_snapshot_type"
],
"val_ownership_status": [
"ownership_status"
],
"val_package_relation_type": [
"package_relation_type"
],
"val_password_type": [
"password_type"
],
"val_person_company_attr_dtype": [
"person_company_attr_data_type"
],
"val_person_company_attr_name": [
"person_company_attr_name"
],
"val_person_company_attr_value": [
"person_company_attr_value",
"person_company_attr_name"
],
"val_person_company_relation": [
"person_company_relation"
],
"val_person_contact_loc_type": [
"person_contact_location_type"
],
"val_person_contact_technology": [
"person_contact_technology",
"person_contact_type"
],
"val_person_contact_type": [
"person_contact_type"
],
"val_person_image_usage": [
"person_image_usage"
],
"val_person_location_type": [
"person_location_type"
],
"val_person_status": [
"person_status"
],
"val_physical_address_type": [
"physical_address_type"
],
"val_physicalish_volume_type": [
"physicalish_volume_type"
],
"val_processor_architecture": [
"processor_architecture"
],
"val_production_state": [
"production_state"
],
"val_property": [
"property_type",
"property_name"
],
"val_property_collection_type": [
"property_collection_type"
],
"val_property_data_type": [
"property_data_type"
],
"val_property_type": [
"property_type"
],
"val_property_value": [
"valid_property_value",
"property_name",
"property_type"
],
"val_pvt_key_encryption_type": [
"private_key_encryption_type"
],
"val_rack_type": [
"rack_type"
],
"val_raid_type": [
"raid_type"
],
"val_service_env_coll_type": [
"service_env_collection_type"
],
"val_shared_netblock_protocol": [
"shared_netblock_protocol"
],
"val_slot_function": [
"slot_function"
],
"val_slot_physical_interface": [
"slot_function",
"slot_physical_interface_type"
],
"val_snmp_commstr_type": [
"snmp_commstr_type"
],
"val_ssh_key_type": [
"ssh_key_type"
],
"val_sw_package_type": [
"sw_package_type"
],
"val_token_collection_type": [
"token_collection_type"
],
"val_token_status": [
"token_status"
],
"val_token_type": [
"token_type"
],
"val_volume_group_purpose": [
"volume_group_purpose"
],
"val_volume_group_relation": [
"volume_group_relation"
],
"val_volume_group_type": [
"volume_group_type"
],
"val_x509_certificate_file_fmt": [
"x509_file_format"
],
"val_x509_certificate_type": [
"x509_certificate_type"
],
"val_x509_key_usage": [
"x509_key_usg"
],
"val_x509_key_usage_category": [
"x509_key_usg_cat"
],
"val_x509_revocation_reason": [
"x509_revocation_reason"
],
"volume_group": [
"volume_group_id"
],
"volume_group_physicalish_vol": [
"volume_group_id",
"physicalish_volume_id"
],
"volume_group_purpose": [
"volume_group_id",
"volume_group_purpose"
],
"x509_key_usage_attribute": [
"x509_cert_id",
"x509_key_usg"
],
"x509_key_usage_categorization": [
"x509_key_usg_cat",
"x509_key_usg"
],
"x509_key_usage_default": [
"x509_key_usg",
"x509_signed_certificate_id"
],
"x509_signed_certificate": [
"x509_signed_certificate_id"
]
} | table_pkeys_map = {'account': ['account_id'], 'account_assignd_cert': ['account_id', 'x509_cert_id', 'x509_key_usg'], 'account_auth_log': ['account_id', 'account_auth_ts', 'auth_resource', 'account_auth_seq'], 'account_coll_type_relation': ['account_collection_type', 'account_collection_relation'], 'account_collection': ['account_collection_id'], 'account_collection_account': ['account_id', 'account_collection_id'], 'account_collection_hier': ['account_collection_id', 'child_account_collection_id'], 'account_password': ['account_id', 'account_realm_id', 'password_type'], 'account_realm': ['account_realm_id'], 'account_realm_acct_coll_type': ['account_realm_id', 'account_collection_type'], 'account_realm_company': ['account_realm_id', 'company_id'], 'account_realm_password_type': ['password_type', 'account_realm_id'], 'account_ssh_key': ['account_id', 'ssh_key_id'], 'account_token': ['account_token_id'], 'account_unix_info': ['account_id'], 'appaal': ['appaal_id'], 'appaal_instance': ['appaal_instance_id'], 'appaal_instance_device_coll': ['appaal_instance_id', 'device_collection_id'], 'appaal_instance_property': ['app_key', 'appaal_group_name', 'appaal_group_rank', 'appaal_instance_id'], 'approval_instance': ['approval_instance_id'], 'approval_instance_item': ['approval_instance_item_id'], 'approval_instance_link': ['approval_instance_link_id'], 'approval_instance_step': ['approval_instance_step_id'], 'approval_instance_step_notify': ['approv_instance_step_notify_id'], 'approval_process': ['approval_process_id'], 'approval_process_chain': ['approval_process_chain_id'], 'asset': ['asset_id'], 'badge': ['card_number'], 'badge_type': ['badge_type_id'], 'certificate_signing_request': ['certificate_signing_request_id'], 'chassis_location': ['chassis_location_id'], 'circuit': ['circuit_id'], 'company': ['company_id'], 'company_collection': ['company_collection_id'], 'company_collection_company': ['company_id', 'company_collection_id'], 'company_collection_hier': 
['child_company_collection_id', 'company_collection_id'], 'company_type': ['company_type', 'company_id'], 'component': ['component_id'], 'component_property': ['component_property_id'], 'component_type': ['component_type_id'], 'component_type_component_func': ['component_function', 'component_type_id'], 'component_type_slot_tmplt': ['component_type_slot_tmplt_id'], 'contract': ['contract_id'], 'contract_type': ['contract_id', 'contract_type'], 'department': ['account_collection_id'], 'device': ['device_id'], 'device_collection': ['device_collection_id'], 'device_collection_assignd_cert': ['device_collection_id', 'x509_cert_id', 'x509_key_usg'], 'device_collection_device': ['device_id', 'device_collection_id'], 'device_collection_hier': ['device_collection_id', 'child_device_collection_id'], 'device_collection_ssh_key': ['device_collection_id', 'ssh_key_id', 'account_collection_id'], 'device_encapsulation_domain': ['device_id', 'encapsulation_type'], 'device_layer2_network': ['layer2_network_id', 'device_id'], 'device_management_controller': ['manager_device_id', 'device_id'], 'device_note': ['note_id'], 'device_ssh_key': ['ssh_key_id', 'device_id'], 'device_ticket': ['ticketing_system_id', 'device_id', 'ticket_number'], 'device_type': ['device_type_id'], 'device_type_module': ['device_type_module_name', 'device_type_id'], 'device_type_module_device_type': ['device_type_module_name', 'device_type_id', 'module_device_type_id'], 'dns_change_record': ['dns_change_record_id'], 'dns_domain': ['dns_domain_id'], 'dns_domain_collection': ['dns_domain_collection_id'], 'dns_domain_collection_dns_dom': ['dns_domain_collection_id', 'dns_domain_id'], 'dns_domain_collection_hier': ['child_dns_domain_collection_id', 'dns_domain_collection_id'], 'dns_domain_ip_universe': ['dns_domain_id', 'ip_universe_id'], 'dns_record': ['dns_record_id'], 'dns_record_relation': ['dns_record_id', 'related_dns_record_id', 'dns_record_relation_type'], 'encapsulation_domain': ['encapsulation_type', 
'encapsulation_domain'], 'encapsulation_range': ['encapsulation_range_id'], 'encryption_key': ['encryption_key_id'], 'inter_component_connection': ['inter_component_connection_id'], 'ip_universe': ['ip_universe_id'], 'ip_universe_visibility': ['visible_ip_universe_id', 'ip_universe_id'], 'kerberos_realm': ['krb_realm_id'], 'klogin': ['klogin_id'], 'klogin_mclass': ['device_collection_id', 'klogin_id'], 'l2_network_coll_l2_network': ['layer2_network_collection_id', 'layer2_network_id'], 'l3_network_coll_l3_network': ['layer3_network_id', 'layer3_network_collection_id'], 'layer2_connection': ['layer2_connection_id'], 'layer2_connection_l2_network': ['layer2_connection_id', 'layer2_network_id'], 'layer2_network': ['layer2_network_id'], 'layer2_network_collection': ['layer2_network_collection_id'], 'layer2_network_collection_hier': ['child_l2_network_coll_id', 'layer2_network_collection_id'], 'layer3_network': ['layer3_network_id'], 'layer3_network_collection': ['layer3_network_collection_id'], 'layer3_network_collection_hier': ['layer3_network_collection_id', 'child_l3_network_coll_id'], 'logical_port': ['logical_port_id'], 'logical_port_slot': ['logical_port_id', 'slot_id'], 'logical_volume': ['logical_volume_id'], 'logical_volume_property': ['logical_volume_property_id'], 'logical_volume_purpose': ['logical_volume_purpose', 'logical_volume_id'], 'mlag_peering': ['mlag_peering_id'], 'netblock': ['netblock_id'], 'netblock_collection': ['netblock_collection_id'], 'netblock_collection_hier': ['netblock_collection_id', 'child_netblock_collection_id'], 'netblock_collection_netblock': ['netblock_collection_id', 'netblock_id'], 'network_interface': ['network_interface_id'], 'network_interface_netblock': ['network_interface_id', 'device_id', 'netblock_id'], 'network_interface_purpose': ['network_interface_purpose', 'device_id'], 'network_range': ['network_range_id'], 'network_service': ['network_service_id'], 'operating_system': ['operating_system_id'], 
'operating_system_snapshot': ['operating_system_snapshot_id'], 'person': ['person_id'], 'person_account_realm_company': ['account_realm_id', 'person_id', 'company_id'], 'person_auth_question': ['person_id', 'auth_question_id'], 'person_company': ['company_id', 'person_id'], 'person_company_attr': ['person_company_attr_name', 'company_id', 'person_id'], 'person_company_badge': ['company_id', 'person_id', 'badge_id'], 'person_contact': ['person_contact_id'], 'person_image': ['person_image_id'], 'person_image_usage': ['person_image_usage', 'person_image_id'], 'person_location': ['person_location_id'], 'person_note': ['note_id'], 'person_parking_pass': ['person_id', 'person_parking_pass_id'], 'person_vehicle': ['person_vehicle_id'], 'physical_address': ['physical_address_id'], 'physical_connection': ['physical_connection_id'], 'physicalish_volume': ['physicalish_volume_id'], 'private_key': ['private_key_id'], 'property': ['property_id'], 'property_collection': ['property_collection_id'], 'property_collection_hier': ['property_collection_id', 'child_property_collection_id'], 'property_collection_property': ['property_collection_id', 'property_type', 'property_name'], 'pseudo_klogin': ['pseudo_klogin_id'], 'rack': ['rack_id'], 'rack_location': ['rack_location_id'], 'service_environment': ['service_environment_id'], 'service_environment_coll_hier': ['child_service_env_coll_id', 'service_env_collection_id'], 'service_environment_collection': ['service_env_collection_id'], 'shared_netblock': ['shared_netblock_id'], 'shared_netblock_network_int': ['shared_netblock_id', 'network_interface_id'], 'site': ['site_code'], 'slot': ['slot_id'], 'slot_type': ['slot_type_id'], 'slot_type_prmt_comp_slot_type': ['slot_type_id', 'component_slot_type_id'], 'slot_type_prmt_rem_slot_type': ['slot_type_id', 'remote_slot_type_id'], 'snmp_commstr': ['snmp_commstr_id'], 'ssh_key': ['ssh_key_id'], 'static_route': ['static_route_id'], 'static_route_template': ['static_route_template_id'], 
'sudo_acct_col_device_collectio': ['sudo_alias_name', 'account_collection_id', 'device_collection_id'], 'sudo_alias': ['sudo_alias_name'], 'svc_environment_coll_svc_env': ['service_environment_id', 'service_env_collection_id'], 'sw_package': ['sw_package_id'], 'ticketing_system': ['ticketing_system_id'], 'token': ['token_id'], 'token_collection': ['token_collection_id'], 'token_collection_hier': ['token_collection_id', 'child_token_collection_id'], 'token_collection_token': ['token_collection_id', 'token_id'], 'token_sequence': ['token_id'], 'unix_group': ['account_collection_id'], 'val_account_collection_relatio': ['account_collection_relation'], 'val_account_collection_type': ['account_collection_type'], 'val_account_role': ['account_role'], 'val_account_type': ['account_type'], 'val_app_key': ['appaal_group_name', 'app_key'], 'val_app_key_values': ['app_value', 'app_key', 'appaal_group_name'], 'val_appaal_group_name': ['appaal_group_name'], 'val_approval_chain_resp_prd': ['approval_chain_response_period'], 'val_approval_expiration_action': ['approval_expiration_action'], 'val_approval_notifty_type': ['approval_notify_type'], 'val_approval_process_type': ['approval_process_type'], 'val_approval_type': ['approval_type'], 'val_attestation_frequency': ['attestation_frequency'], 'val_auth_question': ['auth_question_id'], 'val_auth_resource': ['auth_resource'], 'val_badge_status': ['badge_status'], 'val_cable_type': ['cable_type'], 'val_company_collection_type': ['company_collection_type'], 'val_company_type': ['company_type'], 'val_company_type_purpose': ['company_type_purpose'], 'val_component_function': ['component_function'], 'val_component_property': ['component_property_name', 'component_property_type'], 'val_component_property_type': ['component_property_type'], 'val_component_property_value': ['component_property_name', 'valid_property_value', 'component_property_type'], 'val_contract_type': ['contract_type'], 'val_country_code': ['iso_country_code'], 
'val_device_auto_mgmt_protocol': ['auto_mgmt_protocol'], 'val_device_collection_type': ['device_collection_type'], 'val_device_mgmt_ctrl_type': ['device_mgmt_control_type'], 'val_device_status': ['device_status'], 'val_diet': ['diet'], 'val_dns_class': ['dns_class'], 'val_dns_domain_collection_type': ['dns_domain_collection_type'], 'val_dns_domain_type': ['dns_domain_type'], 'val_dns_record_relation_type': ['dns_record_relation_type'], 'val_dns_srv_service': ['dns_srv_service'], 'val_dns_type': ['dns_type'], 'val_encapsulation_mode': ['encapsulation_type', 'encapsulation_mode'], 'val_encapsulation_type': ['encapsulation_type'], 'val_encryption_key_purpose': ['encryption_key_purpose', 'encryption_key_purpose_version'], 'val_encryption_method': ['encryption_method'], 'val_filesystem_type': ['filesystem_type'], 'val_image_type': ['image_type'], 'val_ip_namespace': ['ip_namespace'], 'val_iso_currency_code': ['iso_currency_code'], 'val_key_usg_reason_for_assgn': ['key_usage_reason_for_assign'], 'val_layer2_network_coll_type': ['layer2_network_collection_type'], 'val_layer3_network_coll_type': ['layer3_network_collection_type'], 'val_logical_port_type': ['logical_port_type'], 'val_logical_volume_property': ['filesystem_type', 'logical_volume_property_name'], 'val_logical_volume_purpose': ['logical_volume_purpose'], 'val_logical_volume_type': ['logical_volume_type'], 'val_netblock_collection_type': ['netblock_collection_type'], 'val_netblock_status': ['netblock_status'], 'val_netblock_type': ['netblock_type'], 'val_network_interface_purpose': ['network_interface_purpose'], 'val_network_interface_type': ['network_interface_type'], 'val_network_range_type': ['network_range_type'], 'val_network_service_type': ['network_service_type'], 'val_operating_system_family': ['operating_system_family'], 'val_os_snapshot_type': ['operating_system_snapshot_type'], 'val_ownership_status': ['ownership_status'], 'val_package_relation_type': ['package_relation_type'], 'val_password_type': 
['password_type'], 'val_person_company_attr_dtype': ['person_company_attr_data_type'], 'val_person_company_attr_name': ['person_company_attr_name'], 'val_person_company_attr_value': ['person_company_attr_value', 'person_company_attr_name'], 'val_person_company_relation': ['person_company_relation'], 'val_person_contact_loc_type': ['person_contact_location_type'], 'val_person_contact_technology': ['person_contact_technology', 'person_contact_type'], 'val_person_contact_type': ['person_contact_type'], 'val_person_image_usage': ['person_image_usage'], 'val_person_location_type': ['person_location_type'], 'val_person_status': ['person_status'], 'val_physical_address_type': ['physical_address_type'], 'val_physicalish_volume_type': ['physicalish_volume_type'], 'val_processor_architecture': ['processor_architecture'], 'val_production_state': ['production_state'], 'val_property': ['property_type', 'property_name'], 'val_property_collection_type': ['property_collection_type'], 'val_property_data_type': ['property_data_type'], 'val_property_type': ['property_type'], 'val_property_value': ['valid_property_value', 'property_name', 'property_type'], 'val_pvt_key_encryption_type': ['private_key_encryption_type'], 'val_rack_type': ['rack_type'], 'val_raid_type': ['raid_type'], 'val_service_env_coll_type': ['service_env_collection_type'], 'val_shared_netblock_protocol': ['shared_netblock_protocol'], 'val_slot_function': ['slot_function'], 'val_slot_physical_interface': ['slot_function', 'slot_physical_interface_type'], 'val_snmp_commstr_type': ['snmp_commstr_type'], 'val_ssh_key_type': ['ssh_key_type'], 'val_sw_package_type': ['sw_package_type'], 'val_token_collection_type': ['token_collection_type'], 'val_token_status': ['token_status'], 'val_token_type': ['token_type'], 'val_volume_group_purpose': ['volume_group_purpose'], 'val_volume_group_relation': ['volume_group_relation'], 'val_volume_group_type': ['volume_group_type'], 'val_x509_certificate_file_fmt': ['x509_file_format'], 
'val_x509_certificate_type': ['x509_certificate_type'], 'val_x509_key_usage': ['x509_key_usg'], 'val_x509_key_usage_category': ['x509_key_usg_cat'], 'val_x509_revocation_reason': ['x509_revocation_reason'], 'volume_group': ['volume_group_id'], 'volume_group_physicalish_vol': ['volume_group_id', 'physicalish_volume_id'], 'volume_group_purpose': ['volume_group_id', 'volume_group_purpose'], 'x509_key_usage_attribute': ['x509_cert_id', 'x509_key_usg'], 'x509_key_usage_categorization': ['x509_key_usg_cat', 'x509_key_usg'], 'x509_key_usage_default': ['x509_key_usg', 'x509_signed_certificate_id'], 'x509_signed_certificate': ['x509_signed_certificate_id']} |
""" Largest element in the array. """
class Solution:
""" Naive Solution.
Time complexity: O(n^2)
"""
def largestElementInArray(self, arr):
i = 0
while i <len(arr):
j =i+1
while j< len(arr):
if arr[i] > arr[j]:
arr[i], arr[j] = arr[j], arr[i]
j += 1
i += 1
return arr[i-1]
class Solution_2:
""" Improved version
2. Largest element
3. Also find second largest element.
"""
def largestElementInArray(self, arr):
largest = []
i = 0
while i <len(arr):
if len(largest):
if arr[largest[0]] < arr[i]:
largest[1] = largest[0] if len(largest) > 1 else largest.append(largest[0])
largest[0] = i
elif len(largest) > 1 and arr[largest[1]] < arr[i]:
largest[1] = i
else:
largest.append(i)
i += 1
return largest[0], largest[1] if len(largest) > 1 else None
if __name__ == "__main__":
s = Solution()
res = s.largestElementInArray([8, 9, 10, 1, 2, 20, 30, -1, -50])
print("The largest element from the given array is ", res)
s_1 = Solution_2()
res = s_1.largestElementInArray([8, 9, 10, 1, 2, 20, 30, -1, -50])
print("The largest element from the given array is ", res)
res = s_1.largestElementInArray([10, 10, 10])
print("The largest element from the given array is ", res)
| """ Largest element in the array. """
class Solution:
""" Naive Solution.
Time complexity: O(n^2)
"""
def largest_element_in_array(self, arr):
i = 0
while i < len(arr):
j = i + 1
while j < len(arr):
if arr[i] > arr[j]:
(arr[i], arr[j]) = (arr[j], arr[i])
j += 1
i += 1
return arr[i - 1]
class Solution_2:
""" Improved version
2. Largest element
3. Also find second largest element.
"""
def largest_element_in_array(self, arr):
largest = []
i = 0
while i < len(arr):
if len(largest):
if arr[largest[0]] < arr[i]:
largest[1] = largest[0] if len(largest) > 1 else largest.append(largest[0])
largest[0] = i
elif len(largest) > 1 and arr[largest[1]] < arr[i]:
largest[1] = i
else:
largest.append(i)
i += 1
return (largest[0], largest[1] if len(largest) > 1 else None)
if __name__ == '__main__':
s = solution()
res = s.largestElementInArray([8, 9, 10, 1, 2, 20, 30, -1, -50])
print('The largest element from the given array is ', res)
s_1 = solution_2()
res = s_1.largestElementInArray([8, 9, 10, 1, 2, 20, 30, -1, -50])
print('The largest element from the given array is ', res)
res = s_1.largestElementInArray([10, 10, 10])
print('The largest element from the given array is ', res) |
# LANGUAGE: Python
# AUTHOR: randy
# GITHUB: https://github.com/randy1369
print('Hello, python!') | print('Hello, python!') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.