content
stringlengths 7
1.05M
|
|---|
"""
PyCrypto has an own secure random function "Crypto.Random.random"
but it does not use a seed value as expected by rule 6
"""
|
"""
This module contains shared functions for testing
As it does not begin with "test", it will not be detected by the
auto-discovery of Pytest.
"""
def assert_pyspark_df_equal(actual_df, expected_df):
    """
    Tests if two DataFrames are equal. Adapted from code originally
    written by Dave Greasley (https://github.com/DaveGreasley).
    First tests that the columns are equal, including the column order. If
    not an AssertionError is raised.
    Then uses .exceptAll() twice to find rows that are in the expected_df
    but not in the actual_df, and vice versa. If they are identical
    then the count of both of these should be zero; if not, an
    AssertionError is raised.
    This function can be expanded to print out more information about any
    test failure, e.g. diff_ab.union(diff_ba).show() if the count is
    greater than zero. You can also modify it to account for columns
    having a different order by testing set equality and then setting
    expected_df = expected_df.select(actual_df.columns). For this
    example however it is left as simple as possible.
    Pytest knows this function is not to be ran as a test as it doesn't
    start or end with the word "test" and is contained in a non-test
    module.
    Args:
        actual_df (spark DataFrame): DF to be tested
        expected_df (spark DataFrame): expected result
    Raises:
        AssertionError: if the column lists differ, or any row appears a
            different number of times in the two frames.
    """
    if actual_df.columns != expected_df.columns:
        # Include both column lists so the failure is actionable.
        # (The original raised f-strings with no placeholders and gave
        # no detail about which columns differed.)
        raise AssertionError(
            "DataFrames have different columns: "
            "{} != {}".format(actual_df.columns, expected_df.columns)
        )
    # exceptAll keeps duplicates, so row multiplicity is compared too.
    diff_ab = actual_df.exceptAll(expected_df)
    diff_ba = expected_df.exceptAll(actual_df)
    if diff_ab.union(diff_ba).count() > 0:
        raise AssertionError("DataFrames are not equal.")
|
"""
Put here common independent stuff,
such as utilities, management commands, etc.,
what can not belong to any app.
DO NOT let this package to be dump of ugly code.
Putting code here, you rather make an exception,
than an ordinary core organization.
"""
|
class input_format():
    """Helper that prints an annotated sample JSON input document for each
    supported solid-solution-strength calculation mode."""
    def __init__(self,format_tag):
        # Name of the input format whose sample should be printed; see
        # print_sample_input for the recognized values.
        self.tag = format_tag
    def print_sample_input(self):
        """Print a sample JSON file (with field-by-field explanations) for
        the format selected by ``self.tag``; print the list of valid tag
        names when the tag is not recognized."""
        if self.tag =='FCC_BCC_Edge_Ternary':
            print('''
Sample JSON for using pseudo-ternary predictions of solid solution strength by Curtin edge dislocation model.
------------------------------------------------------------
{
  "material":"MnFeCoNiAl",
  "structure":"FCC",
  "pseudo-ternary":{
      "increment": 1,
      "psA": {"grouped_elements":["Mn","Co"],
              "ratio": [1,1],
              "range":[0,100]},
      "psB": {"grouped_elements":["Fe","Ni"],
              "ratio": [1,1],
              "range":[0,100]},
      "psC": {"grouped_elements":["Al"],
              "ratio": [1],
              "range":[0,100]}
  },
  "elements":{
      "Co": {"Vn":11.12,"E":262.9,"G":101.7,"nu":0.292},
      "Ni": {"Vn":10.94,"E":199.1,"G":76.0,"nu":0.309},
      "Al": {"Vn":16.472,"E":65.5,"G":23.9,"nu":0.369},
      "Mn": {"Vn":12.60,"E":197.7,"G":73.4,"nu":0.347},
      "Fe": {"Vn":12.09,"E":194.3,"G":73.4}
  },
  "uncertainty_level":{
      "on/off":"on",
      "a":0.01,
      "elastic_constants":0.05
  },
  "conditions":{"temperature":300,"strain_r":0.001},
  "model":{
      "name":"FCC_Varvenne-Curtin-2016"
  },
  "savefile":"MnFeCoNiAl_out"
}
------------------------------------------------------------
Nesessary tags:
"material": material name
--
"structure": "FCC" or "BCC"
--
"pseudo-ternary": containing "psA" "psB" 'psC' for pseudo-ternary components
    "psA": pseudo-ternary component, can be a single element or grouped element.
        "grouped_elements": # group specific elements in psA
                            # eg. "grouped_elements":["Ni","Co"], Mn and Co are grouped
        "ratio": # specify the ratio between elements in A
                 # eg. "ratio": [1,1], represent Co:Ni=1:1
        "range": # specify the concentration range for "psA"
                 # eg. "range":[0,100], range from 0 to 100 at.%
--
"elements": input data for elements:
    "Co": element symbol for Co
        "Vn": atomic volume
        "a": lattice constant
        "b": Burgers vector
        # NOTE, just need to specify one of "Vn", "a" or "b"
        "E": Young's modulus
        "G": shear modulus
        "nu": Poisson's ratio
        # NOTE, in Voigt notation, as indicated in the paper.
        # Need to specify 2 of the "E", "G", and "nu" for isotropic.
--
"conditions": experimental conditions
    "temperature": Kelvin
    "strain_r": experiment strain rate,
                typical tensile tests: 0.001 /s
"model":
    IMPORTANT!!!
    "name": name of the model,
            use "FCC_Varvenne-Curtin-2016" for FCC and
            use "BCC_edge_Maresca-Curtin-2019" for BCC
    The following are adjustable parameters for the model
    "f1": # dimensionless pressure field parameter for athermal yield stress
    "f2": # dimensionless pressure field parameter for energy barrier
    "alpha": # dislocation line tension parameter
    IMPORTANT:
    If you don't know f1, f2, and alpha for your material,
    DO NOT change f1, f2 and alpha.
    The default values were optimized for FCC HEAs and BCC HEAs.
    Read Curtin's papers.
-------
Optional tags:
"uncertainty_level": allow uncertainty evaluation on input data.
    "on/off":"on" # turn on/off the uncertainty calculation
                  # if off, no need to set the following tags
                  # if on, specify the standard deviations
                    for lattice constants and elastic constants
    "a": 0.01 # applied 1% standard deviation to lattice constants
              # 1000 data points were generated to evaluate the average and standar deviation
              # this means for each element,
              # a new lattice constant will be generated using normal distribution,
              # centered at the value "a" (lattice constants) in "elements"
              # with a standard deviation 0.01a.
    "elastic_constants": 0.05 # applied 5% standard deviation to elastic constants
              # 1000 data points were generated to evaluate the average and standar deviation
              # this means for each element,
              # new elastic constants will be generated using normal distribution,
              # centered at the values "E", "G", "nu" (elastic constants) in "elements"
              # with a standard deviation 0.01a.
"savefile": output filename, CSV file.
END
''')
        elif self.tag =='FCC_BCC_Edge_Composition_Temperature':
            print('''
Sample JSON for composition-temperature predictions of solid solution strength by Curtin edge dislocation model.
------------------------------------------------------------
{
  "material":"MnFeCoNi",
  "structure":"FCC",
  "elements":{
      "Co": {"Vn":11.12,"E":262.9,"G":101.7,"nu":0.292},
      "Ni": {"Vn":10.94,"E":199.1,"G":76.0,"nu":0.309},
      "Mn": {"Vn":12.60,"E":197.7,"G":73.4,"nu":0.347},
      "Fe": {"Vn":12.09,"E":194.3,"G":73.4}
  },
  "compositions":{
      "element_order": ["Co","Ni","Fe","Mn"],
      "concentrations": [
          [25,25,25,25],
          [20,20,30,30],
          [30,30,20,20]
      ]
  },
  "uncertainty_level":{
      "on/off":"on",
      "a":0.01,
      "elastic_constants":0.05
  },
  "conditions":{
      "temperature":{
          "min": 300,
          "max": 600,
          "inc": 10
      },
      "strain_r":0.001
  },
  "model":{
      "name":"FCC_Varvenne-Curtin-2016"
  },
  "savefile":"MnFeCoNi_out"
}
------------------------------------------------------------
Nesessary tags:
"material": material name
--
"structure": "FCC" or "BCC"
--
"compositions": containing element symbols and concentrations for calculation.
    "element_order": a list of element symbols in order, be consistent with the "concentrations"
    "concentrations": a list of concentrations in at.% for elements in the "element_order",
                      add up to 100.
--
"elements": input data for elements:
    "Co": element symbol for Co
        "Vn": atomic volume
        "a": lattice constant
        "b": Burgers vector
        # NOTE, just need to specify one of "Vn", "a" or "b"
        "E": Young's modulus
        "G": shear modulus
        "nu": Poisson's ratio
        # NOTE, in Voigt notation, as indicated in the paper.
        # Need to specify 2 of the "E", "G", and "nu" for isotropic.
--
"conditions": experimental conditions
    "temperature": specify temperature (Kelvin) range and increment for the calculations.
        "max": max T
        "min": min T
        "inc": increment.
    "strain_r": experiment strain rate,
                typical tensile tests: 0.001 /s
"model":
    IMPORTANT!!!
    "name": name of the model,
            use "FCC_Varvenne-Curtin-2016" for FCC and
            use "BCC_edge_Maresca-Curtin-2019" for BCC
    The following are adjustable parameters for the model
    "f1": # dimensionless pressure field parameter for athermal yield stress
    "f2": # dimensionless pressure field parameter for energy barrier
    "alpha": # dislocation line tension parameter
    IMPORTANT:
    If you don't know f1, f2, and alpha for your material,
    DO NOT change f1, f2 and alpha.
    The default values were optimized for FCC HEAs and BCC HEAs.
    Read Curtin's papers.
-------
Optional tags:
"uncertainty_level": allow uncertainty evaluation on input data.
    "on/off":"on" # turn on/off the uncertainty calculation
                  # if off, no need to set the following tags
                  # if on, specify the standard deviations
                    for lattice constants and elastic constants
    "a": 0.01 # applied 1% standard deviation to lattice constants
              # 1000 data points were generated to evaluate the average and standar deviation
              # this means for each element,
              # a new lattice constant will be generated using normal distribution,
              # centered at the value "a" (lattice constants) in "elements"
              # with a standard deviation 0.01a.
    "elastic_constants": 0.05 # applied 5% standard deviation to elastic constants
              # 1000 data points were generated to evaluate the average and standar deviation
              # this means for each element,
              # new elastic constants will be generated using normal distribution,
              # centered at the values "E", "G", "nu" (elastic constants) in "elements"
              # with a standard deviation 0.05*value.
"savefile": output filename, CSV file.
END
''')
        elif self.tag =='BCC_Screw_Curtin_Ternary':
            print('''
Sample JSON for predictions of solid solution strength for pseudo-ternary BCC by Curtin screw dislocation model.
Screw dislocation in BCC.
------------------------------------------------------------
{
  "material":"NbMoW",
  "pseudo-ternary":{
      "increment": 1,
      "psA": {"grouped_elements":["Nb"],
              "ratio": [1],
              "range":[0,100]},
      "psB": {"grouped_elements":["Mo"],
              "ratio": [1],
              "range":[0,100]},
      "psC": {"grouped_elements":["W"],
              "ratio": [1],
              "range":[0,100]}
  },
  "elements":{
      "Nb": {"a":3.30,"Delta_E_p":0.0345,"E_k":0.6400,"E_v":2.9899,"E_si":5.2563,"Delta_V_p":0.020},
      "Mo": {"a":3.14,"Delta_E_p":0.1579,"E_k":0.5251,"E_v":2.9607,"E_si":7.3792,"Delta_V_p":0.020},
      "W": {"a":3.16,"Delta_E_p":0.1493,"E_k":0.9057,"E_v":3.5655,"E_si":9.5417,"Delta_V_p":0.020}
  },
  "adjustables":{
      "kink_width":10,
      "Delta_V_p_scaler":1,
      "Delta_E_p_scaler":1
  },
  "conditions":{"temperature":300,"strain_r":0.001},
  "model":{
      "name":"BCC_screw_Maresca-Curtin-2019"
  },
  "savefile":"NbMoW_out"
}
------------------------------------------------------------
Nesessary tags:
"material": material name
--
"pseudo-ternary": containing "psA" "psB" 'psC' for pseudo-ternary components
    "psA": pseudo-ternary component, can be a single element or grouped elements.
        "grouped_elements": # group specific elements in psA
                            # eg. "grouped_elements":["W","Ta"], Mn and Co are grouped
        "ratio": # specify the ratio between elements in A
                 # eg. "ratio": [1,1], represent W:Ta=1:1
        "range": # specify the concentration range for "psA"
                 # eg. "range":[0,100], range from 0 to 100 at.%
--
"elements": input data for elements:
    "W": element symbol for W
        below are necessary inputs.
        "a": lattice constant
        "E_k": screw dislocation kink formation energy (usually by DFT or MD calculations)
        "E_v": vacancy formation energy (usually by DFT or MD)
        "E_si": self-interstitial formation energy (usually by DFT or MD)
        "Delta_E_p": solute-dislocation interaction energy (usually by DFT or MD)
        "Delta_V_p": Peierls barrier (usually by DFT or MD)
--
"adjustables": adjustable parameters for the model. Be VERY careful to change the values.
    "kink_width":10  kink width, default is 10, (unit: burgers vector), usually between 10b to 20b.
    "Delta_V_p_scaler":1,  Peierls barrier scaler, DFT values are usually very high compared to experiments.
                           So rescaling was taken to fit the experimental yield strengths.
    "Delta_E_p_scaler":1   Solute-dislocation interaction energy scaler.
                           This is also rescaled for DFT/MD values.
--
"conditions": experimental conditions
    "temperature": specify temperature (Kelvin) the calculations.
    "strain_r": experiment strain rate,
                typical tensile tests: 0.001 /s
--
"model": "BCC_screw_Maresca-Curtin-2019",
-------
Optional tags:
"savefile": output filename, CSV file.
END
''')
        elif self.tag =='BCC_Screw_Curtin_Composition_Temperature':
            print('''
Sample JSON for composition-temperature predictions of BCC solid solution strength by Curtin screw dislocation model.
Screw dislocation in BCC.
------------------------------------------------------------
{
  "material":"Nb95Mo5",
  "model":"BCC_screw_Maresca-Curtin-2019",
  "properties":{
      "a": 3.289,
      "E_k": 0.6342,
      "E_v": 2.989,
      "E_si": 5.361,
      "Delta_E_p": 0.0488,
      "Delta_V_p": 0.020
  },
  "conditions":{
      "temperature":{
          "max":500,
          "min":0,
          "inc":10
      },
      "strain_r":0.001
  },
  "adjustables":{
      "kink_width":10,
      "Delta_V_p_scaler":1,
      "Delta_E_p_scaler":1
  },
  "savefile":"Nb95Mo5_out"
}
------------------------------------------------------------
Nesessary tags:
"material": material name
--
"properties": input data for the material:
    "a": lattice constant
    "E_k": screw dislocation kink formation energy (usually by DFT or MD calculations)
    "E_v": vacancy formation energy (usually by DFT or MD)
    "E_si": self-interstitial formation energy (usually by DFT or MD)
    "Delta_E_p": solute-dislocation interaction energy (usually by DFT or MD)
    "Delta_V_p": Peierls barrier (usually by DFT or MD)
--
"conditions": experimental conditions
    "temperature": specify temperature (Kelvin) range and increment for the calculations.
        "max": max T
        "min": min T
        "inc": increment.
    "strain_r": experiment strain rate,
                typical tensile tests: 0.001 /s
--
"adjustables": adjustable parameters for the model. Be VERY careful to change the values.
    "kink_width":10  kink width, default is 10, (unit: burgers vector), usually between 10b to 20b.
    "Delta_V_p_scaler":1,  Peierls barrier scaler, DFT values are usually very high compared to experiments.
                           So rescaling was taken to fit the experimental yield strengths.
    "Delta_E_p_scaler":1   Solute-dislocation interaction energy scaler.
                           This is also rescaled for DFT/MD values.
--
"model": "BCC_screw_Maresca-Curtin-2019",
-------
Optional tags:
"savefile": output filename, CSV file.
END
''')
        elif self.tag =='BCC_Screw_Suzuki_Temperature':
            # Placeholder: this mode is not implemented yet.
            print('under development')
        else:
            # Unknown tag: list every supported format name.
            print('NOT a valid name. Available input formats: \n'
                  'FCC_BCC_Edge_Ternary\n'
                  'FCC_BCC_Edge_Composition_Temperature\n'
                  'BCC_Screw_Curtin_Ternary\n'
                  'BCC_Screw_Curtin_Composition_Temperature\n'
                  'BCC_Screw_Suzuki_Temperature\n')
|
# After closing time, the store manager would like to know how much business was
# transacted during the day. Modify the CashRegister class to enable this functionality.
# Supply methods getSalesTotal and getSalesCount to get the total amount of all sales
# and the number of sales. Supply a method resetSales that resets any counters and
# totals so that the next day’s sales start from zero.
class CashRegister():
    """Cash register that rings up items for the current sale and keeps a
    history of completed sales for end-of-day reporting."""

    def __init__(self):
        self._items = []   # items rung up in the current, unfinished sale
        self._sales = []   # completed sales; each entry is a list of items

    def add_item(self, item):
        """Add an item (any object exposing get_price()/get_name()) to the current sale."""
        self._items.append(item)

    def clear(self):
        """Complete the current sale: archive its items and empty the register.

        Fixes two defects: the original appended the live ``self._items``
        list itself (so the in-place clear that followed also wiped the
        archived sale, making get_sales_total always return 0), and it
        left a debug ``print`` behind.
        """
        self._sales.append(list(self._items))  # archive a snapshot copy
        self._items[:] = []

    def get_sales_count(self):
        """Return the number of completed sales since the last reset."""
        return len(self._sales)

    def get_sales_total(self):
        """Return the total amount of all completed sales since the last reset."""
        total = 0
        for sale in self._sales:
            for item in sale:
                total += item.get_price()
        return total

    def reset_sales(self):
        """Start a new day: forget all completed sales."""
        self._sales[:] = []

    def get_count(self):
        """Return the number of items in the current sale."""
        return len(self._items)

    def get_total(self):
        """Return the total price of the items in the current sale."""
        total = 0
        for item_object in self._items:
            total += item_object.get_price()
        return total

    def display_items(self):
        """Return one 'name - price' line per item in the current sale."""
        output = []
        for item_object in self._items:
            output.append("{} - {}".format(item_object.get_name(), item_object.get_price()))
        return "\n".join(output)
|
class opcode(object):
    """Numeric opcode constants for a wire protocol.

    NOTE(review): names suggest `rhello`/`rget` are replies to `hello`/`get`,
    but the numbering scheme is inconsistent (130 = 2 + 128, yet 161 = 160 + 1);
    confirm the semantics against the protocol specification.
    """
    nul = 1       # no-op / null opcode
    hello = 2     # hello request
    rhello = 130  # presumably the reply to `hello` -- TODO confirm
    get = 160     # get request
    rget = 161    # presumably the reply to `get` -- TODO confirm
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 4 21:10:59 2018
@author: User
"""
def username():
    """Build and print a username: first initial (lowercase) + last four
    letters of the surname (uppercase) + first three digits of the year,
    reversed. Values are hard-coded stand-ins for the commented-out
    input() calls."""
    first = 'arvind'  # input('firstname')
    last = 'KARIR'    # input('lastname')
    yob = str(1234)   # int( input('yob') )
    prefix = first[:1].lower()
    suffix = last[-4:].upper()
    digits = yob[:3][::-1]
    print(prefix + suffix + digits)
username()
|
# Deduplicate chat lines and write them out as a question dataset.
# Fix: the original opened youtube_chat.txt without ever closing it;
# both files are now managed by `with` blocks.
with open('youtube_chat.txt', 'r') as chat_file:
    questions = chat_file.readlines()
with open('question_dataset.txt', 'w+') as file:
    for s in set(questions):
        # [1:-1] drops the first and last character of the stripped line
        # (presumably surrounding quote characters -- TODO confirm against the data).
        print(s.rstrip()[1:-1], file=file)
|
# Example module-level values: a scalar, a mapping and a sequence.
A = 'avalue'
B = dict(key='value')
C = ['array']
|
CARGO = "Cargo"
COMPOSER = "Composer"
GO = "Go"
MAVEN = "Maven"
NPM = "npm"
NUGET = "NuGet"
PYPI = PIP = "pip"
RUBYGEMS = "RubyGems"
ecosystems = [CARGO, COMPOSER, GO, MAVEN, NPM, NUGET, PYPI, RUBYGEMS]
|
# CPU: 0.08 s
# Read villagers and song sessions from stdin; print every villager who
# knows all songs (a song is created whenever villager 1 participates).
n_villagers = int(input())
villagers = {key: set() for key in range(1, n_villagers + 1)}
song_counter = 0
for _ in range(int(input())):
    _, *participants = map(int, input().split())
    if 1 in participants:
        # Villager 1 starts a new song; every participant learns it.
        song_counter += 1
        for participant in participants:
            villagers[participant].add(song_counter)
    else:
        # No new song: all participants end up knowing the union of the
        # songs they collectively knew. (Fixes the original's shadowed
        # `participant` loop variable and its mutation of a set while
        # iterating over it -- the union is computed first, then applied.)
        shared_songs = set()
        for participant in participants:
            shared_songs |= villagers[participant]
        for participant in participants:
            villagers[participant] |= shared_songs
for villager, songs in villagers.items():
    if len(songs) == song_counter:
        print(villager)
|
# Given an n x n array, return the array elements arranged from outermost
# elements to the middle element, traveling clockwise.
def snail(array):
    """Flatten `array` in clockwise-spiral order (consumes `array` in place)."""
    shell = []
    while array:
        # Top row, left to right.
        shell += array.pop(0)
        # Right column, top to bottom.
        for row in array:
            shell.append(row.pop())
        # Bottom row, right to left.
        if array:
            bottom = array.pop()
            bottom.reverse()
            shell += bottom
        # Left column, bottom to top.
        for row in reversed(array):
            shell.append(row.pop(0))
    return shell
|
class NodeRegistry:
    """Registry of pipeline ``Node`` classes plus helpers that describe
    each registered node (docstring, ports, constructor options) as
    plain dictionaries."""

    def __init__(self):
        # Registered node classes; a set silently ignores re-registration.
        self.nodes = set()

    def register(self, *nodes: List[Type[Node]]):
        """Add any number of node classes to the registry."""
        self.nodes.update(nodes)

    def pipeline_factory(self, pipeline_spec):
        """Construct a pipeline according to the spec.
        """
        ...

    @staticmethod
    def _port_to_tuple(port: Port):
        # First slot is a placeholder (ports carry no type information here);
        # second is the cleaned-up help text, or None when absent.
        return (
            None,
            inspect.cleandoc(port.help) if port.help else None
        )

    @staticmethod
    def _parse_docstr(obj):
        """Parse *obj*'s docstring; log which object failed before re-raising."""
        try:
            return docstring_parser.parse(obj.__doc__)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate without triggering the error message.
        except Exception:
            print("Error parsing docstring of {}".format(obj.__name__))
            raise

    @staticmethod
    def _parse_arguments(node_cls: Type[Node]):
        """Map constructor argument name -> (annotation, description).

        Only arguments that have BOTH a type annotation and a docstring
        entry (on the class or on __init__) are included.
        """
        # Use type annotations to determine the type.
        # Use the docstring for each argument.
        annotations = node_cls.__init__.__annotations__
        # Get docstring for each argument; __init__'s entries win on clashes.
        arg_desc = {
            p.arg_name: p.description
            for p in NodeRegistry._parse_docstr(node_cls).params
        }
        arg_desc.update({
            p.arg_name: p.description
            for p in NodeRegistry._parse_docstr(node_cls.__init__).params
        })
        return {
            k: (annotations[k], arg_desc[k])
            for k in annotations.keys() & arg_desc.keys()
        }

    @classmethod
    def _node_to_dict(cls, node_cls: Type[Node]):
        """Describe one node class as a JSON-friendly dictionary."""
        doc = cls._parse_docstr(node_cls)
        return {
            "name": node_cls.__name__,
            "short_description": doc.short_description,
            "long_description": doc.long_description,
            "inputs": {p.name: cls._port_to_tuple(p) for p in getattr(node_cls, "inputs", [])},
            "outputs": {p.name: cls._port_to_tuple(p) for p in getattr(node_cls, "outputs", [])},
            "options": cls._parse_arguments(node_cls),
        }

    def to_dict(self) -> dict:
        """Describe every registered node, keyed by class name."""
        return {
            n.__name__: self._node_to_dict(n) for n in self.nodes
        }
|
# Connection settings for the MySQL `employees` database.
# The ##...## markers are placeholders substituted at deploy time.
db_config = dict(
    user='##username##',
    passwd='##password##',
    host='##host##',
    db='employees',
)
|
class Solution(object):
    def reachNumber(self, target):
        """
        :type target: int
        :rtype: int

        Minimum number of moves (step i moves +/- i) to land exactly on
        target: by symmetry work with |target|, take the smallest k whose
        triangular sum reaches it, then grow k until the overshoot is
        even (an even surplus can be absorbed by flipping one move's sign).
        """
        distance = abs(target)
        k = int((2 * distance) ** 0.5)
        total = k * (k + 1) // 2
        while total < distance or (total - distance) % 2 != 0:
            k += 1
            total += k
        return k
|
a="J`e^\x1cf_l]_WiUa\x12UQ]\x0esdj^hp\x1a\\mZ_\x15hT`XQ]\x0eumrrg\x1bg^fZ[\\U[\x12aU]gea_o]i\x1a<gm_Y!$+\x11iPOh-\x1e?pr\x1am``i\x15]f\x12j_d` ej^c\x1b4\x19;K<GoV&c#Ng0tp\\o.f_W+dYS'^h$ha_bs`-Zn-f^*cq!\x12=_eS sml\x1cn_^\x18`j\x15Vhf\x11]PYe\x1fwlqm\x1afaeZ\x15[_ah\x10d^"
b=""
for i in range(len(a)):
print(a[i])
b+=chr(ord(a[i])+(i%19))
print(b)
|
""" Defines the dataset configuration class for csv, libsvm, arff data formats
Dataset configuration object contains information for reading and preprocessing the dataset file """
class CsvConfig:
    """ Dataset configuration class for CSV data format """
    # File-type discriminator used by DATASET_FILETYPE_CONFIG below.
    dataset_filetype = 'csv'
    def __init__(self, sep=',', skiprows=None, header_row=None, usecols=None, target_col=-1, categorical_cols='infer', na_values=None, **kargs):
        r"""
        Parameters
        ----------
        sep : str, optional
            Column delimiter. **Accepted values:** ``None`` implies autodetect delimiter, ``'\s+'`` uses combination of spaces and tabs, regular expressions. (default is ``','`` ).
        skiprows : list of int or int, optional
            List of line indices to skip or the number of starting lines to skip. (default value ``None`` implies don't skip any lines)
        header_row : int, optional
            Relative Zero-Index (index of rows after skipping rows using ``skiprows`` parameter) of the row containing column names. Note: All preceding rows are ignored. (default value ``None`` implies no header row)
        usecols : list, optional
            List of column names (or column indices, if no header row specified) to consider. (default value ``None`` indicates use of all columns)
        target_col : int, optional
            Relative Zero-Index of column (after filtering columns using ``usecols`` parameter) to use as target values. ``None`` indicates absence of target value columns. (default value ``-1`` implies use the last column as target values)
        categorical_cols : 'infer' or list or str or int or 'all', optional
            List (str or int if singleton) of column names (or absolute indices of columns, if no header row specified) of categorical columns to encode. Default value ``'infer'`` autodetects nominal categorical columns. ``'all'`` implies all columns are nominal categorical. ``None`` implies no nominal categorical columns exist.
        na_values : scalar or str or list-like or dict, optional
            Additional strings to recognize as NA/NaN. If dict is passed, it specifies per-column NA values. By default the following values are interpreted as NaN: ‘’, ‘#N/A’, ‘#N/A N/A’, ‘#NA’, ‘-1.#IND’, ‘-1.#QNAN’, ‘-NaN’, ‘-nan’, ‘1.#IND’, ‘1.#QNAN’, ‘N/A’, ‘NA’, ‘NULL’, ‘NaN’, ‘n/a’, ‘nan’, ‘null’. (default value ``None`` implies no additional values to intrepret as NaN)
        **kargs
            Other keyword arguments accepted by :func:`pandas.read_csv` such as ``comment`` and ``lineterminator``.
        Notes
        -----
        * ``skiprows`` parameter uses absolute row indices whereas ``header_row`` parameter uses relative index (i.e., zero-index after removing rows specied by ``skiprows`` parameter).
        * ``usecols`` and ``categorical_cols`` parameters use absolute column names (or indices, if no header row) whereas ``target_cols`` parameter uses relative column indices (or names) after filtering out columns specified by ``usecols`` parameter.
        * ``categorical_cols='infer'`` identifies and encodes nominal features (i.e., features of 'string' type, with fewer unique entries than a value heuristically determined from the number of data samples) and drops other 'string' and 'date' type features from the dataset. Use :func:`automs.eda.max_classes_nominal` to find the heuristically determined value of maximum number of distinct entries in nominal features for a given number of samples.
        * Data samples with any NA/NaN features are implicitly dropped.

        (Docstring is now a raw string so the ``'\s+'`` example is not an
        invalid escape sequence.)
        """
        self.sep = sep
        self.skiprows = skiprows
        self.header_row = header_row
        self.usecols = usecols
        self.target_col = target_col
        # Targets are always label-encoded for CSV datasets.
        self.encode_target = True
        self.categorical_cols = categorical_cols
        self.na_values = na_values
        # Read the whole file by default (no row limit).
        self.nrows = None
        # Any extra read_csv keyword arguments become attributes directly.
        for key, value in kargs.items():
            setattr(self, key, value)
class LibsvmConfig:
    """Dataset configuration class for the LIBSVM data format.

    LIBSVM files are self-describing, so no reader options are needed.
    """
    # File-type discriminator used by DATASET_FILETYPE_CONFIG below.
    dataset_filetype = 'libsvm'

    def __init__(self):
        # Nothing to configure for LIBSVM input.
        pass
class ArffConfig:
    """Dataset configuration class for the ARFF data format."""
    # File-type discriminator used by DATASET_FILETYPE_CONFIG below.
    dataset_filetype = 'arff'

    def __init__(self, target_attr='class', numeric_categorical_attrs=None):
        """
        Parameters
        ----------
        target_attr : str, optional
            Attribute name of the target column; ``None`` means there is
            no target column. Defaults to ``'class'``.
        numeric_categorical_attrs : list of str, optional
            Names of numeric attributes that should be treated as nominal
            and encoded. Defaults to ``None`` (treat no numeric attribute
            as nominal).

        Notes
        -----
        All nominal type attributes are implicitly encoded.
        """
        self.target_attr = target_attr
        # Targets are always label-encoded for ARFF datasets.
        self.encode_target = True
        self.numeric_categorical_attrs = numeric_categorical_attrs
# Map each supported dataset file type to its configuration class, keyed
# by the classes' own `dataset_filetype` discriminator.
DATASET_FILETYPE_CONFIG = {
    config_cls.dataset_filetype: config_cls
    for config_cls in (CsvConfig, LibsvmConfig, ArffConfig)
}
|
# --- Quick converter: show equivalents of an amount in several currencies
# (exchange rates are hard-coded snapshots). ---
real = float(input("Quanto tinheito você tem? R$ "))
em_dolar_us = real / 3.78
em_dolar_ca = real / 2.85
em_euro = real / 4.31
em_libra = real / 4.98
em_dolar_au = real / 2.71
em_peso = real / 0.0983
print(f"R$ {real:.2f} reais equivale a: \n\nUS$ {em_dolar_us:.2f} dólares americanos \nCAD$ {em_dolar_ca:.2f} dólares canadenses \n€$ {em_euro:.2f} euros \n£$ {em_libra:.2f} libras \nAUD$ {em_dolar_au:.2f} dólares australianos \nPA$ {em_peso:.2f} pesos argentinos ")
# --- Interactive exchange-office flow: collect customer data, then quote. ---
print(' Casa de Câmbio BR')
print('-' * 50)
print('DADOS do Cliente')
print()
nome = input('Nome: ')
idade = input('Idade: ')
cpf = input('CPF: ')
telefone = input('Telefone: ')
print('-' * 30)
print('Dados do câmbio')
print()
real = float(input('Quanto você deseja trocar: R$ '))
print()
cotacao_dolar = real / 3.27
cotacao_euro = real / 3.67
cotacao_libra = real / 4.37
print('Listamos alguns valores que você pode adquirir de acordo com a quantia que você deseja trocar')
print()
print(f'Com R$ {real:.2f} você pode trocar por US$ {cotacao_dolar:.2f}')
print(f'Com R$ {real:.2f} você pode trocar por € {cotacao_euro:.2f}')
print(f'com R$ {real:.2f} você pode trocar por £ {cotacao_libra:.2f}')
print()
print(f'{nome} a BR Casa de Câmbio Agradece pela Preferência, Volte Sempre!')
print('-' * 70)
|
# Read a factor and a count from stdin, then print the first `count`
# multiples of `factor` as a list.
factor = int(input())
count = int(input())
# Renamed from `list` (which shadowed the builtin) and replaced the
# manual accumulator loop with a comprehension.
multiples = [factor * i for i in range(1, count + 1)]
print(multiples)
|
# Definition for a binary tree node.
class _TreeNode:
    def __init__(self, val=0, left=None, right=None):
        # Node payload plus optional left/right child nodes (None = leaf edge).
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def isSameTree(self, p: _TreeNode, q: _TreeNode) -> bool:
        """Return True when the two trees have identical structure and values."""
        # Same object, or both None: trivially equal.
        if p is q:
            return True
        # Exactly one side missing: shapes differ.
        if p is None or q is None:
            return False
        # Values must match here, then both subtrees must match (DFS).
        if p.val != q.val:
            return False
        return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
|
"""Top-level package for DRF Compose."""
__author__ = """Sotunde Abiodun"""
__email__ = "sotundeabiodun00@gmail.com"
__version__ = "0.1.1"
|
# Based on https://github.com/zricethezav/gitleaks/blob/6f5ad9dc0b385c872f652324188ce91da7157c7c/test_data/test_repos/test_dir_1/server.test2.py
# Do not hard code credentials
# NOTE(review): this snippet is secret-scanner test data -- the AWS key
# below uses the documented "EXAMPLE" placeholder form; do not replace
# these values with real credentials.
client = boto3.client(
    's3',
    # Hard coded strings as credentials, not recommended.
    aws_access_key_id='AKIAIO5FODNN7EXAMPLE',
    aws_secret_access_key='ABCDEF+c2L7yXeGvUyrPgYsDnWRRC1AYEXAMPLE'
)
# gh_pat = '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'
|
"""
Bisect Squares.
Given two squares on a two-dimensional plane, find
a line that would cut these two squares in half. Assume
that the top and the bottom sides of the square run
parallel to the x-axis.
"""
class BisectSquares():
    """Namespace for the bisect-squares exercise. ``Square.cut`` returns
    the line through the centers of two axis-aligned squares, extended to
    the squares' far edges, which halves both squares."""
    class Square():
        class Line():
            """Line segment between two points."""
            def __init__(self, p1, p2):
                self.p1 = p1
                self.p2 = p2
            def __repr__(self):
                # Bug fix: the original returned the tuple (p1, p1) --
                # __repr__ must return a str, and p2 was dropped.
                return f'[{self.p1}, {self.p2}]'
            def __str__(self):
                return f'[{self.p1}, {self.p2}]'
        class Point():
            """Simple 2-D point."""
            def __init__(self, x, y):
                self.x = x
                self.y = y
            def __str__(self):
                return f'({self.x}, {self.y})'
        def __init__(self, left, top, bottom, right):
            # Square described by its four edge coordinates; top/bottom
            # run parallel to the x-axis per the problem statement.
            self.top = top
            self.left = left
            self.bottom = bottom
            self.right = right
        def size(self) -> float:
            """Area of the square (annotation fixed: this returns a number,
            not a Point)."""
            return (self.right - self.left) * (self.top - self.bottom)
        def mid(self) -> Point:
            """Center point of the square."""
            return self.Point((self.left + self.right) / 2.0,
                              (self.top + self.bottom) / 2.0)
        def extend(self, mid1, mid2, size):
            """Extend the mid1->mid2 direction from mid1 by about half of
            ``size`` (sign of ``size`` selects the side), stepping along
            the dominant axis to stay on the square's boundary."""
            xdir = -1 if mid1.x < mid2.x else 1
            ydir = -1 if mid1.y < mid2.y else 1
            if mid1.x == mid2.x:
                # Vertical through both centers: move along y only.
                return self.Point(mid1.x, mid1.y + ydir * size / 2.0)
            slope = (mid1.y - mid2.y) / (mid1.x - mid2.x)
            y1, y2 = 0, 0
            if abs(slope) == 1:
                # Diagonal: step equally in x and y.
                x1 = mid1.x + xdir * size / 2.0
                y1 = mid1.y + ydir * size / 2.0
            elif abs(slope) < 1:
                # Shallow: step in x, derive y from the slope.
                x1 = mid1.x + xdir * size / 2.0
                y1 = slope * (x1 - mid1.x) + mid1.y
            else:
                # Steep: step in y, derive x from the slope.
                y1 = mid1.y + ydir * size / 2.0
                x1 = (y1 - mid1.y) / slope + mid1.x
            return self.Point(x1, y1)
        def cut(self, other) -> Line:
            """Return the bisecting Line through both squares' centers,
            spanning from the smallest to the largest extension point
            (ordered by x, then y)."""
            p1 = self.extend(self.mid(), other.mid(), self.size())
            p2 = self.extend(self.mid(), other.mid(), -1 * self.size())
            p3 = self.extend(other.mid(), self.mid(), other.size())
            p4 = self.extend(other.mid(), self.mid(), -1 * other.size())
            start, end = p1, p1
            for point in [p2, p3, p4]:
                if point.x < start.x or \
                        point.x == start.x and point.y < start.y:
                    start = point
                elif point.x > end.x or \
                        point.x == end.x and point.y > end.y:
                    end = point
            return self.Line(start, end)
|
def find_range_values(curr_range):
    """Parse a comma-separated range string like '3,7' into a list of ints."""
    return [int(part) for part in curr_range.split(",")]
def find_set(curr_range):
    """Expand a 'start,end' string into the set of all integers in
    [start, end], endpoints included."""
    low, high = find_range_values(curr_range)
    return set(range(low, high + 1))
def find_longest_intersection(n):
    """Read `n` stdin lines of the form 'a,b-c,d' and return the largest
    intersection of any such range pair, as a list."""
    best = set()
    for _ in range(n):
        left_part, right_part = input().split("-")
        overlap = find_set(left_part) & find_set(right_part)
        if len(overlap) > len(best):
            best = overlap
    return list(best)
def print_result(longest_intersection):
    """Report the winning intersection and its size."""
    print(f"Longest intersection is {longest_intersection} "
          f"with length {len(longest_intersection)}")
# Entry point: the first stdin line gives the number of range pairs to read.
print_result(find_longest_intersection(int(input())))
|
# Complete solution
# https://leetcode.com/problems/remove-sub-folders-from-the-filesystem/discuss/409028/JavaPython-3-3-methods-from-O(n-*-(logn-%2B-m-2))-to-O(n-*-m)-w-brief-explanation-and-analysis.
# use startswith
class Solution:
    def removeSubfolders(self, folder: List[str]) -> List[str]:
        """Drop every folder that is a sub-folder of another in the list.

        Lexicographic order guarantees a parent sorts before its
        children, so one pass with the most recently kept folder (plus
        a trailing '/') as a prefix filter suffices.
        """
        folder.sort()  # in place, so parents precede their children
        kept = []
        parent_prefix = ' '  # no real path starts with a space
        for path in folder:
            if path.startswith(parent_prefix):
                continue  # sub-folder of the last kept folder
            kept.append(path)
            parent_prefix = path + '/'
        return kept
# Time: O(NlogN)
# Space:O(1), not count the output, if count, then O(N)
# trie
class Solution:
    def removeSubfolders(self, folder: List[str]) -> List[str]:
        """Trie variant: insert each path segment by segment; a '$' key
        marks where a kept folder ends, so any path that passes through a
        marked node is a sub-folder and is skipped."""
        def new_node():
            return defaultdict(new_node)
        root = new_node()
        kept = []
        for path in sorted(folder):
            node = root
            shadowed = False
            for segment in path[1:].split('/'):
                node = node[segment]
                if '$' in node:
                    shadowed = True
                    break
            if not shadowed:
                node['$'] = True
                kept.append(path)
        return kept
# Time: O(N*M), where N = number of paths and M = average path length
# Space: O(N*M) for the trie (not O(1); every kept path is inserted)
|
#!/usr/bin/python
# -*-coding:utf-8 -*
# Configuration for a (French) word-guessing game.
nb_coup = 8  # number of allowed attempts -- presumably; confirm against the game loop
name_score_file = "scores"  # base name of the persisted score file
# Candidate words to guess (French fruit names; runtime data, left untranslated).
word = [
    "banane",
    "pomme",
    "poire",
    "tomate",
    "ananas",
    "prune",
    "fraise",
    "rhubarbe",
    "cerise",
    "kiwi",
    "abricot",
    "amande",
    "figue",
    "framboise",
    "melon",
    "brugnon",
    "cassis",
    "groseille",
    "mirabelle",
    "mûre",
    "myrtille",
    "pastèque",
    "pêche",
]
|
CKAN_ROOT = "https://data.wprdc.org/"
API_PATH = "api/3/action/"
SQL_SEARCH_ENDPOINT = "datastore_search_sql"
API_URL = CKAN_ROOT + API_PATH + SQL_SEARCH_ENDPOINT
|
"""Role testing files using testinfra"""
def test_daemon_config(host):
    """Check docker daemon config: the file exists, is owned by root, and
    contains the expected JSON fragment verbatim."""
    f = host.file("/etc/docker/daemon.json")
    assert f.is_file
    assert f.user == "root"
    assert f.group == "root"
    # Expected fragment, built line by line to match the file's exact
    # formatting (indentation and newlines included).
    config = (
        "{\n"
        " \"live-restore\": true,\n"
        " \"log-driver\": \"local\",\n"
        " \"log-opts\": {\n"
        " \"max-size\": \"100m\"\n"
        " }\n"
        "}"
    )
    assert config in f.content_string
def test_cron_job(host):
    """Check cron job"""
    crontab = host.file("/var/spool/cron/crontabs/root").content_string
    assert "docker system prune --all --volumes --force" in crontab
def test_docker_service(host):
    """Check docker service"""
    docker_service = host.service("docker")
    assert docker_service.is_running
    assert docker_service.is_enabled
|
# variables 3
# The same name may be rebound to values of different types (dynamic
# typing); show the value and its type after each rebinding.
for a in ("abc", 3):
    print("a:", a, type(a))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using Greedy Algorithm to solve balloon burst problem.
'''
class Solution(object):
    def findMinArrowShots(self, points):
        """
        :type points: List[List[int]]
        :rtype: int

        Greedy: sort balloons by right edge; shoot at the current right
        edge and start a new arrow only when a balloon begins strictly
        past it.
        """
        if not points:
            return 0
        by_right_edge = sorted(points, key=lambda interval: interval[1])
        arrows = 1
        boundary = by_right_edge[0][1]
        for interval in by_right_edge:
            if interval[0] > boundary:
                arrows += 1
                boundary = interval[1]
        return arrows
|
# String
nombre = "Uriel"
apellido = 'Rdguez'
## Concatenation (joining two strings)
nombre + ' ' + apellido
## A string can be multiplied by an integer to repeat it
nombre * 4 # 'UrielUrielUrielUriel' (repetition adds no separating spaces)
# Float
numero_decimal = 3.4
#Boolean
es_estudiante = True # True
# NOTE(review): the comment says False but the code assigns True; the
# original author likely intended `es_estudiante = False` here — confirm.
es_estudiante = True # False
|
# Reads an integer and prints its factorial, showing each multiplication step,
# e.g. "Calculando 5! =  5  x  4  x  3  x  2  x  1  = 120".
num = int(input("Digite um número para seu fatorial ser calculado: "))
m = num  # countdown multiplier: num, num-1, ..., 1
fat = 1  # running product
print('Calculando {}! = '.format(num), end='')
while m > 0:
    print(' {} '.format(m), end='')
    # ' x ' between factors, ' = ' before the final result
    print(' x ' if m > 1 else ' = ', end='')
    fat *= m
    m -= 1
print('{}'.format(fat))
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
# Acupuncture point metadata: LU3 (tianfu) on the Lung Channel of Hand-Taiyin.
SPELL=u'tiānfǔ'  # pinyin spelling with tone marks
CN=u'天府'  # Chinese name
NAME=u'tianfu13'  # romanized identifier used as a lookup key
CHANNEL='lung'
CHANNEL_FULLNAME='LungChannelofHand-Taiyin'
SEQ='LU3'  # standard acupoint code
if __name__ == '__main__':
    pass
|
def put_languages(self, root):
    """Serialize self.languages into root's "languages" attribute.

    Each entry is an iterable of variants joined by "/"; entries are then
    joined by ",". Nothing is written when the attribute is absent/empty.
    """
    languages = getattr(self, "languages", None)
    if languages:
        root.attrib["languages"] = ",".join("/".join(entry) for entry in languages)
def put_address(self, root):
    """Serialize self.address into root's "address" attribute.

    A plain string is stored verbatim; any other iterable of strings is
    joined with "|". Nothing is written when self.address is falsy.
    """
    if self.address:
        if isinstance(self.address, str):
            root.attrib["address"] = self.address
        else:
            # "|".join already returns str; the previous str(...) wrapper
            # around it was redundant and has been removed.
            root.attrib["address"] = "|".join(self.address)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""RackTablesDB - a python library to access the racktables database.
"""
# Package metadata consumed by setup tooling / runtime introspection.
__author__ = "John van Zantvoort"
__email__ = "john.van.zantvoort@snow.nl"
__license__ = "The MIT License (MIT)"
__version__ = "1.0.1"
|
# n = nums.length
# time = 0(n)
# space = O(1)
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Kadane's algorithm: best sum over any non-empty contiguous run.

        Seeding the answer with max(nums) keeps all-negative inputs correct.
        """
        best = max(nums)
        running = 0
        for value in nums:
            # restart the run when the carried sum would only hurt
            running = value if running < 0 else running + value
            best = max(best, running)
        return best
|
# -*- coding: utf-8 -*-
# @Time: 2020/7/16 16:28
# @Author: GraceKoo
# @File: interview_14.py
# @Desc: https://www.nowcoder.com/practice/529d3ae5a407492994ad2a246518148a?tpId=13&rp=1&ru=%2Fta%2Fcoding-interviews&qr
# u=%2Fta%2Fcoding-interviews%2Fquestion-ranking
class Solution:
    def FindKthToTail(self, head, k):
        """Return the k-th node from the tail (1-based).

        Single pass with two pointers kept exactly k nodes apart; returns
        None for an empty list or when the list is shorter than k.
        """
        if not head:
            return None
        lead = head
        trail = head
        length = 0
        while lead:
            lead = lead.next
            length += 1
            # trail starts moving once the gap reaches k nodes
            if length > k:
                trail = trail.next
        return None if k > length else trail
|
class Heroes3(object):
    """Maps an army size to its Heroes of Might and Magic 3 description."""

    def __init__(self):
        super(Heroes3, self).__init__()
        # Inclusive (minimum, maximum) bounds per description. Note the
        # boundary overlap at 100/Horde vs Throng: insertion order wins.
        self._army_size = {
            "Few" : (1, 4),
            "Several" : (5, 9),
            "Pack" : (10, 19),
            "Lots" : (20, 49),
            "Horde" : (50, 100),
            "Throng" : (100, 249),
            "Swarm" : (250, 499),
            "Zounds" : (500, 999),
            "Legion" : (1000, float("inf"))
        }

    def get_all(self):
        """Return the full description -> (min, max) mapping."""
        return self._army_size

    def get_army_description(self, size):
        """Return the description whose range contains `size`.

        Bug fix: the previous version had the `return "Unknown"` inside
        the loop's else branch, so it bailed out with "Unknown" as soon
        as the FIRST range failed to match — any size above "Few" was
        never found. "Unknown" is now returned only after every range
        has been checked.
        """
        for key, (minimum, maximum) in self._army_size.items():
            if minimum <= size <= maximum:
                return key
        return "Unknown"
def main():
    """Entry point: print a greeting."""
    print("Hello, Heroes of Might and Magic 3!")
if __name__ == "__main__":
    main()
|
"""
572
subtree of another tree
easy
Given the roots of two binary trees root and subRoot, return true if
there is a subtree of root with the same structure and node values of
subRoot and false otherwise.
A subtree of a binary tree tree is a tree that consists of a node in
tree and all of this node's descendants. The tree tree could also be
considered as a subtree of itself.
"""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        # Node payload plus child links; defaults describe a value-0 leaf.
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
        """True when t matches some subtree of s (same structure and values).

        Iterative DFS over s; at each visited node a recursive structural
        equality check is run against t.
        """
        def same(a, b):
            # Both None -> match; exactly one None -> mismatch.
            if a is None or b is None:
                return a is b
            return (a.val == b.val
                    and same(a.left, b.left)
                    and same(a.right, b.right))

        pending = [s]
        while pending:
            node = pending.pop()
            if same(node, t):
                return True
            pending.extend(child for child in (node.left, node.right)
                           if child is not None)
        return False
|
# For each of `count` test cases: read k (rounds) and n (columns), start
# from [1..n], and apply k rounds of in-place prefix summing; print the
# final last element.
count = int(input())
for i in range(count):
    k = int(input())
    n = int(input())
    people = [j for j in range(1,n+1)]
    for x in range (k):
        # one prefix-sum pass: people[v+1] accumulates everything before it
        for v in range(n-1):
            people[v+1] += people[v]
    print(people[-1])
|
class Solution:
    def canPlaceFlowers(self, flowerbed, n):
        """
        Can n new flowers be planted with no two flowers adjacent?

        Greedy single pass: plant in every empty plot whose neighbours are
        both empty (positions beyond either end count as empty).

        Bug fixes vs. the previous version:
        - when n was fully used up by the first/last plot, the success check
          inside the scanning loop never ran and the method fell through to
          False (e.g. [0, 0, 1, 0, 0] with n=2);
        - n == 0 now always returns True, regardless of bed layout.

        :type flowerbed: List[int]
        :type n: int
        :rtype: bool
        """
        remaining = n
        last = len(flowerbed) - 1
        for i, plot in enumerate(flowerbed):
            if remaining <= 0:
                return True
            if (plot == 0
                    and (i == 0 or flowerbed[i - 1] == 0)
                    and (i == last or flowerbed[i + 1] == 0)):
                flowerbed[i] = 1  # plant here (mutates input, as before)
                remaining -= 1
        return remaining <= 0
|
class Solution:
    def removeDuplicates(self, nums):
        """
        In-place: keep at most two copies of each value in sorted nums.
        Returns the new logical length; nums[:length] holds the result.

        :type nums: List[int]
        :rtype: int
        """
        write = 0
        for value in nums:
            # Keep the value unless the two most recently kept entries
            # already equal it (write index never passes the read index).
            if write < 2 or not (nums[write - 1] == nums[write - 2] == value):
                nums[write] = value
                write += 1
        return write
if __name__ == "__main__":
print(Solution().removeDuplicates([0, 0, 1, 1, 1, 1, 2, 3, 3]))
|
"""Project exceptions"""
class ProjectImportError(Exception):
    """Failure to import a project from a repository."""
|
""" Remote repositories, used by this project itself """
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def repositories():
    """Declares the external archives (skylib, ninja, cmake) this build needs."""
    # BUILD content that exposes an archive's entire tree as one filegroup.
    _all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
    # bazel-skylib: common Starlark utilities (pinned by sha256).
    http_archive(
        name = "bazel_skylib",
        sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
            "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
        ],
    )
    # ninja source archive, exposed wholesale via _all_content.
    http_archive(
        name = "ninja_build",
        build_file_content = _all_content,
        sha256 = "3810318b08489435f8efc19c05525e80a993af5a55baa0dfeae0465a9d45f99f",
        strip_prefix = "ninja-1.10.0",
        urls = [
            "https://github.com/ninja-build/ninja/archive/v1.10.0.tar.gz",
        ],
    )
    # cmake source archive, exposed wholesale via _all_content.
    http_archive(
        name = "cmake",
        build_file_content = _all_content,
        sha256 = "fc77324c4f820a09052a7785549b8035ff8d3461ded5bbd80d252ae7d1cd3aa5",
        strip_prefix = "cmake-3.17.2",
        urls = [
            "https://github.com/Kitware/CMake/releases/download/v3.17.2/cmake-3.17.2.tar.gz",
        ],
    )
|
"""Patch Server for Jamf Pro"""
__title__ = "Patch Server"
__version__ = "2020.10.02"
__author__ = "Bryson Tyrrell"
|
def goTo(logic, x, y):
    # Move to (x, y) and say the boolean result so the level can check it.
    hero.moveXY(x, y)
    hero.say(logic)
hero.moveXY(26, 16);
# Fetch the three secret boolean inputs from the nearest friend.
a = hero.findNearestFriend().getSecretA()
b = hero.findNearestFriend().getSecretB()
c = hero.findNearestFriend().getSecretC()
# Announce each required boolean expression at its target location.
goTo(a and b or c, 25, 26)
goTo((a or b) and c, 26, 32)
goTo((a or c) and (b or c), 35, 32)
goTo((a and b) or (not c and b), 40, 22)
|
def afl(x):
    """
    Ensure dict x has an 'l' key.

    If 'l' is missing, add one: a list of empty strings with the same
    length as x['a'].  (Doc fix: the old docstring claimed a list of
    None's was added, but the code has always used ''.)  x is modified
    in place and returned either way.
    """
    if 'l' not in x:
        x['l'] = [''] * len(x['a'])
    return x
class V1:
    """Variable-text configuration holder, keyed by a version tag."""

    def __init__(self, version='std', **kwargs):
        # Extra kwargs are accepted (and ignored) for call-site compatibility.
        self.version = version

    @property
    def doms(self):
        """Domain identifier per symbol."""
        return {'uc': 'qD', 'currapp': 'qD', 'potapp': 'qD'}

    @property
    def conds(self):
        """Condition-set identifier per symbol."""
        return {'uc': 'KT_subset', 'currapp': 'u_subset', 'potapp': 'c_subset'}

    @property
    def vartext(self):
        # Bug fix: the previous code used `self.version is 'std'` — identity
        # comparison against a string literal, which only worked by CPython
        # interning accident (and raises SyntaxWarning on 3.8+). Value
        # equality (==) is used now. Unknown versions still return None.
        if self.version == 'std':
            return {'PwT': afl({'a': [None]}),
                    'qD' : afl({'a': [None, 'a_aa', 'a_aaa']}),
                    'theta_c': afl({'a': [None]}),
                    'theta_p': afl({'a': [None]}),
                    'cbar': afl({'a': [None]}),
                    'n': afl({'a': ['a_aa','a_aaa']}),
                    'k2t': afl({'a': [None]}),
                    'u2c': afl({'a': [None]}),
                    'c2e': afl({'a': [None, ['a_aa','aa_aaa']]})}
        elif self.version == 'Q2P':
            return {'PwT': afl({'a': ['a_aaa']}),
                    'qD' : afl({'a': [None, 'a_aa', 'a_aaa']}),
                    'theta_c': afl({'a': [None]}),
                    'theta_p': afl({'a': [None]}),
                    'cbar': afl({'a': [None]}),
                    'n': afl({'a': ['a_aa','a_aaa']}),
                    'k2t': afl({'a': [None]}),
                    'u2c': afl({'a': [None]}),
                    'c2e': afl({'a': [None, ['a_aa','aa_aaa']]}),
                    'q2p': afl({'a': ['aa_aaa']})
                    }
|
# http://code.activestate.com/recipes/119466-dijkstras-algorithm-for-shortest-paths/
"""
G = {'s':{'u':10, 'x':5},
'u':{'v':1, 'x':2},
'v':{'y':4},
'x':{'u':3, 'v':9, 'y':2},
'y':{'s':7, 'v':6}}
"""
def graph_to_dot(G):
    """Render an adjacency dict {src: {dst: weight}} as a Graphviz digraph."""
    template = """digraph G {\nnode [width=.3,height=.3,shape=octagon,style=filled,color=skyblue];\noverlap="false";\nrankdir="LR";\n%s}"""
    edges = ''.join('%s -> %s [label="%s"];\n' % (src, dst, str(G[src][dst]))
                    for src in G for dst in G[src])
    return template % edges
# http://graphviz-dev.appspot.com/
# http://ashitani.jp/gv/#
|
def print_array(array):
    """Print the elements on one line, each followed by a space."""
    line = "".join("%s " % item for item in array)
    print(line)
def bubble_sort(array):
    """Sort `array` ascending, in place, with early exit on a clean pass.

    Fix: compare with > instead of >= so equal neighbours are not
    swapped — that avoided-swap both saves work and makes the sort
    stable. Also uses tuple swapping instead of a temp variable.
    """
    n = len(array)
    for i in range(n):
        swapped = False
        # after pass i, the last i elements are already in place
        for j in range(0, n - i - 1):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                swapped = True
        if not swapped:  # no swaps: already sorted, stop early
            break
# Demo: print the array, sort it in place, print it again.
foo_array = [9, 4, 3, 5, 1]
print_array(foo_array)
bubble_sort(foo_array)
print_array(foo_array)
|
class Solution:
    def findAnagrams(self, s: str, p: str) -> List[int]:
        """All start indices in s of substrings that are anagrams of p.

        Sliding window of len(p) over s, keeping a live character count
        that is compared against p's count at each window position.
        """
        n, m = len(s), len(p)
        target = collections.Counter(p)
        window = collections.Counter(s[:m - 1])  # all but the last slot
        starts = []
        for right in range(m - 1, n):
            window[s[right]] += 1
            left = right - m  # index that just fell out of the window
            if left >= 0:
                window[s[left]] -= 1
                if window[s[left]] == 0:
                    del window[s[left]]  # keep the dicts comparable
            if window == target:
                starts.append(right - m + 1)
        return starts
|
'''
We have an array A of integers, and an array queries of queries.
For the i-th query val = queries[i][0], index = queries[i][1], we add val to A[index]. Then, the answer to the i-th query is the sum of the even values of A.
(Here, the given index = queries[i][1] is a 0-based index, and each query permanently modifies the array A.)
Return the answer to all queries. Your answer array should have answer[i] as the answer to the i-th query.
Example 1:
Input: A = [1,2,3,4], queries = [[1,0],[-3,1],[-4,0],[2,3]]
Output: [8,6,2,4]
'''
class Solution(object):
    def sumEvenAfterQueries(self, A, queries):
        """
        For each query (val, index): add val to A[index], then record the
        sum of even values in A. The even-sum is maintained incrementally
        instead of being recomputed per query.

        :type A: List[int]
        :type queries: List[List[int]]
        :rtype: List[int]
        """
        even_sum = sum(v for v in A if v % 2 == 0)
        answers = []
        for val, index in queries:
            old = A[index]
            if old % 2 == 0:
                even_sum -= old  # the old value leaves the even pool
            A[index] = old + val
            if A[index] % 2 == 0:
                even_sum += A[index]  # the new value may join it
            answers.append(even_sum)
        return answers
|
'''
Created on Oct 3, 2015
@author: bcy-3
'''
|
app_name = "pusta2"
prefix_url = "pusta2"
static_files = {
'js': {
'pusta2/js/': ['main.js', ]
},
'css': {
'pusta2/css/': ['main.css', ]
},
'html': {
'pusta2/html/': ['index.html', ]
}
}
permissions = {
"edit": "Editing actualy nothing.",
"sample1": "sample1longversion",
}
|
"""This problem was asked by Facebook.
We have some historical clickstream data gathered from our site anonymously using cookies.
The histories contain URLs that users have visited in chronological order.
Write a function that takes two users' browsing histories as input and returns the longest
contiguous sequence of URLs that appear in both.
For example, given the following two users' histories:
user1 = ['/home', '/register', '/login', '/user', '/one', '/two']
user2 = ['/home', '/red', '/login', '/user', '/one', '/pink']
You should return the following:
['/login', '/user', '/one']
"""
|
# TODO: fill in the blanks with a possible solution to the problem.
# Prints every integer strictly between X and Y (either order of input)
# whose remainder mod 5 is 2 or 3.
X = int(input())
Y = int(input())
if (Y > X):
    for i in range(X + 1, Y ):
        if ( i % 5 == 2) or ( i % 5 == 3):
            print(i)
elif (X > Y):
    for i in range(Y + 1, X ):
        if (i % 5 == 2) or ( i % 5 == 3):
            print(i)
|
# Advent of Code 2020 day 9: find the first value that is not the sum of two
# of the previous `preamble_length` values, then a contiguous run summing to it.
# NOTE(review): `input` shadows the builtin input() for the rest of the script,
# and `sum` below shadows the builtin sum() — both work but are easy to trip on.
input = open('input.txt', 'r').read().split("\n")
preamble_length = 25
invalid = 0
# Part 1: scan for the first value with no two-addend sum in its window.
for i in range(preamble_length, len(input)):
    current = int(input[i])
    found = False
    for j in range(i - preamble_length, i):
        for k in range (j + 1, i):
            sum = int(input[j]) + int(input[k])
            if sum == current:
                found = True
    if not found:
        invalid = int(input[i])
        print("No match found for " + input[i])
        break
def find_sequence(search):
    # Part 2: first contiguous run of input values summing to `search`;
    # returns the run as a list (implicitly None if none exists).
    for i in range(0, len(input)) :
        sum_window = []
        sum = 0
        for k in range(i, len(input)):
            value = int(input[k])
            sum += value
            sum_window.append(value)
            if sum == search:
                return sum_window
            if sum > search:
                break
sequence = find_sequence(invalid)
min_val = min(sequence)
max_val = max(sequence)
print(sequence)
print("min = " + str(min_val))
print("max = " + str(max_val))
# The puzzle answer is the sum of the run's smallest and largest values.
print("min + max = " + str(min_val + max_val))
|
# Classifies a point (x, y): origin, on an axis, or quadrant Q1..Q4.
x,y = map(float, input().split())
if (x == y == 0):
    print("Origem")
elif (y == 0):
    print("Eixo X")
elif (x == 0):
    print("Eixo Y")
elif (x > 0) and (y > 0):
    print("Q1")
elif (x < 0) and (y > 0):
    print("Q2")
elif (x < 0) and (y < 0):
    print("Q3")
elif (x > 0) and (y < 0):
    print("Q4")
|
"""ROM methods."""
def read_all_rom(self) -> list:
"""
Return the values of all the locations of ROM.
Parameters
----------
self : Processor, mandatory
The instance of the processor containing the registers, accumulator etc
Returns
-------
ROM
The values of all the locations of ROM
"""
return self.ROM
def read_all_rom_ports(self) -> list:
    """
    Return the values of all the ROM ports.

    Parameters
    ----------
    self : Processor, mandatory
        The instance of the processor containing the registers, accumulator etc

    Returns
    -------
    ROM_PORT
        The values of all the ROM ports
    """
    # Returns a direct reference (not a copy) to the port list.
    return self.ROM_PORT
|
class Point(object):
    """A 2-D point with Java-style accessors (kept for caller compatibility)."""
    def __init__(self, x, y):
        # Coordinates are stored "private" by convention; read/write via
        # the get_*/set_* accessors below.
        self._x = x
        self._y = y
    def get_x(self):
        return self._x
    def set_x(self, x):
        self._x = x
    def get_y(self):
        return self._y
    def set_y(self, y):
        self._y = y
def euclidean_distance(a, b):
    """Straight-line (Euclidean) distance between points a and b.

    Uses math.hypot, which is clearer and more numerically robust for
    extreme coordinates than the manual sqrt-of-squares it replaces.
    """
    import math  # local import keeps the script's top level unchanged
    return math.hypot(a.get_x() - b.get_x(), a.get_y() - b.get_y())
|
print("Height: ", end='')
while True:
height = input()
# check if int
try:
height = int(height)
except ValueError:
print("Retry: ", end='')
continue
# check if suitable value
if height >= 0 and height <= 23:
break
else:
print("Height: ", end='')
# draw pyramid
for i in range(height):
hashes = "#" * (i + 1)
line = hashes.rjust(height) + ' ' + hashes
print(line)
|
# In Search for the Lost Memory [Explorer Pirate + Jett] (3527)
# Quest script: Kyrin congratulates the player, then the quest chain is
# started/completed and the "recovered memory" flag is recorded.
recoveredMemory = 7081  # follow-up quest id
kyrin = 1090000  # NPC id used as the dialogue speaker
sm.setSpeakerID(kyrin)
sm.sendNext("A stable position, with a calm demanor-- but I can tell you're hiding your explosive attacking abilities-- "
            "you've become quite an impressive pirate, #h #. It's been a while.")
sm.sendSay("You used to be a kid that was scared of water-- and look at you now. "
           "I knew you'd grow to a formidable pirate, but like this? I am thrilled to see you all grown up like this.")
sm.sendSay("What I can tell you is-- keep going. "
           "As the person responsible for making you a pirate, I have no doubt in my mind that you still have room to grow-- "
           "and that you will become an even more powerful force.")
sm.startQuest(parentID)
sm.completeQuest(parentID)
sm.startQuest(recoveredMemory)
sm.setQRValue(recoveredMemory, "1", False)  # quest record: memory recovered
|
class Powerup:
    """Base class for map power-ups; subclasses must implement use()."""
    def __init__(self, coord):
        # position of the power-up on the map
        self.coord = coord
    def use(self, player):
        # Subclass responsibility: apply the power-up's effect to player.
        raise NotImplementedError
    def ascii(self):
        # Single-character map representation.
        return "P"
|
# Maps a source-file extension to its language identifier.
languages = {
    "c": "c",
    "cpp": "cpp",
    "cc": "cpp",
    "cs": "csharp",
    "java": "java",
    "py": "python",
    "rb": "ruby"
}
|
# Create database engine for data.db
engine = create_engine('sqlite:///data.db')
# Write query to get date, tmax, and tmin from weather
query = """
SELECT date,
tmax,
tmin
FROM weather;
"""
# Make a data frame by passing query and engine to read_sql()
temperatures = pd.read_sql(query, engine)
# View the resulting data frame
print(temperatures)
'''
script.py> output:
date tmax tmin
0 12/01/2017 52 42
1 12/02/2017 48 39
2 12/03/2017 48 42
3 12/04/2017 51 40
...
119 03/30/2018 62 44
120 03/31/2018 58 39
[121 rows x 3 columns]
Selecting columns is useful when you only want a few columns from a table.
If you want most of the columns, it may be easier to load them all and then use pandas to drop unwanted columns.
'''
|
# File holding the game's (Monopoly-like) tunable parameters.
quantidade_jogadores = 2  # number of players
jogadores = []  # list of players (in play order)
tamanho_tabuleiro = 40  # board array size (always a multiple of 4 so the board is square)
quantidade_dados = 2  # how many dice are used
quantidade_reves = int(tamanho_tabuleiro/5)  # how many Luck/Setback squares exist on the board
dinheiro_inicial = 10000000  # each player's starting money
jogadas_default = 1  # moves each player gets per turn (may change on doubles)
# "Go to jail" and "Jail" must sit on opposite corners. The board is split in
# four: "go to jail" goes on the first corner, "jail" on the third.
pos_vai_para_cadeia = int(tamanho_tabuleiro/4)  # position of the "go to jail" square
pos_Cadeia = int(pos_vai_para_cadeia * 3)  # position of the "jail" square
# Possible building levels on a property (values are game data, kept as-is).
contrucoes={
    '1': 'Nada',
    '2': 'Casa',
    '3': 'Hotel'
}
# Luck cards: {description: prize amount} (Portuguese strings are game data).
possiveis_sorte = [
    {"Ganhou na loteria!": "500"},
    {"Foi promovido no emprego!": "1500"}
]
# Setback cards: {description: penalty amount}.
possiveis_reves = [
    {"Perdeu o mindinho da mao esquerda": "500"},
    {"Seu filho pegou Piolho": "50"},
    {"Policia Apreendeu seus 15 hectares de maconha, por pouco nao foi preso!": "3500"}
]
|
# Count (overlapping) occurrences of "bob" in s.
s = 'azcbobobegghakl'
num = 0
for start in range(len(s) - 2):
    if s[start:start + 3] == 'bob':  # slice comparison instead of char concat
        num += 1
print('Number of times bob occurs is: ' + str(num))
|
# --- Day 14: Docking Data ---
# As your ferry approaches the sea port, the captain asks for your help again. The computer system that runs this port isn't compatible with the docking program on the ferry, so the docking parameters aren't being correctly initialized in the docking program's memory.
# After a brief inspection, you discover that the sea port's computer system uses a strange bitmask system in its initialization program. Although you don't have the correct decoder chip handy, you can emulate it in software!
# The initialization program (your puzzle input) can either update the bitmask or write a value to memory. Values and memory addresses are both 36-bit unsigned integers. For example, ignoring bitmasks for a moment, a line like mem[8] = 11 would write the value 11 to memory address 8.
# The bitmask is always given as a string of 36 bits, written with the most significant bit (representing 2^35) on the left and the least significant bit (2^0, that is, the 1s bit) on the right. The current bitmask is applied to values immediately before they are written to memory: a 0 or 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value unchanged.
# For example, consider the following program:
# mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# mem[8] = 11
# mem[7] = 101
# mem[8] = 0
# This program starts by specifying a bitmask (mask = ....). The mask it specifies will overwrite two bits in every written value: the 2s bit is overwritten with 0, and the 64s bit is overwritten with 1.
# The program then attempts to write the value 11 to memory address 8. By expanding everything out to individual bits, the mask is applied as follows:
# value: 000000000000000000000000000000001011 (decimal 11)
# mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# result: 000000000000000000000000000001001001 (decimal 73)
# So, because of the mask, the value 73 is written to memory address 8 instead. Then, the program tries to write 101 to address 7:
# value: 000000000000000000000000000001100101 (decimal 101)
# mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# result: 000000000000000000000000000001100101 (decimal 101)
# This time, the mask has no effect, as the bits it overwrote were already the values the mask tried to set. Finally, the program tries to write 0 to address 8:
# value: 000000000000000000000000000000000000 (decimal 0)
# mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
# result: 000000000000000000000000000001000000 (decimal 64)
# 64 is written to address 8 instead, overwriting the value that was there previously.
# To initialize your ferry's docking program, you need the sum of all values left in memory after the initialization program completes. (The entire 36-bit address space begins initialized to the value 0 at every address.) In the above example, only two values in memory are not zero - 101 (at address 7) and 64 (at address 8) - producing a sum of 165.
# Execute the initialization program. What is the sum of all values left in memory after it completes?
def fileInput():
    """Read the puzzle input file and return it as a list of lines.

    Fix: the previous version opened the file twice (a bare open() whose
    handle leaked, then the `with` block) and called f.close() redundantly
    after the `with` had already closed it. One `with` now does it all.
    """
    with open(inputFile) as f:
        return f.read().split('\n')
def splitData(data):
    """Parse raw "lhs = rhs" program lines into [lhs, rhs] pairs.

    mem[...] addresses become ints and their values 36-bit binary strings;
    mask lines are kept verbatim. The global `mem` array is (re)sized to
    the largest address + 1.
    """
    global mem
    parsed = []
    highest = 0
    for raw in data:
        entry = raw.split(' = ')
        if entry[0] != 'mask':
            entry[0] = int(entry[0].lstrip("mem[").rstrip("]"))
            highest = max(highest, entry[0] + 1)
            entry[1] = f'{int(entry[1]):036b}'
        parsed.append(entry)
    mem = [0] * highest
    return parsed
def processData(data):
    """Execute the parsed program: track the current mask, apply it to each
    value, and store the result at the decoded memory address.

    Operates entirely on module-level state (mask, mem, maskCount) and
    mutates the parsed `data` entries in place.
    """
    global mask
    global mem
    global maskCount
    for line in data:
        if line[0] == 'mask':
            mask = line[1]
            maskCount = mask.count('X')  # number of floating bits in the mask
        else:
            # overwrite masked bits, then store the integer value
            line[1] = updateBits(mask,line[1])
            mem[line[0]] = int(line[1],2)
def updateBits(mask,bits):
    """Apply a 36-bit mask to a 36-char bit string.

    'X' positions keep the value's bit; '0'/'1' positions overwrite it.
    (Both arguments are assumed to be exactly 36 characters, as in the
    original index-based loop.)
    """
    merged = [bits[i] if mask[i] == 'X' else mask[i] for i in range(36)]
    return ''.join(merged)
#///////////////////////////////////////////////////
# Script state: puzzle input path, current bitmask, X count and memory array.
inputFile = 'day14-input.txt'
mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
maskCount = 0
mem = []
if __name__ == "__main__":
    data = fileInput()
    data = splitData(data)
    processData(data)
    # Puzzle answer: sum of all values left in memory.
    print(sum(mem))
|
#!/usr/bin/env python3
class DNSMasq_DHCP_Generic_Switchable:
    """One generic dnsmasq switch: a bare flag or a name=value pair."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
    def __str__(self):
        # A bare switch (value None) renders as just its name.
        return self.name if self.value is None else self.name + "=" + self.value
class DNSMasq_DHCP_Option:
    """One dhcp-option directive: optional scope tag, option number, value.

    Fix: the class previously defined __init__ twice; Python silently keeps
    only the last definition, so the 2-argument form was dead code (and its
    `scope = None` only bound a throwaway local, never self.scope). The
    single 3-argument constructor is kept; pass scope=None for an unscoped
    option.
    """
    def __init__(self, scope, option, value):
        self.scope = scope
        self.option = option
        self.value = value
    def get_scope(self):
        return self.scope
    def get_option(self):
        return self.option
    def get_value(self):
        return self.value
    def get_comment(self):
        """Human-readable tag for a few well-known option numbers."""
        if self.get_option() == "3":
            return "# Default Gateway"
        elif self.get_option() == "6":
            return "# Default DNS"
        elif self.get_option() == "42":
            return "# Default NTP"
        else:
            return ""
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as "dhcp-option=[scope,]option,value <comment>".

        Note: a space is always appended before the (possibly empty)
        comment, matching the original output format exactly.
        """
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_option()))
        res.append(str(self.get_value()))
        return "dhcp-option=" + \
               ",".join(res) + \
               " " + \
               str(self.get_comment())
class DNSMasq_DHCP_Range:
    """One dhcp-range directive: optional scope, min/max, netmask, lease.

    Fix: same defect as DNSMasq_DHCP_Option — __init__ was defined twice,
    leaving the 4-argument form dead (its `scope = None` was a no-op
    local). Only the 5-argument constructor is kept; pass scope=None for
    an unscoped range.
    """
    def __init__(self, scope, range_min, range_max, netmask, lease_time):
        self.scope = scope
        self.range_min = range_min
        self.range_max = range_max
        self.netmask = netmask
        self.lease_time = lease_time
    def get_scope(self):
        return self.scope
    def get_range_min(self):
        return self.range_min
    def get_range_max(self):
        return self.range_max
    def get_netmask(self):
        return self.netmask
    def get_lease_time(self):
        return self.lease_time
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as "dhcp-range=[scope,]min,max,netmask,lease"."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_range_min()))
        res.append(str(self.get_range_max()))
        res.append(str(self.get_netmask()))
        res.append(str(self.get_lease_time()))
        return "dhcp-range=" + \
               ",".join(res)
class DNSMasq_DHCP_Host:
    """One dhcp-host directive: optional scope, MAC, hostname, IP, lease.

    Fix: same defect as the Option/Range classes — __init__ was defined
    twice and the 4-argument form was silently dead code (its
    `scope = None` only bound a local). The 5-argument constructor is
    kept; pass scope=None for an unscoped reservation.
    """
    def __init__(self, scope, mac_address, hostname, ip_address, lease_time):
        self.scope = scope
        self.mac_address = mac_address
        self.hostname = hostname
        self.ip_address = ip_address
        self.lease_time = lease_time
    def get_scope(self):
        return self.scope
    def get_mac_address(self):
        return self.mac_address
    def get_hostname(self):
        return self.hostname
    def get_ip_address(self):
        return self.ip_address
    def get_lease_time(self):
        return self.lease_time
    def __add__(self, o):
        return self.get_str() + o
    def __str__(self):
        return self.get_str()
    def get_str(self):
        """Render as "dhcp-host=[scope,]mac,hostname,ip,lease"."""
        res = []
        if self.get_scope() is not None:
            res.append(str(self.get_scope()))
        res.append(str(self.get_mac_address()))
        res.append(str(self.get_hostname()))
        res.append(str(self.get_ip_address()))
        res.append(str(self.get_lease_time()))
        return "dhcp-host=" + \
               ",".join(res)
class DNSMasq_DHCP_Section:
    """Metadata plus the options/ranges/hosts of one DHCP scope."""
    def __init__(self):
        # Descriptive metadata, all optional (None = omit from the header).
        self.site = None
        self.role = None
        self.vlan_id = None
        self.vlan_name = None
        self.vrf_name = None
        self.prefix = None
        # Collected directive objects for this scope.
        self.dhcp_options = []
        self.dhcp_ranges = []
        self.dhcp_hosts = []
    def set_site(self, site):
        self.site = site
    def set_role(self, role):
        self.role = role
    def set_vlan_id(self, vlan_id):
        self.vlan_id = vlan_id
    def set_vlan_name(self, vlan_name):
        self.vlan_name = vlan_name
    def set_vrf_name(self, vrf_name):
        self.vrf_name = vrf_name
    def set_prefix(self, prefix):
        self.prefix = prefix
    def append_dhcp_option(self, dhcp_option):
        self.dhcp_options.append(dhcp_option)
    def append_dhcp_range(self, dhcp_range):
        self.dhcp_ranges.append(dhcp_range)
    def append_dhcp_host(self, dhcp_host):
        self.dhcp_hosts.append(dhcp_host)
    def get_header(self):
        """Build the commented header block; only set fields are emitted."""
        # Example
        ### Site: Home
        ### Role: Untagged
        ### Vlan: 66 (Home VLAN) with ID: 66
        ### VRF: vrf_66_homelan
        ### Prefix: 192.168.1.0/24
        res = []
        if self.site is not None:
            res.append("### Site: " + self.site)
        if self.role is not None:
            res.append("### Role: " + self.role)
        # Vlan line shape depends on which of id/name are present.
        if self.vlan_id is not None and self.vlan_name is not None:
            res.append("### Vlan: " + self.vlan_name + " with ID: " + str(self.vlan_id))
        elif self.vlan_id is not None:
            res.append("### Vlan ID: " + str(self.vlan_id))
        elif self.vlan_name is not None:
            res.append("### Vlan: " + self.vlan_name)
        if self.vrf_name is not None:
            res.append("### VRF: " + self.vrf_name)
        if self.prefix is not None:
            res.append("### Prefix: " + self.prefix)
        return "\n".join(res)
    def get_options(self):
        return self.dhcp_options
    def get_ranges(self):
        return self.dhcp_ranges
    def get_hosts(self):
        return self.dhcp_hosts
class DNSMasq_DHCP_Config:
    """Top-level dnsmasq DHCP config: generic switches plus scope sections."""
    def __init__(self):
        self.dhcp_config_generic_switches = []
        self.dhcp_config_sections = []
    def append_to_dhcp_config_generic_switches(self, obj):
        self.dhcp_config_generic_switches.append(obj)
    def append_to_dhcp_config_sections(self, obj):
        self.dhcp_config_sections.append(obj)
    def print(self):
        print(self)
    def __str__(self):
        # Switches first, then each section: two blank lines, the header,
        # a blank line, options, blank, ranges, blank, hosts.
        parts = [str(switch) for switch in self.dhcp_config_generic_switches]
        for section in self.dhcp_config_sections:
            parts.extend(["", "", str(section.get_header()), ""])
            parts.extend(str(option) for option in section.get_options())
            parts.append("")
            parts.extend(str(rng) for rng in section.get_ranges())
            parts.append("")
            parts.extend(str(host) for host in section.get_hosts())
        return "\n".join(parts)
|
# Notebook-style snippet: load two delimited files with pandas.
# NOTE(review): assumes `import pandas` ran earlier; the bare `df4`/`df5`
# lines are notebook display expressions, not statements with effect.
df4 = pandas.read_csv('supermarkets-commas.txt')
df4
df5 = pandas.read_csv('supermarkets-semi-colons.txt',sep=';')
df5
|
class DummyScheduler(object):
    """No-op stand-in for a learning-rate scheduler."""

    def __init__(self, optimizer):
        # optimizer accepted only for interface compatibility; unused
        pass

    def step(self):
        """Do nothing (scheduler interface stub)."""
        pass
|
"""
@Time : 2018/7/15 20:19
@Author : 郭家兴
@Email : 302802003@qq.com
@File : secure.py
@Desc : 敏感项配置文件
"""
SECRET_KEY = 'X88d\S00DS9FL234SDF234\S00FS8DF$%^AS\X09DL\DFX00934'
DEBUG = False
HOST = '0.0.0.0'
PORT = '5005'
THREADED = True
LEVEL = 'DEBUG'
|
# -*- coding: utf-8 -*-
"""
Created on Wed May  8 12:07:42 2019

@author: DiPu
"""
# Prints a star triangle growing 1..5, then shrinking 4..1.
for i in range(1,6):
    print("*"*i)
for j in range(4,0,-1):
    print("*"*j)
|
# Reads 5 integers and prints the third-largest DISTINCT sum obtainable by
# choosing any 3 of them (duplicated sums collapse via the set).
a = [int(x) for x in input().split()]
aset = set()
for i in range(5):
    for j in range(i+1, 5):
        for k in range(j+1, 5):
            aset.add(a[i] + a[j] + a[k])
print(sorted(aset, reverse=True)[2])
|
""" Store a person's name, and include some whitespace
characters at beginning and end of the name. Make sure you
use each character combination "\t" and "\n" at least one. """
name = ' James '
print(name.lstrip())
print(name.rstrip())
print(name.strip())
print('\tJames Noria')
print('Name:\nJames Noria')
|
def drive(start, end, step, parameters):
    """Run the SIR model from start to end (inclusive) in `step` increments.

    `parameters` supplies the initial S/I/R values and the beta/gamma
    rates under "P:sir.in.*" keys. Returns per-step traces of S, I, R
    and the timestep index under the matching "P:sir.out.*"/"P:sir.in.dt"
    keys.
    """
    traces = {
        "P:sir.out.S": [],
        "P:sir.out.I": [],
        "P:sir.out.R": [],
        "P:sir.in.dt": [],
    }
    S = parameters["P:sir.in.S"]
    I = parameters["P:sir.in.I"]
    R = parameters["P:sir.in.R"]
    beta = parameters["P:sir.in.beta"]
    gamma = parameters["P:sir.in.gamma"]
    for t in range(start, end + 1, step):
        S, I, R = sir(S, I, R, beta, gamma, step)
        traces["P:sir.out.S"].append(S)
        traces["P:sir.out.I"].append(I)
        traces["P:sir.out.R"].append(R)
        traces["P:sir.in.dt"].append(t)
    return traces
"""
Derived from the following:
********************************************************************************
! Input Variables:
! S Amount of susceptible members at the current timestep
! I Amount of infected members at the current timestep
! R Amount of recovered members at the current timestep
! beta Rate of transmission via contact
! gamma Rate of recovery from infection
! dt Next inter-event time
!
! State Variables:
! infected Increase in infected at the current timestep
! recovered Increase in recovered at the current timestep
********************************************************************************
subroutine sir(S, I, R, beta, gamma, dt)
implicit none
double precision S, I, R, beta, gamma, dt
double precision infected, recovered
infected = ((beta*S*I) / (S + I + R)) * dt
recovered = (gamma*I) * dt
S = S - infected
I = I + infected - recovered
R = R + recovered
end subroutine sir
"""
def sir(S: float, I: float, R: float, beta: float, gamma: float, dt: float):
    """Advance the SIR model by one step of length dt.

    S, I, R   : susceptible / infected / recovered counts at this step
    beta      : rate of transmission via contact
    gamma     : rate of recovery from infection
    dt        : inter-event time

    Returns the updated (S, I, R) tuple. `infected` and `recovered` are
    the increases in each compartment during this step.
    """
    infected = ((beta * S * I) / (S + I + R)) * dt
    recovered = (gamma * I) * dt
    return (S - infected, I + infected - recovered, R + recovered)
|
class RequestParseError(Exception):
    """Error raised when the inbound request could not be parsed."""
class AttachmentTooLargeError(Exception):
    """Error raised when an attachment is too large.

    Fix: the previous __init__ evaluated `super(AttachmentTooLargeError,
    self)` without calling .__init__(...), so the base Exception never
    received a message. It is now initialised with a descriptive message
    while the email/filename/size attributes are kept unchanged.
    """
    def __init__(self, email, filename, size):
        super(AttachmentTooLargeError, self).__init__(
            "attachment %s for %s is too large (%s bytes)" % (filename, email, size)
        )
        self.email = email
        self.filename = filename
        self.size = size
class AuthenticationError(Exception):
    """Error raised when the request is not authenticated."""
|
# Configuration file for interface "rpc". This interface is
# used in conjunction with RPC resource for cage-to-cage RPC calls.
#
# If location discovery at runtime is used (which is recommended),
# then all the cages that wish to share the same RPC "namespace" need
# identical broadcast ports, broadcast addresses that face the same
# subnet and the same flock_id, which is an arbitrary identifier around
# which all the related cages are grouped, same port broadcasts with
# different flock id will be ignored.
#
# The RPC listener is bound to a random port in specified range,
# which is later advertised at runtime to other cages. In case
# such broadcast advertisement are forbidden an exact port number
# can be specified, as a positive number (vs. negative for range).
# In this case other cages will likely have an entry in
# config_resource_rpc.py exact_locations parameter specifying this
# cage's address.
#
# There is no need to make a copy of this file for each cage,
# but you may need to modify the broadcast_address parameter
# if your OS doesn't work with 255.255.255.255 broadcasts,
# for example, under FreeBSD change it to something like
# "192.168.0.1/192.168.255.255".
config = dict \
(
protocol = "rpc",                                        # meta
random_port = -63000,                                    # tcp, negative means "in range 63000..63999"
max_connections = 100,                                   # tcp
broadcast_address = ("0.0.0.0/255.255.255.255", 12480),  # rpc, "interface address/broadcast address", port
ssl_ciphers = None,                                      # ssl, optional str
ssl_protocol = None,                                     # ssl, optional "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" or "TLS"
flock_id = "DEFAULT",                                    # rpc
marshaling_methods = ("msgpack", "pickle"),              # rpc, allowed marshaling methods
max_packet_size = 1048576,                               # rpc, maximum allowed request/response size in bytes
)
# DO NOT TOUCH BELOW THIS LINE
# Standard config accessors injected into every config module via pmnc:
# `get` reads one key (with a default), `copy` snapshots the whole dict.
__all__ = [ "get", "copy" ]
get = lambda key, default = None: pmnc.config.get_(config, {}, key, default)
copy = lambda: pmnc.config.copy_(config, {})
# EOF
|
EXAMPLE_TWEETS = [
"Trump for President!!! #MAGA",
"Trump is the best ever!",
"RT @someuser: Trump is, by far, the best POTUS in history. \n\nBonus: He^s friggin^ awesome!\n\nTrump gave Pelosi and the Dems the ultimate\u2026 ",
"If Clinton is elected, I'm moving to Canada",
"Trump is doing a great job so far. Keep it up man.",
"He is awesome, make american great again. Democrats is taking off. We love democrats.",
"This tweet is about basketball, and I'm watching tonight on CBS Sports",
"Hillary for President!!! #StrongerTogether",
"Trump is the worst ever!",
"If Trump is elected, I'm moving to Canada",
"RT @MotherJones: A scientist who resisted Trump administration censorship of climate report just lost her job",
"Trump is doing a terrible job so far. Vote him out ASAP."
]
|
"""
1. Use for position param, variable params, keyword argument
"""
def test(a, b, *args, m=1, n=2):
print(a)
print(b)
print(args)
print(m)
print(n)
# Extra positional values 3, 4, 5 are packed into *args.
test(1, 2, 3, 4, 5)
print()
# Keyword-only defaults m and n can be overridden by name.
test(1, 2, 3, 4, 5, m=10, n=20)
print()
"""
1. Use **kwargs for dict on keyword arguments
"""
def foo(**kwargs):
    """Print the dict of keyword arguments collected via **kwargs."""
    print(kwargs)
# a and b are collected into foo's kwargs dict.
foo(a=10, b=20)
print()
def test2(a, b, *args, m=1, n=2, **kwargs):
print(a)
print(b)
print(args)
print(m)
print(n)
print(kwargs)
###############################################
# All binding forms at once: positional a/b, *args overflow (3, 4, 5),
# keyword-only m/n, and leftover keywords x/y collected into **kwargs.
test2(1, 2, 3, 4, 5, m=10, n=20, x=100, y=200)
print()
def test3(m, **kwargs):
print(m)
print(kwargs)
test3(2, x=12, y=24)
|
# Minimal STAC 1.0.0 Item fixture (the spec's "simple item" example):
# a rectangular footprint with matching bbox, one datetime property,
# collection links, and visual + thumbnail assets.
test_item = {
    "stac_version": "1.0.0",
    "stac_extensions": [],
    "type": "Feature",
    "id": "20201211_223832_CS2",
    "bbox": [
        172.91173669923782,
        1.3438851951615003,
        172.95469614953714,
        1.3690476620161975
    ],
    "geometry": {
        "type": "Polygon",
        "coordinates": [
            [
                [
                    172.91173669923782,
                    1.3438851951615003
                ],
                [
                    172.95469614953714,
                    1.3438851951615003
                ],
                [
                    172.95469614953714,
                    1.3690476620161975
                ],
                [
                    172.91173669923782,
                    1.3690476620161975
                ],
                [
                    172.91173669923782,
                    1.3438851951615003
                ]
            ]
        ]
    },
    "properties": {
        "datetime": "2020-12-11T22:38:32.125000Z"
    },
    "collection": "simple-collection",
    "links": [
        {
            "rel": "collection",
            "href": "./collection.json",
            "type": "application/json",
            "title": "Simple Example Collection"
        },
        {
            "rel": "root",
            "href": "./collection.json",
            "type": "application/json",
            "title": "Simple Example Collection"
        },
        {
            "rel": "parent",
            "href": "./collection.json",
            "type": "application/json",
            "title": "Simple Example Collection"
        }
    ],
    "assets": {
        "visual": {
            "href": "https://storage.googleapis.com/open-cogs/stac-examples/20201211_223832_CS2.tif",
            "type": "image/tiff; application=geotiff; profile=cloud-optimized",
            "title": "3-Band Visual",
            "roles": [
                "visual"
            ]
        },
        "thumbnail": {
            "href": "https://storage.googleapis.com/open-cogs/stac-examples/20201211_223832_CS2.jpg",
            "title": "Thumbnail",
            "type": "image/jpeg",
            "roles": [
                "thumbnail"
            ]
        }
    }
}
|
# output: ok
# Demonstrations of list-comprehension semantics, checked with asserts.
# `assert` is a statement, not a function: the original `assert(expr)` form
# works only by accident and invites the classic `assert(cond, msg)`
# always-true-tuple bug, so the redundant parentheses are dropped here.
assert [x for x in ()] == []
assert [x for x in range(0, 3)] == [0, 1, 2]
assert ([(x, y) for x in range(0, 2) for y in range(2, 4)] ==
        [(0, 2), (0, 3), (1, 2), (1, 3)])
assert [x for x in range(0, 3) if x >= 1] == [1, 2]

def inc(x):
    """Return x + 1."""
    return x + 1

assert [inc(y) for y in (1, 2, 3)] == [2, 3, 4]
a = 1
assert [a for y in (1, 2, 3)] == [1, 1, 1]
assert [(lambda x: x * 2)(y) for y in (1, 2, 3)] == [2, 4, 6]
# This lambda ignores its parameter and closes over the loop variable y.
assert [(lambda x: y * 2)(y) for y in (1, 2, 3)] == [2, 4, 6]
print('ok')
|
#
# PySNMP MIB module ASCEND-MIBVDSLNET-MIB (http://snmplabs.com/pysmi)
# Machine-generated by pysmi from the ASN.1 MIB source; edit the MIB,
# not this file.  `mibBuilder` is supplied by the pysnmp loader.
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, Counter64, Integer32, Bits, TimeTicks, ObjectIdentity, Unsigned32, iso, NotificationType, MibIdentifier, ModuleIdentity, Gauge32, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Counter64", "Integer32", "Bits", "TimeTicks", "ObjectIdentity", "Unsigned32", "iso", "NotificationType", "MibIdentifier", "ModuleIdentity", "Gauge32", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# NOTE(review): this class deliberately shadows the DisplayString imported
# just above -- standard pysmi output, kept as-is.
class DisplayString(OctetString):
    pass
# Table/row scaffolding for the vdslNetworkProfile group
# (enterprise OID prefix 1.3.6.1.4.1.529 = Ascend).
mibvdslNetworkProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 9))
mibvdslNetworkProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 9, 1), )
if mibBuilder.loadTexts: mibvdslNetworkProfileTable.setStatus('mandatory')
if mibBuilder.loadTexts: mibvdslNetworkProfileTable.setDescription('A list of mibvdslNetworkProfile profile entries.')
mibvdslNetworkProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1), ).setIndexNames((0, "ASCEND-MIBVDSLNET-MIB", "vdslNetworkProfile-Shelf-o"), (0, "ASCEND-MIBVDSLNET-MIB", "vdslNetworkProfile-Slot-o"), (0, "ASCEND-MIBVDSLNET-MIB", "vdslNetworkProfile-Item-o"))
if mibBuilder.loadTexts: mibvdslNetworkProfileEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mibvdslNetworkProfileEntry.setDescription('A mibvdslNetworkProfile entry containing objects that maps to the parameters of mibvdslNetworkProfile profile.')
# Read-only index columns (shelf / slot / item).
vdslNetworkProfile_Shelf_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 1), Integer32()).setLabel("vdslNetworkProfile-Shelf-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslNetworkProfile_Shelf_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Shelf_o.setDescription('')
vdslNetworkProfile_Slot_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 2), Integer32()).setLabel("vdslNetworkProfile-Slot-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslNetworkProfile_Slot_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Slot_o.setDescription('')
vdslNetworkProfile_Item_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 3), Integer32()).setLabel("vdslNetworkProfile-Item-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: vdslNetworkProfile_Item_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Item_o.setDescription('')
vdslNetworkProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 4), DisplayString()).setLabel("vdslNetworkProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_Name.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Name.setDescription('For future use. The current design does not use the name field but instead references Vdsl lines by the physical address; we may in the future support referencing Vdsl lines by name as well as by address. The name consists of a null terminated ascii string supplied by the user; it defaults to the ascii form of the Vdsl line physical address.')
# Physical-address columns (shelf / slot / item-number enumerations).
vdslNetworkProfile_PhysicalAddress_Shelf = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("anyShelf", 1), ("shelf1", 2), ("shelf2", 3), ("shelf3", 4), ("shelf4", 5), ("shelf5", 6), ("shelf6", 7), ("shelf7", 8), ("shelf8", 9), ("shelf9", 10)))).setLabel("vdslNetworkProfile-PhysicalAddress-Shelf").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Shelf.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Shelf.setDescription('The number of the shelf that the addressed physical device resides on.')
vdslNetworkProfile_PhysicalAddress_Slot = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 55, 56, 57, 58, 49, 50, 42, 53, 54, 45, 46, 51, 59))).clone(namedValues=NamedValues(("anySlot", 1), ("slot1", 2), ("slot2", 3), ("slot3", 4), ("slot4", 5), ("slot5", 6), ("slot6", 7), ("slot7", 8), ("slot8", 9), ("slot9", 10), ("slot10", 11), ("slot11", 12), ("slot12", 13), ("slot13", 14), ("slot14", 15), ("slot15", 16), ("slot16", 17), ("slot17", 18), ("slot18", 19), ("slot19", 20), ("slot20", 21), ("slot21", 22), ("slot22", 23), ("slot23", 24), ("slot24", 25), ("slot25", 26), ("slot26", 27), ("slot27", 28), ("slot28", 29), ("slot29", 30), ("slot30", 31), ("slot31", 32), ("slot32", 33), ("slot33", 34), ("slot34", 35), ("slot35", 36), ("slot36", 37), ("slot37", 38), ("slot38", 39), ("slot39", 40), ("slot40", 41), ("aLim", 55), ("bLim", 56), ("cLim", 57), ("dLim", 58), ("leftController", 49), ("rightController", 50), ("controller", 42), ("firstControlModule", 53), ("secondControlModule", 54), ("trunkModule1", 45), ("trunkModule2", 46), ("controlModule", 51), ("slotPrimary", 59)))).setLabel("vdslNetworkProfile-PhysicalAddress-Slot").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Slot.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_Slot.setDescription('The number of the slot that the addressed physical device resides on.')
vdslNetworkProfile_PhysicalAddress_ItemNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 13), Integer32()).setLabel("vdslNetworkProfile-PhysicalAddress-ItemNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_ItemNumber.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_PhysicalAddress_ItemNumber.setDescription('A number that specifies an addressable entity within the context of shelf and slot.')
# Line-configuration columns, the profile action column, and the final export.
vdslNetworkProfile_Enabled = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("vdslNetworkProfile-Enabled").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_Enabled.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Enabled.setDescription('TRUE if the line is enabled, otherwise FALSE.')
vdslNetworkProfile_SparingMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inactive", 1), ("manual", 2), ("automatic", 3)))).setLabel("vdslNetworkProfile-SparingMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_SparingMode.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_SparingMode.setDescription('Port sparing operational mode for this port.')
vdslNetworkProfile_IgnoreLineup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("systemDefined", 1), ("no", 2), ("yes", 3)))).setLabel("vdslNetworkProfile-IgnoreLineup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_IgnoreLineup.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_IgnoreLineup.setDescription('Ignore line up value for this port.')
vdslNetworkProfile_LineConfig_NailedGroup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 19), Integer32()).setLabel("vdslNetworkProfile-LineConfig-NailedGroup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_NailedGroup.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_NailedGroup.setDescription('A number that identifies the this unique physical DSL line.')
vdslNetworkProfile_LineConfig_VpSwitchingVpi = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 20), Integer32()).setLabel("vdslNetworkProfile-LineConfig-VpSwitchingVpi").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_VpSwitchingVpi.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_VpSwitchingVpi.setDescription('The Vpi to be used for the VP switching. Rest of the VPIs within valid vpi-vci-range will be used for the VC switching. Changes in this range will take effect immediately. THE USER SHOULD BE VERY CAREFUL WHILE CHANGING THIS VALUE BECAUSE ALL CONNECTIONS ON THE LIM WHERE THIS PORT BELONGS WILL BE DROPPED IN ORDER TO MAKE THIS NEW VALUE EFFECTIVE IMMEDIATELY.')
vdslNetworkProfile_LineConfig_UpStreamFixedRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("n-1206667", 1), ("n-965333", 2), ("n-1930667", 3), ("n-3861333", 4)))).setLabel("vdslNetworkProfile-LineConfig-UpStreamFixedRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_UpStreamFixedRate.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_UpStreamFixedRate.setDescription('The following Up/Down stream rate relationships are supported: (0.965Mbps/19.306Mbps); (1.930Mbps/11.463Mbps); (3.861Mbps/11.463Mbps); (3.861Mbps/15.626Mbps). Up Stream range: 0.965Mbps - 3.861Mbps.')
vdslNetworkProfile_LineConfig_DownStreamFixedRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("n-1206667", 1), ("n-11463333", 2), ("n-15626333", 3), ("n-19306667", 4)))).setLabel("vdslNetworkProfile-LineConfig-DownStreamFixedRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_DownStreamFixedRate.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_DownStreamFixedRate.setDescription('The following Up/Down stream rate relationships are supported: (0.965Mbps/19.306Mbps); (1.930Mbps/11.463Mbps); (3.861Mbps/11.463Mbps); (3.861Mbps/15.626Mbps). Down Stream range: 11.463Mbps - 15.626Mbps.')
vdslNetworkProfile_LineConfig_ConfigLoopback = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disable", 1), ("digital", 2), ("analog", 3)))).setLabel("vdslNetworkProfile-LineConfig-ConfigLoopback").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_ConfigLoopback.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_ConfigLoopback.setDescription('Configuration of different modem loopbacks.')
vdslNetworkProfile_LineConfig_PsdValue = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("n-53dbm", 1), ("n-60dbm", 2)))).setLabel("vdslNetworkProfile-LineConfig-PsdValue").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_PsdValue.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_PsdValue.setDescription('Configuration of PSD parameter. It defines the power that is allowed to be sent to the line.')
vdslNetworkProfile_LineConfig_LinkStatecmd = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(16, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("autoConnectCmd", 16), ("disconnectState", 1), ("connectState", 2), ("quietState", 3), ("idleReqState", 4), ("backToServState", 5), ("changeIdleParamState", 6), ("changeWarmStartParamState", 7), ("changeCurrentParamState", 8)))).setLabel("vdslNetworkProfile-LineConfig-LinkStatecmd").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_LinkStatecmd.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_LineConfig_LinkStatecmd.setDescription('Sets the link connect state. Use this to control status of the VDSL link connect state machine. The auto-connect-cmd will train modem up to the final service. All the other commands are used to manualy operate the VDSL link connect state machine.')
vdslNetworkProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 9, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("vdslNetworkProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: vdslNetworkProfile_Action_o.setStatus('mandatory')
if mibBuilder.loadTexts: vdslNetworkProfile_Action_o.setDescription('')
mibBuilder.exportSymbols("ASCEND-MIBVDSLNET-MIB", vdslNetworkProfile_Slot_o=vdslNetworkProfile_Slot_o, vdslNetworkProfile_Name=vdslNetworkProfile_Name, vdslNetworkProfile_LineConfig_LinkStatecmd=vdslNetworkProfile_LineConfig_LinkStatecmd, vdslNetworkProfile_LineConfig_VpSwitchingVpi=vdslNetworkProfile_LineConfig_VpSwitchingVpi, vdslNetworkProfile_PhysicalAddress_Slot=vdslNetworkProfile_PhysicalAddress_Slot, vdslNetworkProfile_PhysicalAddress_Shelf=vdslNetworkProfile_PhysicalAddress_Shelf, mibvdslNetworkProfileTable=mibvdslNetworkProfileTable, vdslNetworkProfile_IgnoreLineup=vdslNetworkProfile_IgnoreLineup, vdslNetworkProfile_SparingMode=vdslNetworkProfile_SparingMode, vdslNetworkProfile_PhysicalAddress_ItemNumber=vdslNetworkProfile_PhysicalAddress_ItemNumber, vdslNetworkProfile_LineConfig_PsdValue=vdslNetworkProfile_LineConfig_PsdValue, vdslNetworkProfile_LineConfig_DownStreamFixedRate=vdslNetworkProfile_LineConfig_DownStreamFixedRate, vdslNetworkProfile_Enabled=vdslNetworkProfile_Enabled, vdslNetworkProfile_LineConfig_NailedGroup=vdslNetworkProfile_LineConfig_NailedGroup, DisplayString=DisplayString, vdslNetworkProfile_Action_o=vdslNetworkProfile_Action_o, vdslNetworkProfile_Shelf_o=vdslNetworkProfile_Shelf_o, mibvdslNetworkProfile=mibvdslNetworkProfile, mibvdslNetworkProfileEntry=mibvdslNetworkProfileEntry, vdslNetworkProfile_Item_o=vdslNetworkProfile_Item_o, vdslNetworkProfile_LineConfig_UpStreamFixedRate=vdslNetworkProfile_LineConfig_UpStreamFixedRate, vdslNetworkProfile_LineConfig_ConfigLoopback=vdslNetworkProfile_LineConfig_ConfigLoopback)
|
    def __init__(self):
        # Booked [start, end) intervals; book() scans this list linearly.
        self.meetings = []
def book(self, start: int, end: int) -> bool:
for s, e in self.meetings:
if s < end and start < e:
return False
self.meetings.append([start, end])
return True
|
# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
#
# The number of elements initialized in nums1 and nums2 are m and n respectively.
# You may assume that nums1 has enough space (size that is equal to m + n) to hold additional elements from nums2.
# Source - https://leetcode.com/problems/merge-sorted-array/
# 2 pointers approach
class Solution:
    def merge(self, nums1, m: int, nums2, n: int):
        """Merge sorted nums2 (n values) into sorted nums1 in place.

        nums1 has length m + n; its last n slots are spare capacity.
        Fills nums1 from the back so earlier values are never overwritten,
        then prints the merged list (kept from the original script).
        """
        a, b = m - 1, n - 1
        for write in range(m + n - 1, -1, -1):
            if b < 0:
                # nums2 exhausted; the remaining nums1 prefix is already placed.
                break
            if a >= 0 and nums1[a] > nums2[b]:
                nums1[write] = nums1[a]
                a -= 1
            else:
                nums1[write] = nums2[b]
                b -= 1
        print(nums1)
#
# Time complexity : O(m+n)  (single backward merge pass, no sorting)
# Space complexity : O(1)
# Demo run: merges [2, 5, 6] into [1, 2, 3, 0, 0, 0] and prints the result.
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
# merge() works in place and returns None, so `s` is always None.
s = Solution().merge(nums1, m, nums2, n)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Advent of Code 2020, day five."""
INPUT_FILE = 'data/day_05.txt'
def main() -> None:
    """Identify missing ticket."""
    # F/L mean "lower half" (0) and B/R "upper half" (1), so each boarding
    # pass is a binary number.
    decode = str.maketrans('FBLR', '0101')
    with open(INPUT_FILE, encoding='utf-8') as input_file:
        seats = sorted(int(line.strip().translate(decode), 2)
                       for line in input_file)
    print(f'Part One: Highest Seat Id: {seats[-1]}')
    # The seat ids form one contiguous run with a single gap: subtract the
    # actual sum from the triangular-number sum over [first, last].
    full_range_sum = (seats[-1] * (seats[-1] + 1) - (seats[0] - 1) * seats[0]) // 2
    missing_ticket = full_range_sum - sum(seats)
    print(f'Part Two: Missing Seat Id: {missing_ticket}')
main()
|
"""Python3 Code to solve problem 1253: Reconstruct a 2-Row Binary Matrix. """
class Solution(object):
    def reconstructMatrix(self, upper: int, lower: int, colsum: list) -> list:
        """Rebuild a 2 x N binary matrix from row sums and column sums.

        Columns with colsum 2 force a 1 in both rows.  Columns with
        colsum 1 are assigned to the upper row (in index order) until its
        remaining quota is used, then to the lower row.  Returns [] when
        no such matrix exists.
        """
        width = len(colsum)
        double_cols = [i for i, c in enumerate(colsum) if c == 2]
        single_cols = [i for i, c in enumerate(colsum) if c == 1]
        upper_quota = upper - len(double_cols)
        lower_quota = lower - len(double_cols)
        if (upper_quota < 0 or lower_quota < 0
                or upper_quota + lower_quota != len(single_cols)):
            return []
        top, bottom = [0] * width, [0] * width
        for i in double_cols:
            top[i] = bottom[i] = 1
        for i in single_cols[:upper_quota]:
            top[i] = 1
        for i in single_cols[upper_quota:]:
            bottom[i] = 1
        return [top, bottom]
|
"""
You're given a substring s of some cyclic string.
What's the length of the smallest possible string that can be concatenated to itself many times to obtain this cyclic string?
Example
For s = "cabca", the output should be
cyclicString(s) = 3.
"cabca" is a substring of a cycle string "abcabcabcabc..." that can be obtained by concatenating "abc" to itself.
Thus, the answer is 3.
"""
def cyclicString(s1):
    """Return the smallest period of s1: the length of the shortest string
    whose infinite repetition contains s1 as a prefix.

    A candidate length p is a period iff every character equals the one p
    positions earlier, which is equivalent to s1[p:] == s1[:-p].  The
    original scanned every position even after the first mismatch; the
    sliced comparison below short-circuits at C speed instead.
    """
    for period in range(1, len(s1)):
        if s1[period:] == s1[:-period]:
            return period
    # No shorter period found (also covers the empty string).
    return len(s1)
|
# Example of mutual recursion with even/odd.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
def is_even(n):
    """Return True iff n is even, counting down via mutual recursion."""
    return True if n == 0 else is_odd(n - 1)


def is_odd(n):
    """Return True iff n is odd, counting down via mutual recursion."""
    return False if n == 0 else is_even(n - 1)
def is_even_thunked(n):
    """Like is_even, but returns a zero-argument thunk instead of recursing."""
    if n == 0:
        return True
    return lambda: is_odd_thunked(n - 1)


def is_odd_thunked(n):
    """Like is_odd, but returns a zero-argument thunk instead of recursing."""
    if n == 0:
        return False
    return lambda: is_even_thunked(n - 1)
def trampoline(f, *args):
    """Run a thunk-returning function to completion with constant stack depth."""
    result = f(*args)
    while callable(result):
        result = result()
    return result
if __name__ == '__main__':
    # Direct mutual recursion: ~800 frames, still under the default limit.
    print(is_even(800))
    # If I try to run is_even(1000) with the default system recursion limit, it
    # blows up. But trampolining keeps the stack depth constant and small.
    print(trampoline(is_even_thunked, 1000))
|
# Demonstrate membership tests and safe lookups on an empty dict.
X = {}
print(5 in X)  # False: membership tests never raise
# dict.get returns None (or a supplied default) for missing keys, whereas
# the original X[4] / X[5] raised KeyError unconditionally on this empty dict.
print(X.get(4))
print(X.get(5))
|
# -*- coding: utf-8 -*-
# Author: Tonio Teran <tonio@stateoftheart.ai>
# Copyright: Stateoftheart AI PBC 2021.
'''NEAR AI's library wrapper.

Dataset information taken from the program_synthesis repository
(see SOURCE_METADATA['url']).
'''
# Identity of this dataset source within the wider library.
SOURCE_METADATA = {
    'name': 'nearai',
    'original_name': 'NEAR Program Synthesis',
    'url': 'https://github.com/nearai/program_synthesis'
}
# Task family -> dataset names available from this source.
DATASETS = {'Program Synthesis': ['AlgoLisp', 'Karel', 'NAPS']}
def load_dataset(name: str) -> dict:
    """Return a minimal descriptor for dataset `name` from the nearai source."""
    return dict(name=name, source='nearai')
|
#!/usr/bin/python
# Echo one line read from stdin, then print "hi" three times.
inp = input(">> ")
print(inp)
count = 0
while True:
    print("hi")
    count += 1
    if count == 3:  # stop after three iterations
        break
|
class Solution:
    def longestCommonPrefix(self, strs):
        """
        :type strs: List[str]
        :rtype: str

        Walk the strings column by column; zip(*strs) stops at the
        shortest string, so the first disagreeing column (or exhaustion)
        bounds the common prefix.
        """
        if not strs:
            return ''
        for index, column in enumerate(zip(*strs)):
            if len(set(column)) != 1:
                return strs[0][:index]
        # Every compared column matched: the shortest string is the prefix.
        return min(strs)
|
#VERSION: 1.0
INFO = {"example":("test","This is an example mod")}
RLTS = {"cls":(),"funcs":("echo"),"vars":()}
def test(cmd):
echo(0,cmd)
|
queries = [
"""SELECT * WHERE { ?s ?p ?o }""",
"""SELECT ?point ?point_type WHERE {
?point rdf:type brick:Point .
?point rdf:type ?point_type
}""",
"SELECT ?meter WHERE { ?meter rdf:type brick:Green_Button_Meter }",
""" SELECT ?t WHERE { ?t rdf:type brick:Weather_Temperature_Sensor }""",
"""SELECT ?sensor WHERE {
?sensor rdf:type brick:Zone_Air_Temperature_Sensor .
?sensor brick:isPointOf ?equip
}""",
"""SELECT ?sp WHERE {
?sp rdf:type brick:Zone_Air_Temperature_Setpoint .
?sp brick:isPointOf ?equip
}""",
"SELECT ?meter WHERE { ?meter rdf:type brick:Building_Electric_Meter }",
"SELECT ?point WHERE { ?point rdf:type brick:Occupancy_Sensor }",
"""SELECT ?tstat ?room ?zone ?state ?temp ?hsp ?csp WHERE {
?tstat brick:hasLocation ?room .
?zone brick:hasPart ?room .
?tstat brick:hasPoint ?state .
?tstat brick:hasPoint ?temp .
?tstat brick:hasPoint ?hsp .
?tstat brick:hasPoint ?csp .
?zone rdf:type brick:Zone .
?tstat rdf:type brick:Thermostat .
?state rdf:type brick:Thermostat_Status .
?temp rdf:type brick:Temperature_Sensor .
?hsp rdf:type brick:Supply_Air_Temperature_Heating_Setpoint .
?csp rdf:type brick:Supply_Air_Temperature_Cooling_Setpoint
}
""",
"""SELECT ?sensor ?sp ?equip WHERE {
?sensor rdf:type brick:Air_Flow_Sensor .
?sp rdf:type brick:Air_Flow_Setpoint .
?sensor brick:isPointOf ?equip .
?sp brick:isPointOf ?equip
}""",
"""SELECT ?cooling_point ?heating_point ?ahu WHERE {
?cooling_point rdf:type brick:Cooling_Valve_Command .
?heating_point rdf:type brick:Heating_Valve_Command .
?ahu brick:hasPoint ?cooling_point .
?ahu brick:hasPoint ?heating_point
}""",
"""SELECT * WHERE {
?equip rdf:type brick:VAV .
?equip brick:isFedBy ?ahu .
?ahu brick:hasPoint ?upstream_ta .
?equip brick:hasPoint ?dnstream_ta .
?upstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .
?dnstream_ta rdf:type brick:Supply_Air_Temperature_Sensor .
?equip brick:hasPoint ?vlv .
?vlv rdf:type brick:Valve_Command
}""",
"""SELECT * WHERE {
?equip rdf:type brick:VAV .
?equip brick:hasPoint ?air_flow .
?air_flow rdf:type brick:Supply_Air_Flow_Sensor
}""",
"""SELECT * WHERE {
?vlv rdf:type brick:Valve_Command .
?vlv rdf:type ?vlv_type .
?equip brick:hasPoint ?vlv .
?equip rdf:type brick:Air_Handling_Unit .
?air_temps rdf:type brick:Supply_Air_Temperature_Sensor .
?equip brick:hasPoint ?air_temps .
?air_temps rdf:type ?temp_type
}""",
"""SELECT * WHERE {
?vlv rdf:type brick:Valve_Command .
?vlv rdf:type ?vlv_type .
?equip brick:hasPoint ?vlv .
?equip rdf:type brick:Air_Handling_Unit .
?air_temps rdf:type brick:Return_Air_Temperature_Sensor .
?equip brick:hasPoint ?air_temps .
?air_temps rdf:type ?temp_type
}""",
"""SELECT ?vav WHERE {
?vav rdf:type brick:VAV
}""",
#"""SELECT DISTINCT ?sensor ?room
#WHERE {
#
# ?sensor rdf:type brick:Zone_Temperature_Sensor .
# ?room rdf:type brick:Room .
# ?vav rdf:type brick:VAV .
# ?zone rdf:type brick:HVAC_Zone .
#
# ?vav brick:feeds+ ?zone .
# ?zone brick:hasPart ?room .
#
# {?sensor brick:isPointOf ?vav }
# UNION
# {?sensor brick:isPointOf ?room }
#}""",
"""SELECT ?sensor ?room WHERE {
?sensor rdf:type brick:Zone_Temperature_Sensor .
?room rdf:type brick:Room .
?vav rdf:type brick:VAV .
?zone rdf:type brick:HVAC_Zone .
?vav brick:feeds+ ?zone .
?zone brick:hasPart ?room .
?vav brick:hasPoint ?sensor
}""",
# """SELECT ?vlv_cmd ?vav
# WHERE {
# { ?vlv_cmd rdf:type brick:Reheat_Valve_Command }
# UNION
# { ?vlv_cmd rdf:type brick:Cooling_Valve_Command }
# ?vav rdf:type brick:VAV .
# ?vav brick:hasPoint+ ?vlv_cmd .
# }""",
"""SELECT ?floor ?room ?zone WHERE {
?floor rdf:type brick:Floor .
?room rdf:type brick:Room .
?zone rdf:type brick:HVAC_Zone .
?room brick:isPartOf+ ?floor .
?room brick:isPartOf+ ?zone
}""",
]
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode

        Return the first node shared by both singly linked lists, or None.
        Both heads are advanced so they are equidistant from the end, then
        walked in lockstep until they meet.  Fixes in this revision: the
        two length loops are factored into one helper, node identity is
        compared with `is` rather than `==`, and the original tail loop's
        convoluted branches (including an unreachable `elif headA == headB`
        inside `while headA != headB`) are removed.
        """
        def _length(node):
            # Count nodes from `node` to the end of the list.
            count = 0
            while node:
                node = node.next
                count += 1
            return count

        len_a = _length(headA)
        len_b = _length(headB)
        # Skip the surplus leading nodes of the longer list; a negative
        # difference simply makes the corresponding range empty.
        for _ in range(len_a - len_b):
            headA = headA.next
        for _ in range(len_b - len_a):
            headB = headB.next
        # Advance in lockstep; both reach the shared node or None together.
        while headA is not headB:
            headA = headA.next
            headB = headB.next
        return headA
|
# Keeps prompting for a name; only exits when the user types "sair" ("exit").
# Prompt strings are deliberately left in Portuguese (user-facing text).
nome = str(input("Escreva nome :(sair para terminar)"))
while nome != "sair":
    nome = str(input("Escreva nome: (sair para terminar)"))
|
def heap_sort(l: list):
    """Sort l in place: heapsort with a recursive sift-down over an
    implicit binary max-heap (children of index i live at 2*i+1 and 2*i+2).

    NOTE(review): both heap construction and every extraction recurse, so
    very large inputs may approach Python's recursion limit -- confirm
    before using beyond small lists.
    """
    # num  -- elements already placed in final sorted position at the tail
    # node -- heap index currently being sifted down
    # flag -- True only during the initial build sweep over internal nodes
    def sort(num: int, node: int, flag: bool):
        if num == len(l) - 1:
            return
        if node < 0:
            # Build sweep done: the root holds the max; move it behind the heap.
            l[0], l[-(num) - 1] = l[-(num) - 1], l[0]
            num += 1
            node = 0
        if node * 2 + 1 < len(l) - num:
            if l[node] < l[node * 2 + 1]:
                l[node], l[node * 2 + 1] = l[node * 2 + 1], l[node]
                sort(num, node * 2 + 1, False)  # re-sift the displaced child
        if node * 2 + 2 < len(l) - num:
            if l[node] < l[node * 2 + 2]:
                l[node], l[node * 2 + 2] = l[node * 2 + 2], l[node]
                sort(num, node * 2 + 2, False)
        if flag:
            sort(num, node - 1, True)
    sort(0, int(len(l) / 2) - 1, True)  # start the build at the last internal node
if __name__ == "__main__":
l = [6, 1, 17, 4, 20, 15, 33, 10, 194, 54, 99, 1004, 5, 477]
heap_sort(l)
print(l)
|
# Absolute Windows directories scanned for duplicate files (r"" raw strings
# keep the backslashes literal).
dir_list = [
    r"C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\catalog\product",
    r"C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\import\multishopifystoremageconnect",
    r"C:\Users\Gamer\Documents\Projekte\azzap\azzap-docker-dev\data\html\pub\media\import\mpmultishopifystoremageconnect",
]
|
""" User assistance/help texts """
AGGREGATE_DATA = """
Here you can see an overview and aggregated data over all batches. Please note that only \
the data from instances that are part of experimental batches is considered here, not data from instances that \
have been started in between batches.
"""
CUSTOMER_CATEGORIES_INPUT = "e.g. public-gov"
DEFAULT_VERSION_INPUT = "The version that is used in between batches"
MIN_DURATION_INPUT = """
Please enter the minimum and maximum duration of the past process data (old version) here. This data \
is used to calculate the reward of new instances more reliably.
"""
DETAILED_DATA = """
Here you can see the details of each and every instance that has been part of an experimental batch.
"""
BATCH_NUMBER_CHOICE = "Number 1 means the first batch set for a process, number 2 means the second, and so on"
BATCH_SIZE_HELP = """
Here you can choose how many of the next incoming process instantiation requests will be part of this experimental \
batch.
"""
HISTORY_UPLOAD_DEFAULT = """
Should be a .json file with this content format:
{
"interarrivalTime": 0.98,
"durations": [
0.198,
0.041,
0.124,
0.04,
0.099,
0.144
]
}
"""
CONTROLS_HELP = "This is where the human process expert can control the experiment."
DEV_MODE_HELP = """
If you are just using the app for development purposes or to try it out \
you can simulate process instantiation requests instead of having real requests from customers/clients. \
An additional client simulator area will pop up in the dashboard if you activate dev mode.
"""
EXPERIMENT_METADATA = "Here, you can find useful metadata about the running experiment."
COOL_OFF_DETAILED = """
*Q: What is Cool-Off/the Cool-Off period?* \n
A: When you want to conclude the experiment, there might still be some long running, \
unevaluated process instances. Since it is important to take them into account for your \
final decision, we have implemented the cool off period. This makes sure, that all the \
instances that were part of any experimental batch are finished and have been evaluated and taken \
into account before we present the final proposal and you make the final decision.
"""
EXPERIMENTAL_INSTANCE = """
'Experimental instance' refers to an instance that has been started as part of a batch. It is called like that,
since only the instances that have been started as part of a batch are evaluated and part of the experiment.
"""
MANUAL_TRIGGER_FETCH_LEARN = """
Trigger polling of process engine
for instance data and learning with that data
(this will also happen automatically periodically, at about every n-th incoming instantiation request - with n being
half of the average batch size)
"""
SUBMIT_CHOICE_BUTTON = """
After submitting the choice/final decision, all incoming instantiation requests for the process will be
routed in accordance with this decision
"""
|
print("NFC West W L T")
print("-----------------------")
print("Seattle 13 3 0")
print("San Francisco 12 4 0")
print("Arizona 10 6 0")
print("St. Louis 7 9 0\n")
print("NFC North W L T")
print("-----------------------")
print("Green Bay 8 7 1")
print("Chicago 8 8 0")
print("Detroit 7 9 0")
print("Minnesota 5 10 1")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.