import fcntl
import time
import os
class Mutex(object):
NetmaxLockI2C_handle = None
def __init__(self, debug = False):
self.mutex_debug = debug
self.NetmaxLockI2C_handle_filename = '/run/lock/NetmaxLockI2C'
self.NetmaxOverallMutex_filename = '/run/lock/NetmaxOS_overall_mutex'
try:
            # touch the lock file (and close it immediately) so it exists with open permissions
            open(self.NetmaxLockI2C_handle_filename, 'w').close()
if os.path.isfile(self.NetmaxLockI2C_handle_filename):
os.chmod(self.NetmaxLockI2C_handle_filename, 0o777)
except Exception as e:
pass
def acquire(self):
if self.mutex_debug:
print("I2C mutex acquire")
acquired = False
while not acquired:
try:
self.NetmaxLockI2C_handle = open(self.NetmaxLockI2C_handle_filename, 'w')
# lock
fcntl.lockf(self.NetmaxLockI2C_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
acquired = True
except IOError: # already locked by a different process
time.sleep(0.001)
except Exception as e:
print(e)
if self.mutex_debug:
print("I2C mutex acquired {}".format(time.time()))
def release(self):
if self.mutex_debug:
print("I2C mutex release: {}".format(time.time()))
if self.NetmaxLockI2C_handle is not None and self.NetmaxLockI2C_handle is not True:
self.NetmaxLockI2C_handle.close()
self.NetmaxLockI2C_handle = None
time.sleep(0.001)
def enableDebug(self):
self.mutex_debug = True
def disableDebug(self):
self.mutex_debug = False
def set_overall_mutex(self):
try:
self.overall_mutex_handle = open(self.NetmaxOverallMutex_filename, 'w')
# debating whether we want to open up control of this file to any other process,
# or if control should be limited to the process that started it.
# For now, open it up and let's see.
os.chmod(self.NetmaxOverallMutex_filename, 0o777)
except Exception as e:
print(e)
pass
def release_overall_mutex(self):
try:
self.overall_mutex_handle.close()
os.remove(self.NetmaxOverallMutex_filename)
except:
pass
def overall_mutex(self):
if os.path.isfile(self.NetmaxOverallMutex_filename):
return True
else:
return False
def __enter__(self):
if self.mutex_debug:
print("I2C mutex enter")
return self
def __exit__(self, exception_type, exception_value, traceback):
if self.mutex_debug:
print("I2C mutex exit")
self.release()
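# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Serializes access to a shared I2C bus across processes. The transaction body
# below is a placeholder; only acquire()/release() come from the class above.
if __name__ == '__main__':
    mutex = Mutex(debug=True)
    mutex.acquire()
    try:
        pass  # perform the I2C transaction here
    finally:
        mutex.release()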
|
from heapq import heappush, heappop
from typing import List


class Solution:
def maxResult(self, nums: List[int], k: int) -> int:
N = len(nums)
h = [(-nums[0],0)]
for i in range(1,N):
while h[0][1] < i - k:
heappop(h)
max_so_far = h[0][0]
heappush(h, (max_so_far - nums[i], i))
if i == N -1:
return -(max_so_far - nums[i])
return nums[0]
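# --- Hedged usage check (illustrative only; not part of the original solution) ---
# LeetCode "Jump Game VI" samples: nums=[1,-1,-2,4,-7,3] with k=2 scores 7
# (path 1 -> -1 -> 4 -> 3); the second case bottoms out at 0.
if __name__ == '__main__':
    assert Solution().maxResult([1, -1, -2, 4, -7, 3], 2) == 7
    assert Solution().maxResult([1, -5, -20, 4, -1, 3, -6, -3], 2) == 0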
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : signatures.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 15.10.2019
import decimal
import hashlib
import json
import typing
from boltons.iterutils import windowed_iter
import ijson
import pandas as pd
import numpy as np
from scipy.spatial.distance import cosine
from sourmash import SourmashSignature
from sourmash._lowlevel import ffi, lib
from sourmash.utils import RustObject, rustcall, decode_str
from goetia import __version__
class DraffSignature:
def __init__(self, sketch, name='', filename='', license='CC0',
W=None, K=None, version=__version__):
if not isinstance(sketch, np.ndarray):
self._sketch = sketch.to_numpy()
else:
self._sketch = sketch
        self._name = name
        self._filename = filename
self.version = version
self.license = license
self.W = sketch.W if W is None else W
self.K = sketch.K if K is None else K
def md5sum(self):
return hashlib.md5(self._sketch.data).hexdigest()
def similarity(self, other, metric=cosine):
return metric(self.sketch, other.sketch)
def name(self):
if self._name:
return self._name
if self._filename:
return self._filename
else:
return self.md5sum()[:8]
@property
def sketch(self):
return self._sketch
@property
def size(self):
return len(self._sketch)
def to_dict(self):
return {'sketch': self._sketch.tolist(),
'W': self.W,
'K': self.K,
'size': self.size,
'name': self.name(),
'version': self.version,
'license': self.license,
'md5sum': self.md5sum()}
@classmethod
def from_dict(cls, data):
sig = cls(np.array(data['sketch']),
name = data['name'],
license = data['license'],
version = data['version'],
W = int(data['W']),
K = int(data['K']))
return sig
def save(self, stream: typing.TextIO) -> None:
"""Save the signature to disk.
Args:
stream (file): File handle to save to.
"""
data = [self.to_dict()]
json.dump(data, stream)
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
return super(DecimalEncoder, self).default(o)
def load_draff_stream(fp):
    '''Iteratively parse a JSON file of draff
signatures from the given file.
Args:
fp (file): File handle to parse from.
'''
backend = ijson.get_backend('yajl2')
for signature in backend.items(fp, 'item'):
yield DraffSignature.from_dict(signature)
def load_sourmash_stream(fp):
'''Iteratively parse a JSON file of sourmash
signatures from the given file.
Args:
fp (file): File handle to parse from.
'''
backend = ijson.get_backend('yajl2')
for signature in backend.items(fp, 'item'):
data = json.dumps([signature], cls=DecimalEncoder).encode('utf-8')
size = ffi.new("uintptr_t *")
ptr = rustcall(lib.signatures_load_buffer, data, len(data), False, 0, ffi.NULL, size)
size = ffi.unpack(size, 1)[0]
sigs = []
for i in range(size):
sigs.append(SourmashSignature._from_objptr(ptr[i]))
yield sigs[0]
def distance(sig_a, sig_b, metric=cosine):
if isinstance(sig_a, SourmashSignature):
return sig_a.similarity(sig_b)
elif isinstance(sig_a, DraffSignature):
return sig_a.similarity(sig_b, metric=metric)
else:
        raise TypeError(f'Not a supported signature type: {type(sig_a)}.')
def find_rolling_distances(sigs, dmetrics=[cosine], window_sizes = [2,4,6,8,10]):
    '''Compute distances between signatures separated by each of the given
    window sizes, for each metric, and collect the results in a DataFrame.
    '''
max_window = max(window_sizes)
window_sizes.sort()
times, distances, freqs, metrics = [], [], [], []
window_freqs = {}
for i, window in enumerate(windowed_iter(sigs, max_window)):
for sub_window_size in window_sizes:
sig_a, sig_b = window[0], window[sub_window_size - 1]
t_a = int(sig_a.name().split(':')[1])
t_b = int(sig_b.name().split(':')[1])
if i == 0:
freq = round(t_b - t_a, -4)
window_freqs[sub_window_size] = freq
else:
freq = window_freqs[sub_window_size]
for metric in dmetrics:
times.append(int(t_b))
distances.append(distance(sig_a, sig_b, metric=metric))
metrics.append(metric.__name__)
freqs.append(int(freq))
#print(window[0], window[sub_window_size - 1])
for sub_window_size in window_sizes[:-1]:
for sub_window in windowed_iter(window[1:], sub_window_size):
#print(sub_window[0], sub_window[-1])
sig_a, sig_b = sub_window[0], sub_window[-1]
t_a = int(sig_a.name().split(':')[1])
t_b = int(sig_b.name().split(':')[1])
for metric in dmetrics:
times.append(int(t_b))
distances.append(distance(sig_a, sig_b, metric=metric))
metrics.append(metric.__name__)
freqs.append(window_freqs[sub_window_size])
df = pd.DataFrame({'time': times,
'distance': distances,
'freq': freqs,
'metric': metrics})
df['time_norm'] = df['time'] / df['time'].max()
return df
def find_distances_from_ref(sigs, ref_sig, dmetrics=[cosine], cutoff=1.0):
times, distances, metrics = [], [], []
for sig in sigs:
sample_t = int(sig.name().split(':')[1])
for metric in dmetrics:
times.append(sample_t)
distances.append(distance(sig, ref_sig, metric=metric))
metrics.append(metric.__name__)
df = pd.DataFrame({'time': times,
'distance': distances,
'metric': metrics})
df['time_norm'] = df['time'] / df['time'].max()
return df
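# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Round-trips a DraffSignature through to_dict()/from_dict(); the sketch values
# and the W/K parameters below are arbitrary placeholders.
if __name__ == '__main__':
    example = DraffSignature(np.arange(16, dtype=float), name='example:0', W=31, K=9)
    restored = DraffSignature.from_dict(example.to_dict())
    assert restored.md5sum() == example.md5sum()
    assert distance(example, restored) < 1e-9  # cosine distance of identical sketches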
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Check input for quantum neural network."""
from collections.abc import Iterable
from mindquantum.circuit import Circuit
def _check_circuit(circuit, msg):
if not isinstance(circuit, Circuit):
raise TypeError("{} requires a quantum circuit, but get {}!".format(
msg, type(circuit)))
def _check_non_parameterized_circuit(circuit: Circuit):
if not isinstance(circuit, Circuit):
raise TypeError(
"Requires a non parameterized quantum circuit, but get {}!".format(
type(circuit)))
for g in circuit:
if g.isparameter:
raise ValueError(
"Requires a non parameterized quantum circuit, but {} is parameterized gate!"
.format(g))
def _check_type_or_iterable_type(inputs, require, msg):
if not isinstance(inputs, Iterable):
if not isinstance(inputs, require):
raise TypeError(
"{msg} requires {req} or several {req}s, but get {inp}!".
format(msg=msg, req=require, inp=type(inputs)))
else:
for inp in inputs:
if not isinstance(inp, require):
raise TypeError(
"{msg} requires {req} or several {req}s, but {inp} is not {req}!"
.format(msg=msg, req=require, inp=inp))
def _check_list_of_string(inputs, msg):
if not isinstance(inputs, list):
raise TypeError("{} requires a list of string, but get {}!".format(
msg, type(inputs)))
for inp in inputs:
if not isinstance(inp, str):
raise TypeError(
"{} requires a list of string, but {} is not string.".format(
msg, inp))
def _check_parameters_of_circuit(encoder_params_names, ansatz_params_names,
circuit: Circuit):
_check_list_of_string(encoder_params_names, 'Encoder parameter names')
_check_list_of_string(ansatz_params_names, 'Ansatz parameter names')
all_names = []
all_names.extend(encoder_params_names)
all_names.extend(ansatz_params_names)
circ_names = circuit.para_name
if not set(all_names) == set(circ_names):
raise ValueError(
"Parameter names you input not match with parameters in circuit.")
|
# ActivitySim
# See full license in LICENSE.txt.
import logging
# import multiprocessing
import pandas as pd
import numpy as np
from activitysim.core import tracing
from activitysim.core import config
from activitysim.core import pipeline
from activitysim.core import simulate
from activitysim.core import inject
from activitysim.core import mem
from activitysim.core import expressions
from activitysim.core import los
from activitysim.core import logit
from activitysim.core.interaction_sample_simulate import interaction_sample_simulate
from activitysim.core.interaction_sample import interaction_sample
from .util import logsums as logsum
from .util import estimation
from .util import tour_destination
from activitysim.abm.tables import shadow_pricing
"""
The school/workplace location model predicts the zones in which various people will
work or attend school.
For locations choices like workplace and school location, we have existing data about the actual
number of workers or students in the various destination zones, and we naturally want the results
of location choice to yield distributions that match these observed distributions as closely as
possible. To achieve this, we start with size tables of the observed populations by zone
and segment (e.g. number of university, highschool, and gradeschool students in each zone) and
use those populations as attractors (positive utilities) so that high population zones will,
all things being equal, receive more choices. (For instance, we want university-goers to choose
school locations in zones with university enrollments.)
But the choice algorithm can result in aggregate distributions of choices (modeled_size)
that don't match observed (predicted_size) counts. The shadow pricing algorithm attempts to
correct these misalignments by iteratively running the choice model, comparing the modeled_size
of the zone segments to the predicted size, and computing a shadow_price coefficient that is
applied to the size term to boost or attenuate its influence. This iterative process can be
configured to continue until a specified closeness of fit is achieved, or a maximum number of
iterations has occurred. Since the iterative process can be expensive, a facility is provided
to save the computed shadow prices after every iteration, and to load pre-computed shadow prices
on subsequent runs (warm start) to cut down on runtimes.
Since every individual (always person for now) belongs to at most one segment, each segment
(e.g. 'university', 'highschool' , 'gradeschool' for the 'school' location model) is handled
separately and sequentially within each shadow-price iteration.
The core algorithm has 3 parts:
Because logsum calculations are expensive, rather than computing logsums for all destination
alternatives, we first build a sample of alternatives using simplified (no-logsum) utilities,
compute logsums only for that sample, and finally choose from among the sampled alternatives.
* run_location_sample - Build a sample destination alternatives using simplified choice criteria
* run_location_logsums - Compute logsums for travel to those alternatives
* run_location_simulate - Rerun the choice model using the logsums to make a final location choice
With shadow pricing, and iterative treatment of each segment, the structure of the code is:
::
repeat
for each segment
run_location_sample
run_location_logsums
run_location_simulate
until convergence
"""
logger = logging.getLogger(__name__)
# column name of logsum in df returned by run_location_logsums (here because used in more than one place)
ALT_LOGSUM = 'mode_choice_logsum'
def write_estimation_specs(estimator, model_settings, settings_file):
"""
write sample_spec, spec, and coefficients to estimation data bundle
Parameters
----------
    estimator
    model_settings
settings_file
"""
estimator.write_model_settings(model_settings, settings_file)
# estimator.write_spec(model_settings, tag='SAMPLE_SPEC')
estimator.write_spec(model_settings, tag='SPEC')
estimator.write_coefficients(model_settings=model_settings)
estimator.write_table(inject.get_injectable('size_terms'), 'size_terms', append=False)
estimator.write_table(inject.get_table('land_use').to_frame(), 'landuse', append=False)
def _location_sample(
segment_name,
choosers,
alternatives,
skims,
estimator,
model_settings,
alt_dest_col_name,
chunk_size, chunk_tag,
trace_label):
"""
select a sample of alternative locations.
Logsum calculations are expensive, so we build a table of persons * all zones
and then select a sample subset of potential locations
The sample subset is generated by making multiple choices (<sample_size> number of choices)
    which results in a sample containing up to <sample_size> choices for each chooser (e.g. person)
    and a pick_count indicating how many times that choice was selected for that chooser.
person_id, dest_zone_id, rand, pick_count
23750, 14, 0.565502716034, 4
23750, 16, 0.711135838871, 6
...
23751, 12, 0.408038878552, 1
23751, 14, 0.972732479292, 2
"""
assert not choosers.empty
logger.info("Running %s with %d persons" % (trace_label, len(choosers.index)))
sample_size = model_settings["SAMPLE_SIZE"]
if config.setting('disable_destination_sampling', False) or (estimator and estimator.want_unsampled_alternatives):
# FIXME interaction_sample will return unsampled complete alternatives with probs and pick_count
logger.info("Estimation mode for %s using unsampled alternatives short_circuit_choices" % (trace_label,))
sample_size = 0
locals_d = {
'skims': skims,
'segment_size': segment_name
}
constants = config.get_model_constants(model_settings)
locals_d.update(constants)
spec = simulate.spec_for_segment(model_settings, spec_id='SAMPLE_SPEC',
segment_name=segment_name, estimator=estimator)
# here since presumably we want this when called for either sample or presample
log_alt_losers = config.setting('log_alt_losers', False)
choices = interaction_sample(
choosers,
alternatives,
spec=spec,
sample_size=sample_size,
alt_col_name=alt_dest_col_name,
log_alt_losers=log_alt_losers,
skims=skims,
locals_d=locals_d,
chunk_size=chunk_size,
chunk_tag=chunk_tag,
trace_label=trace_label)
return choices
def location_sample(
segment_name,
persons_merged,
network_los,
dest_size_terms,
estimator,
model_settings,
chunk_size, chunk_tag,
trace_label):
# FIXME - MEMORY HACK - only include columns actually used in spec
chooser_columns = model_settings['SIMULATE_CHOOSER_COLUMNS']
choosers = persons_merged[chooser_columns]
# create wrapper with keys for this lookup - in this case there is a home_zone_id in the choosers
# and a zone_id in the alternatives which get merged during interaction
# (logit.interaction_dataset suffixes duplicate chooser column with '_chooser')
# the skims will be available under the name "skims" for any @ expressions
skim_dict = network_los.get_default_skim_dict()
skims = skim_dict.wrap('home_zone_id', 'zone_id')
alt_dest_col_name = model_settings['ALT_DEST_COL_NAME']
choices = _location_sample(
segment_name,
choosers,
dest_size_terms,
skims,
estimator,
model_settings,
alt_dest_col_name,
chunk_size, chunk_tag,
trace_label)
return choices
DEST_TAZ = 'dest_TAZ'
HOME_TAZ = 'TAZ'
HOME_MAZ = 'home_zone_id'
DEST_MAZ = 'dest_MAZ'
def aggregate_size_terms(dest_size_terms, network_los):
#
# aggregate MAZ_size_terms to TAZ_size_terms
#
MAZ_size_terms = dest_size_terms.copy()
# add crosswalk DEST_TAZ column to MAZ_size_terms
maz_to_taz = network_los.maz_taz_df[['MAZ', 'TAZ']].set_index('MAZ').sort_values(by='TAZ').TAZ
MAZ_size_terms[DEST_TAZ] = MAZ_size_terms.index.map(maz_to_taz)
weighted_average_cols = ['shadow_price_size_term_adjustment', 'shadow_price_utility_adjustment']
for c in weighted_average_cols:
MAZ_size_terms[c] *= MAZ_size_terms['size_term'] # weighted average
TAZ_size_terms = MAZ_size_terms.groupby(DEST_TAZ).agg(
{'size_term': 'sum',
'shadow_price_size_term_adjustment': 'sum',
'shadow_price_utility_adjustment': 'sum'})
for c in weighted_average_cols:
TAZ_size_terms[c] /= TAZ_size_terms['size_term'] # weighted average
if TAZ_size_terms.isna().any(axis=None):
logger.warning(f"TAZ_size_terms with NAN values\n{TAZ_size_terms[TAZ_size_terms.isna().any(axis=1)]}")
        assert not TAZ_size_terms.isna().any(axis=None)
# print(f"TAZ_size_terms\n{TAZ_size_terms}")
# size_term shadow_price_size_term_adjustment shadow_price_utility_adjustment
# dest_TAZ
# 2 1.419 1.0 0
# 3 20.511 1.0 0
# 4 19.737 1.0 0
MAZ_size_terms = MAZ_size_terms[[DEST_TAZ, 'size_term']].reset_index(drop=False)
MAZ_size_terms = MAZ_size_terms.sort_values([DEST_TAZ, 'zone_id']).reset_index(drop=True)
# print(f"MAZ_size_terms\n{MAZ_size_terms}")
# zone_id dest_TAZ size_term
# 0 106097 2 0.774
# 1 124251 2 0.258
# 2 124252 2 0.387
# 3 106165 3 5.031
return MAZ_size_terms, TAZ_size_terms
def location_presample(
segment_name,
persons_merged,
network_los,
dest_size_terms,
estimator,
model_settings,
chunk_size, chunk_tag,
trace_label):
trace_label = tracing.extend_trace_label(trace_label, 'presample')
logger.info(f"{trace_label} location_presample")
alt_dest_col_name = model_settings['ALT_DEST_COL_NAME']
assert DEST_TAZ != alt_dest_col_name
MAZ_size_terms, TAZ_size_terms = aggregate_size_terms(dest_size_terms, network_los)
# convert MAZ zone_id to 'TAZ' in choosers (persons_merged)
# persons_merged[HOME_TAZ] = persons_merged[HOME_MAZ].map(maz_to_taz)
assert HOME_MAZ in persons_merged
assert HOME_TAZ in persons_merged # 'TAZ' should already be in persons_merged from land_use
# FIXME - MEMORY HACK - only include columns actually used in spec
# FIXME we don't actually require that land_use provide a TAZ crosswalk
# FIXME maybe we should add it for multi-zone (from maz_taz) if missing?
chooser_columns = model_settings['SIMULATE_CHOOSER_COLUMNS']
chooser_columns = [HOME_TAZ if c == HOME_MAZ else c for c in chooser_columns]
choosers = persons_merged[chooser_columns]
# create wrapper with keys for this lookup - in this case there is a HOME_TAZ in the choosers
# and a DEST_TAZ in the alternatives which get merged during interaction
# the skims will be available under the name "skims" for any @ expressions
skim_dict = network_los.get_skim_dict('taz')
skims = skim_dict.wrap(HOME_TAZ, DEST_TAZ)
taz_sample = _location_sample(
segment_name,
choosers,
TAZ_size_terms,
skims,
estimator,
model_settings,
DEST_TAZ,
chunk_size, chunk_tag,
trace_label)
# print(f"taz_sample\n{taz_sample}")
# dest_TAZ prob pick_count
# person_id
# 55227 7 0.009827 1
# 55227 10 0.000656 1
# 55227 18 0.014871 1
# 55227 20 0.035548 3
# choose a MAZ for each DEST_TAZ choice, choice probability based on MAZ size_term fraction of TAZ total
maz_choices = tour_destination.choose_MAZ_for_TAZ(taz_sample, MAZ_size_terms, trace_label)
assert DEST_MAZ in maz_choices
maz_choices = maz_choices.rename(columns={DEST_MAZ: alt_dest_col_name})
return maz_choices
def run_location_sample(
segment_name,
persons_merged,
network_los,
dest_size_terms,
estimator,
model_settings,
chunk_size, chunk_tag,
trace_label):
"""
select a sample of alternative locations.
Logsum calculations are expensive, so we build a table of persons * all zones
and then select a sample subset of potential locations
The sample subset is generated by making multiple choices (<sample_size> number of choices)
    which results in a sample containing up to <sample_size> choices for each chooser (e.g. person)
    and a pick_count indicating how many times that choice was selected for that chooser.
person_id, dest_zone_id, rand, pick_count
23750, 14, 0.565502716034, 4
23750, 16, 0.711135838871, 6
...
23751, 12, 0.408038878552, 1
23751, 14, 0.972732479292, 2
"""
logger.debug(f"dropping {(~(dest_size_terms.size_term > 0)).sum()} "
f"of {len(dest_size_terms)} rows where size_term is zero")
dest_size_terms = dest_size_terms[dest_size_terms.size_term > 0]
# by default, enable presampling for multizone systems, unless they disable it in settings file
pre_sample_taz = not (network_los.zone_system == los.ONE_ZONE)
if pre_sample_taz and not config.setting('want_dest_choice_presampling', True):
pre_sample_taz = False
logger.info(f"Disabled destination zone presampling for {trace_label} "
f"because 'want_dest_choice_presampling' setting is False")
if pre_sample_taz:
logger.info("Running %s location_presample with %d persons" % (trace_label, len(persons_merged)))
choices = location_presample(
segment_name,
persons_merged,
network_los,
dest_size_terms,
estimator,
model_settings,
chunk_size,
chunk_tag=f'{chunk_tag}.presample',
trace_label=trace_label)
else:
choices = location_sample(
segment_name,
persons_merged,
network_los,
dest_size_terms,
estimator,
model_settings,
chunk_size,
chunk_tag=f'{chunk_tag}.sample',
trace_label=trace_label)
return choices
def run_location_logsums(
segment_name,
persons_merged_df,
network_los,
location_sample_df,
model_settings,
chunk_size, chunk_tag,
trace_label):
"""
add logsum column to existing location_sample table
logsum is calculated by running the mode_choice model for each sample (person, dest_zone_id) pair
in location_sample, and computing the logsum of all the utilities
+-----------+--------------+----------------+------------+----------------+
| PERID | dest_zone_id | rand | pick_count | logsum (added) |
+===========+==============+================+============+================+
| 23750 | 14 | 0.565502716034 | 4 | 1.85659498857 |
+-----------+--------------+----------------+------------+----------------+
+ 23750 | 16 | 0.711135838871 | 6 | 1.92315598631 |
+-----------+--------------+----------------+------------+----------------+
+ ... | | | | |
+-----------+--------------+----------------+------------+----------------+
| 23751 | 12 | 0.408038878552 | 1 | 2.40612135416 |
+-----------+--------------+----------------+------------+----------------+
| 23751 | 14 | 0.972732479292 | 2 | 1.44009018355 |
+-----------+--------------+----------------+------------+----------------+
"""
assert not location_sample_df.empty
logsum_settings = config.read_model_settings(model_settings['LOGSUM_SETTINGS'])
# FIXME - MEMORY HACK - only include columns actually used in spec
persons_merged_df = \
logsum.filter_chooser_columns(persons_merged_df, logsum_settings, model_settings)
logger.info("Running %s with %s rows" % (trace_label, len(location_sample_df.index)))
choosers = location_sample_df.join(persons_merged_df, how='left')
tour_purpose = model_settings['LOGSUM_TOUR_PURPOSE']
if isinstance(tour_purpose, dict):
tour_purpose = tour_purpose[segment_name]
logsums = logsum.compute_logsums(
choosers,
tour_purpose,
logsum_settings, model_settings,
network_los,
chunk_size,
chunk_tag,
trace_label)
# "add_column series should have an index matching the table to which it is being added"
# when the index has duplicates, however, in the special case that the series index exactly
# matches the table index, then the series value order is preserved
    # logsums now does, since the location sample table was on the left side of the de-dup merge
location_sample_df[ALT_LOGSUM] = logsums
return location_sample_df
def run_location_simulate(
segment_name,
persons_merged,
location_sample_df,
network_los,
dest_size_terms,
want_logsums,
estimator,
model_settings,
chunk_size, chunk_tag,
trace_label):
"""
run location model on location_sample annotated with mode_choice logsum
to select a dest zone from sample alternatives
Returns
-------
choices : pandas.DataFrame indexed by persons_merged_df.index
choice : location choices (zone ids)
logsum : float logsum of choice utilities across alternatives
logsums optional & only returned if DEST_CHOICE_LOGSUM_COLUMN_NAME specified in model_settings
"""
assert not persons_merged.empty
# FIXME - MEMORY HACK - only include columns actually used in spec
chooser_columns = model_settings['SIMULATE_CHOOSER_COLUMNS']
choosers = persons_merged[chooser_columns]
alt_dest_col_name = model_settings['ALT_DEST_COL_NAME']
# alternatives are pre-sampled and annotated with logsums and pick_count
# but we have to merge additional alt columns into alt sample list
alternatives = \
pd.merge(location_sample_df, dest_size_terms,
left_on=alt_dest_col_name, right_index=True, how="left")
logger.info("Running %s with %d persons" % (trace_label, len(choosers)))
# create wrapper with keys for this lookup - in this case there is a home_zone_id in the choosers
# and a zone_id in the alternatives which get merged during interaction
# the skims will be available under the name "skims" for any @ expressions
skim_dict = network_los.get_default_skim_dict()
skims = skim_dict.wrap('home_zone_id', alt_dest_col_name)
locals_d = {
'skims': skims,
'segment_size': segment_name
}
constants = config.get_model_constants(model_settings)
if constants is not None:
locals_d.update(constants)
if estimator:
# write choosers after annotation
estimator.write_choosers(choosers)
estimator.set_alt_id(alt_dest_col_name)
estimator.write_interaction_sample_alternatives(alternatives)
spec = simulate.spec_for_segment(model_settings, spec_id='SPEC', segment_name=segment_name, estimator=estimator)
log_alt_losers = config.setting('log_alt_losers', False)
choices = interaction_sample_simulate(
choosers,
alternatives,
spec=spec,
choice_column=alt_dest_col_name,
log_alt_losers=log_alt_losers,
want_logsums=want_logsums,
skims=skims,
locals_d=locals_d,
chunk_size=chunk_size, chunk_tag=chunk_tag,
trace_label=trace_label,
trace_choice_name=model_settings['DEST_CHOICE_COLUMN_NAME'],
estimator=estimator)
if not want_logsums:
# for consistency, always return a dataframe with canonical column name
assert isinstance(choices, pd.Series)
choices = choices.to_frame('choice')
assert isinstance(choices, pd.DataFrame)
return choices
def run_location_choice(
persons_merged_df,
network_los,
shadow_price_calculator,
want_logsums,
want_sample_table,
estimator,
model_settings,
chunk_size, chunk_tag,
trace_hh_id, trace_label
):
"""
Run the three-part location choice algorithm to generate a location choice for each chooser
Handle the various segments separately and in turn for simplicity of expression files
Parameters
----------
persons_merged_df : pandas.DataFrame
persons table merged with households and land_use
network_los : los.Network_LOS
shadow_price_calculator : ShadowPriceCalculator
to get size terms
want_logsums : boolean
want_sample_table : boolean
estimator: Estimator object
model_settings : dict
chunk_size : int
trace_hh_id : int
trace_label : str
Returns
-------
choices : pandas.DataFrame indexed by persons_merged_df.index
'choice' : location choices (zone ids)
'logsum' : float logsum of choice utilities across alternatives
logsums optional & only returned if DEST_CHOICE_LOGSUM_COLUMN_NAME specified in model_settings
"""
chooser_segment_column = model_settings['CHOOSER_SEGMENT_COLUMN_NAME']
# maps segment names to compact (integer) ids
segment_ids = model_settings['SEGMENT_IDS']
choices_list = []
sample_list = []
for segment_name, segment_id in segment_ids.items():
choosers = persons_merged_df[persons_merged_df[chooser_segment_column] == segment_id]
# size_term and shadow price adjustment - one row per zone
dest_size_terms = shadow_price_calculator.dest_size_terms(segment_name)
assert dest_size_terms.index.is_monotonic_increasing, \
f"shadow_price_calculator.dest_size_terms({segment_name}) not monotonic_increasing"
if choosers.shape[0] == 0:
logger.info(f"{trace_label} skipping segment {segment_name}: no choosers")
continue
# - location_sample
location_sample_df = \
run_location_sample(
segment_name,
choosers,
network_los,
dest_size_terms,
estimator,
model_settings,
chunk_size,
chunk_tag, # run_location_sample will add appropriate suffix for sample or presample
trace_label=tracing.extend_trace_label(trace_label, 'sample.%s' % segment_name))
# - location_logsums
location_sample_df = \
run_location_logsums(
segment_name,
choosers,
network_los,
location_sample_df,
model_settings,
chunk_size, chunk_tag=f'{chunk_tag}.logsums',
trace_label=tracing.extend_trace_label(trace_label, 'logsums.%s' % segment_name))
# - location_simulate
choices_df = \
run_location_simulate(
segment_name,
choosers,
location_sample_df,
network_los,
dest_size_terms,
want_logsums,
estimator,
model_settings,
chunk_size, chunk_tag=f'{chunk_tag}.simulate',
trace_label=tracing.extend_trace_label(trace_label, 'simulate.%s' % segment_name))
if estimator:
if trace_hh_id:
estimation_trace_label = \
tracing.extend_trace_label(trace_label, f'estimation.{segment_name}.modeled_choices')
tracing.trace_df(choices_df, label=estimation_trace_label)
estimator.write_choices(choices_df.choice)
choices_df.choice = estimator.get_survey_values(choices_df.choice, 'persons',
column_names=model_settings['DEST_CHOICE_COLUMN_NAME'])
estimator.write_override_choices(choices_df.choice)
if want_logsums:
                # if we override choices, we need to replace the choice logsum with the logsum for the override location
# fortunately, as long as we aren't sampling dest alts, the logsum will be in location_sample_df
# if we start sampling dest alts, we will need code below to compute override location logsum
assert estimator.want_unsampled_alternatives
# merge mode_choice_logsum for the overridden location
# alt_logsums columns: ['person_id', 'choice', 'logsum']
alt_dest_col = model_settings['ALT_DEST_COL_NAME']
alt_logsums = \
location_sample_df[[alt_dest_col, ALT_LOGSUM]]\
.rename(columns={alt_dest_col: 'choice', ALT_LOGSUM: 'logsum'})\
.reset_index()
# choices_df columns: ['person_id', 'choice']
choices_df = choices_df[['choice']].reset_index()
# choices_df columns: ['person_id', 'choice', 'logsum']
choices_df = pd.merge(choices_df, alt_logsums, how='left').set_index('person_id')
logger.debug(f"{trace_label} segment {segment_name} estimation: override logsums")
if trace_hh_id:
estimation_trace_label = \
tracing.extend_trace_label(trace_label, f'estimation.{segment_name}.survey_choices')
tracing.trace_df(choices_df, estimation_trace_label)
choices_list.append(choices_df)
if want_sample_table:
# FIXME - sample_table
location_sample_df.set_index(model_settings['ALT_DEST_COL_NAME'],
append=True, inplace=True)
sample_list.append(location_sample_df)
else:
            # del this so we don't hold an active reference to it while run_location_sample is creating its replacement
del location_sample_df
if len(choices_list) > 0:
choices_df = pd.concat(choices_list)
else:
# this will only happen with small samples (e.g. singleton) with no (e.g.) school segs
logger.warning("%s no choices", trace_label)
choices_df = pd.DataFrame(columns=['choice', 'logsum'])
if len(sample_list) > 0:
save_sample_df = pd.concat(sample_list)
else:
# this could happen either with small samples as above, or if no saved sample desired
save_sample_df = None
return choices_df, save_sample_df
def iterate_location_choice(
model_settings,
persons_merged, persons, households,
network_los,
estimator,
chunk_size, trace_hh_id, locutor,
trace_label):
"""
iterate run_location_choice updating shadow pricing until convergence criteria satisfied
or max_iterations reached.
(If use_shadow_pricing not enabled, then just iterate once)
Parameters
----------
model_settings : dict
persons_merged : injected table
persons : injected table
network_los : los.Network_LOS
chunk_size : int
trace_hh_id : int
locutor : bool
whether this process is the privileged logger of shadow_pricing when multiprocessing
trace_label : str
Returns
-------
adds choice column model_settings['DEST_CHOICE_COLUMN_NAME']
adds logsum column model_settings['DEST_CHOICE_LOGSUM_COLUMN_NAME']- if provided
adds annotations to persons table
"""
chunk_tag = trace_label
# boolean to filter out persons not needing location modeling (e.g. is_worker, is_student)
chooser_filter_column = model_settings['CHOOSER_FILTER_COLUMN_NAME']
dest_choice_column_name = model_settings['DEST_CHOICE_COLUMN_NAME']
logsum_column_name = model_settings.get('DEST_CHOICE_LOGSUM_COLUMN_NAME')
sample_table_name = model_settings.get('DEST_CHOICE_SAMPLE_TABLE_NAME')
want_sample_table = config.setting('want_dest_choice_sample_tables') and sample_table_name is not None
persons_merged_df = persons_merged.to_frame()
persons_merged_df = persons_merged_df[persons_merged[chooser_filter_column]]
persons_merged_df.sort_index(inplace=True) # interaction_sample expects chooser index to be monotonic increasing
    # chooser segmentation allows different sets of coefficients for e.g. different income_segments or tour_types
chooser_segment_column = model_settings['CHOOSER_SEGMENT_COLUMN_NAME']
assert chooser_segment_column in persons_merged_df, \
f"CHOOSER_SEGMENT_COLUMN '{chooser_segment_column}' not in persons_merged table."
spc = shadow_pricing.load_shadow_price_calculator(model_settings)
max_iterations = spc.max_iterations
assert not (spc.use_shadow_pricing and estimator)
logger.debug("%s max_iterations: %s" % (trace_label, max_iterations))
for iteration in range(1, max_iterations + 1):
if spc.use_shadow_pricing and iteration > 1:
spc.update_shadow_prices()
choices_df, save_sample_df = run_location_choice(
persons_merged_df,
network_los,
shadow_price_calculator=spc,
want_logsums=logsum_column_name is not None,
want_sample_table=want_sample_table,
estimator=estimator,
model_settings=model_settings,
chunk_size=chunk_size, chunk_tag=chunk_tag,
trace_hh_id=trace_hh_id,
trace_label=tracing.extend_trace_label(trace_label, 'i%s' % iteration))
# choices_df is a pandas DataFrame with columns 'choice' and (optionally) 'logsum'
if choices_df is None:
break
spc.set_choices(
choices=choices_df['choice'],
segment_ids=persons_merged_df[chooser_segment_column].reindex(choices_df.index))
if locutor:
spc.write_trace_files(iteration)
if spc.use_shadow_pricing and spc.check_fit(iteration):
            logger.info("%s converged after iteration %s" % (trace_label, iteration,))
break
# - shadow price table
if locutor:
if spc.use_shadow_pricing and 'SHADOW_PRICE_TABLE' in model_settings:
inject.add_table(model_settings['SHADOW_PRICE_TABLE'], spc.shadow_prices)
if 'MODELED_SIZE_TABLE' in model_settings:
inject.add_table(model_settings['MODELED_SIZE_TABLE'], spc.modeled_size)
persons_df = persons.to_frame()
# add the choice values to the dest_choice_column in persons dataframe
# We only chose school locations for the subset of persons who go to school
# so we backfill the empty choices with -1 to code as no school location
# names for location choice and (optional) logsums columns
NO_DEST_ZONE = -1
persons_df[dest_choice_column_name] = \
choices_df['choice'].reindex(persons_df.index).fillna(NO_DEST_ZONE).astype(int)
# add the dest_choice_logsum column to persons dataframe
if logsum_column_name:
persons_df[logsum_column_name] = \
choices_df['logsum'].reindex(persons_df.index).astype('float')
if save_sample_df is not None:
# might be None for tiny samples even if sample_table_name was specified
assert len(save_sample_df.index.get_level_values(0).unique()) == len(choices_df)
# lest they try to put school and workplace samples into the same table
if pipeline.is_table(sample_table_name):
raise RuntimeError("dest choice sample table %s already exists" % sample_table_name)
pipeline.extend_table(sample_table_name, save_sample_df)
# - annotate persons table
if 'annotate_persons' in model_settings:
expressions.assign_columns(
df=persons_df,
model_settings=model_settings.get('annotate_persons'),
trace_label=tracing.extend_trace_label(trace_label, 'annotate_persons'))
pipeline.replace_table("persons", persons_df)
if trace_hh_id:
tracing.trace_df(persons_df,
label=trace_label,
warn_if_empty=True)
# - annotate households table
if 'annotate_households' in model_settings:
households_df = households.to_frame()
expressions.assign_columns(
df=households_df,
model_settings=model_settings.get('annotate_households'),
trace_label=tracing.extend_trace_label(trace_label, 'annotate_households'))
pipeline.replace_table("households", households_df)
if trace_hh_id:
tracing.trace_df(households_df,
label=trace_label,
warn_if_empty=True)
if logsum_column_name:
tracing.print_summary(logsum_column_name, choices_df['logsum'], value_counts=True)
return persons_df
@inject.step()
def workplace_location(
persons_merged, persons, households,
network_los,
chunk_size, trace_hh_id, locutor):
"""
workplace location choice model
iterate_location_choice adds location choice column and annotations to persons table
"""
trace_label = 'workplace_location'
model_settings = config.read_model_settings('workplace_location.yaml')
estimator = estimation.manager.begin_estimation('workplace_location')
if estimator:
write_estimation_specs(estimator, model_settings, 'workplace_location.yaml')
# FIXME - debugging code to test multiprocessing failure handling
# process_name = multiprocessing.current_process().name
# if multiprocessing.current_process().name =='mp_households_0':
# raise RuntimeError(f"fake fail {process_name}")
iterate_location_choice(
model_settings,
persons_merged, persons, households,
network_los,
estimator,
chunk_size, trace_hh_id, locutor, trace_label
)
if estimator:
estimator.end_estimation()
@inject.step()
def school_location(
persons_merged, persons, households,
network_los,
chunk_size, trace_hh_id, locutor
):
"""
School location choice model
iterate_location_choice adds location choice column and annotations to persons table
"""
trace_label = 'school_location'
model_settings = config.read_model_settings('school_location.yaml')
estimator = estimation.manager.begin_estimation('school_location')
if estimator:
write_estimation_specs(estimator, model_settings, 'school_location.yaml')
iterate_location_choice(
model_settings,
persons_merged, persons, households,
network_los,
estimator,
chunk_size, trace_hh_id, locutor, trace_label
)
if estimator:
estimator.end_estimation()
|
import webbrowser
class Movie():
def __init__(self, movie_title, movie_poster, movie_storyline,
movie_trailer_url):
        '''Creates a structure to store the movie details.
        self :- instances of this class
        This structure takes 4 string type parameters:
        movie_title :- name of the movie
        movie_poster :- URL of the movie poster image
        movie_storyline :- movie description
        movie_trailer_url :- URL of the YouTube trailer video
        '''
self.title = movie_title
self.poster_image_url = movie_poster
self.storyline = movie_storyline
self.trailer_youtube_url = movie_trailer_url
def show_trailer(self):
'''
This function is for displaying the trailer for a selected movie
'''
webbrowser.open(self.trailer_youtube_url)
def show_poster(self):
'''
This function is for displaying the poster for a movie
'''
        webbrowser.open(self.poster_image_url)
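# --- Hedged usage example (illustrative only; the movie data below is placeholder) ---
if __name__ == '__main__':
    toy_story = Movie('Toy Story',
                      'https://example.com/toy_story_poster.jpg',
                      'A story of a boy and his toys that come to life',
                      'https://example.com/toy_story_trailer')
    print(toy_story.title)
    print(toy_story.storyline)
    # toy_story.show_trailer()  # would open the trailer URL in a web browser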
|
# -*- coding:utf-8 -*-
###
# @Author: Chris
# Created Date: 2020-01-02 21:16:28
# -----
# Last Modified: 2020-02-23 15:38:39
# Modified By: Chris
# -----
# Copyright (c) 2020
###
import os
import copy
import json
import random
import pandas as pd
from tqdm import tqdm
from loguru import logger
from script.utility import Utility
class Attacker:
def __init__(self, datafile):
logger.info("Initilizing Attacker ...")
self.datafile = datafile
self.__loaddata()
def __loaddata(self):
logger.info(f"Loading local pano dataset from {self.datafile}...")
self.dataset = {}
with open(self.datafile, "r") as fin:
for line in tqdm(fin.readlines()):
data = json.loads(line)
self.dataset[data["id"]] = data
def random_same(self, num):
return random.sample(list(self.dataset.values()), num)
def random(self, num):
logger.info("Generating random attack points")
pano_attack = random.sample(list(self.dataset.values()), num)
pano_attack_random = []
for pano in pano_attack:
gps_correct = (pano["lat"], pano["lng"])
gps_attack = Utility.generateGPS_random(1)[0]
while gps_attack == gps_correct:
gps_attack = Utility.generateGPS_random(1)[0]
pano["lat_attack"] = gps_attack[0]
pano["lng_attack"] = gps_attack[1]
pano_attack_random.append(pano)
return pano_attack_random
def nearby(self, num):
pass
def driving(self, num_route, num_point, attack=True):
logger.info("Generating mock driving routes...")
logger.info(f"Generating {num_route} routes with {num_point} points")
pano_init = random.sample(list(self.dataset.values()), num_route)
pano_attack_driving = []
count_miss = 0
for pano in pano_init:
for _ in range(5):
route = self.generate_route(pano, num_point, route=[], attack=attack)
if route:
break
logger.debug(f"{len(route)}")
if len(route) < num_point:
count_miss += 1
logger.warning(f"{pano['id']} can not find a route with {num_point} points.")
else:
pano_attack_driving.append(route)
if count_miss:
pano_attack_driving.extend(self.driving(count_miss, num_point))
return pano_attack_driving
def generate_route(
        self, pano_init, num_point, route=None, attack=True,
    ):
        # avoid the mutable-default-argument pitfall: use a fresh list per call
        route = [] if route is None else route
        num_exist = len(route)
pano = pano_init
for idx in range(num_exist, num_exist + num_point):
if not route:
pano_nxt_id = pano["neighbor"][0]
else:
if len(pano["neighbor"]) == 1:
return route
pano_nxt_id = random.choice(pano["neighbor"])
while pano_nxt_id == route[idx - 1]["id"]:
pano_nxt_id = random.choice(pano["neighbor"])
if attack:
## fraud GPS
gps_correct = (pano["lat"], pano["lng"])
gps_attack = Utility.generateGPS_random(1)[0]
while gps_attack == gps_correct:
gps_attack = Utility.generateGPS_random(1)[0]
pano["lat_attack"] = gps_attack[0]
pano["lng_attack"] = gps_attack[1]
route.append(pano)
pano = self.dataset[pano_nxt_id]
return route
def read_route(self, filename: str, only_id=False) -> list:
logger.info("Reading route from %s" % filename)
routesDF = pd.read_csv(filename, index_col=["route_id"])
routes_num = int(routesDF.index[-1] + 1)
routes = []
for route_id in tqdm(range(routes_num), desc="Route ID"):
route = []
routeDF = routesDF.loc[route_id]
for step in range(len(routeDF)):
pano_id = routeDF.iloc[step]["pano_id"]
if only_id:
pano = {"id": pano_id}
else:
pano = self.dataset[pano_id]
pano["lat_attack"] = routeDF.iloc[step]["lats_attack"]
pano["lng_attack"] = routeDF.iloc[step]["lngs_attack"]
route.append(pano)
routes.append(route)
return routes
def write_route(self, routes: list, filename: str):
logger.info("Writing routes to %s" % filename)
routes_dict = {"route_id": [], "pano_id": []}
coords = {"lats": [], "lngs": [], "lats_attack": [], "lngs_attack": []}
for idx, route in enumerate(routes):
for pano in route:
routes_dict["route_id"].append(idx)
routes_dict["pano_id"].append(pano["id"])
coords["lats"].append(pano["lat"])
coords["lngs"].append(pano["lng"])
coords["lats_attack"].append(pano.get("lat_attack", None))
coords["lngs_attack"].append(pano.get("lat_attack", None))
routesDF = pd.DataFrame({**routes_dict, **coords})
routesDF.to_csv(filename, index=False, header=True, mode="w")
def split_route(self, routes: list, points: int):
routes_new = []
routes_num = len(routes)
for idx, route in enumerate(routes):
if len(route) < points:
logger.warning(f"Route {idx} doesn't have enough points")
continue
for i in range(len(route) - points + 1):
routes_new.append(route[i : points + i])
return routes_new
def test_generate_route(panofilename, routefilename):
test = Attacker(panofilename)
routes = test.driving(60, 99)
routes_dict = {"route_id": [], "pano_id": []}
coords = {"lats": [], "lngs": [], "lats_attack": [], "lngs_attack": []}
for idx, route in enumerate(routes):
for pano in route:
routes_dict["route_id"].append(idx)
routes_dict["pano_id"].append(pano["id"])
coords["lats"].append(pano["lat"])
coords["lngs"].append(pano["lng"])
coords["lats_attack"].append(pano["lat_attack"])
coords["lngs_attack"].append(pano["lng_attack"])
Utility.visualize_map(coords)
routesDF = pd.DataFrame({**routes_dict, **coords})
header = not os.path.exists(routefilename)
routesDF.to_csv(routefilename, index=False, header=header, mode="a")
def test_generate_route_longer():
test = Attacker("../results/pano_text.json")
filename = "/home/bourne/Workstation/AntiGPS/results/routes_generate.csv"
routesDF = pd.read_csv(filename, index_col=["route_id"])
routes_num = int(routesDF.index[-1] + 1)
routes_dict = {"route_id": [], "pano_id": []}
routes_dist = 0
coords = {"lats": [], "lngs": [], "lats_attack": [], "lngs_attack": []}
for route_id in range(routes_num):
route = []
routeDF = routesDF.loc[route_id]
for step in range(len(routeDF)):
pano_id = routeDF.iloc[step]["pano_id"]
pano = test.dataset[pano_id]
pano["lat_attack"] = routeDF.iloc[step]["lats_attack"]
pano["lng_attack"] = routeDF.iloc[step]["lngs_attack"]
route.append(pano)
route_default = copy.deepcopy(route)
for _ in range(5):
route = test.generate_route(route[-1], 49, route)
if len(route) < 99:
route = test.generate_route(route[0], 99 - len(route), route[::-1])
if len(route) < 99:
route = route_default
else:
break
if len(route) < 99:
logger.warning(f"route {route_id} cannot be produced")
continue
for pano in route:
routes_dict["route_id"].append(route_id)
routes_dict["pano_id"].append(pano["id"])
coords["lats"].append(pano["lat"])
coords["lngs"].append(pano["lng"])
coords["lats_attack"].append(pano["lat_attack"])
coords["lngs_attack"].append(pano["lng_attack"])
routes_dist += Utility.distance_route(route)
logger.info("Average route length is {}".format(routes_dist / routes_num))
Utility.visualize_map(coords)
routesDF = pd.DataFrame({**routes_dict, **coords})
filename = "/home/bourne/Workstation/AntiGPS/results/routes_generate_longer.csv"
routesDF.to_csv(filename, index=False, header=True, mode="w")
def test_split_route(panofilename, routefilename):
test = Attacker(panofilename)
routes = test.read_route(routefilename)
routes = test.split_route(routes, 50)
fout = routefilename.split(".")[0] + "_split.csv"
test.write_route(routes, fout)
if __name__ == "__main__":
# test = Attacker("../results/pano_text.json")
# test_pano_attack_random = test.random(10)
# print(test_pano_attack_random)
# test_generate_route(
# "../results/pano_text_pit.json",
# "/home/bourne/Workstation/AntiGPS/results/routes_generate_pit.csv",
# )
# test_generate_route_longer()
test_split_route(
"../results/pano_text_pit.json",
"/home/bourne/Workstation/AntiGPS/results/routes_generate_pit.csv",
)
|
from direct.directnotify import DirectNotifyGlobal
from otp.distributed.DistributedDirectoryAI import DistributedDirectoryAI
from otp.distributed.OtpDoGlobals import *
from toontown.distributed.ToontownInternalRepository import ToontownInternalRepository
# TODO: Remove Astron dependence.
class ToontownUDRepository(ToontownInternalRepository):
notify = DirectNotifyGlobal.directNotify.newCategory('ToontownUDRepository')
def __init__(self, baseChannel, serverId):
ToontownInternalRepository.__init__(self, baseChannel, serverId, dcSuffix='UD')
self.astronLoginManager = None
def handleConnected(self):
ToontownInternalRepository.handleConnected(self)
# Create our root object.
self.notify.info('Creating root object (%d)...' % self.getGameDoId())
rootObj = DistributedDirectoryAI(self)
rootObj.generateWithRequiredAndId(self.getGameDoId(), 0, 0)
# Create our global objects.
self.notify.info('Creating global objects...')
self.createGlobals()
self.notify.info('UberDOG server is ready.')
def createGlobals(self):
# Create our Astron login manager...
self.astronLoginManager = self.generateGlobalObject(OTP_DO_ID_ASTRON_LOGIN_MANAGER, 'AstronLoginManager')
|
import requests
import logging
from bsnl import data
logger = logging.getLogger(__name__)
headers = data.common_headers
payload = {
'location': 'NOID',
'actionName': 'manual',
'_search': 'false',
'nd': '1588301203519',
'rows': '4',
'page': '1',
'sidx': '',
'sord': 'asc'
}
def fetch(session: requests.Session) -> data.Status:
status = data.Status()
response = None
try:
response = session.post(
'https://fuptopup.bsnl.co.in/fetchUserQuotaPM.do',
headers=headers,
data=payload,
verify=False)
response.raise_for_status()
status.data = response.json()
status.success = True
status.message = "Success"
except requests.exceptions.RequestException as request_exception:
logger.exception("Error in making request.")
if response:
status.code = response.status_code
status.message = str(request_exception)
except Exception as error:
logger.exception("General error occurred.")
status.message = str(error)
return status
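# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# fetch() expects a requests.Session that has already been authenticated with the
# BSNL portal (the login step lives outside this module), so the bare session
# below is only a placeholder.
if __name__ == '__main__':
    with requests.Session() as session:
        # ... authenticate `session` here ...
        status = fetch(session)
        print(status.message)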
|
"""Tracking utilities."""
from textwrap import dedent
from typing import Any, Callable, Mapping, Optional, cast
__all__ = [
"init_tracker",
]
def init_tracker(
config: Mapping[str, Any],
use_wandb: bool,
information: Mapping[str, Any],
wandb_name: Optional[str] = None,
wandb_group: Optional[str] = None,
is_hpo: bool = False,
) -> Optional[Callable[[Mapping[str, Any]], None]]:
"""
Initialize the results tracker.
:param config:
The configuration to log to the tracker.
:param use_wandb:
Whether to use wandb.
    :param information:
        The data information to log.
    :param wandb_name:
        The wandb experiment name.
    :param wandb_group:
        The wandb group name.
:param is_hpo:
Whether this is an HPO run and should be grouped under the wandb_name.
:return:
A result callback.
"""
result_callback = None
if use_wandb:
try:
import wandb
except ImportError as e:
raise RuntimeError(dedent("""
Could not import wandb. Did you install it? You can do so by
pip install .[wandb]
or directly
pip install wandb
""")) from e
name = wandb_name if not is_hpo else None
group = wandb_name if is_hpo else wandb_group
wandb_run = cast(
wandb.wandb_sdk.wandb_run.Run,
wandb.init(project="stare_query", entity="hyperquery", name=name, reinit=True, group=group),
)
# All wandb information needs to be collected and then stored as one action on the root of the config object.
wandb_run.config.update(config)
wandb_run.config.update(dict(data_loading=information))
def wandb_log_callback(result: Mapping[str, Any]) -> None:
"""Wrapper around Run.log."""
wandb_run.log(dict(result))
result_callback = wandb_log_callback
return result_callback
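# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# The config/information dictionaries are placeholders; with use_wandb=False the
# function simply returns None, so no wandb account is needed to run this.
if __name__ == '__main__':
    callback = init_tracker(
        config={'model': 'example', 'lr': 1e-3},
        use_wandb=False,
        information={'num_entities': 100},
        wandb_name='example-run',
    )
    if callback is not None:
        callback({'loss': 0.5})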
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 11/26/2004
# ------------------------------------------------------------------------------
""" Adds all of the core traits to the Traits database.
"""
if __name__ == '__main__':
from enthought.traits.api import Event, List, Dict, Any, Int, Long, Float, Str
from enthought.traits.api import Unicode, Complex, Bool, CInt, CLong, CFloat
from enthought.traits.api import CStr, CUnicode, CComplex, CBool, false, true
from enthought.traits.api import String, Password, File, Directory, Function
from enthought.traits.api import Method, Class, Module, Type, This, self, Python
from enthought.traits.api import ReadOnly, ListInt, ListFloat, ListStr
from enthought.traits.api import ListUnicode, ListComplex, ListBool
from enthought.traits.api import ListFunction, ListMethod, ListClass
from enthought.traits.api import ListInstance, ListThis, DictStrAny, DictStrStr
from enthought.traits.api import DictStrInt, DictStrLong, DictStrFloat
from enthought.traits.api import DictStrBool,DictStrList
from enthought.traits.api import tdb
define = tdb.define
define( 'Event', Event )
define( 'List', List )
define( 'Dict', Dict )
define( 'Any', Any )
define( 'Int', Int )
define( 'Long', Long )
define( 'Float', Float )
define( 'Str', Str )
define( 'Unicode', Unicode )
define( 'Complex', Complex )
define( 'Bool', Bool )
define( 'CInt', CInt )
define( 'CLong', CLong )
define( 'CFloat', CFloat )
define( 'CStr', CStr )
define( 'CUnicode', CUnicode )
define( 'CComplex', CComplex )
define( 'CBool', CBool )
define( 'false', false )
define( 'true', true )
define( 'String', String )
define( 'Password', Password )
define( 'File', File )
define( 'Directory', Directory )
# define( 'Function', Function )
# define( 'Method', Method )
# define( 'Class', Class )
# define( 'Module', Module )
define( 'Type', Type )
define( 'This', This )
# define( 'self', self )
define( 'Python', Python )
## define( 'ReadOnly', ReadOnly ) <-- 'Undefined' doesn't have right
# semantics when persisted
define( 'ListInt', ListInt )
define( 'ListFloat', ListFloat )
define( 'ListStr', ListStr )
define( 'ListUnicode', ListUnicode )
define( 'ListComplex', ListComplex )
define( 'ListBool', ListBool )
# define( 'ListFunction', ListFunction )
# define( 'ListMethod', ListMethod )
# define( 'ListClass', ListClass )
# define( 'ListInstance', ListInstance )
define( 'ListThis', ListThis )
define( 'DictStrAny', DictStrAny )
define( 'DictStrStr', DictStrStr )
define( 'DictStrInt', DictStrInt )
define( 'DictStrLong', DictStrLong )
define( 'DictStrFloat', DictStrFloat )
define( 'DictStrBool', DictStrBool )
define( 'DictStrList', DictStrList )
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from inception import Inception3
from PIL import Image
use_gpu = True
dtype = torch.float32
device = torch.device('cuda') if use_gpu and torch.cuda.is_available() else torch.device('cpu')
print('Using device: ', device)
def load_model(model_path, device=device):
print('pytorch_classifier.py: Loading model...')
num_classes = 8142
checkpoint = torch.load(model_path, map_location=device)
# reference: https://github.com/macaodha/inat_comp_2018/blob/master/train_inat.py
model = Inception3(transform_input=True)
model.fc = nn.Linear(2048, num_classes)
model.aux_logits = False
model.load_state_dict(checkpoint['state_dict'])
model = model.to(device=device, dtype=dtype)
model.eval() # set model to evaluation mode
print('pytorch_classifier.py: model loaded.')
return model
def classify(model, image_bytes):
img = Image.open(image_bytes)
image_np = np.asarray(img, np.uint8)
# swap color axis because numpy image is H x W x C, torch image is C X H X W
image_np = image_np.transpose((2, 0, 1))
image_np = image_np[:3, :, :] # Remove the alpha channel
image_np = np.expand_dims(image_np, axis=0) # add a batch dimension
img_input = torch.from_numpy(image_np).type(torch.float32).to(device=device, dtype=dtype)
with torch.no_grad():
scores = model(img_input)
scores = scores.cpu().data.numpy()
clss = np.argmax(scores[0])
return 'Most likely category is {}'.format(str(clss))
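# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# The checkpoint and image paths are placeholders; load_model() expects an
# iNat2018-style Inception3 checkpoint containing a 'state_dict' entry.
if __name__ == '__main__':
    model = load_model('path/to/inat2018_inception3_checkpoint.pth.tar')
    with open('path/to/example_image.jpg', 'rb') as f:
        print(classify(model, f))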
|
import os
import json
from datetime import datetime
from django.views import View
from django.middleware import csrf
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponse, JsonResponse
from .models import *
class IndexView(View):
def get(self, request):
return HttpResponse('Use post method to save your data!')
class SignupView(View):
def post(self, request):
# if they do not pass email id, give error
        # if they do not pass first and last name, give error
        if 'first_name' not in request.POST or 'last_name' not in request.POST:
return JsonResponse({'success': False, 'message': 'first_name and last_name are required for signup!'}, status=400)
# if they do not pass email id, give error
if 'email' not in request.POST:
return JsonResponse({'success': False, 'message': 'email id is required for signup!'}, status=400)
# if they do not pass password, give error
if 'password' not in request.POST:
return JsonResponse({'success': False, 'message': 'password is required for signup!'}, status=400)
# get data from request.post
first_name = request.POST['first_name']
last_name = request.POST['last_name']
email = request.POST['email']
password = request.POST['password']
try:
            # create a user and mark it as active
user = User.objects.create_user(first_name = first_name,
last_name = last_name,
username = email,
email = email,
is_active = True)
user.set_password(password)
user.save()
        except Exception:
return JsonResponse(status=404, data={
'success': False,'message':'Try again, email already used in another account.'
})
else:
return JsonResponse({"success": True, "message": "Your account has been created!"})
class SigninView(View):
def post(self, request):
if request.user.is_authenticated:
return JsonResponse({"success": True, "message": "You are already logged in!"})
# if they do not pass email id, give error
if 'email' not in request.POST:
return JsonResponse({'success': False, 'message': 'email id is required for signing in!'}, status=400)
# if they do not pass password, give error
if 'password' not in request.POST:
return JsonResponse({'success': False, 'message': 'password is required for signing in!'}, status=400)
email = request.POST['email']
password = request.POST['password']
# authenticate user
user = authenticate(request, username=email, password=password)
if user is not None:
# log the user in
login(request, user)
return JsonResponse({"success": True, "message": "Logged in successfully!"})
else:
# try to find the user model
user = User.objects.filter(username = email).first()
# if the user exists
if user:
# and the user is active, password was incorrect
if user.is_active:
return JsonResponse(
status=401,
data={'success': False, 'message': 'Incorrect password, please try again.'}
)
# otherwise, account is inactive
else:
return JsonResponse(
status=401,
data={'success': False, 'message':'Your account is inactive, please send us an email.'}
)
# if user does not exist
else:
return JsonResponse(status=404, data={'success': False, 'message':'This account does not exist, please signup.'})
class SignoutView(View):
def get(self, request):
logout(request)
return JsonResponse({'success': True, 'message':'You have successfully signed out.'})
class FormSubmitView(View):
def post(self, request, username):
metaItemsToCatch = [
'HTTP_HOST', 'HTTP_USER_AGENT', 'HTTP_ACCEPT_ENCODING',
'REMOTE_HOST', 'REMOTE_ADDR'
]
print(request.META)
user = User.objects.filter(username = username, is_active=True).first()
if user:
user_url = UserUrlMap.objects.filter(user=user, url=request.get_host()).first()
if user_url:
if user_url.is_active:
try:
DataStore.objects.create(
user=user,
url=user_url,
created_at=datetime.now(),
header_data=json.dumps({item: request.META[item] for item in metaItemsToCatch}),
form_data=json.dumps(dict(request.POST))
)
                    except Exception:
return JsonResponse(status=400, data={"success": False, "message": "Could not submit, try again!"})
else:
return JsonResponse({"success": True, "message": "Form submitted successfully!"})
else:
return JsonResponse(
status=400,
data={"success": False, "message": f"Form not submitted, you need to activate the originating domain {request.get_host()} first!"}
)
else:
UserUrlMap.objects.create(user=user, url=request.get_host())
return JsonResponse(
status=400,
data={"success": False, "message": f"Form not submitted, you need to verify the originating domain {request.get_host()} first!"}
)
else:
return JsonResponse(
status=403,
data={'success': False, 'message': 'This form is not active yet, signup or activate to submit data!'}
)
|
import numpy
from tensorflow import keras
from keras.constraints import maxnorm
seed = 21
from __bazaPodatkov import bazaPodatkov
# Load the image dataset
(X_train, y_train), (X_test, y_test), class_num = bazaPodatkov()
# Build the model
# TRY changing the layers of the neural network: add or remove a layer, or change its parameters!
# More about the individual layer types: https://keras.io/api/layers/
model = keras.Sequential()
model.add(keras.layers.Conv2D(32, (3, 3), input_shape=X_train.shape[1:], padding='same'))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Conv2D(32, 3, input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(64, 3, activation='relu', padding='same'))
model.add(keras.layers.MaxPooling2D(2))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(64, 3, activation='relu', padding='same'))
model.add(keras.layers.MaxPooling2D(2))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Conv2D(128, 3, activation='relu', padding='same'))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Flatten())
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(class_num, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())  # Print a summary of the assembled model
# Train the model: WARNING, training is very slow (2h+)
numpy.random.seed(seed)
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=25, batch_size=64)
# Save the model
model.save("cifar-cnn")
import pandas as pd
import matplotlib.pyplot as plt
# Plot the training history
pd.DataFrame(history.history).plot()
plt.show()
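# A brief check that the saved model can be reloaded and evaluated
# (a minimal sketch; "cifar-cnn" is the directory written by model.save above).
reloaded = keras.models.load_model("cifar-cnn")
loss, acc = reloaded.evaluate(X_test, y_test, verbose=0)
print("Reloaded model test accuracy:", acc)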
|
import numpy as np
import tensorly as tl
from gnss_func.gnss_function import frequecy_domain_CA, correlator_bank_Q
from gnss_func.array import array_lin
from gnss_func.utils import normalise_columns
import pandas as pd
from sklearn.decomposition import TruncatedSVD
import sys
sys.path.extend(['/Users/araujo/Documents/GitHub/GNSS_rep'])
from os import path
class single_polarization():
def __init__(self, nAntennas, B, T, Tc, delayGranularity, tau_vec, theta_deg_vec, number_of_epochs, IDsat,
correlatorType='Qw'):
self.tau_vec = tau_vec
self.theta_deg_vec = theta_deg_vec
self.number_of_epochs = number_of_epochs
self.ID = IDsat
self.bandwidth = B
self.time_period = T
self.chip_period = Tc
self.nAntennas = nAntennas
self.delayGranularity = delayGranularity
self.correlatorType = correlatorType
self.CA_FFT = None
self.CA_PSD = None
self.tx_power = None
self.Rnoise = None
self.Q = None
self.Qw = None
self.C = None
self.CQ = None
self.Lnoise = None
self.B = None
self.S = None
self.create_output_correlator()
self.cov_matrix_noise_mode_2()
def bank_delay(self):
return np.linspace(-self.chip_period, self.chip_period, 2*self.delayGranularity)
def code_path(self):
return '/Users/araujo/Documents/GitHub/GNSS_rep/CACODE/CA_FFT_ ' + str(self.ID) + '_' + str(
self.bandwidth) + '.pkl'
def number_of_paths(self):
return self.tau_vec.size
def cov_matrix_noise_mode_2(self):
if self.correlatorType == 'Q':
self.Rnoise = np.conj(self.Q.T) @ self.Q
elif self.correlatorType == 'Qw':
self.Rnoise = np.conj(self.Qw.T) @ self.Qw
self.Lnoise = np.linalg.cholesky(self.Rnoise)
def calc_snr_pre(self, C_N_dB):
return C_N_dB - 10 * np.log10(2 * self.bandwidth)
def noise_var(self, C_N_dB):
SNR_dB_pre = self.calc_snr_pre(C_N_dB)
SNR_dB = SNR_dB_pre + 10*np.log10(self.bandwidth*self.time_period)
snr = 10 ** (SNR_dB / 10)
return self.tx_power / snr
def create_output_correlator(self):
BANK_delay = self.bank_delay()
folder = self.code_path()
        if not path.exists(folder):
self.CA_FFT, self.CA_PSD = frequecy_domain_CA(self.bandwidth, self.time_period, self.ID)
else:
code_df = pd.read_pickle(folder)
self.CA_FFT = code_df['CA_FFT']
self.CA_PSD = code_df['CA_SPEC']
self.Q, self.C, self.CQ, self.tx_power = correlator_bank_Q(self.bandwidth, self.chip_period, self.time_period,
BANK_delay, self.tau_vec, self.CA_FFT)
if self.correlatorType == 'Qw':
self.create_Qw()
self.CQ = self.C.T @ self.Qw
def create_Qw(self,n=7):
svd = TruncatedSVD(n_components=n, n_iter=7, random_state=59, algorithm='arpack')
        self.Qw = normalise_columns(svd.fit_transform(self.Q))
def channel_taps(self):
n_epochs = self.number_of_epochs
L = self.number_of_paths()
self.B = 1 / np.sqrt(2) * (np.random.randn(n_epochs, L) + 1j * np.random.randn(n_epochs, L))
def array(self):
self.A = array_lin(self.theta_deg_vec, self.nAntennas)
def create_signal(self):
self.channel_taps()
self.array()
S0 = self.B @ tl.tenalg.khatri_rao([self.CQ.T, self.A]).T
# Tx-tensor
self.S = tl.tensor(
S0.reshape(self.number_of_epochs, int(self.CQ.size / self.number_of_paths()), self.nAntennas))
def create_Noise(self, sigma, a, b, c):
N2 = 1 / np.sqrt(2 * sigma) * (np.random.randn(b, a * c) + 1j * np.random.randn(b, a * c))
N2 = self.Lnoise @ N2
return tl.tensor(N2.reshape(a, b, c))
def rx_signal(self, C_N_dB):
sigma = self.noise_var(C_N_dB)
self.create_signal()
a, b, c = self.S.shape
self.rSignal = self.S + self.create_Noise(sigma, a, b, c)
|
from pprint import pformat
from typing import Optional
import typer
from .config import get_skill_config
from .helpers import create_nlp
app = typer.Typer(help='Commands for working with NLP models')
# take name to find app path, otherwise default to cwd
@app.command()
def build(
name: Optional[str] = typer.Argument(
None,
help="The name of the skill to build.",
),
):
"""Build nlp models associated with this skill"""
app_dir = '.'
if name:
config = get_skill_config(name)
app_dir = config['app_dir']
nlp = create_nlp(app_dir)
nlp.build()
@app.command()
def process(
name: Optional[str] = typer.Argument(
None,
help="The name of the skill to send the query to.",
),
):
"""Run a query through NLP processing"""
app_dir = '.'
if name:
config = get_skill_config(name)
app_dir = config['app_dir']
nlp = create_nlp(app_dir)
nlp.load()
    typer.echo('Enter a query below (Ctrl+C to exit)')
query = typer.prompt('>>', prompt_suffix=' ')
while True:
output = nlp.process(query)
typer.secho(pformat(output, indent=2, width=120), fg=typer.colors.GREEN)
query = typer.prompt('>>', prompt_suffix=' ')
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import validators as core_validators
from django.utils.translation import ugettext_lazy as _
from taiga.base.api import serializers
from taiga.base.api import validators
from taiga.base.exceptions import ValidationError
from taiga.base.fields import PgArrayField
from .models import User, Role
import re
######################################################
# User
######################################################
class UserValidator(validators.ModelValidator):
class Meta:
model = User
fields = ("username", "full_name", "color", "bio", "lang",
"theme", "timezone", "is_active")
def validate_username(self, attrs, source):
value = attrs[source]
        validator = core_validators.RegexValidator(re.compile(r'^[\w.-]+$'), _("invalid username"),
                                                   _("invalid"))
try:
validator(value)
except ValidationError:
raise ValidationError(_("Required. 255 characters or fewer. Letters, "
"numbers and /./-/_ characters'"))
if (self.object and
self.object.username != value and
User.objects.filter(username=value).exists()):
raise ValidationError(_("Invalid username. Try with a different one."))
return attrs
class UserAdminValidator(UserValidator):
class Meta:
model = User
# IMPORTANT: Maintain the UserSerializer Meta up to date
# with this info (including here the email)
fields = ("username", "full_name", "color", "bio", "lang",
"theme", "timezone", "is_active", "email", "read_new_terms")
def validate_read_new_terms(self, attrs, source):
value = attrs[source]
if not value:
raise ValidationError(
_("Read new terms has to be true'"))
return attrs
class RecoveryValidator(validators.Validator):
token = serializers.CharField(max_length=200)
password = serializers.CharField(min_length=6)
class ChangeEmailValidator(validators.Validator):
email_token = serializers.CharField(max_length=200)
class CancelAccountValidator(validators.Validator):
cancel_token = serializers.CharField(max_length=200)
######################################################
# Role
######################################################
class RoleValidator(validators.ModelValidator):
permissions = PgArrayField(required=False)
class Meta:
model = Role
fields = ('id', 'name', 'permissions', 'computable', 'project', 'order')
i18n_fields = ("name",)
class ProjectRoleValidator(validators.ModelValidator):
class Meta:
model = Role
fields = ('id', 'name', 'slug', 'order', 'computable')
|
#!/usr/bin/env python2
"""
builtin_test.py: Tests for builtin.py
"""
from __future__ import print_function
import unittest
import sys
from core import pyutil
from osh import split
from osh import builtin # module under test
class BuiltinTest(unittest.TestCase):
def testAppendParts(self):
# allow_escape is True by default, but False when the user passes -r.
CASES = [
(['Aa', 'b', ' a b'], 100, 'Aa b \\ a\\ b'),
(['a', 'b', 'c'], 3, 'a b c '),
]
for expected_parts, max_results, line in CASES:
sp = split.IfsSplitter(split.DEFAULT_IFS, '')
spans = sp.Split(line, True)
print('--- %r' % line)
for span in spans:
print(' %s %s' % span)
parts = []
builtin._AppendParts(line, spans, max_results, False, parts)
self.assertEqual(expected_parts, parts)
print('---')
def testPrintHelp(self):
# Localization: Optionally use GNU gettext()? For help only. Might be
# useful in parser error messages too. Good thing both kinds of code are
# generated? Because I don't want to deal with a C toolchain for it.
loader = pyutil.GetResourceLoader()
builtin.Help([], loader)
for name, spec in builtin.BUILTIN_DEF.arg_specs.iteritems():
print(name)
spec.PrintHelp(sys.stdout)
print()
if __name__ == '__main__':
unittest.main()
|
import base64
import json
import zlib
from .primitives import E, Entity
from .util import UP, RIGHT, DOWN, LEFT, Point
FORMAT_VERSION = '0'
MAP_VERSION = 0x1000330000
# Size of non-1x1 entities. Needed as blueprints specify location by center point.
# Each entry is (width, height), as per their layout with orientation up/down.
x2 = (2, 2)
x3 = (3, 3)
x4 = (4, 4)
x5 = (5, 5)
entity_sizes = {
E.pump: (1, 2),
E.big_pole: x2,
E.beacon: x3,
E.radar: x3,
E.assembler: x3,
E.chemical_plant: x3,
E.lab: x3,
E.splitter: (2, 1),
E.roboport: x4,
E.furnace: x3,
E.refinery: x5,
E.rocket_silo: (9, 9),
}
def encode(entities, label="Generated", icons=[E.assembler]):
"""Encode a list of (pos, entity) into a blueprint string.
Optional args are to set blueprint label and icons.
"""
# Non-centered blueprints seem to cause weird issues.
# We work out the full width and height, then pick a center point
# and re-cast everything to that.
width = max([
pos.x + entity_sizes.get(entity.name, (1, 1))[0]
for pos, entity in entities
])
height = max([
pos.y + entity_sizes.get(entity.name, (1, 1))[1]
for pos, entity in entities
])
center = Point(width / 2 + .5, height / 2 + .5)
# lookup for resolving connections
entity_number_by_position = {pos: i+1 for i, (pos, entity) in enumerate(entities)}
blueprint = {
"blueprint": {
"item": "blueprint",
"label": label,
"version": MAP_VERSION,
"icons": [
{
"index": i + 1,
"signal": {
"type": "item", # future work: more icon types
"name": item,
},
} for i, item in enumerate(icons)
],
"entities": [
encode_entity(entity_number_by_position, i + 1, pos, entity, center)
for i, (pos, entity) in enumerate(entities)
],
}
}
return encode_json(blueprint)
def encode_json(data):
return FORMAT_VERSION + base64.b64encode(zlib.compress(json.dumps(data)))
def decode_json(data):
if data[0] != FORMAT_VERSION:
raise ValueError("Unknown format version: {}".format(data[0]))
return json.loads(zlib.decompress(base64.b64decode(data[1:])))
def encode_entity(entity_number_by_position, number, pos, entity, center):
width, height = entity_sizes.get(entity.name, (1, 1))
if entity.orientation is not None and entity.orientation % 2 == 1:
# Rotate entities if left or right
height, width = width, height
ret = {
"entity_number": number,
"name": entity.name,
"position": {
"x": pos.x + width / 2. - center.x,
"y": pos.y + height / 2. - center.y,
},
}
if entity.connections:
ret['connections'] = {}
for port, color, rel_x, rel_y, target_port in entity.connections:
target_pos = Point(pos.x + rel_x, pos.y + rel_y)
if target_pos not in entity_number_by_position:
raise ValueError("{name} at {pos} tried to target {target_pos} for {color} connection, but nothing is there".format(
name=entity.name, pos=pos, target_pos=target_pos, color=color,
))
target_number = entity_number_by_position[target_pos]
target = {"entity_id": target_number}
if target_port:
target['circuit_id'] = target_port
ret['connections'].setdefault(str(port), {}).setdefault(color, []).append(target)
if entity.orientation is not None and entity.orientation != UP:
# In their blueprints, up-oriented things have direction omitted.
# I suspect this would work either way but shorter blueprints is always nice.
# Their orientations map the same as ours but doubled, ie. 0, 2, 4, 6.
# Bottom bit is ignored.
ret["direction"] = 2 * entity.orientation
ret.update(entity.attrs)
return ret
def lossy_decode_entity(entity):
"""Convert blueprint entity to (position, Entity)"""
width, height = entity_sizes.get(entity['name'], (1, 1))
orientation = entity.get('direction', 0) / 2
if orientation % 2 == 1:
# Rotate entities if left or right
height, width = width, height
attrs = {k: v for k, v in entity.items() if k not in ('entity_number', 'name', 'position', 'direction')}
pos = entity['position']
return Point(
pos['x'] - width / 2.,
pos['y'] - height / 2.,
), Entity(
entity['name'],
orientation,
attrs,
)
def lossy_decode(data):
"""Convert a blueprint to a list of (position, entity). Note that many properties get dropped!"""
data = decode_json(data)
    entities = list(map(lossy_decode_entity, data['blueprint']['entities']))
if not entities:
return []
top_left = Point(
min(pos.x for pos, entity in entities),
min(pos.y for pos, entity in entities),
)
return [
(
Point(int(pos.x - top_left.x), int(pos.y - top_left.y)),
entity,
)
for pos, entity in entities
]
if __name__ == '__main__':
# For testing of arbitrary blueprints
import sys
    print(encode_json(json.load(sys.stdin)))
|
class DuplicateKeyError(LookupError):
pass
class udict(dict):
def __setitem__(self, key, value):
if key in self:
raise DuplicateKeyError(key)
super().__setitem__(key, value)
class lazy:
def __init__(self, f):
self.f = f
def __get__(self, obj, cls):
ret = self.f(obj)
setattr(obj, self.f.__name__, ret)
return ret
def enumerate_rank(it, start=1, key=lambda x: x):
idx, cnt = start, start
try:
it = iter(it)
x = next(it)
except StopIteration:
        # python 3.7 change:
        # https://www.python.org/dev/peps/pep-0479/
return
k = key(x)
yield idx, x
for i in it:
cnt += 1
kk = key(i)
if kk != k:
k = kk
idx = cnt
yield idx, i
def noneflt(func):
return lambda it, **kwargs: func(filter(lambda x: x is not None, it), **kwargs)
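# A brief demonstration of enumerate_rank's "competition ranking" behaviour:
# items whose keys compare equal share a rank, and the next distinct key
# resumes at its 1-based position. Purely illustrative; nothing above uses it.
def _demo_enumerate_rank():
    ranked = list(enumerate_rank(['a', 'a', 'b', 'b', 'c']))
    assert ranked == [(1, 'a'), (1, 'a'), (3, 'b'), (3, 'b'), (5, 'c')]
    return ranked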
|
# nlg.py
# -*- coding: utf-8 -*-
import random
import datetime as dt
class NLG(object):
"""
Used to generate natural language. Most of these sections are hard coded. However, some use simpleNLG which is
used to string together verbs and nouns.
"""
def __init__(self):
# make random more random by seeding with time
random.seed(dt.datetime.now())
def info(self, phrase):
infos = [
"Este es un prototipo del UPB Smart Booth.",
"Te presento al UPB Smart Booth.",
"Hola! Soy UPB Smart Booth y aun estoy en Beta."
]
infos2 = [
"Di \"" + phrase.capitalize() + "\" para interactuar conmigo",
"Di \"" + phrase.capitalize() + "\" para que pueda ayudarte",
"Di \"" + phrase.capitalize() + "\" para darte una mano"
]
return random.choice(infos) + " " + random.choice(infos2)
def acknowledge(self):
simple_acknoledgement = [
"Sí?",
"Qué deseas?",
"Qué necesitas de mí?",
"Cómo puedo ayudarte?",
"Qué puedo hacer por ti?"
]
ret_phrase = ""
greet_with_date = random.choice([True, False])
if greet_with_date:
ret_phrase = random.choice(simple_acknoledgement)
else:
date = dt.datetime.now()
ret_phrase = ("{}. {}").format(self.time_of_day(date), random.choice(simple_acknoledgement))
return ret_phrase
def time_of_day(self, date, with_adjective=False):
ret_phrase = ""
if date.hour < 12:
ret_phrase = "Buenos días"
if with_adjective:
ret_phrase = "%s %s" % ("this", ret_phrase)
elif (date.hour >= 12) and (date.hour < 18):
ret_phrase = "Buenas tardes"
if with_adjective:
ret_phrase = "%s %s" % ("this", ret_phrase)
elif date.hour >= 18:
ret_phrase = "Buenas noches"
if with_adjective:
ret_phrase = "%s %s" % ("this", ret_phrase)
return ret_phrase
# CUSTOM
def buses(self):
actual_schedule = None
schedules = [
"10:10",
"12:25",
"14:40",
"16:55",
"18:50"
]
now = dt.datetime.now().time()
actual_schedule = "El próximo bus debería salir a las "
if now > dt.time(0,0,0) and now <= dt.time(10,10,0):
actual_schedule += schedules[0]
elif now > dt.time(10,10,0) and now <= dt.time(12,25,0):
actual_schedule += schedules[1]
elif now > dt.time(12,25,0) and now <= dt.time(14,40,0):
actual_schedule += schedules[2]
elif now > dt.time(14,40,0) and now <= dt.time(16,55,0):
actual_schedule += schedules[3]
elif now > dt.time(16,55,0) and now <= dt.time(18,50,0):
actual_schedule += schedules[4]
else:
actual_schedule = "Lo siento, el último bus ya salió."
return actual_schedule
def appreciation(self):
phrases = [
"No hay problema!",
"No, gracias a ti",
"De nada",
"No, por favor",
"Claro, no hay problema",
"Ni lo menciones"
]
return random.choice(phrases)
def get_map_url(self, location, map_type=None):
if map_type == "satellite":
return "http://maps.googleapis.com/maps/api/staticmap?center=%s&zoom=13&scale=false&size=1200x600&maptype=satellite&format=png" % location
elif map_type == "terrain":
return "http://maps.googleapis.com/maps/api/staticmap?center=%s&zoom=13&scale=false&size=1200x600&maptype=terrain&format=png" % location
elif map_type == "hybrid":
return "http://maps.googleapis.com/maps/api/staticmap?center=%s&zoom=13&scale=false&size=1200x600&maptype=hybrid&format=png" % location
else:
return "http://maps.googleapis.com/maps/api/staticmap?center=%s&zoom=13&scale=false&size=1200x600&maptype=roadmap&format=png" % location
|
import logging
from egtsdebugger.egts import *
import socket
class RnisConnector:
"""Provide functional for connecting to RNIS"""
def __init__(self, host, port, num, dispatcher, file, **kwargs):
self.host = host
self.port = port
self.num = 0
self.max = num
self.did = dispatcher
self.pid = 0
self.rid = 0
self.login = kwargs.get('login')
self.password = kwargs.get('password')
self.buffer = b''
logging.basicConfig(filename=file, filemode='w', level=logging.INFO)
def start(self):
logging.info("start rnis_connector")
s = socket.socket()
s.connect((self.host, self.port))
with s:
try:
if self._auth(s):
self._loop(s)
else:
logging.error("EGTS authorization failed")
except Exception as err:
logging.error("EGTS connection test failed: %s", err)
else:
if self.num == self.max:
logging.info("Received %s packets", self.num)
finally:
s.close()
def _auth(self, conn):
subrec = EgtsSrDispatcherIdentity(EGTS_SR_DISPATCHER_IDENTITY, dt=0, did=self.did)
response = self._send_auth_packet(conn, subrec)
logging.info("Received egts packet: %s", response)
if not self._check_response(response):
return False
if self.did == 0xFFffFFff:
auth_params = self._receive_packet(conn)
logging.info("Received egts packet: %s", auth_params)
self._send_replay(conn, auth_params)
if not self._check_auth_params(auth_params):
return False
subrec = EgtsSrAuthInfo(EGTS_SR_AUTH_INFO, unm=self.login, upsw=self.password)
response = self._send_auth_packet(conn, subrec)
logging.info("Received egts packet: %s", response)
if not self._check_response(response):
return False
result_code = self._receive_packet(conn)
logging.info("Received egts packet: %s", result_code)
self._send_replay(conn, result_code)
if not self._check_result_code(result_code):
return False
return True
def _loop(self, conn):
while self.num < self.max:
egts = self._receive_packet(conn)
self._send_replay(conn, egts)
self.num += 1
logging.info("Received egts packet: %s", egts)
def _receive_packet(self, conn):
while len(self.buffer) <= EGTS_MAX_PACKET_LENGTH:
if self.buffer == b'':
data = conn.recv(1024)
if not data:
return None
else:
self.buffer += data
try:
egts = Egts(self.buffer)
self.buffer = egts.rest_buff
return egts
except EgtsPcInvdatalen as err:
data = conn.recv(1024)
if not data:
return None
else:
self.buffer += data
    def _send_replay(self, conn, egts):
        reply = egts.reply(self.pid, self.rid)
        conn.send(reply)
        self._pid_increment()
        self._rid_increment()
def _send_auth_packet(self, conn, subrec):
egts_record = EgtsRecord(rid=self.rid, sst=EGTS_AUTH_SERVICE, subrecords=[subrec])
packet = Egts.form_bin(self.pid, [egts_record])
conn.send(packet)
self._pid_increment()
self._rid_increment()
response = self._receive_packet(conn)
return response
def _pid_increment(self):
self.pid += 1
if self.pid > 0xFFff:
self.pid = 0
def _rid_increment(self):
self.rid += 1
if self.rid > 0xFFff:
self.rid = 0
    @staticmethod
    def _check_response(packet):
        for record in packet.records:
            for subrec in record.subrecords:
                if subrec.rst != 0:
                    return False
        return True
@staticmethod
def _check_auth_params(packet):
if packet.records[0].subrecords[0].flg != 0:
return False
else:
return True
@staticmethod
def _check_result_code(packet):
if packet.records[0].subrecords[0].rcd != 0:
return False
else:
return True
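# A minimal usage sketch (host, port, dispatcher ID and credentials below are
# placeholders, not a real RNIS endpoint; this helper is never called here):
def _example_usage():
    connector = RnisConnector(
        '127.0.0.1', 9000,           # RNIS host/port (placeholders)
        num=10,                      # stop after 10 received packets
        dispatcher=0xFFFFFFFF,       # 0xFFFFFFFF triggers the login/password flow
        file='rnis_connector.log',   # log file passed to logging.basicConfig
        login='user', password='secret',
    )
    connector.start()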
|
from typing import Any
from .exceptions import PipelineError
from .initialize import GLib, Gst
def bus_call(_: Any, message: Gst.Message, loop: GLib.MainLoop) -> None:
"""Handle bus messages."""
if message.type == Gst.MessageType.EOS:
loop.quit()
elif message.type == Gst.MessageType.ERROR:
error, debug = message.parse_error()
loop.quit()
raise PipelineError(f"{error} -- {debug}")
def run_pipeline(pipeline: Gst.Pipeline) -> None:
"""Run a GStreamer pipeline."""
loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", bus_call, loop)
print("Starting pipeline")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
    except:
        # Swallow interrupts (e.g. Ctrl+C) so the pipeline is still torn down below.
        loop.quit()
# Clean up
pipeline.set_state(Gst.State.NULL)
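# A minimal usage sketch (assumes Gst was initialized by the .initialize module
# imported above; the launch string is illustrative only):
def _example_run():
    pipeline = Gst.parse_launch("videotestsrc num-buffers=100 ! fakesink")
    run_pipeline(pipeline)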
|
import os,sys
import random
import UserList
def __ascending__(a,b):
if (a < b):
return -1
elif (a > b):
return 1
return 0
def __descending__(a,b):
if (a < b):
return 1
elif (a > b):
return -1
return 0
class GroupedSort(UserList.UserList):
def sortOn(self,keys):
self.__keys__ = keys
iK = 0
self.__grouping__ = [self]
while (iK < len(self.__keys__)):
__groups__ = []
for group in self.__grouping__:
_key_ = self.__keys__[iK].get('key','')
__comparator__ = self.__keys__[iK].get('cmp',lambda a,b:0)
group.sort(cmp=__comparator__,key=lambda d: d[_key_])
keys = list(set([item.get(_key_,'') for item in group]))
keys.sort(cmp=__comparator__)
for key in keys:
subgroup = [item for item in group if (item.get(_key_,'') == key)]
__groups__.append(subgroup)
self.__grouping__ = []
for g in __groups__:
self.__grouping__.append(g)
iK +=1
__groups__ = []
for group in self.__grouping__:
item = group
if (isinstance(item,list)):
item = item[0]
__groups__.append(item)
self.__grouping__ = __groups__
def __repr__(self):
import StringIO
ioBuf = StringIO.StringIO()
print >>ioBuf, 'BEGIN:'
_t_ = self.grouping[0]['b']
for group in self.grouping:
if (group['b'] != _t_):
print >>ioBuf, ''
_t_ = group['b']
print >>ioBuf, str(group)
print >>ioBuf, 'END!!!'
return ioBuf.getvalue()
def grouping():
doc = "returns __grouping__"
def fget(self):
return self.__grouping__
return locals()
grouping = property(**grouping())
if (__name__ == '__main__'):
__data__ = GroupedSort()
for i in xrange(20):
__data__.append({'a':i+1,'b':random.choice(['a','b','c','d']),'type':random.choice(['Linux','Windows'])})
__keys__ = []
__keys__.append({'key':'type','cmp':__ascending__})
__keys__.append({'key':'b','cmp':__descending__})
__keys__.append({'key':'a','cmp':__ascending__})
__data__.sortOn(__keys__)
print str(__data__)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import six
from pygments.lexer import RegexLexer
from pygments.lexer import include
from pygments.token import Comment
from pygments.token import Name
from pygments.token import Number
from pygments.token import Operator
from pygments.token import Other
from pygments.token import Punctuation
from pygments.token import String
from pygments.token import Text
import pwndbg.compat
__all__ = ['PwntoolsLexer']
class PwntoolsLexer(RegexLexer):
"""
Fork from pwntools
https://github.com/Gallopsled/pwntools/blob/7860eecf025135380b137dd9df85dd02a2fd1667/pwnlib/lexer.py
Edit:
* Remove Objdump rules
* Merge pygments-arm (https://github.com/heia-fr/pygments-arm)
"""
name = 'PwntoolsLexer'
filenames = ['*.s', '*.S', '*.asm']
#: optional Comment or Whitespace
string = r'"(\\"|[^"])*"'
char = r'[\w$.@-]'
    identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+|or)'
number = r'(?:0[xX][a-zA-Z0-9]+|\d+)'
memory = r'(?:[\]\[])'
eol = r'[\r\n]+'
tokens = {
'root': [
include('whitespace'),
# Label
(identifier + ':', Name.Label),
(number + ':', Name.Label),
# AT&T directive
(r'\.' + identifier, Name.Attribute, 'directive-args'),
(r'lock|rep(n?z)?|data\d+', Name.Attribute),
# Instructions
(identifier, Name.Function, 'instruction-args'),
(r'[\r\n]+', Text),
],
'directive-args': [
(identifier, Name.Constant),
(string, String),
('@' + identifier, Name.Attribute),
(number, Number.Integer),
(eol, Text, '#pop'),
(r'#.*?$', Comment, '#pop'),
include('punctuation'),
include('whitespace')
],
'instruction-args': [
# Fun things
(r'([\]\[]|BYTE|DWORD|PTR|\+|\-|}|{|\^|>>|<<|&)', Text),
# Address constants
(identifier, Name.Constant),
('=' + identifier, Name.Constant), # ARM symbol
(number, Number.Integer),
# Registers
('%' + identifier, Name.Variable),
('$' + identifier, Name.Variable),
# Numeric constants
('$' + number, Number.Integer),
('#' + number, Number.Integer),
# ARM predefined constants
('#' + identifier, Name.Constant),
(r"$'(.|\\')'", String.Char),
(eol, Text, '#pop'),
include('punctuation'),
include('whitespace')
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
# Block comments
# /* */ (AT&T)
(r'/\*.*?\*/', Comment),
# Line comments
# // (AArch64)
# # (AT&T)
# ; (NASM/intel, LLVM)
# @ (ARM)
(r'(//|[#;@]).*$', Comment.Single)
],
'punctuation': [
(r'[-*,.():]+', Punctuation)
]
}
# Note: convert all unicode() to str() in Python 2.7, since unicode_literals is enabled.
# pygments<=2.2.0 (the latest stable release at the time of this commit) uses the 'str'
# type when matching rules under Python 2.7, so we must convert all unicode back to str().
if pwndbg.compat.python2:
def _to_str(obj):
type_ = type(obj)
if type_ in (tuple, list):
return type_(map(_to_str, obj))
elif type_ is unicode:
return str(obj)
return obj
PwntoolsLexer.tokens = {
_to_str(k): _to_str(v)
for k, v in PwntoolsLexer.tokens.iteritems()
}
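# A minimal usage sketch (pygments is importable here, given the imports above;
# the assembly snippet is illustrative only):
def _example_highlight():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    return highlight("mov eax, 0x1\nret\n", PwntoolsLexer(), TerminalFormatter())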
|
from .base import BaseProvider
from .process import ProcessProvider
__all__ = ["BaseProvider", "ProcessProvider"]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from gbpservice.nfp.core import log as nfp_logging
from gbpservice.nfp.lib import nfp_exceptions
LOG = nfp_logging.getLogger(__name__)
sql_lock_support = True
class ContextManager(object):
def __init__(self, session=None, suppress=tuple()):
        # The suppress tuple holds the kinds of exceptions
        # that we do not want to re-raise.
self.session = session
self.suppress = suppress
def __enter__(self):
pass
def __exit__(self, Exptype, expvalue, traceback):
if self.suppress and Exptype:
if Exptype in self.suppress:
return False
            for exception in self.suppress:
                if issubclass(Exptype, exception):
                    return False
if not self.suppress and traceback:
return True
else:
return False
def retry(self, method, *args, **kwargs):
tries = kwargs.pop('tries', 1)
delay = 2
backoff = 2
while tries > 1:
            # Loop 'tries-1' times; the final attempt below
            # runs without any try/except.
try:
return method(*args, **kwargs)
except Exception:
msg = " %s retrying in %s seconds " % (self.__class__, delay)
LOG.error(msg)
time.sleep(delay)
tries -= 1
delay *= backoff
return method(*args, **kwargs)
class NfpDbContextManager(ContextManager):
def new(self, **kwargs):
return NfpDbContextManager(**kwargs)
def lock(self, session, method, *args, **kwargs):
if not sql_lock_support:
return method(session, *args, **kwargs)
with session.begin(subtransactions=True):
session.execute("SELECT GET_LOCK('nfp_db_lock', -1)")
ret = method(session, *args, **kwargs)
session.execute("SELECT RELEASE_LOCK('nfp_db_lock')")
return ret
def __enter__(self):
super(NfpDbContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpDbContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.DbException(Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpNovaContextManager(ContextManager):
def new(self, **kwargs):
return NfpNovaContextManager(**kwargs)
def __enter__(self):
super(NfpNovaContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpNovaContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.NovaException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpKeystoneContextManager(ContextManager):
def new(self, **kwargs):
return NfpKeystoneContextManager(**kwargs)
def __enter__(self):
super(NfpKeystoneContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpKeystoneContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.KeystoneException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpNeutronContextManager(ContextManager):
def new(self, **kwargs):
return NfpNeutronContextManager(**kwargs)
def __enter__(self):
super(NfpNeutronContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpNeutronContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.NeutronException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpHeatContextManager(ContextManager):
def new(self, **kwargs):
return NfpHeatContextManager(**kwargs)
def __enter__(self):
super(NfpHeatContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpHeatContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.HeatException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
class NfpGBPContextManager(ContextManager):
def new(self, **kwargs):
return NfpGBPContextManager(**kwargs)
def __enter__(self):
super(NfpGBPContextManager, self).__enter__()
return self
def __exit__(self, Exptype, expvalue, traceback):
if super(NfpGBPContextManager, self).__exit__(
Exptype, expvalue, traceback):
raise nfp_exceptions.GBPException(
Exptype, str(expvalue), traceback)
# By default exit method returns False, if False is returned
# the with block re-raises the exception. To suppress that
# True should be returned explicitly
return True
# Create the respective instances once, so that no need
# to instantiate them again any where
DbContextManager = NfpDbContextManager()
NovaContextManager = NfpNovaContextManager()
KeystoneContextManager = NfpKeystoneContextManager()
NeutronContextManager = NfpNeutronContextManager()
HeatContextManager = NfpHeatContextManager()
GBPContextManager = NfpGBPContextManager()
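# A minimal usage sketch of the retry/suppress behaviour (the flaky_call helper
# below is hypothetical and only illustrates the API; it is not used by NFP):
def _example_db_usage():
    def flaky_call(x):
        return x * 2

    with DbContextManager.new(suppress=(nfp_exceptions.DbException,)) as dcm:
        # retry() pops 'tries' from kwargs and retries with exponential backoff
        # before making one final attempt outside the try/except.
        return dcm.retry(flaky_call, 21, tries=3)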
|
###############################################################################
# Exercise
# Use the corpus module to explore austen-persuasion.txt.
# How many word tokens does this book have?
# How many word types?
###############################################################################
# import the corpus containing austen-persuasion.txt
from nltk.corpus import gutenberg
# read the text
text = gutenberg.words('austen-persuasion.txt')
# determine how many word tokens there are
num_tokens = len(text)
# determine how many types (i.e. unique tokens) there are
num_types = len(set(text))
# present the outcome
print('austen-persuasion.txt: tokens=',num_tokens,'types=',num_types)
|
"""
This module contains the different algorithms that were evaluated for discovering log file templates (format strings).
Add additional template processors to this file.
Functions in this module should accept an iterable of LogLines.
Functions should return an iterable of Templates.
(see named tuple definition in magichour.api.local.util.namedtuples)
"""
import re
import tempfile
import uuid
from magichour.api.local.modelgen.LogCluster import LogCluster
from magichour.api.local.util.log import get_logger, log_exc
from magichour.api.local.util.namedtuples import DistributedTemplateLine
from magichour.lib.StringMatch import StringMatch
logger = get_logger(__name__)
def logcluster(lines, *args, **kwargs):
"""
This function uses the logcluster algorithm (available at http://ristov.github.io/logcluster/) to cluster
log files and mine line patterns. See http://ristov.github.io/publications/cnsm15-logcluster-web.pdf for
additional details on the algorithm. The current implementation writes loglines to a temporary file then
feeds it to the logcluster command line tool (written in perl).
Eventually, the goal is to fully translate logcluster.pl into python to eliminate this step.
    Behavior of this function differs depending on how lines and file_path are set:
lines AND file_path set: write lines to file at file_path
lines BUT NOT file_path set: write lines to temporary file
file_path BUT NOT lines: pass file_path directly into logcluster
NEITHER lines NOR file_path: throw exception
Args:
lines: an iterable of LogLine named tuples
*args:
**kwargs:
Kwargs:
        file_path (string): target path to pass to logcluster.pl (if lines is also given, they are first written to this path).
All other kwargs are passed on the command line to logcluster.pl. See above for details.
Returns:
templates: a list of Template named tuples
"""
file_path = kwargs.pop("file_path", None)
fp = None
if lines and file_path:
logger.info("Writing lines to file: %s", file_path)
LogCluster.write_file(lines, file_path)
elif lines and not file_path:
fp = tempfile.NamedTemporaryFile()
file_path = fp.name
logger.info("Writing lines to temporary file: %s", file_path)
LogCluster.write_file(lines, file_path)
elif not lines and file_path:
logger.info("Using existing lines in file: %s", file_path)
else: # not lines and not passed_file_path
log_exc(
logger,
"Must pass either argument 'lines' or keyword argument 'file_path' (or both).")
support = kwargs.pop("support", None)
if not support:
log_exc(logger, "Must pass kwarg 'support'.")
output = LogCluster.run_on_file(file_path, support, *args, **kwargs)
if fp:
# Temporary files are deleted when closed.
logger.info("Closing file: %s", file_path)
fp.close()
templates = LogCluster.parse_output(output)
return templates
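# A minimal usage sketch (assumes `loglines` is an iterable of LogLine named
# tuples produced upstream; the support value of 50 is illustrative only):
def _example_logcluster(loglines):
    # Lines are written to a temporary file, the logcluster.pl tool is invoked
    # with the given support threshold, and its output is parsed into templates.
    return logcluster(loglines, support=50)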
def stringmatch(lines, *args, **kwargs):
"""
This function uses the StringMatch algorithm to perform clustering and line pattern mining.
See the paper "One Graph Is Worth a Thousand Logs: Uncovering Hidden Structures in Massive System Event Logs"
by Aharon, Barash, Cohen, and Mordechai for further details on the algorithm.
The name "StringMatch" was taken from another paper: (Aharon et al do not name their algorithm).
Args:
lines: (iterable LogLine): an iterable of LogLine named tuples
Kwargs:
batch_size (int): batch_size to pass to StringMatch (default: 5000)
skip_count (int): skip_count to pass to StringMatch (default: 0)
threshold (float): threshold to pass to StringMatch, must be between 0 and 1 (default: 0.75)
min_samples (int): min_samples to pass to StringMatch (default: 25)
Returns:
templates (list Template): a list of Template named tuples
"""
batch_size = kwargs.get("batch_size", 5000)
skip_count = kwargs.get("skip_count", 0)
threshold = kwargs.get("threshold", 0.75)
min_samples = kwargs.get("min_samples", 25)
clusters = StringMatch.get_clusters(
lines, batch_size, skip_count, threshold, min_samples)
template_id = 1
templates = []
for cluster in clusters:
template_str = cluster.get_template_line()
template_regex = re.compile("%s$" % re.escape(template_str))
template = DistributedTemplateLine(
id=str(uuid.uuid4()),
template=template_regex,
skip_words=None,
raw_str=template_str,
)
templates.append(template)
template_id += 1
return templates
def baler(lines):
"""
This function uses the Baler tool, created by Sandia National Labs.
The tool is expected to be released in Q1 2016, so this code will be updated when that happens.
TODO: Complete this section.
Args:
lines (iterable LogLine): an iterable of LogLine named tuples
Returns:
templates (list Template): a list of Template named tuples
"""
pass
|
""" Module to test processing root requests """
from unittest.mock import Mock, call
import pytest
from pinakes.main.approval.tests.factories import (
WorkflowFactory,
RequestFactory,
)
from pinakes.main.approval.services.process_root_request import (
ProcessRootRequest,
)
from pinakes.main.approval.tasks import start_request_task
from pinakes.main.catalog.services.handle_approval_events import (
HandleApprovalEvents,
)
@pytest.mark.django_db
def test_process_request_no_workflow(mocker):
"""Test to create a new request with no workflow"""
service = _prepare_service(mocker, [])
request = service.process().request
_assert_request(request, state="completed", decision="approved")
@pytest.mark.django_db
def test_process_request_one_workflow(mocker):
"""Test to create a new request with one workflow but no group"""
workflow = WorkflowFactory()
service = _prepare_service(mocker, [workflow.id])
request = service.process().request
_assert_request(
request, state="notified", group_name="<NO_GROUP>", workflow=workflow
)
@pytest.mark.django_db
def test_process_request_one_workflow_one_group(mocker):
"""Test to create a new request with one workflow and one group"""
add_permissions = mocker.patch(
"pinakes.main.common.tasks.add_group_permissions",
return_value=None,
)
validations = mocker.patch(
"pinakes.main.approval.validations.runtime_validate_group",
return_value=True,
)
workflow = WorkflowFactory(group_refs=({"name": "n1", "uuid": "u1"},))
service = _prepare_service(mocker, [workflow.id])
request = service.process().request
_assert_request(
request, state="notified", group_name="n1", workflow=workflow
)
assert add_permissions.call_count == 1
assert validations.call_count == 1
@pytest.mark.django_db
def test_process_request_one_workflow_groups(mocker):
"""Test to create a new request with one workflow multiple groups"""
add_permissions = mocker.patch(
"pinakes.main.common.tasks.add_group_permissions",
return_value=None,
)
enqueue = mocker.patch("django_rq.enqueue", return_value=Mock(id=123))
workflow = WorkflowFactory(
group_refs=({"name": "n1", "uuid": "u1"}, {"name": "n2", "uuid": "u2"})
)
service = _prepare_service(mocker, [workflow.id])
request = service.process().request
_assert_request(request, num_children=2, group_name="n1,n2")
_assert_request(request.requests[0], group_name="n1", workflow=workflow)
_assert_request(request.requests[1], group_name="n2", workflow=workflow)
enqueue.assert_has_calls(
[
call(start_request_task, request.requests[0].id),
call(start_request_task, request.requests[1].id),
]
)
assert add_permissions.call_count == 2
@pytest.mark.django_db
def test_process_request_workflows_groups(mocker):
"""Test to create a new request with workflows and groups"""
add_permissions = mocker.patch(
"pinakes.main.common.tasks.add_group_permissions",
return_value=None,
)
mocker.patch(
"pinakes.main.approval.validations.runtime_validate_group",
return_value=True,
)
workflow1 = WorkflowFactory(group_refs=({"name": "n1", "uuid": "u1"},))
workflow2 = WorkflowFactory()
service = _prepare_service(mocker, [workflow1.id, workflow2.id])
request = service.process().request
request.refresh_from_db()
_assert_request(
request, state="notified", num_children=2, group_name="n1,<NO_GROUP>"
)
_assert_request(
request.requests[0],
state="notified",
group_name="n1",
workflow=workflow1,
)
_assert_request(
request.requests[1],
state="pending",
group_name="<NO_GROUP>",
workflow=workflow2,
)
assert add_permissions.call_count == 1
def _prepare_service(mocker, workflow_ids):
request = RequestFactory(
name="test", description="description", workflow=None
)
mocker.patch.object(HandleApprovalEvents, "process", return_value=None)
service = ProcessRootRequest(request.id, workflow_ids)
return service
def _assert_request(
request,
state="pending",
decision="undecided",
num_children=0,
group_name="",
workflow=None,
):
assert request.name == "test"
assert request.description == "description"
assert request.state == state
assert request.decision == decision
assert request.number_of_children == num_children
assert request.workflow == workflow
assert request.group_name == group_name
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BgpIpv4L2Site(Base):
"""BGP IPv4 Peer L2 Site (Range) Configuration
The BgpIpv4L2Site class encapsulates a list of bgpIpv4L2Site resources that are managed by the user.
A list of resources can be retrieved from the server using the BgpIpv4L2Site.find() method.
The list can be managed by using the BgpIpv4L2Site.add() and BgpIpv4L2Site.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'bgpIpv4L2Site'
_SDM_ATT_MAP = {
'Active': 'active',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'DistinguishAsNumber': 'distinguishAsNumber',
'DistinguishAssignedNumber': 'distinguishAssignedNumber',
'DistinguishIpAddr': 'distinguishIpAddr',
'DutIp': 'dutIp',
'EnCluster': 'enCluster',
'EnControlWord': 'enControlWord',
'EnSeqDelivery': 'enSeqDelivery',
'EnableBfdVccv': 'enableBfdVccv',
'EnableVccvPing': 'enableVccvPing',
'EncapsulationType': 'encapsulationType',
'Errors': 'errors',
'LocalIp': 'localIp',
'LocalRouterID': 'localRouterID',
'MtuL2Site': 'mtuL2Site',
'Multiplier': 'multiplier',
'Name': 'name',
'NumClusterPerL2Site': 'numClusterPerL2Site',
'NumL2Sites': 'numL2Sites',
'NumLabelBlocksPerL2Site': 'numLabelBlocksPerL2Site',
'SessionStatus': 'sessionStatus',
'SiteId': 'siteId',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TargetAsNumber': 'targetAsNumber',
'TargetAssignedNumber': 'targetAssignedNumber',
'TargetIpAddr': 'targetIpAddr',
'TypeDistinguish': 'typeDistinguish',
'TypeTarget': 'typeTarget',
'VpnName': 'vpnName',
}
def __init__(self, parent):
super(BgpIpv4L2Site, self).__init__(parent)
@property
def ClusterList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.clusterlist_6e3b9385a37769c7040d46a28feaa819.ClusterList): An instance of the ClusterList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.clusterlist_6e3b9385a37769c7040d46a28feaa819 import ClusterList
return ClusterList(self)
@property
def Connector(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
return Connector(self)
@property
def LabelBlockList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.labelblocklist_7243cf48ef4a1cf284a8988d7ce69917.LabelBlockList): An instance of the LabelBlockList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.labelblocklist_7243cf48ef4a1cf284a8988d7ce69917 import LabelBlockList
return LabelBlockList(self)
@property
def Tag(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
return Tag(self)
@property
def Active(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DistinguishAsNumber(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Distinguish AS Number
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DistinguishAsNumber']))
@property
def DistinguishAssignedNumber(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Distinguish Assigned Number
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DistinguishAssignedNumber']))
@property
def DistinguishIpAddr(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Distinguish IP Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DistinguishIpAddr']))
@property
def DutIp(self):
"""
Returns
-------
- list(str): DUT IP
"""
return self._get_attribute(self._SDM_ATT_MAP['DutIp'])
@property
def EnCluster(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Cluster
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnCluster']))
@property
def EnControlWord(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Control Word
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnControlWord']))
@property
def EnSeqDelivery(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Enable Sequenced Delivery
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnSeqDelivery']))
@property
def EnableBfdVccv(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If selected, BFD VCCV MPLS is enabled.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBfdVccv']))
@property
def EnableVccvPing(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): If selected, VCCV Ping is enabled
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableVccvPing']))
@property
def EncapsulationType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Encapsulation Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EncapsulationType']))
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def LocalIp(self):
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property
def LocalRouterID(self):
"""
Returns
-------
- list(str): Router ID
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalRouterID'])
@property
def MtuL2Site(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): MTU
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MtuL2Site']))
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumClusterPerL2Site(self):
"""
Returns
-------
- number: Number Of Clusters Per L2 Site
"""
return self._get_attribute(self._SDM_ATT_MAP['NumClusterPerL2Site'])
@NumClusterPerL2Site.setter
def NumClusterPerL2Site(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumClusterPerL2Site'], value)
@property
def NumL2Sites(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): No. Of L2 Sites
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NumL2Sites']))
@property
def NumLabelBlocksPerL2Site(self):
"""
Returns
-------
- number: Number Of Label Blocks Per L2 Site
"""
return self._get_attribute(self._SDM_ATT_MAP['NumLabelBlocksPerL2Site'])
@NumLabelBlocksPerL2Site.setter
def NumLabelBlocksPerL2Site(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumLabelBlocksPerL2Site'], value)
@property
def SessionStatus(self):
"""
Returns
-------
        - list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def SiteId(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Site ID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SiteId']))
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TargetAsNumber(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Target AS Number
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetAsNumber']))
@property
def TargetAssignedNumber(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Target Assigned Number
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetAssignedNumber']))
@property
def TargetIpAddr(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Target IP Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TargetIpAddr']))
@property
def TypeDistinguish(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Distinguish Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeDistinguish']))
@property
def TypeTarget(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Target Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeTarget']))
@property
def VpnName(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): VPN Name
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VpnName']))
def update(self, ConnectedVia=None, Multiplier=None, Name=None, NumClusterPerL2Site=None, NumLabelBlocksPerL2Site=None, StackedLayers=None):
"""Updates bgpIpv4L2Site resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumClusterPerL2Site (number): Number Of Clusters Per L2 Site
- NumLabelBlocksPerL2Site (number): Number Of Label Blocks Per L2 Site
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, Multiplier=None, Name=None, NumClusterPerL2Site=None, NumLabelBlocksPerL2Site=None, StackedLayers=None):
"""Adds a new bgpIpv4L2Site resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumClusterPerL2Site (number): Number Of Clusters Per L2 Site
- NumLabelBlocksPerL2Site (number): Number Of Label Blocks Per L2 Site
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved bgpIpv4L2Site resources using find and the newly added bgpIpv4L2Site resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained bgpIpv4L2Site resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, DutIp=None, Errors=None, LocalIp=None, LocalRouterID=None, Multiplier=None, Name=None, NumClusterPerL2Site=None, NumLabelBlocksPerL2Site=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves bgpIpv4L2Site resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpIpv4L2Site resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpIpv4L2Site resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- DutIp (list(str)): DUT IP
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- LocalIp (list(str)): Local IP
- LocalRouterID (list(str)): Router ID
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumClusterPerL2Site (number): Number Of Clusters Per L2 Site
- NumLabelBlocksPerL2Site (number): Number Of Label Blocks Per L2 Site
        - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching bgpIpv4L2Site resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpIpv4L2Site data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpIpv4L2Site resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, DistinguishAsNumber=None, DistinguishAssignedNumber=None, DistinguishIpAddr=None, EnCluster=None, EnControlWord=None, EnSeqDelivery=None, EnableBfdVccv=None, EnableVccvPing=None, EncapsulationType=None, MtuL2Site=None, NumL2Sites=None, SiteId=None, TargetAsNumber=None, TargetAssignedNumber=None, TargetIpAddr=None, TypeDistinguish=None, TypeTarget=None, VpnName=None):
"""Base class infrastructure that gets a list of bgpIpv4L2Site device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- DistinguishAsNumber (str): optional regex of distinguishAsNumber
- DistinguishAssignedNumber (str): optional regex of distinguishAssignedNumber
- DistinguishIpAddr (str): optional regex of distinguishIpAddr
- EnCluster (str): optional regex of enCluster
- EnControlWord (str): optional regex of enControlWord
- EnSeqDelivery (str): optional regex of enSeqDelivery
- EnableBfdVccv (str): optional regex of enableBfdVccv
- EnableVccvPing (str): optional regex of enableVccvPing
- EncapsulationType (str): optional regex of encapsulationType
- MtuL2Site (str): optional regex of mtuL2Site
- NumL2Sites (str): optional regex of numL2Sites
- SiteId (str): optional regex of siteId
- TargetAsNumber (str): optional regex of targetAsNumber
- TargetAssignedNumber (str): optional regex of targetAssignedNumber
- TargetIpAddr (str): optional regex of targetIpAddr
- TypeDistinguish (str): optional regex of typeDistinguish
- TypeTarget (str): optional regex of typeTarget
- VpnName (str): optional regex of vpnName
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
abort(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
restartDown(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
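# Usage sketch (not part of the generated class): `parent` below stands for whichever NGPF node
# exposes BgpIpv4L2Site in a live ixnetwork_restpy session; names and values are illustrative only.
#
#   site = parent.BgpIpv4L2Site.add(Name='site-1', Multiplier=1)
#   site.update(NumClusterPerL2Site=2)
#   site.find(Name='^site-1$')   # find() matches with regex; ^...$ forces an exact match
#   site.Start()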
|
#!/usr/bin/env python
import sys
from unittest import TestCase
from mock import Mock, patch
# Mock out the collectd module
sys.modules['collectd'] = Mock()
from plugin.nginx_plus_collectd import MetricSink, MetricRecord
class MetricSinkTest(TestCase):
def setUp(self):
self.sink = MetricSink()
self.mock_values = CollectdValuesMock()
@patch('plugin.nginx_plus_collectd.collectd.Values')
def test_emit_record(self, mock_collectd):
mock_collectd.return_value = self.mock_values
instance_id = 'my_plugin'
metric_value = 1234567890
metric_dimensions = {'nginx.version' : '1.11.10'}
expected_type = 'counter'
expected_values = [metric_value]
expected_plugin_instance = '{}[nginx_version=1.11.10]'.format(instance_id)
expected_type_instance = 'connections.accepted'
expected_meta = {'true' : 'true'}
expected_plugin = 'nginx-plus'
record = MetricRecord(expected_type_instance, expected_type, metric_value, instance_id, metric_dimensions)
self.sink.emit(record)
self.assertEquals(1, len(self.mock_values.dispatch_collector))
dispatched_value = self.mock_values.dispatch_collector[0]
self.assertIsNotNone(dispatched_value.time)
self.assertEquals(expected_plugin, dispatched_value.plugin)
self.assertEquals(expected_values, dispatched_value.values)
self.assertEquals(expected_type, dispatched_value.type)
self.assertEquals(expected_type_instance, dispatched_value.type_instance)
self.assertEquals(expected_plugin_instance, dispatched_value.plugin_instance)
self.assertDictEqual(expected_meta, dispatched_value.meta)
def test_format_dimensions(self):
key_1 = 'my.key.1'
key_2 = 'my.key.2'
value_1 = 'my.value.1'
value_2 = 'my.value.2'
raw_dimensions = {key_1 : value_1, key_2 : value_2}
expected_pair_1 = '{}={}'.format(key_1.replace('.', '_'), value_1)
expected_pair_2 = '{}={}'.format(key_2.replace('.', '_'), value_2)
actual_dimensions = self.sink._format_dimensions(raw_dimensions)
pairs = actual_dimensions.split(',')
self.assertEquals(2, len(pairs))
self.assertTrue(expected_pair_1 in pairs)
self.assertTrue(expected_pair_2 in pairs)
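# Minimal stand-in for collectd.Values used by the tests above: every dispatch() call simply
# records the instance in dispatch_collector so assertions can inspect what would have been emitted.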
class CollectdValuesMock(object):
def __init__(self):
self.dispatch_collector = []
def dispatch(self):
self.dispatch_collector.append(self)
|
import os
import sys
def usage():
print "%s script.b" % sys.argv[0]
sys.exit(127)
class Machine:
def __init__(self, filename):
self.filename = filename
self.code = []
self.codePos = 0
self.tape = []
self.tapePos = 0
try:
with open(filename, 'r') as fp:
self.code = [ ord(ch) for ch in fp.read() ]
except Exception as err:
print err
sys.exit(127)
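    # Note on loop handling: when run() meets '[' it recursively re-executes the loop body,
    # and the matching ']' returns whether the current cell is non-zero, i.e. whether to loop again.
    # Passing skip=True walks over a bracketed block without executing it (used when the cell is 0).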
def run(self, skip = False):
while self.tapePos >= 0 and self.codePos < len(self.code):
if self.tapePos >= len(self.tape):
self.tape.append(0)
if self.code[self.codePos] == ord('['):
self.codePos += 1
oldPos = self.codePos
while self.run(self.tape[self.tapePos] == 0):
self.codePos = oldPos
elif self.code[self.codePos] == ord(']'):
return self.tape[self.tapePos] != 0
elif not skip:
ch = self.code[self.codePos]
if ch == ord('+'):
if self.tape[self.tapePos] < 255:
self.tape[self.tapePos] += 1
elif ch == ord('-'):
                    if self.tape[self.tapePos] > 0:
self.tape[self.tapePos] -= 1
elif ch == ord('>'): self.tapePos += 1
elif ch == ord('<'): self.tapePos -= 1
elif ch == ord('.'): os.write(2, chr(self.tape[self.tapePos]))
elif ch == ord(','): self.tape[self.tapePos] = ord(os.read(0, 1))
self.codePos += 1
return True
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) < 1:
usage()
filename = args[0]
m = Machine(filename)
m.run()
|
from threading import *
def mymsgprint():
print("Display function ")
mythreadobj = current_thread()
print("The main thread name is",mythreadobj.getName(), end = '')
if mythreadobj.daemon:
print(" and is a daemon thread.")
else:
print(" and is a non-daemon thread.")
mychildt1 = Thread(target = mymsgprint)
print("Child thread name is",mychildt1.getName(), end = '')
if mychildt1.daemon:
print(" and is a daemon thread.")
else:
print(" and is a non-daemon thread.")
mychildt1.start()
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import os
from pgadmin.utils.route import BaseTestGenerator
from pgadmin.tools.sqleditor.utils.query_tool_fs_utils import \
read_file_generator
class TestReadFileGeneratorForEncoding(BaseTestGenerator):
"""
    Check that read_file_generator reads the file with the given encoding as intended
"""
scenarios = [
(
'When user is trying to load the file with utf-8 encoding',
dict(
file='test_file_utf8_encoding.sql',
encoding='utf-8'
)
),
(
'When user is trying to load the file with other encoding and'
' trying to use utf-8 encoding to read it',
dict(
file='test_file_other_encoding.sql',
encoding='utf-8'
)
),
]
def setUp(self):
self.dir_path = os.path.dirname(os.path.realpath(__file__))
        self.complete_path = os.path.join(self.dir_path, self.file)
def runTest(self):
        result = read_file_generator(self.complete_path, self.encoding)
# Check if file is read properly by the generator
self.assertIn('SELECT 1', next(result))
|
from setuptools import setup
setup(
name="keycloak_cli",
version='0.1',
py_modules=['keycloak_cli'],
install_requires=[
'Click',
'requests==2.23.0',
'PyYAML==5.3.1'
],
entry_points='''
[console_scripts]
keycloak_cli=keycloak_cli:cli
''',
)
|
import cronex
import dateutil.parser
from django.http import JsonResponse
from django.db.models import Q
from django.utils import timezone
from django.views.decorators.gzip import gzip_page
from django.views.decorators.http import require_GET
from .lookup import query_can_map_static
from .models import DynamicAuth, AdminAuth, UserAuth, \
PasscodeAuth, NetworkAuth, Exclusive
from beetle.models import Contact
from network.models import ServiceInstance, CharInstance
from network.lookup import get_gateway_and_device_helper
from utils.decorators import require_api_port
def __get_dynamic_auth(rule, principal):
result = []
for auth in DynamicAuth.objects.filter(rule=rule).order_by("priority"):
auth_obj = {
"when" : auth.require_when,
}
if isinstance(auth, NetworkAuth):
auth_obj["type"] = "network"
auth_obj["ip"] = auth.ip_address
auth_obj["priv"] = auth.is_private
elif isinstance(auth, PasscodeAuth):
auth_obj["type"] = "passcode"
elif isinstance(auth, UserAuth):
auth_obj["type"] = "user"
if principal.owner.id == Contact.NULL:
                # Rule is unsatisfiable: there is no user to
                # authenticate
return False, []
elif isinstance(auth, AdminAuth):
auth_obj["type"] = "admin"
if auth.admin.id == Contact.NULL:
# Rule is unsatisfiable: there is no admin
return False, []
else:
continue
result.append(auth_obj)
return True, result
def __get_minimal_rules(rules, cached_relations):
"""Returns the rules that are 'minimal'."""
if rules.count() == 0:
return rules
not_minimal_ids = set()
# TODO: this is not particularly efficient...
# filling in upper triangular matrix of strict partial order.
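    # cached_relations maps (lhs.id, rhs.id), with lhs.id <= rhs.id, to -1 when lhs is strictly
    # below rhs, 1 when rhs is strictly below lhs, and 0 when the rules are equivalent or incomparable.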
for lhs in rules.order_by("id"):
for rhs in rules.filter(id__gte=lhs.id):
key = (lhs.id, rhs.id)
if key not in cached_relations:
lhs_lte_rhs = lhs.static_lte(rhs)
rhs_lte_lhs = rhs.static_lte(lhs)
if lhs_lte_rhs and rhs_lte_lhs:
cached_relations[key] = 0
elif lhs_lte_rhs:
cached_relations[key] = -1
elif rhs_lte_lhs:
cached_relations[key] = 1
else:
cached_relations[key] = 0
cached_val = cached_relations[key]
if cached_val == 0:
pass
elif cached_val > 0:
not_minimal_ids.add(lhs.id)
else:
not_minimal_ids.add(rhs.id)
rules = rules.exclude(id__in=not_minimal_ids)
return rules
def __evaluate_cron(rule, timestamp, cached_cron):
"""Returns whether the timestamp is in the trigger window of the rule"""
if rule.id in cached_cron:
return cached_cron[rule.id]
result = False
try:
cron_str = str(rule.cron_expression)
cron = cronex.CronExpression(cron_str)
result = cron.check_trigger(timestamp.timetuple()[:5],
utc_offset=timestamp.utcoffset().seconds / (60 ** 2))
except Exception, err:
print err
result = False
cached_cron[rule.id] = result
return result
@require_api_port
@gzip_page
@require_GET
def query_can_map(request, from_gateway, from_id, to_gateway, to_id):
"""Return whether fromId at fromGateway can connect to toId at toGateway"""
if "timestamp" in request.GET:
timestamp = dateutil.parser.parse(request.GET["timestamp"])
else:
timestamp = timezone.now()
from_id = int(from_id)
from_gateway, from_principal, _, conn_from_principal = \
get_gateway_and_device_helper(from_gateway, from_id)
to_id = int(to_id)
to_gateway, to_principal, _, _ = \
get_gateway_and_device_helper(to_gateway, to_id)
can_map, applicable_rules = query_can_map_static(from_gateway,
from_principal, to_gateway, to_principal, timestamp)
response = {}
if not can_map:
response["result"] = False
return JsonResponse(response)
# Response format:
# ================
# {
# "result" : True,
# "access" : {
# "rules" : {
# 1 : { # Spec of the rule
# "prop" : "rwni",
# "int" : True,
# "enc" : False,
# "lease" : 1000, # Expiration time
# "excl" : False,
# "dauth" : [
# ... # Additional auth
# ]
# },
# },
# "services": {
# "2A00" : { # Service
# "2A01" : [1] # Char to applicable rules
# },
# },
# }
# }
# Cached already computed relations for a strict partial order
cached_relations = {}
cached_cron = {}
services = {}
rules = {}
for service_instance in ServiceInstance.objects.filter(
device_instance=conn_from_principal):
service_rules = applicable_rules.filter(
Q(service=service_instance.service) | Q(service__name="*"))
service = service_instance.service
for char_instance in CharInstance.objects.filter(
service_instance=service_instance):
characteristic = char_instance.characteristic
char_rules = service_rules.filter(Q(characteristic=characteristic) |
Q(characteristic__name="*"))
for char_rule in __get_minimal_rules(char_rules, cached_relations):
#####################################
# Compute access per characteristic #
#####################################
# Evaluate the cron expression
if not __evaluate_cron(char_rule, timestamp, cached_cron):
continue
# Access allowed
if service.uuid not in services:
services[service.uuid] = {}
if characteristic.uuid not in services[service.uuid]:
services[service.uuid][characteristic.uuid] = []
if char_rule.id not in rules:
satisfiable, dauth = __get_dynamic_auth(char_rule,
to_principal)
if not satisfiable:
continue
if char_rule.exclusive:
exclusive_id = char_rule.exclusive.id
else:
exclusive_id = Exclusive.NULL
# Put the rule in the result
rules[char_rule.id] = {
"prop" : char_rule.properties,
"excl" : exclusive_id,
"int" : char_rule.integrity,
"enc" : char_rule.encryption,
"lease" : (timestamp + char_rule.lease_duration
).strftime("%s"),
"dauth" : dauth,
}
services[service.uuid][characteristic.uuid].append(
char_rule.id)
if not rules:
response["result"] = False
else:
response["result"] = True
response["access"] = {
"rules" : rules,
"services" : services,
}
return JsonResponse(response)
|
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module with BIP38 utility functions."""
# Imports
from typing import Union
from bip_utils.base58 import Base58Encoder
from bip_utils.coin_conf import CoinsConf
from bip_utils.ecc import IPublicKey, Secp256k1PublicKey
from bip_utils.utils.misc import CryptoUtils
from bip_utils.wif import WifPubKeyModes
# Alias for WifPubKeyModes
Bip38PubKeyModes = WifPubKeyModes
class Bip38AddrConst:
"""Class container for BIP38 address constants."""
# Address hash length
ADDR_HASH_LEN: int = 4
class Bip38Addr:
"""Class for BIP38 address computation."""
@staticmethod
def EncodeKey(pub_key: Union[bytes, IPublicKey],
pub_key_mode: Bip38PubKeyModes) -> str:
"""
Encode a public key to compressed or uncompressed address.
Args:
pub_key (bytes or IPublicKey) : Public key bytes or object
pub_key_mode (Bip38PubKeyModes): Public key mode
Returns:
str: Encoded address
Raises:
TypeError: If the public key is not a Secp256k1PublicKey
ValueError: If the public key bytes are not valid
"""
        # Convert bytes to a public key object to check that they are valid
if isinstance(pub_key, bytes):
pub_key = Secp256k1PublicKey.FromBytes(pub_key)
elif not isinstance(pub_key, Secp256k1PublicKey):
raise TypeError("A secp256k1 public key is required")
# Get public key bytes
pub_key_bytes = (pub_key.RawCompressed().ToBytes()
if pub_key_mode == Bip38PubKeyModes.COMPRESSED
else pub_key.RawUncompressed().ToBytes())
# Encode key to address
net_ver = CoinsConf.BitcoinMainNet.Params("p2pkh_net_ver")
return Base58Encoder.CheckEncode(net_ver + CryptoUtils.Hash160(pub_key_bytes))
@staticmethod
def AddressHash(pub_key: Union[bytes, IPublicKey],
pub_key_mode: Bip38PubKeyModes) -> bytes:
"""
Compute the address hash as specified in BIP38.
Args:
pub_key (bytes or IPublicKey) : Public key bytes or object
pub_key_mode (Bip38PubKeyModes): Public key mode
Returns:
bytes: Address hash
Raises:
TypeError: If the public key is not a Secp256k1PublicKey
ValueError: If the public key bytes are not valid
"""
# Compute the Bitcoin address
address = Bip38Addr.EncodeKey(pub_key, pub_key_mode)
# Take the first four bytes of SHA256(SHA256())
return CryptoUtils.DoubleSha256(address)[:Bip38AddrConst.ADDR_HASH_LEN]
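# Usage sketch (assumption: `pub_key_bytes` holds a valid 33-byte compressed secp256k1 public key):
#
#   addr_hash = Bip38Addr.AddressHash(pub_key_bytes, Bip38PubKeyModes.COMPRESSED)
#   assert len(addr_hash) == Bip38AddrConst.ADDR_HASH_LEN   # 4 bytes, per BIP38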
|
import secrets
import time
import csv
from selenium import webdriver
from common_steps import *
import os.path
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
browser = webdriver.Chrome()
browser.get("http://conduitapp.progmasters.hu:1667")
# registration test
sign_up_nav_btn = browser.find_element_by_xpath("//li/a[@href='#/register']")
sign_up_nav_btn.click()
register_site = "http://conduitapp.progmasters.hu:1667/#/register"
assert browser.current_url == register_site
username_reg_field = browser.find_element_by_xpath("//input[@placeholder='Username']")
email_reg_field = browser.find_element_by_xpath("//input[@placeholder='Email']")
password_reg_field = browser.find_element_by_xpath("//input[@placeholder='Password']")
sign_up_send_btn = browser.find_element_by_xpath("//button[@class= 'btn btn-lg btn-primary pull-xs-right']")
random_email_token = secrets.token_hex(6)
username_good_format = "Tesztjozsef"
email_good_format = f"{random_email_token}@jozsef.hu"
email_bad_format = "jozsef"
password_good_format = "ASDFasdf123"
username_reg_field.send_keys(username_good_format)
email_reg_field.send_keys(email_bad_format)
password_reg_field.send_keys(password_good_format)
sign_up_send_btn.click()
time.sleep(2)
swal_text = browser.find_element_by_xpath("//div[@class= 'swal-text']")
wrong_email_msg = "Email must be a valid email."
taken_email_msg = "Email already taken. "
wrong_password_msg = "Password must be 8 characters long and include 1 number, 1 uppercase letter, and 1 lowercase letter. "
assert swal_text.text == wrong_email_msg
swal_ok_btn = browser.find_element_by_xpath("//button[@class = 'swal-button swal-button--confirm']")
swal_ok_btn.click()
## cookie test.
decline_btn = browser.find_element_by_xpath(
"//button[@class= 'cookie__bar__buttons__button cookie__bar__buttons__button--decline']")
decline_btn.click()
cookie_decline = browser.get_cookie("vue-cookie-accept-decline-cookie-policy-panel")
assert cookie_decline["value"] == "decline"
browser.delete_cookie("vue-cookie-accept-decline-cookie-policy-panel")
browser.refresh()
time.sleep(2)
accept_btn = browser.find_element_by_xpath(
"//button[@class= 'cookie__bar__buttons__button cookie__bar__buttons__button--accept']")
accept_btn.click()
cookie_accept = browser.get_cookie("vue-cookie-accept-decline-cookie-policy-panel")
assert cookie_accept["value"] == "accept"
time.sleep(2)
# real registration
username_reg_field = browser.find_element_by_xpath("//input[@placeholder='Username']")
email_reg_field = browser.find_element_by_xpath("//input[@placeholder='Email']")
password_reg_field = browser.find_element_by_xpath("//input[@placeholder='Password']")
sign_up_send_btn = browser.find_element_by_xpath("//button[@class= 'btn btn-lg btn-primary pull-xs-right']")
username_reg_field.send_keys(username_good_format)
email_reg_field.send_keys(email_good_format)
password_reg_field.send_keys(password_good_format)
sign_up_send_btn.click()
time.sleep(2)
success_reg_msg = browser.find_element_by_xpath("//div[@class='swal-text']")
assert success_reg_msg.text == "Your registration was successful!"
reg_success_ok_btn = browser.find_element_by_xpath("//div/button[@class='swal-button swal-button--confirm']")
reg_success_ok_btn.click()
# username test
navbar_all_item_logged_in = browser.find_elements_by_xpath("//li[@class= 'nav-item']")
assert navbar_all_item_logged_in[3].text == username_good_format
# logout test
navbar_all_item_logged_in[4].click()
session_cookie = browser.get_cookie("drash_sess")
assert session_cookie["value"] == "null"
# login
navbar_all_item_logged_out = browser.find_elements_by_xpath("//li[@class= 'nav-item']")
navbar_all_item_logged_out[1].click()
login_site = "http://conduitapp.progmasters.hu:1667/#/login"
assert browser.current_url == login_site
assert browser.get_cookie("drash_sess")["value"] == "null"
email_sign_in_field = browser.find_element_by_xpath("//input[@placeholder='Email']")
password_sign_in_field = browser.find_element_by_xpath("//input[@placeholder='Password']")
sign_in_send_btn = browser.find_element_by_xpath("//button[@class= 'btn btn-lg btn-primary pull-xs-right']")
# log in with the randomly generated user
# email_sign_in_field.send_keys(email_good_format)
# password_sign_in_field.send_keys(password_good_format)
# sign_in_send_btn.click()
# temporary hard-coded credentials for creating and deleting a post
email_sign_in_field.send_keys("jozsefteszt@jozsefteszt.hu")
password_sign_in_field.send_keys("asdfASDF123")
sign_in_send_btn.click()
time.sleep(1)
assert browser.get_cookie("drash_sess")["value"] != "null"
navbar_all_item_logged_in = browser.find_elements_by_xpath("//li[@class= 'nav-item']")
# navbar_in(browser)
# username check with the random registration data
# assert navbar_all_item_logged_in[3].text == username_good_format
# username check with the temporary hard-coded data
assert navbar_all_item_logged_in[3].text == "jozsefteszt"
# new post
navbar_all_item_logged_in[1].click()
time.sleep(1)
article_title_field = browser.find_element_by_xpath("//input[@placeholder='Article Title']")
article_about_field = browser.find_element_by_xpath("//input[contains(@placeholder,'this article about')]")
article_body_field = browser.find_element_by_xpath("//textarea[@placeholder='Write your article (in markdown)']")
article_tag_field = browser.find_element_by_xpath("//input[@placeholder='Enter tags']")
article_publish_btn = browser.find_element_by_xpath("//button[@class= 'btn btn-lg pull-xs-right btn-primary']")
article_title_field.send_keys("titleteszt3")
article_about_field.send_keys("aboutteszt")
article_body_field.send_keys("bodyteszt")
article_tag_field.send_keys("tagteszt")
article_publish_btn.click()
time.sleep(2)
article_url = browser.current_url
assert article_url == "http://conduitapp.progmasters.hu:1667/#/articles/titleteszt3"
# edit article
edit_btn = browser.find_element_by_xpath("//a[contains(@href,'editor/titleteszt3')]")
edit_btn.click()
time.sleep(1)
article_body_field = browser.find_element_by_xpath("//textarea[@placeholder='Write your article (in markdown)']")
article_body_field.send_keys(" EDITED")
article_publish_btn = browser.find_element_by_xpath("//button[@class= 'btn btn-lg pull-xs-right btn-primary']")
article_publish_btn.click()
time.sleep(1)
edited_content = browser.find_element_by_xpath("//div[@class = 'row article-content']/div/div/p")
assert edited_content.text == "bodyteszt EDITED"
browser.back()
# delete article
navbar_all_item_logged_in = browser.find_elements_by_xpath("//li[@class= 'nav-item']")
navbar_all_item_logged_in[0].click()
time.sleep(1)
titles_before_delete = browser.find_elements_by_xpath("//a[@class='preview-link']/h1")
before_delete_list = []
for i in titles_before_delete:
before_delete_list.append(i.text)
browser.back()
browser.back()
time.sleep(1)
delete_btn = browser.find_element_by_xpath("//button[@class='btn btn-outline-danger btn-sm']")
delete_btn.click()
time.sleep(2)
titles_after_delete = browser.find_elements_by_xpath("//a[@class='preview-link']/h1")
after_delete_list = []
for i in titles_after_delete:
after_delete_list.append(i.text)
difference = []
deleted_title = ""
for i in before_delete_list:
if i not in after_delete_list:
deleted_title = deleted_title + i
assert deleted_title == "titleteszt3"
# data from file
# new post
navbar_all_item_logged_in = browser.find_elements_by_xpath("//li[@class= 'nav-item']")
navbar_all_item_logged_in[1].click()
time.sleep(1)
article_title_field = browser.find_element_by_xpath("//input[@placeholder='Article Title']")
article_about_field = browser.find_element_by_xpath("//input[contains(@placeholder,'this article about')]")
article_body_field = browser.find_element_by_xpath("//textarea[@placeholder='Write your article (in markdown)']")
article_tag_field = browser.find_element_by_xpath("//input[@placeholder='Enter tags']")
article_publish_btn = browser.find_element_by_xpath("//button[@class= 'btn btn-lg pull-xs-right btn-primary']")
article_title_field.send_keys("commentitle")
article_about_field.send_keys("commentabout")
article_body_field.send_keys("commnetbody")
article_tag_field.send_keys("commentag")
article_publish_btn.click()
time.sleep(2)
comment_field = browser.find_element_by_xpath("//textarea[@placeholder='Write a comment...']")
comment_btn = browser.find_element_by_xpath("//div[@class= 'card-footer']/button")
with open("input_data.txt", "r") as file:
comment_lines = file.readlines()
for i in comment_lines:
comment = i.strip()
comment_field.send_keys(comment)
comment_btn.click()
time.sleep(1)
comment_list = browser.find_elements_by_xpath("//p[@class = 'card-text']")
stripped_list = []
text_list = []
for i in range(len(comment_lines) - 1, -1, -1):
stripped_list.append(comment_lines[i].strip())
for k in comment_list:
text_list.append(k.text)
assert stripped_list == text_list
delete_btn = browser.find_element_by_xpath("//button[@class='btn btn-outline-danger btn-sm']")
delete_btn.click()
time.sleep(2)
# data to file
authors = browser.find_elements_by_xpath("//a[@class = 'author']")
titles = browser.find_elements_by_xpath("//a[@class = 'preview-link']/h1")
summaries = browser.find_elements_by_xpath("//a[@class = 'preview-link']/p")
likes = browser.find_elements_by_xpath("//span[@class = 'counter']")
with open('output_data.csv', 'w', encoding='utf-8') as new_csv:
new_csv.write("author" + "," + "title" + "," + "summary" + "," + "number_of_likes" + "\n")
for i in range(len(authors) - 1):
new_csv.write(authors[i].text + "," + titles[i].text + "," + summaries[i].text + "," + likes[i].text + "\n")
with open('output_data.csv', 'r', encoding="utf-8") as file:
reader = csv.reader(file, delimiter=',')
rows = list(file)
random_line_index = secrets.randbelow(len(authors)-1)
random_article = f"{authors[random_line_index].text},{titles[random_line_index].text},{summaries[random_line_index].text},{likes[random_line_index].text}\n"
random_file_line = rows[random_line_index+1]
assert random_article == random_file_line
# listing
browser.get("http://conduitapp.progmasters.hu:1667/#/tag/lorem_tag")
time.sleep(2)
tags_in_article = browser.find_elements_by_xpath("//a[@class= 'preview-link']/div/a")
articles_with_current_tag = browser.find_elements_by_xpath("//a[@class = 'preview-link']/h1")
tag_counter = 0
for i in range(len(tags_in_article)):
if "lorem_tag" in tags_in_article[i].text:
tag_counter += 1
assert len(articles_with_current_tag) == tag_counter
# pagination
browser.back()
time.sleep(2)
page_btns = browser.find_elements_by_xpath("//ul[@class = 'pagination']/li/a")
for i in range(len(page_btns)):
page_btns[i].click()
time.sleep(1)
current_page = browser.find_element_by_xpath("//li[@class = 'page-item active']/a")
assert int(current_page.text) == len(page_btns)
time.sleep(15)
browser.quit()
|
from pydantic import BaseModel
from typing import Optional
class UserModel(BaseModel):
username: Optional[str] = ""
passcode: Optional[str] = ""
def is_valid(self):
error = []
if len(self.username) == 0:
error.append("Username cannot be blank")
if len(self.passcode) == 0:
error.append("Password cannot be blank")
if len(error) == 0:
return True, error
return False, error
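# Example of the (ok, errors) contract:
#   UserModel(username="alice", passcode="").is_valid()  ->  (False, ["Password cannot be blank"])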
|
from office365.mail.item import Item
class Contact(Item):
"""User's contact."""
@property
def id(self):
return self.properties.get("id", None)
|
from django.test import TestCase
from mirrors.tests import create_mirror_url
class MirrorUrlTest(TestCase):
def setUp(self):
self.mirror_url = create_mirror_url()
def testAddressFamilies(self):
self.assertIsNotNone(self.mirror_url.address_families())
def testHostname(self):
self.assertEqual(self.mirror_url.hostname, 'archlinux.org')
def testGetAbsoluteUrl(self):
absolute_url = self.mirror_url.get_absolute_url()
expected = '/mirrors/%s/%d/' % (self.mirror_url.mirror.name, self.mirror_url.pk)
self.assertEqual(absolute_url, expected)
def test_mirror_overview(self):
response = self.client.get('/mirrors/')
self.assertEqual(response.status_code, 200)
self.assertIn(self.mirror_url.mirror.name, response.content.decode())
def testClean(self):
# TODO: add test for self.mirror_url.clean()
pass
def tearDown(self):
self.mirror_url.delete()
|
"""Utilities for creating a new resource state
"""
from ..constructs.workspace import Workspace
def create_resource_state_cli(args) -> None:
workspace = Workspace.instance()
create_resource_state(workspace)
def create_resource_state(workspace: Workspace) -> None:
workspace.get_backend().create_resource_state("demo")
|
''' *********************** USER-PARAMETERS OF BASE ESTIMATOR *********************** '''
from base_estimator_conf import * # noqa
K = 6 * (1e7, )
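# tuple repetition: K is six copies of 1e7, i.e. (1e7, 1e7, 1e7, 1e7, 1e7, 1e7)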
|
# TODO: this is no longer working due to recent Reddit API changes
import time
import datetime
import json
import argparse
import praw
class RedditRetriever(object):
def __init__(self, _subreddit, _outfile, _start_date, _end_date, step=3600):
self.r = praw.Reddit(site_name='graphbrain', user_agent='GraphBrain (http://graphbrain.org)')
self.subreddit = _subreddit
self.output_file = _outfile
self.step = step
self.start_ts = int(time.mktime(datetime.datetime.strptime(_start_date, "%d/%m/%Y").timetuple()))
self.end_ts = int(time.mktime(datetime.datetime.strptime(_end_date, "%d/%m/%Y").timetuple()))
self.cur_ts = self.start_ts
self.posts = 0
self.comments = 0
self.retry_wait = 30
def print_status(self):
delta_t = self.end_ts - self.start_ts
done_t = self.cur_ts - self.start_ts
per = (float(done_t) / float(delta_t)) * 100.
print('retrieving subreddit: %s [%.2f%% done] --- %s posts; %s comments'
% (self.subreddit, per, self.posts, self.comments))
def build_comment(self, comment):
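        # Recursively convert a praw comment and its replies into a plain nested dict
        # (returns None for comments that have no body).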
if hasattr(comment, 'replies'):
replies = [self.build_comment(reply) for reply in comment.replies if reply is not None]
else:
replies = []
if not hasattr(comment, 'body'):
return None
if hasattr(comment, 'author') and comment.author is not None:
author = comment.author.name
else:
author = ''
self.comments += 1
return {'id': comment.id,
'author': author,
'body': comment.body,
'score': comment.score,
'ups': comment.ups,
'downs': comment.downs,
'created': comment.created,
'created_utc': comment.created_utc,
'comments': replies}
def comments_tree(self, post):
top_level_comments = list(post.comments)
return [self.build_comment(comment) for comment in top_level_comments]
def retrieve_posts(self):
for ts in range(self.cur_ts, self.end_ts, self.step):
self.cur_ts = ts
query = 'timestamp:%s..%s' % (str(ts), str(ts + self.step))
self.print_status()
search_results = self.r.subreddit(self.subreddit).search(query, syntax='cloudsearch')
for res in search_results:
comments = self.comments_tree(res)
post = {'id': res.id,
'title': res.title,
'author': res.author.name,
'permalink': res.permalink.replace('?ref=search_posts', ''),
'url': res.url,
'selftext': res.selftext,
'score': res.score,
'ups': res.ups,
'downs': res.downs,
'created': res.created,
'created_utc': res.created_utc,
'comments': comments}
self.posts += 1
# write to file
with open(self.output_file, 'a') as file:
file.write('%s\n' % json.dumps(post, separators=(',', ':')))
def run(self):
print('writing to file: %s' % self.output_file)
while True:
try:
self.retrieve_posts()
print('done.')
exit()
except KeyboardInterrupt:
exit()
except SystemExit:
exit()
except Exception as e:
print('exception: %s' % str(e))
print('retrying in %s seconds...' % self.retry_wait)
time.sleep(self.retry_wait)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--outfile', type=str, help='output file', default=None)
parser.add_argument('--startdate', type=str, help='start date', default=None)
parser.add_argument('--enddate', type=str, help='end date', default=None)
parser.add_argument('--subreddit', type=str, help='subreddit to retrieve.', default=None)
args = parser.parse_args()
subreddit = args.subreddit
outfile = args.outfile
startdate = args.startdate
enddate = args.enddate
rr = RedditRetriever(subreddit, outfile, startdate, enddate)
rr.run()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 10 22:29:29 2016
@author: nam
"""
import mmRef
import utilities
def prepareEvacEventFile(evacEventFileName,plfName,refEventName):
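    # Build one evacuation-event row (reference event, time offset, key token, location)
    # keyed to the given platform name and append it to the evacuation-event CSV file.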
    newRow = []
    for i in range(len(mmRef.EvacEventCols)):
        newRow.append('')
    newRow[mmRef.EvacEventCols.refEventName.value] = refEventName
    newRow[mmRef.EvacEventCols.timeOffset.value] = mmRef.ConstTimeValues.evacTimeOffset.value
    newRow[mmRef.EvacEventCols.keyToken.value] = mmRef.TimetableDict.trainTicket.value + plfName
    newRow[mmRef.EvacEventCols.location.value] = mmRef.TimetableDict.trainSeats.value + plfName
    newEvacEventRow = []
    newEvacEventRow.append(newRow)
    utilities.appendToCSV(evacEventFileName, newEvacEventRow)
|
import numpy as np
import skimage.draw as skdraw
def vis_bbox(bbox,img,color=(255,0,0),modify=False,alpha=0.2):
im_h,im_w = img.shape[0:2]
x1,y1,x2,y2 = bbox
x1 = max(0,min(x1,im_w-1))
x2 = max(x1,min(x2,im_w-1))
y1 = max(0,min(y1,im_h-1))
y2 = max(y1,min(y2,im_h-1))
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
if modify==True:
img_ = img
else:
img_ = np.copy(img)
if len(img.shape)==2:
color = (color[0],)
rr,cc = skdraw.polygon(r,c,img.shape[:2])
skdraw.set_color(img_,(rr,cc),color,alpha=alpha)
rr,cc = skdraw.polygon_perimeter(r,c,img.shape[:2])
if len(img.shape)==3:
for k in range(3):
img_[rr,cc,k] = color[k]
elif len(img.shape)==2:
img_[rr,cc]=color[0]
return img_
def create_att(bbox,prev_att,att_value):
im_h,im_w = prev_att.shape[0:2]
x1,y1,x2,y2 = bbox
x1 = int(max(0,min(x1,im_w-1)))
x2 = int(max(x1,min(x2,im_w-1)))
y1 = int(max(0,min(y1,im_h-1)))
y2 = int(max(y1,min(y2,im_h-1)))
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
att = 0*prev_att
att[y1:y2,x1:x2] = att_value
return np.maximum(prev_att,att)
def compute_iou(bbox1,bbox2,verbose=False):
x1,y1,x2,y2 = bbox1
x1_,y1_,x2_,y2_ = bbox2
x1_in = max(x1,x1_)
y1_in = max(y1,y1_)
x2_in = min(x2,x2_)
y2_in = min(y2,y2_)
intersection = compute_area(bbox=[x1_in,y1_in,x2_in,y2_in],invalid=0.0)
area1 = compute_area(bbox1)
area2 = compute_area(bbox2)
union = area1 + area2 - intersection
iou = intersection / (union + 1e-6)
if verbose:
return iou, intersection, union
return iou
def compute_area(bbox,invalid=None):
x1,y1,x2,y2 = bbox
if (x2 <= x1) or (y2 <= y1):
area = invalid
else:
area = (x2 - x1 + 1) * (y2 - y1 + 1)
return area
def point_in_box(pt,bbox):
x1,y1,x2,y2 = bbox
x,y = pt
is_inside = False
if x>x1 and x<x2 and y>y1 and y<y2:
is_inside=True
return is_inside
def compute_center(bbox):
x1,y1,x2,y2 = bbox
xc = 0.5*(x1+x2)
yc = 0.5*(y1+y2)
return (xc,yc)
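# Quick sanity checks (boxes use inclusive [x1, y1, x2, y2] pixel coordinates, as in compute_area):
#   compute_iou([0, 0, 9, 9], [0, 0, 9, 9])      # ~1.0 for identical boxes
#   compute_iou([0, 0, 9, 9], [20, 20, 29, 29])  # 0.0 for disjoint boxes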
|
'''
simple trigger test case
@author: Huiyugeng
'''
import time
from task import task
from task import task_container
from task.trigger import simple_trigger
from example.jobs import time_job
container = task_container.TaskContainer()
def get_now():
time_stamp = time.time()
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_stamp))
return time_now
def test_simple_trigger():
job1 = time_job.JobA()
trigger1 = simple_trigger.SimpleTrigger(0, 2)
task1 = task.Task('TaskA', job1, trigger1)
container.add_task(task1)
job2 = time_job.JobB()
trigger2 = simple_trigger.SimpleTrigger(0, 5)
trigger2.set_delay(5, 0)
task2 = task.Task('TaskB', job2, trigger2)
container.add_task(task2)
print 'start at %s' % get_now()
print '----Start (With Daemon)----'
container.start_all(True)
time.sleep(11) # pause container
print '---------Pause All---------'
    container.pause_all()
print container.stat_tasks()
time.sleep(10) # restart container
print '--------Restart All--------'
container.start_all()
print container.stat_tasks()
time.sleep(11) # stop task
print '--------Stop Task A--------'
container.stop('TaskA')
print container.stat_tasks()
time.sleep(10) # restart task
print '------Start Task A--------'
container.start('TaskA')
time.sleep(11) # remove task
print '---------Remove A---------'
container.remove_task('TaskA')
time.sleep(10) # stop all
print '---------Stop All---------'
container.stop_all()
test_simple_trigger()
|
import os
import os.path as op
import logging
lgr = logging.getLogger(__name__)
# Start with SLURM support; extending to other job schedulers is a TODO
def queue_conversion(progname, queue, outdir, heuristic, dicoms, sid,
anon_cmd, converter, session,with_prov, bids):
# Rework this...
convertcmd = ' '.join(['python', progname,
'-o', outdir,
'-f', heuristic,
'-s', sid,
'--anon-cmd', anon_cmd,
'-c', converter])
if session:
convertcmd += " --ses '%s'" % session
if with_prov:
convertcmd += " --with-prov"
if bids:
convertcmd += " --bids"
if dicoms:
convertcmd += " --files"
convertcmd += [" '%s'" % f for f in dicoms]
script_file = 'dicom-%s.sh' % sid
with open(script_file, 'wt') as fp:
fp.writelines(['#!/bin/bash\n', convertcmd])
outcmd = 'sbatch -J dicom-%s -p %s -N1 -c2 --mem=20G %s' \
% (sid, queue, script_file)
os.system(outcmd)
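# Hypothetical invocation sketch (all argument values below are illustrative only):
#   queue_conversion('convert.py', 'normal', '/out', 'heuristic.py', dicoms=None, sid='sub-01',
#                    anon_cmd='anonymize.sh', converter='dcm2niix', session=None,
#                    with_prov=False, bids=True)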
|
from datetime import datetime
from turnips.ttime import TimePeriod
def datetime_to_timeperiod(timestamp: datetime) -> TimePeriod:
weekday = timestamp.isoweekday() % 7
if timestamp.hour < 8:
raise ValueError(
"This is way too early to log a price. If you meant to log a price for another "
"day, try `!turnip log {price} [time period]` instead."
)
if weekday == 0 and timestamp.hour >= 12:
raise ValueError(
"Daisy Mae has already left your island. If you still want to log Sunday "
"prices, use `!turnip log {price} Sunday_AM` instead."
)
if timestamp.hour >= 22:
raise ValueError(
"Nook's Cranny is closed for the day, but if you want to log past prices, "
"use `!turnip log {price} [time period]` instead."
)
return TimePeriod(weekday * 2 + (0 if timestamp.hour < 12 else 1))
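# Example (assuming TimePeriod numbers Sunday_AM..Saturday_PM as 0..13, two slots per day):
#   datetime_to_timeperiod(datetime(2020, 4, 6, 9, 0))   # Monday 09:00 -> TimePeriod(2), i.e. Monday_AM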
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from typing import Any, List
import math
import InnerEye.Common.Statistics.mann_whitney_test as mwt
def test_mann_whitney_on_key() -> None:
data = [float_list(range(10)), float_list(range(5)), float_list(range(20)), float_list(range(25))]
result = mwt.mann_whitney_on_key("Imu,neck,neck", data)
# We expect 3 comparisons, because the 5-element list should be ignored, and there are 3 unordered
# pairs we can make from the other 3.
assert len(result) == 3
def float_list(seq: Any) -> List[float]:
return [float(x) for x in seq]
def test_compose_comparison_line() -> None:
pair1 = mwt.compose_comparison_line(0, 1, "Imu,neck,neck", [0.1, 0.3], [0.2, 0.4], [0.2, 0.3], [0.1, 0.1])
pair2 = mwt.compose_comparison_line(1, 0, "Imu,neck,neck", [0.2, 0.4], [0.1, 0.3], [0.2, 0.3], [0.1, 0.1])
# Check we get the same result both ways round
assert pair1 == pair2
def test_get_z_test_p_value() -> None:
result = mwt.get_z_test_p_value(0, 1, 5, 5, [3.0, 4.0], [1.0, 1.0])
assert abs(result - 0.05) <= 0.01
assert mwt.get_z_test_p_value(0, 1, 5, 5, [3.0, 3.0], [1.0, 1.0]) == 0.5
assert mwt.get_z_test_p_value(0, 1, 5, 5, [3.0, 4.0], [0.0, 0.0]) == 0.0
def test_mean_or_zero() -> None:
assert mwt.mean_or_zero([]) == 0.0
assert mwt.mean_or_zero([1.0]) == 1.0
assert mwt.mean_or_zero([1.0, 2.0]) == 1.5
def test_standard_deviation_or_zero() -> None:
assert mwt.standard_deviation_or_zero([]) == 0.0
assert mwt.standard_deviation_or_zero([1.0]) == 0.0
assert mwt.standard_deviation_or_zero([1.0, 2.0]) == math.sqrt(0.5)
def test_roc_value() -> None:
assert mwt.roc_value([], []) == 0.5
assert mwt.roc_value([], [1]) == 0.5
assert mwt.roc_value([1], [2]) == 1.0
assert mwt.roc_value([2], [1]) == 0.0
assert mwt.roc_value([1], [1]) == 0.5
assert mwt.roc_value([2], [3, 1]) == 0.5
assert mwt.roc_value([3, 1], [1, 4]) == 0.625
def test_get_median() -> None:
assert mwt.get_median([]) == " " * 9
assert mwt.get_median([12.345]) == " 12.345"
assert mwt.get_median([12345.0]) == "1.234e+04"
assert mwt.get_median([1, 2, 4]) == " 2.000"
def test_parse_values() -> None:
rows = ["0,Imu,neck,neck,12.34".split(","),
"0,Isd,neck,neck,5.67".split(","),
"1,Imu,neck,neck,23.45".split(",")]
result = mwt.parse_values(rows)
expected = {"Imu,neck,neck": [12.34, 23.45], "Isd,neck,neck": [5.67]}
assert result == expected
def test_split_statistics_data_by_institutions() -> None:
dataset_rows = ["0,,,,inst1".split(","), "1,,,,inst1".split(","), "2,,,,inst2".split(",")]
stats_rows = [["0,zero".split(","), "1,one".split(","), "2,two".split(",")]]
contents, header_rows = mwt.split_statistics_data_by_institutions(dataset_rows, stats_rows, count_threshold=1)
assert header_rows == ["1: inst1 (2 items)", "2: inst2 (1 items)", ""]
assert contents == [[["0", "zero"], ["1", "one"]], [["2", "two"]]]
# With count threshold 2, only inst1 should survive
contents, header_rows = mwt.split_statistics_data_by_institutions(dataset_rows, stats_rows, count_threshold=2)
assert header_rows == ["1: inst1 (2 items)", ""]
assert contents == [[["0", "zero"], ["1", "one"]]]
# With count threshold 3, neither should survive
contents, header_rows = mwt.split_statistics_data_by_institutions(dataset_rows, stats_rows, count_threshold=3)
assert header_rows == [""]
assert contents == []
def test_get_arguments() -> None:
assert mwt.get_arguments(["-d", "dataset.csv", "statistics.csv"])[1] == \
mwt.UsagePattern.SPLIT_STATISTICS_FILES_BY_INSTITUTION
assert mwt.get_arguments(["statistics1.csv", "statistics2.csv"])[1] == mwt.UsagePattern.COMPARE_STATISTICS_FILES
assert mwt.get_arguments(["-a", "foo", "mstats.csv"])[1] == mwt.UsagePattern.PROCESS_METRICS_ACROSS_ALL_RUNS
assert mwt.get_arguments(["mstats.csv"])[1] == mwt.UsagePattern.PROCESS_METRICS_ACROSS_ALL_RUNS
|
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib
import seaborn
import logging
import warnings
import scipy.stats as st
import statsmodels as sm
import statistics
def remove_static(df, unocc_st = '01:00:00', unocc_et = '04:00:00', quartile='75%'):
"""Calculates the static devices during specified periods and removes the devices from the dataframe
Parameters
----------
df : DataFrame
DF at the building level
unocc_st : unoccupied start time string input
Set to 1:00 am as standard
unocc_et : unoccupied end time string input
Set to 4:00 am as standard
    ** Note: this is a time range during which you believe there would most likely be no people within the building
quartile : string input
quartile level of how much you would like to be removed as the baseline, ( '25%', '50%', '75%')
Returns
-------
Dataframe where the static baseline is removed from all the device counts for each building
"""
    # plot initializing
font = {'size' : 12}
plt.rc('font', **font)
# filtering df for unoccupied time range
night_occ = df.between_time(unocc_st, unocc_et)
    # Retrieving stats for unoccupied times
removed_devices = night_occ.iloc[:,0].describe()[quartile]
no_static_df = df - removed_devices
no_static_df[no_static_df<0]=0
kwargs = dict(alpha=0.5, bins=100)
plt.figure(figsize=(10, 10), dpi=80)
plt.xlim(xmax = df.iloc[:,0].describe()['75%'])
plt.hist(df.iloc[:,0].values.tolist(), **kwargs, label='With Static Devices')
plt.hist(no_static_df.iloc[:,0].values.tolist(), **kwargs, label='Without Static Devices')
#plt.hist(df.iloc[:,0].values.tolist(), **kwargs, label='With Static Devices')
plt.gca().set(title='Static Removal Shift', xlabel='Device Count', ylabel='Device Count Frequency')
plt.legend()
return no_static_df
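# A minimal usage sketch for remove_static. The CSV path and column layout below are
# hypothetical; it assumes a single-column DataFrame of device counts with a DatetimeIndex.
if __name__ == "__main__":
    building_df = pd.read_csv("building_device_counts.csv", index_col=0, parse_dates=True)
    cleaned_df = remove_static(building_df, unocc_st='01:00:00', unocc_et='04:00:00', quartile='75%')
    plt.show()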
|
import tweepy
import random
import configparser
from twitter_auth import twitter_api as api
# Read config files - default config and auth config
config = configparser.ConfigParser()
config.read('config.ini')
cfg = config['DEFAULT']
# Store id of the last mentioned tweet so that you don't double-reply
def update_last_mentioned_tweet(tweet_id):
with open("timeline_since.txt", "w") as file:
file.write(str(tweet_id))
with open("timeline_since.txt", 'r') as f:
since_tweet_id = f.readline()
if since_tweet_id == '':
since_tweet_id = api().mentions_timeline(count=1)[0].id
update_last_mentioned_tweet(since_tweet_id)
# tweets = tweepy.Cursor(api.mentions_timeline, since_id=since_tweet_id, tweet_mode='extended').items(20)
if __name__ == "__main__":
tweets = api().mentions_timeline(since_id=since_tweet_id, tweet_mode='extended', count=20)
for tweet in reversed(tweets):
hashtags = (tweet._json['entities']['hashtags'])
msg = ''
for hashtag in hashtags:
for config_hashtag in cfg['HASHTAGS'].split(','):
if config_hashtag.lower() == hashtag['text'].lower():
msg = cfg['PI_DAY_MSG_' + config_hashtag]
break
elif hashtag['text'].lower() == cfg['PI_DAY_JOKE_HASHTAG'].lower():
with open(cfg['JOKES_FILE'], 'r') as f:
joke = random.choice(f.readlines())
msg = f"{joke}{cfg['TWEET_END']}"
break
if msg != '':
try:
api().update_status(msg, in_reply_to_status_id=tweet.id, auto_populate_reply_metadata=True)
print(f"Tweeted to {str(tweet.id)}: {msg}")
except Exception as e:
print(tweet.id)
print(e)
else:
print("nothing to tweet")
# Store the tweet id in the file so that it's skipped next time
update_last_mentioned_tweet(tweet.id)
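# A sketch of the config.ini layout this script assumes. Section and key names are inferred
# from the lookups above; all values are placeholders:
#
# [DEFAULT]
# HASHTAGS = PiDay,PiApproximationDay
# PI_DAY_MSG_PiDay = Happy Pi Day!
# PI_DAY_MSG_PiApproximationDay = Happy Pi Approximation Day!
# PI_DAY_JOKE_HASHTAG = PiDayJoke
# JOKES_FILE = jokes.txt
# TWEET_END = #PiDay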
|
from manta import *
dim = 2
particle_number = 2
res = 32
gs = vec3(res, res, res)
narrowBandWidth = 3
combineBandWidth = narrowBandWidth - 1
if dim == 2:
gs.z = 1
particleNumber = 3
s = Solver(name='Josue', gridSize=gs, dim=dim)
s.timestep = 0.5
minParticles = pow(2, dim)
flags = s.create(FlagGrid)
phi = s.create(LevelsetGrid)
phiParts = s.create(LevelsetGrid)
vel = s.create(MACGrid)
velOld = s.create(MACGrid)
velParts = s.create(MACGrid)
pressure = s.create(RealGrid)
tmpVec3 = s.create(MACGrid)
pp = s.create(BasicParticleSystem)
pVel = pp.create(PdataVec3)
mesh = s.create(Mesh)
pindex = s.create(ParticleIndexSystem)
gpi = s.create(IntGrid)
flags.initDomain(boundaryWidth=0)
phi.initFromFlags(flags)
fluidbox = Box(parent=s, p0=gs*vec3(0, 0, 0), p1=gs*vec3(0.2, 1, 1))
phi.join(fluidbox.computeLevelset())
# apic part
mass = s.create(MACGrid)
pCx = pp.create(PdataVec3)
pCy = pp.create(PdataVec3)
pCz = pp.create(PdataVec3)
# fluidDam = Box(parent=s, p0=gs*vec3(0, 0.15, 0), p1=gs*vec3(0.4, 0.5, 0.8))
# phi.join(fluidDam.computeLevelset())
flags.updateFromLevelset(phi)
sampleLevelsetWithParticles(
phi=phi, flags=flags, parts=pp, discretization=2, randomness=0.1)
mapGridToPartsVec3(source=vel, parts=pp, target=pVel)
if GUI:
gui = Gui()
gui.show()
gui.pause()
step = -1
for i in range(2000):
maxVel = vel.getMax()
s.adaptTimestep(maxVel)
pp.advectInGrid(flags=flags, vel=vel, integrationMode=IntRK2, deleteInObstacle=False)
advectSemiLagrange(flags=flags, vel=vel, grid=phi, order=1)
flags.updateFromLevelset(phi)
# # Here insert the correct particle positions
advectSemiLagrange(flags=flags, vel=vel, grid=vel, order=2)
gridParticleIndex(parts=pp , flags=flags, indexSys=pindex, index=gpi)
unionParticleLevelset(pp, pindex, flags, gpi, phiParts)
phi.addConst(1.)
phi.join(phiParts)
extrapolateLsSimple(phi=phi, distance=narrowBandWidth+2, inside=True)
extrapolateLsSimple(phi=phi, distance=3)
flags.updateFromLevelset(phi)
# APIC Transfer to MAC
particleMACTransfers(flags=flags, vel=velParts, parts=pp, partVel=pVel, cpx=pCx, cpy=pCy, cpz=pCz, mass=mass)
combineGridVel(vel=velParts, weight=tmpVec3, combineVel=vel, phi=phi, narrowBand=combineBandWidth, thresh=0)
velOld.copyFrom(vel)
addGravity(flags=flags, vel=vel, gravity=(0, -0.003, 0))
setWallBcs(flags=flags, vel=vel)
solvePressure(flags=flags, vel=vel, pressure=pressure)
setWallBcs(flags=flags, vel=vel)
extrapolateMACSimple(flags=flags, vel=vel, distance=(int(maxVel*1.25 + 2.)))
# APIC Transfer to vel
particleGridTransfers(cpx=pCx, cpy=pCy, cpz=pCz, partVel=pVel, parts=pp, vel=vel, flags=flags)
if dim==3:
phi.createMesh(mesh)
pVel.setSource(vel, isMAC=True)
phi.setBoundNeumann(0)
adjustNumber(
parts=pp, vel=vel, flags=flags,
minParticles=1*minParticles, maxParticles=2*minParticles, phi=phi,
narrowBand=narrowBandWidth
)
s.step()
|
import argparse
import logging
from api_proxy import api_proxy
def build_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser('release tools')
subparsers = parser.add_subparsers(title='actions', dest='action')
register_command = subparsers.add_parser('register')
register_command.add_argument('--model_name', type=str, required=True)
update_command = subparsers.add_parser('update')
update_command.add_argument('--model_name', type=str, required=True)
update_command.add_argument('--model_base_path', type=str, required=True)
update_command.add_argument('--model_version', type=int, required=True)
delete_command = subparsers.add_parser('delete')
delete_command.add_argument('--model_name', type=str, required=True)
delete_command.add_argument('--model_version', type=int, required=True)
subparsers.add_parser('list')
return parser
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = build_parser()
args = vars(parser.parse_args())
action = args.pop('action')
print(api_proxy(action, args))
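# Example invocations (the script filename, model names, paths, and versions are placeholders):
#   python release_tools.py register --model_name my_model
#   python release_tools.py update --model_name my_model --model_base_path /models/my_model --model_version 2
#   python release_tools.py delete --model_name my_model --model_version 1
#   python release_tools.py list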
|
from dataloaders.dataset1d import EcgDataset1D
from dataloaders.dataset2d import EcgDataset2D
from models import models1d, models2d
from trainers.base_trainer import BaseTrainer
class Trainer2D(BaseTrainer):
def __init__(self, config):
super().__init__(config)
def _init_net(self):
model = getattr(models2d, self.config["model"])(
num_classes=self.config["num_classes"],
)
model = model.to(self.config["device"])
return model
def _init_dataloaders(self):
train_loader = EcgDataset2D(
self.config["train_json"], self.config["mapping_json"],
).get_dataloader(
batch_size=self.config["batch_size"],
num_workers=self.config["num_workers"],
)
val_loader = EcgDataset2D(
self.config["val_json"], self.config["mapping_json"],
).get_dataloader(
batch_size=self.config["batch_size"],
num_workers=self.config["num_workers"],
)
return train_loader, val_loader
class Trainer1D(BaseTrainer):
def __init__(self, config):
super().__init__(config)
def _init_net(self):
model = getattr(models1d, self.config["model"])(
num_classes=self.config["num_classes"],
)
model = model.to(self.config["device"])
return model
def _init_dataloaders(self):
train_loader = EcgDataset1D(
self.config["train_json"], self.config["mapping_json"],
).get_dataloader(
batch_size=self.config["batch_size"],
num_workers=self.config["num_workers"],
)
val_loader = EcgDataset1D(
self.config["val_json"], self.config["mapping_json"],
).get_dataloader(
batch_size=self.config["batch_size"],
num_workers=self.config["num_workers"],
)
return train_loader, val_loader
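# A minimal sketch of the config dictionary these trainers read, based only on the keys
# referenced above. BaseTrainer may require additional keys; the model name and paths are
# placeholders:
#
# config = {
#     "model": "resnet18",
#     "num_classes": 4,
#     "device": "cuda:0",
#     "train_json": "data/train.json",
#     "val_json": "data/val.json",
#     "mapping_json": "data/mapping.json",
#     "batch_size": 32,
#     "num_workers": 4,
# }
# Trainer2D(config).train()   # a train() entry point is assumed to be provided by BaseTrainer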
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .definition_reference import DefinitionReference
class BuildDefinitionReference(DefinitionReference):
"""BuildDefinitionReference.
:param created_date: The date the definition was created.
:type created_date: datetime
:param id: The ID of the referenced definition.
:type id: int
:param name: The name of the referenced definition.
:type name: str
:param path: The folder path of the definition.
:type path: str
:param project: A reference to the project.
:type project: :class:`TeamProjectReference <build.v4_1.models.TeamProjectReference>`
:param queue_status: A value that indicates whether builds can be queued against this definition.
:type queue_status: object
:param revision: The definition revision number.
:type revision: int
:param type: The type of the definition.
:type type: object
:param uri: The definition's URI.
:type uri: str
:param url: The REST URL of the definition.
:type url: str
:param _links:
:type _links: :class:`ReferenceLinks <build.v4_1.models.ReferenceLinks>`
:param authored_by: The author of the definition.
:type authored_by: :class:`IdentityRef <build.v4_1.models.IdentityRef>`
:param draft_of: A reference to the definition that this definition is a draft of, if this is a draft definition.
:type draft_of: :class:`DefinitionReference <build.v4_1.models.DefinitionReference>`
:param drafts: The list of drafts associated with this definition, if this is not a draft definition.
:type drafts: list of :class:`DefinitionReference <build.v4_1.models.DefinitionReference>`
:param latest_build:
:type latest_build: :class:`Build <build.v4_1.models.Build>`
:param latest_completed_build:
:type latest_completed_build: :class:`Build <build.v4_1.models.Build>`
:param metrics:
:type metrics: list of :class:`BuildMetric <build.v4_1.models.BuildMetric>`
:param quality: The quality of the definition document (draft, etc.)
:type quality: object
:param queue: The default queue for builds run against this definition.
:type queue: :class:`AgentPoolQueue <build.v4_1.models.AgentPoolQueue>`
"""
_attribute_map = {
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
'project': {'key': 'project', 'type': 'TeamProjectReference'},
'queue_status': {'key': 'queueStatus', 'type': 'object'},
'revision': {'key': 'revision', 'type': 'int'},
'type': {'key': 'type', 'type': 'object'},
'uri': {'key': 'uri', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'authored_by': {'key': 'authoredBy', 'type': 'IdentityRef'},
'draft_of': {'key': 'draftOf', 'type': 'DefinitionReference'},
'drafts': {'key': 'drafts', 'type': '[DefinitionReference]'},
'latest_build': {'key': 'latestBuild', 'type': 'Build'},
'latest_completed_build': {'key': 'latestCompletedBuild', 'type': 'Build'},
'metrics': {'key': 'metrics', 'type': '[BuildMetric]'},
'quality': {'key': 'quality', 'type': 'object'},
'queue': {'key': 'queue', 'type': 'AgentPoolQueue'}
}
def __init__(self, created_date=None, id=None, name=None, path=None, project=None, queue_status=None, revision=None, type=None, uri=None, url=None, _links=None, authored_by=None, draft_of=None, drafts=None, latest_build=None, latest_completed_build=None, metrics=None, quality=None, queue=None):
super(BuildDefinitionReference, self).__init__(created_date=created_date, id=id, name=name, path=path, project=project, queue_status=queue_status, revision=revision, type=type, uri=uri, url=url)
self._links = _links
self.authored_by = authored_by
self.draft_of = draft_of
self.drafts = drafts
self.latest_build = latest_build
self.latest_completed_build = latest_completed_build
self.metrics = metrics
self.quality = quality
self.queue = queue
|
#!/usr/bin/env python3
# Run inside the project's virtual environment
# -*- encoding: utf-8 -*-
# Script file to finalize score
import os
import shutil
import abjad
# from muda_score.segments import segment_01
# Paths
srcdir = "muda_score/segments"
dstdir = "muda_score/score"
# Runs "segment_0X.py" files (generate segments)
# segment_01
# Copies "segment_0X.ly" files to score folder
for basename in os.listdir(srcdir):
if basename.endswith(".ly"):
pathname = os.path.join(srcdir, basename)
if os.path.isfile(pathname):
shutil.copy2(pathname, dstdir)
# Get the current working directory
cwd = os.getcwd()
# Change the current working directory
os.chdir("./muda_score/score")
# Print the current working directory
print("Current working directory: {0}".format(os.getcwd()))
# Runs LilyPond to compile the score
os.system("lilypond " + "score.ly")
# Opens the PDF
abjad.io.open_file("score.pdf")
|
from hask.lang import Eq
|
# -*- coding: utf-8 -*-
import helpers.boobankCommands as boobank
import helpers.objectTypes as types
from helpers.pretty import pretty_print
from helpers.configure import configure_backends, erase_credentials
from helpers.serializer import serialize
import webbrowser
import os
filename = '/root/src/weboobToFile/files/test.json'
def process_boobank():
bank = dict()
bank['name'] = 'Jean-Mi'
bank['accounts'] = boobank.get_accounts()
for account in bank['accounts']:
account['type'] = types.set_account_type(account['type'])
account.update({'coming_operations': []})
account['coming_operations'] = boobank.get_coming(account['id'])
account.update({'transactions': []})
account['transactions'] = boobank.get_transactions(account['id'])
for transaction in account['transactions']:
transaction['type'] = types.set_transaction_type(transaction['type'])
return bank
def main():
configure_backends()
os.system("ls -l root/.config/weboob/")
os.system("chmod 600 /root/.config/weboob/backends")
os.system("chmod 600 /root/.config/weboob/sources.list")
boobank.weboob_update()
bank = dict()
bank = process_boobank()
if not bank:
bank['Error'] = 'Fail to execute Boobank..'
erase_credentials()
serialize(bank)
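# A sketch of the structure handed to serialize(), based on process_boobank() above.
# Field names inside accounts/transactions come from boobank and are only indicative here:
#
# {
#     "name": "Jean-Mi",
#     "accounts": [
#         {
#             "id": "...",
#             "type": "...",
#             "coming_operations": [...],
#             "transactions": [{"type": "...", ...}, ...]
#         }
#     ]
# }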
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import json
import ctypes
import numpy
import numba
import awkward1.layout
import awkward1._util
import awkward1._connect._numba.arrayview
@numba.extending.typeof_impl.register(awkward1.layout.NumpyArray)
def typeof_NumpyArray(obj, c):
t = numba.typeof(numpy.asarray(obj))
return NumpyArrayType(
numba.types.Array(t.dtype, t.ndim, "A"),
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.RegularArray)
def typeof_RegularArray(obj, c):
return RegularArrayType(
numba.typeof(obj.content),
obj.size,
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.ListArray32)
@numba.extending.typeof_impl.register(awkward1.layout.ListArrayU32)
@numba.extending.typeof_impl.register(awkward1.layout.ListArray64)
@numba.extending.typeof_impl.register(awkward1.layout.ListOffsetArray32)
@numba.extending.typeof_impl.register(awkward1.layout.ListOffsetArrayU32)
@numba.extending.typeof_impl.register(awkward1.layout.ListOffsetArray64)
def typeof_ListArray(obj, c):
return ListArrayType(
numba.typeof(numpy.asarray(obj.starts)),
numba.typeof(obj.content),
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.IndexedArray32)
@numba.extending.typeof_impl.register(awkward1.layout.IndexedArrayU32)
@numba.extending.typeof_impl.register(awkward1.layout.IndexedArray64)
def typeof_IndexedArray(obj, c):
return IndexedArrayType(
numba.typeof(numpy.asarray(obj.index)),
numba.typeof(obj.content),
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.IndexedOptionArray32)
@numba.extending.typeof_impl.register(awkward1.layout.IndexedOptionArray64)
def typeof_IndexedOptionArray(obj, c):
return IndexedOptionArrayType(
numba.typeof(numpy.asarray(obj.index)),
numba.typeof(obj.content),
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.ByteMaskedArray)
def typeof_ByteMaskedArray(obj, c):
return ByteMaskedArrayType(
numba.typeof(numpy.asarray(obj.mask)),
numba.typeof(obj.content),
obj.valid_when,
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.BitMaskedArray)
def typeof_BitMaskedArray(obj, c):
return BitMaskedArrayType(
numba.typeof(numpy.asarray(obj.mask)),
numba.typeof(obj.content),
obj.valid_when,
obj.lsb_order,
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.UnmaskedArray)
def typeof_UnmaskedArray(obj, c):
return UnmaskedArrayType(
numba.typeof(obj.content), numba.typeof(obj.identities), obj.parameters
)
@numba.extending.typeof_impl.register(awkward1.layout.RecordArray)
def typeof_RecordArray(obj, c):
return RecordArrayType(
tuple(numba.typeof(x) for x in obj.contents),
obj.recordlookup,
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.UnionArray8_32)
@numba.extending.typeof_impl.register(awkward1.layout.UnionArray8_U32)
@numba.extending.typeof_impl.register(awkward1.layout.UnionArray8_64)
def typeof_UnionArray(obj, c):
return UnionArrayType(
numba.typeof(numpy.asarray(obj.tags)),
numba.typeof(numpy.asarray(obj.index)),
tuple(numba.typeof(x) for x in obj.contents),
numba.typeof(obj.identities),
obj.parameters,
)
@numba.extending.typeof_impl.register(awkward1.layout.VirtualArray)
def typeof_VirtualArray(obj, c):
if obj.form.form is None:
raise ValueError("VirtualArrays without a known 'form' can't be used in Numba")
if obj.form.has_identities:
raise NotImplementedError("TODO: identities in VirtualArray")
return VirtualArrayType(obj.form.form, numba.none, obj.parameters)
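# A minimal sketch of what these typeof registrations enable: once every layout node has a
# Numba type, awkward1 arrays can be passed into @numba.njit-compiled functions and iterated
# over there. The array contents below are illustrative only:
#
#     import awkward1 as ak
#
#     @numba.njit
#     def sum_first_elements(array):
#         total = 0.0
#         for sublist in array:
#             if len(sublist) > 0:
#                 total += sublist[0]
#         return total
#
#     sum_first_elements(ak.Array([[1.1, 2.2], [], [3.3]]))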
class ContentType(numba.types.Type):
@classmethod
def tolookup_identities(cls, layout, positions, sharedptrs, arrays):
if layout.identities is None:
positions.append(-1)
sharedptrs.append(None)
else:
arrays.append(numpy.asarray(layout.identities))
positions.append(arrays[-1])
sharedptrs.append(None)
@classmethod
def form_tolookup_identities(cls, form, positions, sharedptrs, arrays):
if not form.has_identities:
positions.append(-1)
sharedptrs.append(None)
else:
arrays.append(None)
positions.append(0)
sharedptrs.append(None)
@classmethod
def from_form_identities(cls, form):
if not form.has_identities:
return numba.none
else:
raise NotImplementedError("TODO: identities in VirtualArray")
@classmethod
def from_form_index(cls, index_string):
if index_string == "i8":
return numba.types.Array(numba.int8, 1, "C")
elif index_string == "u8":
return numba.types.Array(numba.uint8, 1, "C")
elif index_string == "i32":
return numba.types.Array(numba.int32, 1, "C")
elif index_string == "u32":
return numba.types.Array(numba.uint32, 1, "C")
elif index_string == "i64":
return numba.types.Array(numba.int64, 1, "C")
else:
raise AssertionError(
"unrecognized Form index type: {0}".format(index_string)
)
def form_fill_identities(self, pos, layout, lookup):
if layout.identities is not None:
lookup.arrayptr[pos + self.IDENTITIES] = numpy.asarray(
layout.identities
).ctypes.data
def IndexOf(self, arraytype):
if arraytype.dtype.bitwidth == 8 and arraytype.dtype.signed:
return awkward1.layout.Index8
elif arraytype.dtype.bitwidth == 8:
return awkward1.layout.IndexU8
elif arraytype.dtype.bitwidth == 32 and arraytype.dtype.signed:
return awkward1.layout.Index32
elif arraytype.dtype.bitwidth == 32:
return awkward1.layout.IndexU32
elif arraytype.dtype.bitwidth == 64 and arraytype.dtype.signed:
return awkward1.layout.Index64
else:
raise AssertionError("no Index* type for array: {0}".format(arraytype))
def getitem_at_check(self, viewtype):
typer = awkward1._util.numba_array_typer(viewtype.type, viewtype.behavior)
if typer is None:
return self.getitem_at(viewtype)
else:
return typer(viewtype)
def getitem_range(self, viewtype):
return awkward1._connect._numba.arrayview.wrap(self, viewtype, None)
def getitem_field(self, viewtype, key):
if self.hasfield(key):
return awkward1._connect._numba.arrayview.wrap(
self, viewtype, viewtype.fields + (key,)
)
else:
raise TypeError(
"array does not have a field with key {0}".format(repr(key))
)
def lower_getitem_at_check(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
lower = awkward1._util.numba_array_lower(viewtype.type, viewtype.behavior)
if lower is not None:
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
return lower(
context, builder, rettype, viewtype, viewval, viewproxy, attype, atval
)
else:
return self.lower_getitem_at(
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
)
def lower_getitem_range(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
start,
stop,
wrapneg,
):
length = builder.sub(viewproxy.stop, viewproxy.start)
regular_start = numba.core.cgutils.alloca_once_value(builder, start)
regular_stop = numba.core.cgutils.alloca_once_value(builder, stop)
if wrapneg:
with builder.if_then(
builder.icmp_signed("<", start, context.get_constant(numba.intp, 0))
):
builder.store(builder.add(start, length), regular_start)
with builder.if_then(
builder.icmp_signed("<", stop, context.get_constant(numba.intp, 0))
):
builder.store(builder.add(stop, length), regular_stop)
with builder.if_then(
builder.icmp_signed(
"<", builder.load(regular_start), context.get_constant(numba.intp, 0)
)
):
builder.store(context.get_constant(numba.intp, 0), regular_start)
with builder.if_then(
builder.icmp_signed(">", builder.load(regular_start), length)
):
builder.store(length, regular_start)
with builder.if_then(
builder.icmp_signed(
"<", builder.load(regular_stop), builder.load(regular_start)
)
):
builder.store(builder.load(regular_start), regular_stop)
with builder.if_then(
builder.icmp_signed(">", builder.load(regular_stop), length)
):
builder.store(length, regular_stop)
proxyout = context.make_helper(builder, rettype)
proxyout.pos = viewproxy.pos
proxyout.start = builder.add(viewproxy.start, builder.load(regular_start))
proxyout.stop = builder.add(viewproxy.start, builder.load(regular_stop))
proxyout.arrayptrs = viewproxy.arrayptrs
proxyout.sharedptrs = viewproxy.sharedptrs
proxyout.pylookup = viewproxy.pylookup
return proxyout._getvalue()
def lower_getitem_field(self, context, builder, viewtype, viewval, key):
return viewval
def posat(context, builder, pos, offset):
return builder.add(pos, context.get_constant(numba.intp, offset))
def getat(context, builder, baseptr, offset, rettype=None):
ptrtype = None
if rettype is not None:
ptrtype = context.get_value_type(numba.types.CPointer(rettype))
bitwidth = rettype.bitwidth
else:
bitwidth = numba.intp.bitwidth
byteoffset = builder.mul(offset, context.get_constant(numba.intp, bitwidth // 8))
return builder.load(
numba.core.cgutils.pointer_add(builder, baseptr, byteoffset, ptrtype)
)
def regularize_atval(context, builder, viewproxy, attype, atval, wrapneg, checkbounds):
atval = awkward1._connect._numba.castint(
context, builder, attype, numba.intp, atval
)
if not attype.signed:
wrapneg = False
if wrapneg or checkbounds:
length = builder.sub(viewproxy.stop, viewproxy.start)
if wrapneg:
regular_atval = numba.core.cgutils.alloca_once_value(builder, atval)
with builder.if_then(
builder.icmp_signed("<", atval, context.get_constant(numba.intp, 0))
):
builder.store(builder.add(atval, length), regular_atval)
atval = builder.load(regular_atval)
if checkbounds:
with builder.if_then(
builder.or_(
builder.icmp_signed(
"<", atval, context.get_constant(numba.intp, 0)
),
builder.icmp_signed(">=", atval, length),
)
):
context.call_conv.return_user_exc(
builder, ValueError, ("slice index out of bounds",)
)
return awkward1._connect._numba.castint(
context, builder, atval.type, numba.intp, atval
)
class NumpyArrayType(ContentType):
IDENTITIES = 0
ARRAY = 1
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
array = numpy.asarray(layout)
assert len(array.shape) == 1
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
positions.append(array)
sharedptrs.append(None)
arrays.append(array)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
if len(form.inner_shape) != 0:
raise NotImplementedError(
"NumpyForm is multidimensional; TODO: convert to RegularForm,"
" just as NumpyArrays are converted to RegularArrays"
)
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
positions.append(0)
sharedptrs.append(None)
arrays.append(0)
return pos
@classmethod
def from_form(cls, form):
if len(form.inner_shape) != 0:
raise NotImplementedError(
"NumpyForm is multidimensional; TODO: convert to RegularForm,"
" just as NumpyArrays are converted to RegularArrays"
)
if form.primitive == "float64":
arraytype = numba.types.Array(numba.float64, 1, "A")
elif form.primitive == "float32":
arraytype = numba.types.Array(numba.float32, 1, "A")
elif form.primitive == "int64":
arraytype = numba.types.Array(numba.int64, 1, "A")
elif form.primitive == "uint64":
arraytype = numba.types.Array(numba.uint64, 1, "A")
elif form.primitive == "int32":
arraytype = numba.types.Array(numba.int32, 1, "A")
elif form.primitive == "uint32":
arraytype = numba.types.Array(numba.uint32, 1, "A")
elif form.primitive == "int16":
arraytype = numba.types.Array(numba.int16, 1, "A")
elif form.primitive == "uint16":
arraytype = numba.types.Array(numba.uint16, 1, "A")
elif form.primitive == "int8":
arraytype = numba.types.Array(numba.int8, 1, "A")
elif form.primitive == "uint8":
arraytype = numba.types.Array(numba.uint8, 1, "A")
elif form.primitive == "bool":
            arraytype = numba.types.Array(numba.boolean, 1, "A")
else:
raise ValueError(
"unrecognized NumpyForm.primitive type: {0}".format(form.primitive)
)
return NumpyArrayType(
arraytype, cls.from_form_identities(form), form.parameters
)
def __init__(self, arraytype, identitiestype, parameters):
super(NumpyArrayType, self).__init__(
name="awkward1.NumpyArrayType({0}, {1}, {2})".format(
arraytype.name, identitiestype.name, json.dumps(parameters)
)
)
self.arraytype = arraytype
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
lookup.original_positions[pos + self.ARRAY] = numpy.asarray(layout)
lookup.arrayptrs[pos + self.ARRAY] = lookup.original_positions[
pos + self.ARRAY
].ctypes.data
def tolayout(self, lookup, pos, fields):
assert fields == ()
return awkward1.layout.NumpyArray(
lookup.original_positions[pos + self.ARRAY], parameters=self.parameters
)
def hasfield(self, key):
return False
def getitem_at(self, viewtype):
return self.arraytype.dtype
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.ARRAY)
arrayptr = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
arraypos = builder.add(viewproxy.start, atval)
return getat(context, builder, arrayptr, arraypos, rettype)
class RegularArrayType(ContentType):
IDENTITIES = 0
CONTENT = 1
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
layout.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
form.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
return RegularArrayType(
awkward1._connect._numba.arrayview.tonumbatype(form.content),
form.size,
cls.from_form_identities(form),
form.parameters,
)
def __init__(self, contenttype, size, identitiestype, parameters):
super(RegularArrayType, self).__init__(
name="awkward1.RegularArrayType({0}, {1}, {2}, {3})".format(
contenttype.name, size, identitiestype.name, json.dumps(parameters)
)
)
self.contenttype = contenttype
self.size = size
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
self.contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENT], layout.content, lookup
)
def tolayout(self, lookup, pos, fields):
content = self.contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENT], fields
)
return awkward1.layout.RegularArray(
content, self.size, parameters=self.parameters
)
def hasfield(self, key):
return self.contenttype.hasfield(key)
def getitem_at(self, viewtype):
return awkward1._connect._numba.arrayview.wrap(self.contenttype, viewtype, None)
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
size = context.get_constant(numba.intp, self.size)
start = builder.mul(builder.add(viewproxy.start, atval), size)
stop = builder.add(start, size)
proxyout = context.make_helper(builder, rettype)
proxyout.pos = nextpos
proxyout.start = start
proxyout.stop = stop
proxyout.arrayptrs = viewproxy.arrayptrs
proxyout.sharedptrs = viewproxy.sharedptrs
proxyout.pylookup = viewproxy.pylookup
return proxyout._getvalue()
class ListArrayType(ContentType):
IDENTITIES = 0
STARTS = 1
STOPS = 2
CONTENT = 3
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
if isinstance(
layout,
(
awkward1.layout.ListArray32,
awkward1.layout.ListArrayU32,
awkward1.layout.ListArray64,
),
):
starts = numpy.asarray(layout.starts)
stops = numpy.asarray(layout.stops)
elif isinstance(
layout,
(
awkward1.layout.ListOffsetArray32,
awkward1.layout.ListOffsetArrayU32,
awkward1.layout.ListOffsetArray64,
),
):
offsets = numpy.asarray(layout.offsets)
starts = offsets[:-1]
stops = offsets[1:]
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
positions.append(starts)
sharedptrs.append(None)
arrays.append(starts)
positions.append(stops)
sharedptrs.append(None)
arrays.append(stops)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
layout.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
positions.append(0)
sharedptrs.append(None)
arrays.append(0)
positions.append(0)
sharedptrs.append(None)
arrays.append(0)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
form.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
return ListArrayType(
cls.from_form_index(
form.starts
if isinstance(form, awkward1.forms.ListForm)
else form.offsets
),
awkward1._connect._numba.arrayview.tonumbatype(form.content),
cls.from_form_identities(form),
form.parameters,
)
def __init__(self, indextype, contenttype, identitiestype, parameters):
super(ListArrayType, self).__init__(
name="awkward1.ListArrayType({0}, {1}, {2}, {3})".format(
indextype.name,
contenttype.name,
identitiestype.name,
json.dumps(parameters),
)
)
self.indextype = indextype
self.contenttype = contenttype
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
if isinstance(
layout,
(
awkward1.layout.ListArray32,
awkward1.layout.ListArrayU32,
awkward1.layout.ListArray64,
),
):
starts = numpy.asarray(layout.starts)
stops = numpy.asarray(layout.stops)
elif isinstance(
layout,
(
awkward1.layout.ListOffsetArray32,
awkward1.layout.ListOffsetArrayU32,
awkward1.layout.ListOffsetArray64,
),
):
offsets = numpy.asarray(layout.offsets)
starts = offsets[:-1]
stops = offsets[1:]
lookup.original_positions[pos + self.STARTS] = starts
lookup.original_positions[pos + self.STOPS] = stops
lookup.arrayptrs[pos + self.STARTS] = starts.ctypes.data
lookup.arrayptrs[pos + self.STOPS] = stops.ctypes.data
self.contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENT], layout.content, lookup
)
def ListArrayOf(self):
if self.indextype.dtype.bitwidth == 32 and self.indextype.dtype.signed:
return awkward1.layout.ListArray32
elif self.indextype.dtype.bitwidth == 32:
return awkward1.layout.ListArrayU32
elif self.indextype.dtype.bitwidth == 64 and self.indextype.dtype.signed:
return awkward1.layout.ListArray64
else:
raise AssertionError(
"no ListArray* type for array: {0}".format(self.indextype)
)
def tolayout(self, lookup, pos, fields):
starts = self.IndexOf(self.indextype)(
lookup.original_positions[pos + self.STARTS]
)
stops = self.IndexOf(self.indextype)(
lookup.original_positions[pos + self.STOPS]
)
content = self.contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENT], fields
)
return self.ListArrayOf()(starts, stops, content, parameters=self.parameters)
def hasfield(self, key):
return self.contenttype.hasfield(key)
def getitem_at(self, viewtype):
return awkward1._connect._numba.arrayview.wrap(self.contenttype, viewtype, None)
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
startspos = posat(context, builder, viewproxy.pos, self.STARTS)
startsptr = getat(context, builder, viewproxy.arrayptrs, startspos)
startsarraypos = builder.add(viewproxy.start, atval)
start = getat(context, builder, startsptr, startsarraypos, self.indextype.dtype)
stopspos = posat(context, builder, viewproxy.pos, self.STOPS)
stopsptr = getat(context, builder, viewproxy.arrayptrs, stopspos)
stopsarraypos = builder.add(viewproxy.start, atval)
stop = getat(context, builder, stopsptr, stopsarraypos, self.indextype.dtype)
proxyout = context.make_helper(builder, rettype)
proxyout.pos = nextpos
proxyout.start = awkward1._connect._numba.castint(
context, builder, self.indextype.dtype, numba.intp, start
)
proxyout.stop = awkward1._connect._numba.castint(
context, builder, self.indextype.dtype, numba.intp, stop
)
proxyout.arrayptrs = viewproxy.arrayptrs
proxyout.sharedptrs = viewproxy.sharedptrs
proxyout.pylookup = viewproxy.pylookup
return proxyout._getvalue()
class IndexedArrayType(ContentType):
IDENTITIES = 0
INDEX = 1
CONTENT = 2
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
arrays.append(numpy.asarray(layout.index))
positions.append(arrays[-1])
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
layout.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
arrays.append(0)
positions.append(0)
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
form.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
return IndexedArrayType(
cls.from_form_index(form.index),
awkward1._connect._numba.arrayview.tonumbatype(form.content),
cls.from_form_identities(form),
form.parameters,
)
def __init__(self, indextype, contenttype, identitiestype, parameters):
super(IndexedArrayType, self).__init__(
name="awkward1.IndexedArrayType({0}, {1}, {2}, {3})".format(
indextype.name,
contenttype.name,
identitiestype.name,
json.dumps(parameters),
)
)
self.indextype = indextype
self.contenttype = contenttype
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
index = numpy.asarray(layout.index)
lookup.original_positions[pos + self.INDEX] = index
lookup.arrayptrs[pos + self.INDEX] = index.ctypes.data
self.contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENT], layout.content, lookup
)
def IndexedArrayOf(self):
if self.indextype.dtype.bitwidth == 32 and self.indextype.dtype.signed:
return awkward1.layout.IndexedArray32
elif self.indextype.dtype.bitwidth == 32:
return awkward1.layout.IndexedArrayU32
elif self.indextype.dtype.bitwidth == 64 and self.indextype.dtype.signed:
return awkward1.layout.IndexedArray64
else:
raise AssertionError(
"no IndexedArray* type for array: {0}".format(self.indextype)
)
def tolayout(self, lookup, pos, fields):
index = self.IndexOf(self.indextype)(
lookup.original_positions[pos + self.INDEX]
)
content = self.contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENT], fields
)
return self.IndexedArrayOf()(index, content, parameters=self.parameters)
def hasfield(self, key):
return self.contenttype.hasfield(key)
def getitem_at(self, viewtype):
return self.contenttype.getitem_at_check(viewtype)
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
indexpos = posat(context, builder, viewproxy.pos, self.INDEX)
indexptr = getat(context, builder, viewproxy.arrayptrs, indexpos)
indexarraypos = builder.add(viewproxy.start, atval)
nextat = getat(context, builder, indexptr, indexarraypos, self.indextype.dtype)
nextviewtype = awkward1._connect._numba.arrayview.wrap(
self.contenttype, viewtype, None
)
proxynext = context.make_helper(builder, nextviewtype)
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = builder.add(
awkward1._connect._numba.castint(
context, builder, self.indextype.dtype, numba.intp, nextat
),
builder.add(viewproxy.start, context.get_constant(numba.intp, 1)),
)
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
return self.contenttype.lower_getitem_at_check(
context,
builder,
rettype,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
nextat,
False,
False,
)
class IndexedOptionArrayType(ContentType):
IDENTITIES = 0
INDEX = 1
CONTENT = 2
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
arrays.append(numpy.asarray(layout.index))
positions.append(arrays[-1])
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
layout.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
arrays.append(0)
positions.append(0)
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
form.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
return IndexedOptionArrayType(
cls.from_form_index(form.index),
awkward1._connect._numba.arrayview.tonumbatype(form.content),
cls.from_form_identities(form),
form.parameters,
)
def __init__(self, indextype, contenttype, identitiestype, parameters):
super(IndexedOptionArrayType, self).__init__(
name="awkward1.IndexedOptionArrayType({0}, {1}, {2}, {3})".format(
indextype.name,
contenttype.name,
identitiestype.name,
json.dumps(parameters),
)
)
self.indextype = indextype
self.contenttype = contenttype
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
index = numpy.asarray(layout.index)
lookup.original_positions[pos + self.INDEX] = index
lookup.arrayptrs[pos + self.INDEX] = index.ctypes.data
self.contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENT], layout.content, lookup
)
def IndexedOptionArrayOf(self):
if self.indextype.dtype.bitwidth == 32 and self.indextype.dtype.signed:
return awkward1.layout.IndexedOptionArray32
elif self.indextype.dtype.bitwidth == 64 and self.indextype.dtype.signed:
return awkward1.layout.IndexedOptionArray64
else:
raise AssertionError(
"no IndexedOptionArray* type for array: {0}".format(self.indextype)
)
def tolayout(self, lookup, pos, fields):
index = self.IndexOf(self.indextype)(
lookup.original_positions[pos + self.INDEX]
)
content = self.contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENT], fields
)
return self.IndexedOptionArrayOf()(index, content, parameters=self.parameters)
def hasfield(self, key):
return self.contenttype.hasfield(key)
def getitem_at(self, viewtype):
return numba.types.optional(self.contenttype.getitem_at_check(viewtype))
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
indexpos = posat(context, builder, viewproxy.pos, self.INDEX)
indexptr = getat(context, builder, viewproxy.arrayptrs, indexpos)
indexarraypos = builder.add(viewproxy.start, atval)
nextat = getat(context, builder, indexptr, indexarraypos, self.indextype.dtype)
output = context.make_helper(builder, rettype)
with builder.if_else(
builder.icmp_signed(
"<", nextat, context.get_constant(self.indextype.dtype, 0)
)
) as (isnone, isvalid):
with isnone:
output.valid = numba.core.cgutils.false_bit
output.data = numba.core.cgutils.get_null_value(output.data.type)
with isvalid:
nextviewtype = awkward1._connect._numba.arrayview.wrap(
self.contenttype, viewtype, None
)
proxynext = context.make_helper(builder, nextviewtype)
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = builder.add(
awkward1._connect._numba.castint(
context, builder, self.indextype.dtype, numba.intp, nextat
),
builder.add(viewproxy.start, context.get_constant(numba.intp, 1)),
)
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
outdata = self.contenttype.lower_getitem_at_check(
context,
builder,
rettype.type,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
nextat,
False,
False,
)
output.valid = numba.core.cgutils.true_bit
output.data = outdata
return output._getvalue()
class ByteMaskedArrayType(ContentType):
IDENTITIES = 0
MASK = 1
CONTENT = 2
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
arrays.append(numpy.asarray(layout.mask))
positions.append(arrays[-1])
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
layout.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
arrays.append(0)
positions.append(0)
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
form.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
return ByteMaskedArrayType(
cls.from_form_index(form.mask),
awkward1._connect._numba.arrayview.tonumbatype(form.content),
form.valid_when,
cls.from_form_identities(form),
form.parameters,
)
def __init__(self, masktype, contenttype, valid_when, identitiestype, parameters):
super(ByteMaskedArrayType, self).__init__(
name="awkward1.ByteMaskedArrayType({0}, {1}, {2}, {3}, "
"{4})".format(
masktype.name,
contenttype.name,
valid_when,
identitiestype.name,
json.dumps(parameters),
)
)
self.masktype = masktype
self.contenttype = contenttype
self.valid_when = valid_when
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
mask = numpy.asarray(layout.mask)
lookup.original_positions[pos + self.MASK] = mask
lookup.arrayptrs[pos + self.MASK] = mask.ctypes.data
self.contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENT], layout.content, lookup
)
def tolayout(self, lookup, pos, fields):
mask = self.IndexOf(self.masktype)(lookup.original_positions[pos + self.MASK])
content = self.contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENT], fields
)
return awkward1.layout.ByteMaskedArray(
mask, content, self.valid_when, parameters=self.parameters
)
def hasfield(self, key):
return self.contenttype.hasfield(key)
def getitem_at(self, viewtype):
return numba.types.optional(self.contenttype.getitem_at_check(viewtype))
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
maskpos = posat(context, builder, viewproxy.pos, self.MASK)
maskptr = getat(context, builder, viewproxy.arrayptrs, maskpos)
maskarraypos = builder.add(viewproxy.start, atval)
byte = getat(context, builder, maskptr, maskarraypos, self.masktype.dtype)
output = context.make_helper(builder, rettype)
with builder.if_else(
builder.icmp_signed(
"==",
builder.icmp_signed("!=", byte, context.get_constant(numba.int8, 0)),
context.get_constant(numba.int8, int(self.valid_when)),
)
) as (isvalid, isnone):
with isvalid:
nextviewtype = awkward1._connect._numba.arrayview.wrap(
self.contenttype, viewtype, None
)
proxynext = context.make_helper(builder, nextviewtype)
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = viewproxy.stop
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
outdata = self.contenttype.lower_getitem_at_check(
context,
builder,
rettype.type,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
atval,
False,
False,
)
output.valid = numba.core.cgutils.true_bit
output.data = outdata
with isnone:
output.valid = numba.core.cgutils.false_bit
output.data = numba.core.cgutils.get_null_value(output.data.type)
return output._getvalue()
class BitMaskedArrayType(ContentType):
IDENTITIES = 0
MASK = 1
CONTENT = 2
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
arrays.append(numpy.asarray(layout.mask))
positions.append(arrays[-1])
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
layout.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
arrays.append(0)
positions.append(0)
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
form.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
return BitMaskedArrayType(
cls.from_form_index(form.mask),
awkward1._connect._numba.arrayview.tonumbatype(form.content),
form.valid_when,
form.lsb_order,
cls.from_form_identities(form),
form.parameters,
)
def __init__(
self, masktype, contenttype, valid_when, lsb_order, identitiestype, parameters
):
super(BitMaskedArrayType, self).__init__(
name="awkward1.BitMaskedArrayType({0}, {1}, {2}, {3}, {4}, "
"{5})".format(
masktype.name,
contenttype.name,
valid_when,
lsb_order,
identitiestype.name,
json.dumps(parameters),
)
)
self.masktype = masktype
self.contenttype = contenttype
self.valid_when = valid_when
self.lsb_order = lsb_order
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
mask = numpy.asarray(layout.mask)
lookup.original_positions[pos + self.MASK] = mask
lookup.arrayptrs[pos + self.MASK] = mask.ctypes.data
self.contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENT], layout.content, lookup
)
def tolayout(self, lookup, pos, fields):
mask = self.IndexOf(self.masktype)(lookup.original_positions[pos + self.MASK])
content = self.contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENT], fields
)
return awkward1.layout.BitMaskedArray(
mask,
content,
self.valid_when,
len(content),
self.lsb_order,
parameters=self.parameters,
)
def hasfield(self, key):
return self.contenttype.hasfield(key)
def getitem_at(self, viewtype):
return numba.types.optional(self.contenttype.getitem_at_check(viewtype))
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
bitatval = builder.sdiv(atval, context.get_constant(numba.intp, 8))
shiftval = awkward1._connect._numba.castint(
context,
builder,
numba.intp,
numba.uint8,
builder.srem(atval, context.get_constant(numba.intp, 8)),
)
maskpos = posat(context, builder, viewproxy.pos, self.MASK)
maskptr = getat(context, builder, viewproxy.arrayptrs, maskpos)
maskarraypos = builder.add(viewproxy.start, bitatval)
byte = getat(context, builder, maskptr, maskarraypos, self.masktype.dtype)
if self.lsb_order:
# ((byte >> ((uint8_t)shift)) & ((uint8_t)1))
asbool = builder.and_(
builder.lshr(byte, shiftval), context.get_constant(numba.uint8, 1)
)
else:
# ((byte << ((uint8_t)shift)) & ((uint8_t)128))
asbool = builder.and_(
builder.shl(byte, shiftval), context.get_constant(numba.uint8, 128)
)
output = context.make_helper(builder, rettype)
with builder.if_else(
builder.icmp_signed(
"==",
builder.icmp_signed("!=", asbool, context.get_constant(numba.uint8, 0)),
context.get_constant(numba.uint8, int(self.valid_when)),
)
) as (isvalid, isnone):
with isvalid:
nextviewtype = awkward1._connect._numba.arrayview.wrap(
self.contenttype, viewtype, None
)
proxynext = context.make_helper(builder, nextviewtype)
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = viewproxy.stop
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
outdata = self.contenttype.lower_getitem_at_check(
context,
builder,
rettype.type,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
atval,
False,
False,
)
output.valid = numba.core.cgutils.true_bit
output.data = outdata
with isnone:
output.valid = numba.core.cgutils.false_bit
output.data = numba.core.cgutils.get_null_value(output.data.type)
return output._getvalue()
class UnmaskedArrayType(ContentType):
IDENTITIES = 0
CONTENT = 1
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
layout.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.CONTENT] = awkward1._connect._numba.arrayview.tolookup(
form.content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
return UnmaskedArrayType(
awkward1._connect._numba.arrayview.tonumbatype(form.content),
cls.from_form_identities(form),
form.parameters,
)
def __init__(self, contenttype, identitiestype, parameters):
super(UnmaskedArrayType, self).__init__(
name="awkward1.UnmaskedArrayType({0}, {1}, {2})".format(
contenttype.name, identitiestype.name, json.dumps(parameters)
)
)
self.contenttype = contenttype
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
self.contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENT], layout.content, lookup
)
def tolayout(self, lookup, pos, fields):
content = self.contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENT], fields
)
return awkward1.layout.UnmaskedArray(content, parameters=self.parameters)
def hasfield(self, key):
return self.contenttype.hasfield(key)
def getitem_at(self, viewtype):
return numba.types.optional(self.contenttype.getitem_at_check(viewtype))
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
whichpos = posat(context, builder, viewproxy.pos, self.CONTENT)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
output = context.make_helper(builder, rettype)
nextviewtype = awkward1._connect._numba.arrayview.wrap(
self.contenttype, viewtype, None
)
proxynext = context.make_helper(builder, nextviewtype)
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = viewproxy.stop
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
outdata = self.contenttype.lower_getitem_at_check(
context,
builder,
rettype.type,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
atval,
False,
False,
)
output.valid = numba.core.cgutils.true_bit
output.data = outdata
return output._getvalue()
class RecordArrayType(ContentType):
IDENTITIES = 0
CONTENTS = 1
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
positions.extend([None] * layout.numfields)
sharedptrs.extend([None] * layout.numfields)
for i, content in enumerate(layout.contents):
positions[
pos + cls.CONTENTS + i
] = awkward1._connect._numba.arrayview.tolookup(
content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
positions.extend([None] * form.numfields)
sharedptrs.extend([None] * form.numfields)
if form.istuple:
for i, (n, content) in enumerate(form.contents.items()):
positions[
pos + cls.CONTENTS + i
] = awkward1._connect._numba.arrayview.tolookup(
content, positions, sharedptrs, arrays
)
else:
for i, (n, content) in enumerate(form.contents.items()):
positions[
pos + cls.CONTENTS + i
] = awkward1._connect._numba.arrayview.tolookup(
content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
contents = []
if form.istuple:
recordlookup = None
for n, x in form.contents.items():
contents.append(awkward1._connect._numba.arrayview.tonumbatype(x))
else:
recordlookup = []
for n, x in form.contents.items():
contents.append(awkward1._connect._numba.arrayview.tonumbatype(x))
recordlookup.append(n)
return RecordArrayType(
contents, recordlookup, cls.from_form_identities(form), form.parameters
)
def __init__(self, contenttypes, recordlookup, identitiestype, parameters):
super(RecordArrayType, self).__init__(
name="awkward1.RecordArrayType(({0}{1}), ({2}), {3}, {4})".format(
", ".join(x.name for x in contenttypes),
"," if len(contenttypes) == 1 else "",
"None" if recordlookup is None else repr(tuple(recordlookup)),
identitiestype.name,
json.dumps(parameters),
)
)
self.contenttypes = contenttypes
self.recordlookup = recordlookup
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
for i, contenttype in enumerate(self.contenttypes):
contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENTS + i], layout.field(i), lookup
)
def fieldindex(self, key):
out = -1
if self.recordlookup is not None:
for i, x in enumerate(self.recordlookup):
if x == key:
out = i
break
if out == -1:
try:
out = int(key)
except ValueError:
return None
if not 0 <= out < len(self.contenttypes):
return None
return out
def tolayout(self, lookup, pos, fields):
if len(fields) > 0:
index = self.fieldindex(fields[0])
assert index is not None
return self.contenttypes[index].tolayout(
lookup, lookup.positions[pos + self.CONTENTS + index], fields[1:]
)
else:
contents = []
for i, contenttype in enumerate(self.contenttypes):
layout = contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENTS + i], fields
)
contents.append(layout)
if len(contents) == 0:
return awkward1.layout.RecordArray(
contents,
self.recordlookup,
numpy.iinfo(numpy.int64).max,
parameters=self.parameters,
)
else:
return awkward1.layout.RecordArray(
contents, self.recordlookup, parameters=self.parameters
)
def hasfield(self, key):
return self.fieldindex(key) is not None
def getitem_at_check(self, viewtype):
out = self.getitem_at(viewtype)
if isinstance(out, awkward1._connect._numba.arrayview.RecordViewType):
typer = awkward1._util.numba_record_typer(
out.arrayviewtype.type, out.arrayviewtype.behavior
)
if typer is not None:
return typer(out)
return out
def getitem_at(self, viewtype):
if len(viewtype.fields) == 0:
return awkward1._connect._numba.arrayview.RecordViewType(viewtype)
else:
key = viewtype.fields[0]
index = self.fieldindex(key)
if index is None:
if self.recordlookup is None:
raise ValueError(
"no field {0} in tuples with {1} fields".format(
repr(key), len(self.contenttypes)
)
)
else:
raise ValueError(
"no field {0} in records with "
"fields: [{1}]".format(
repr(key), ", ".join(repr(x) for x in self.recordlookup)
)
)
contenttype = self.contenttypes[index]
subviewtype = awkward1._connect._numba.arrayview.wrap(
contenttype, viewtype, viewtype.fields[1:]
)
return contenttype.getitem_at_check(subviewtype)
def getitem_field(self, viewtype, key):
index = self.fieldindex(key)
if index is None:
if self.recordlookup is None:
raise ValueError(
"no field {0} in tuples with {1} fields".format(
repr(key), len(self.contenttypes)
)
)
else:
raise ValueError(
"no field {0} in records with fields: [{1}]".format(
repr(key), ", ".join(repr(x) for x in self.recordlookup)
)
)
contenttype = self.contenttypes[index]
subviewtype = awkward1._connect._numba.arrayview.wrap(
contenttype, viewtype, None
)
return contenttype.getitem_range(subviewtype)
def getitem_field_record(self, recordviewtype, key):
index = self.fieldindex(key)
if index is None:
if self.recordlookup is None:
raise ValueError(
"no field {0} in tuple with {1} fields".format(
repr(key), len(self.contenttypes)
)
)
else:
raise ValueError(
"no field {0} in record with fields: [{1}]".format(
repr(key), ", ".join(repr(x) for x in self.recordlookup)
)
)
contenttype = self.contenttypes[index]
subviewtype = awkward1._connect._numba.arrayview.wrap(
contenttype, recordviewtype, None
)
return contenttype.getitem_at_check(subviewtype)
def lower_getitem_at_check(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
out = self.lower_getitem_at(
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
)
baretype = self.getitem_at(viewtype)
if isinstance(baretype, awkward1._connect._numba.arrayview.RecordViewType):
lower = awkward1._util.numba_record_lower(
baretype.arrayviewtype.type, baretype.arrayviewtype.behavior
)
if lower is not None:
return lower(context, builder, rettype(baretype), (out,))
return out
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
atval = regularize_atval(
context, builder, viewproxy, attype, atval, wrapneg, checkbounds
)
if len(viewtype.fields) == 0:
proxyout = context.make_helper(
builder, awkward1._connect._numba.arrayview.RecordViewType(viewtype)
)
proxyout.arrayview = viewval
proxyout.at = atval
return proxyout._getvalue()
else:
index = self.fieldindex(viewtype.fields[0])
contenttype = self.contenttypes[index]
whichpos = posat(context, builder, viewproxy.pos, self.CONTENTS + index)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
nextviewtype = awkward1._connect._numba.arrayview.wrap(
contenttype, viewtype, viewtype.fields[1:]
)
proxynext = context.make_helper(builder, nextviewtype)
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = builder.add(
atval, builder.add(viewproxy.start, context.get_constant(numba.intp, 1))
)
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
return contenttype.lower_getitem_at_check(
context,
builder,
rettype,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
atval,
False,
False,
)
def lower_getitem_field(self, context, builder, viewtype, viewval, key):
viewproxy = context.make_helper(builder, viewtype, viewval)
index = self.fieldindex(key)
contenttype = self.contenttypes[index]
whichpos = posat(context, builder, viewproxy.pos, self.CONTENTS + index)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
proxynext = context.make_helper(builder, contenttype.getitem_range(viewtype))
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = viewproxy.stop
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
return proxynext._getvalue()
def lower_getitem_field_record(
self, context, builder, recordviewtype, recordviewval, key
):
arrayviewtype = recordviewtype.arrayviewtype
recordviewproxy = context.make_helper(builder, recordviewtype, recordviewval)
arrayviewval = recordviewproxy.arrayview
arrayviewproxy = context.make_helper(builder, arrayviewtype, arrayviewval)
index = self.fieldindex(key)
contenttype = self.contenttypes[index]
whichpos = posat(context, builder, arrayviewproxy.pos, self.CONTENTS + index)
nextpos = getat(context, builder, arrayviewproxy.arrayptrs, whichpos)
proxynext = context.make_helper(
builder, contenttype.getitem_range(arrayviewtype)
)
proxynext.pos = nextpos
proxynext.start = arrayviewproxy.start
proxynext.stop = builder.add(
recordviewproxy.at,
builder.add(arrayviewproxy.start, context.get_constant(numba.intp, 1)),
)
proxynext.arrayptrs = arrayviewproxy.arrayptrs
proxynext.sharedptrs = arrayviewproxy.sharedptrs
proxynext.pylookup = arrayviewproxy.pylookup
nextviewtype = awkward1._connect._numba.arrayview.wrap(
contenttype, arrayviewtype, None
)
rettype = self.getitem_field_record(recordviewtype, key)
return contenttype.lower_getitem_at_check(
context,
builder,
rettype,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
recordviewproxy.at,
False,
False,
)
class UnionArrayType(ContentType):
IDENTITIES = 0
TAGS = 1
INDEX = 2
CONTENTS = 3
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
arrays.append(numpy.asarray(layout.tags))
positions.append(arrays[-1])
sharedptrs.append(None)
arrays.append(numpy.asarray(layout.index))
positions.append(arrays[-1])
sharedptrs.append(None)
positions.extend([None] * layout.numcontents)
sharedptrs.extend([None] * layout.numcontents)
for i, content in enumerate(layout.contents):
positions[
pos + cls.CONTENTS + i
] = awkward1._connect._numba.arrayview.tolookup(
content, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
arrays.append(0)
positions.append(0)
sharedptrs.append(None)
arrays.append(0)
positions.append(0)
sharedptrs.append(None)
positions.extend([None] * form.numcontents)
sharedptrs.extend([None] * form.numcontents)
for i, content in enumerate(form.contents):
positions[
pos + cls.CONTENTS + i
] = awkward1._connect._numba.arrayview.tolookup(
content, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
contents = []
for x in form.contents:
contents.append(awkward1._connect._numba.arrayview.tonumbatype(x))
return UnionArrayType(
cls.from_form_index(form.tags),
cls.from_form_index(form.index),
contents,
cls.from_form_identities(form),
form.parameters,
)
def __init__(self, tagstype, indextype, contenttypes, identitiestype, parameters):
super(UnionArrayType, self).__init__(
name="awkward1.UnionArrayType({0}, {1}, ({2}{3}), {4}, "
"{5})".format(
tagstype.name,
indextype.name,
", ".join(x.name for x in contenttypes),
"," if len(contenttypes) == 1 else "",
identitiestype.name,
json.dumps(parameters),
)
)
self.tagstype = tagstype
self.indextype = indextype
self.contenttypes = contenttypes
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
tags = numpy.asarray(layout.tags)
lookup.original_positions[pos + self.TAGS] = tags
lookup.arrayptrs[pos + self.TAGS] = tags.ctypes.data
index = numpy.asarray(layout.index)
lookup.original_positions[pos + self.INDEX] = index
lookup.arrayptrs[pos + self.INDEX] = index.ctypes.data
for i, contenttype in enumerate(self.contenttypes):
contenttype.form_fill(
lookup.arrayptrs[pos + self.CONTENTS + i], layout.content(i), lookup
)
def UnionArrayOf(self):
if self.tagstype.dtype.bitwidth == 8 and self.tagstype.dtype.signed:
if self.indextype.dtype.bitwidth == 32 and self.indextype.dtype.signed:
return awkward1.layout.UnionArray8_32
elif self.indextype.dtype.bitwidth == 32:
return awkward1.layout.UnionArray8_U32
elif self.indextype.dtype.bitwidth == 64 and self.indextype.dtype.signed:
return awkward1.layout.UnionArray8_64
else:
raise AssertionError(
"no UnionArray* type for index array: {0}".format(self.indextype)
)
else:
raise AssertionError(
"no UnionArray* type for tags array: {0}".format(self.tagstype)
)
def tolayout(self, lookup, pos, fields):
tags = self.IndexOf(self.tagstype)(lookup.original_positions[pos + self.TAGS])
index = self.IndexOf(self.indextype)(
lookup.original_positions[pos + self.INDEX]
)
contents = []
for i, contenttype in enumerate(self.contenttypes):
layout = contenttype.tolayout(
lookup, lookup.positions[pos + self.CONTENTS + i], fields
)
contents.append(layout)
return self.UnionArrayOf()(tags, index, contents, parameters=self.parameters)
def hasfield(self, key):
return any(x.hasfield(key) for x in self.contenttypes)
def getitem_at(self, viewtype):
if not all(isinstance(x, RecordArrayType) for x in self.contenttypes):
raise TypeError("union types cannot be accessed in Numba")
def getitem_range(self, viewtype):
if not all(isinstance(x, RecordArrayType) for x in self.contenttypes):
raise TypeError("union types cannot be accessed in Numba")
def getitem_field(self, viewtype, key):
if not all(isinstance(x, RecordArrayType) for x in self.contenttypes):
raise TypeError("union types cannot be accessed in Numba")
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
raise NotImplementedError(
type(self).__name__ + ".lower_getitem_at not implemented"
)
def lower_getitem_range(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
start,
stop,
wrapneg,
):
raise NotImplementedError(
type(self).__name__ + ".lower_getitem_range not implemented"
)
def lower_getitem_field(self, context, builder, viewtype, viewval, viewproxy, key):
raise NotImplementedError(
type(self).__name__ + ".lower_getitem_field not implemented"
)
class VirtualArrayType(ContentType):
IDENTITIES = 0
PYOBJECT = 1
ARRAY = 2
@classmethod
def tolookup(cls, layout, positions, sharedptrs, arrays):
pos = len(positions)
cls.tolookup_identities(layout, positions, sharedptrs, arrays)
sharedptrs[-1] = layout._persistent_shared_ptr
if layout.form is None:
raise ValueError(
"VirtualArrays without a known 'form' can't be used in Numba"
)
pyptr = ctypes.py_object(layout)
ctypes.pythonapi.Py_IncRef(pyptr)
voidptr = numpy.frombuffer(pyptr, dtype=numpy.intp).item()
positions.append(voidptr)
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.ARRAY] = awkward1._connect._numba.arrayview.tolookup(
layout.form.form, positions, sharedptrs, arrays
)
return pos
@classmethod
def form_tolookup(cls, form, positions, sharedptrs, arrays):
pos = len(positions)
cls.form_tolookup_identities(form, positions, sharedptrs, arrays)
sharedptrs[-1] = 0
if form.form is None:
raise ValueError(
"VirtualArrays without a known 'form' can't be used in Numba"
)
positions.append(0)
sharedptrs.append(None)
positions.append(None)
sharedptrs.append(None)
positions[pos + cls.ARRAY] = awkward1._connect._numba.arrayview.tolookup(
form.form, positions, sharedptrs, arrays
)
return pos
@classmethod
def from_form(cls, form):
if form.form is None:
raise ValueError(
"VirtualArrays without a known 'form' can't be used in Numba "
"(including nested)"
)
return VirtualArrayType(
form.form, cls.from_form_identities(form), form.parameters
)
def __init__(self, generator_form, identitiestype, parameters):
if generator_form is None:
raise ValueError(
"VirtualArrays without a known 'form' can't be used in Numba"
)
super(VirtualArrayType, self).__init__(
name="awkward1.VirtualArrayType({0}, {1}, {2})".format(
generator_form.tojson(), identitiestype.name, json.dumps(parameters)
)
)
self.generator_form = generator_form
self.identitiestype = identitiestype
self.parameters = parameters
def form_fill(self, pos, layout, lookup):
lookup.sharedptrs_hold[pos] = layout._persistent_shared_ptr
lookup.sharedptrs[pos] = lookup.sharedptrs_hold[pos].ptr()
self.form_fill_identities(pos, layout, lookup)
pyptr = ctypes.py_object(layout)
ctypes.pythonapi.Py_IncRef(pyptr)
voidptr = numpy.frombuffer(pyptr, dtype=numpy.intp).item()
lookup.original_positions[pos + self.PYOBJECT] = voidptr
lookup.arrayptrs[pos + self.PYOBJECT] = voidptr
def tolayout(self, lookup, pos, fields):
voidptr = ctypes.c_void_p(int(lookup.arrayptrs[pos + self.PYOBJECT]))
pyptr = ctypes.cast(voidptr, ctypes.py_object)
ctypes.pythonapi.Py_IncRef(pyptr)
virtualarray = pyptr.value
return virtualarray
def hasfield(self, key):
return self.generator_form.haskey(key)
def getitem_at(self, viewtype):
def getitem_at(form):
if isinstance(form, awkward1.forms.NumpyForm):
assert len(form.inner_shape) == 0
if form.primitive == "float64":
return numba.float64
elif form.primitive == "float32":
return numba.float32
elif form.primitive == "int64":
return numba.int64
elif form.primitive == "uint64":
return numba.uint64
elif form.primitive == "int32":
return numba.int32
elif form.primitive == "uint32":
return numba.uint32
elif form.primitive == "int16":
return numba.int16
elif form.primitive == "uint16":
return numba.uint16
elif form.primitive == "int8":
return numba.int8
elif form.primitive == "uint8":
return numba.uint8
elif form.primitive == "bool":
                    return numba.boolean
else:
raise ValueError(
"unrecognized NumpyForm.primitive type: {0}".format(
form.primitive
)
)
elif isinstance(
form,
(
awkward1.forms.RegularForm,
awkward1.forms.ListForm,
awkward1.forms.ListOffsetForm,
),
):
return form.content
elif isinstance(form, awkward1.forms.IndexedForm):
return getitem_at(form.content)
elif isinstance(
form,
(
awkward1.forms.IndexedOptionForm,
awkward1.forms.ByteMaskedForm,
awkward1.forms.BitMaskedForm,
awkward1.forms.UnmaskedForm,
),
):
return numba.types.optional(wrap(getitem_at(form.content)))
elif isinstance(form, awkward1.forms.RecordForm):
arrayview = wrap(form)
return arrayview.type.getitem_at(arrayview)
elif isinstance(form, awkward1.forms.UnionForm):
raise TypeError("union types cannot be accessed in Numba")
else:
raise AssertionError("unrecognized Form type: {0}".format(type(form)))
def wrap(out):
if isinstance(out, awkward1.forms.Form):
numbatype = awkward1._connect._numba.arrayview.tonumbatype(out)
return awkward1._connect._numba.arrayview.wrap(
numbatype, viewtype, None
)
else:
return out
return wrap(getitem_at(self.generator_form))
def lower_getitem_at(
self,
context,
builder,
rettype,
viewtype,
viewval,
viewproxy,
attype,
atval,
wrapneg,
checkbounds,
):
pyobjptr = getat(
context,
builder,
viewproxy.arrayptrs,
posat(context, builder, viewproxy.pos, self.PYOBJECT),
)
arraypos = getat(
context,
builder,
viewproxy.arrayptrs,
posat(context, builder, viewproxy.pos, self.ARRAY),
)
sharedptr = getat(context, builder, viewproxy.sharedptrs, arraypos)
numbatype = awkward1._connect._numba.arrayview.tonumbatype(self.generator_form)
with builder.if_then(
builder.icmp_signed("==", sharedptr, context.get_constant(numba.intp, 0)),
likely=False,
):
# only rarely enter Python
pyapi = context.get_python_api(builder)
gil = pyapi.gil_ensure()
# borrowed references
virtualarray_obj = builder.inttoptr(
pyobjptr, context.get_value_type(numba.types.pyobject)
)
lookup_obj = viewproxy.pylookup
# new references
numbatype_obj = pyapi.unserialize(pyapi.serialize_object(numbatype))
fill_obj = pyapi.object_getattr_string(numbatype_obj, "form_fill")
arraypos_obj = pyapi.long_from_ssize_t(arraypos)
array_obj = pyapi.object_getattr_string(virtualarray_obj, "array")
with builder.if_then(
builder.icmp_signed(
"!=",
pyapi.err_occurred(),
context.get_constant(numba.types.voidptr, 0),
),
likely=False,
):
context.call_conv.return_exc(builder)
# add the materialized array to our Lookup
pyapi.call_function_objargs(
fill_obj, (arraypos_obj, array_obj, lookup_obj,)
)
with builder.if_then(
builder.icmp_signed(
"!=",
pyapi.err_occurred(),
context.get_constant(numba.types.voidptr, 0),
),
likely=False,
):
context.call_conv.return_exc(builder)
# decref the new references
pyapi.decref(array_obj)
pyapi.decref(arraypos_obj)
pyapi.decref(fill_obj)
pyapi.decref(numbatype_obj)
pyapi.gil_release(gil)
# normally, we just pass on the request to the materialized array
whichpos = posat(context, builder, viewproxy.pos, self.ARRAY)
nextpos = getat(context, builder, viewproxy.arrayptrs, whichpos)
nextviewtype = awkward1._connect._numba.arrayview.wrap(
numbatype, viewtype, None
)
proxynext = context.make_helper(builder, nextviewtype)
proxynext.pos = nextpos
proxynext.start = viewproxy.start
proxynext.stop = viewproxy.stop
proxynext.arrayptrs = viewproxy.arrayptrs
proxynext.sharedptrs = viewproxy.sharedptrs
proxynext.pylookup = viewproxy.pylookup
return numbatype.lower_getitem_at_check(
context,
builder,
rettype,
nextviewtype,
proxynext._getvalue(),
proxynext,
numba.intp,
atval,
wrapneg,
checkbounds,
)
|
"""ParallelGeneralGraph for parallel directed graphs (DiGraph) module"""
import logging
import sys
import warnings
from multiprocessing import Queue
import multiprocessing as mp
from multiprocessing.sharedctypes import RawArray
import ctypes
import numpy as np
import networkx as nx
from .utils import chunk_it
from .general_graph import GeneralGraph
warnings.simplefilter(action='ignore', category=FutureWarning)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class ParallelGeneralGraph(GeneralGraph):
"""
Class ParallelGeneralGraph for parallel implementation of
directed graphs (DiGraph).
Constructs a new graph given an input file.
A DiGraph stores nodes and edges with optional data or attributes.
DiGraphs hold directed edges.
Nodes can be arbitrary python objects with optional key/value attributes.
Edges are represented as links between nodes with optional key/value
attributes.
"""
def __init__(self):
super().__init__()
self.manager = mp.Manager()
self.num = mp.cpu_count()
def measure_iteration(self, nodes, record, kernel, *measure_args):
"""
Inner iteration for parallel measures,
to update shared dictionary.
:param list nodes: nodes for which to compute the
shortest path between them and all the other nodes.
:param multiprocessing.managers.dict record:
shared dictionary to be updated.
:param callable kernel: kernel measure to be computed.
:param \*measure_args: arguments for kernel measures.
Have a look at specific kernel measures in GeneralGraph for
the particular variables/types for each measure.
"""
partial_dict = kernel(nodes, *measure_args)
record.update(partial_dict)
def measure_processes(self, record, kernel, *measure_args):
"""
        Division of the total number of nodes into chunks and
parallel distribution of tasks into processes,
for different kernel measure functions.
:param multiprocessing.managers.dict record:
shared dictionary to be updated
:param callable kernel: kernel measure to be computed
:param \*measure_args: arguments for kernel measures
(have a look at specific kernel measures in GeneralGraph
for the particular variables/types for each measure)
"""
node_chunks = chunk_it(list(self.nodes()), self.num)
processes = [
mp.Process( target=self.measure_iteration,
args=(node_chunks[p], record, kernel, *measure_args) )
for p in range(self.num) ]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
def floyd_warshall_predecessor_and_distance(self):
"""
Parallel Floyd Warshall's APSP algorithm. The predecessors
and distance matrices are evaluated, together with the nested
dictionaries for shortest-path, length of the paths and
efficiency attributes.
        .. note:: Edge weights are taken into account in the distance matrix.
Edge weight attributes must be numerical. Distances are calculated
as sums of weighted edges traversed.
:return: nested dictionary with key corresponding to
source, while as value a dictionary keyed by target and valued
by the source-target shortest path;
nested dictionary with key corresponding to
source, while as value a dictionary keyed by target and valued
by the source-target shortest path length.
:rtype: dict, dict
"""
dist, pred = self.floyd_warshall_initialization()
shared_d = mp.sharedctypes.RawArray(ctypes.c_double, dist.shape[0]**2)
dist_shared = np.frombuffer(shared_d, 'float64').reshape(dist.shape)
dist_shared[:] = dist
shared_p = mp.sharedctypes.RawArray(ctypes.c_double,pred.shape[0]**2)
pred_shared = np.frombuffer(shared_p, 'float64').reshape(pred.shape)
pred_shared[:] = pred
n = len(self.nodes())
chunk = [(0, int(n / self.num))]
node_chunks = chunk_it(list(self.nodes()), self.num)
for i in range(1, self.num):
chunk.append((chunk[i - 1][1],
chunk[i - 1][1] + len(node_chunks[i])))
barrier = mp.Barrier(self.num)
processes = [
mp.Process( target=self.floyd_warshall_kernel,
args=(dist_shared, pred_shared, chunk[p][0], chunk[p][1], barrier))
for p in range(self.num) ]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
all_shortest_path = self.manager.dict()
processes = [
mp.Process( target=self.measure_iteration,
args=(list(map(self.ids_reversed.get, node_chunks[p])),
all_shortest_path, self.construct_path_kernel, pred_shared) )
for p in range(self.num) ]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
nonempty_shortest_path = {}
for k in all_shortest_path.keys():
nonempty_shortest_path[k] = {
key: value
for key, value in all_shortest_path[k].items() if value
}
shortest_path_length = {}
for i in list(self.H):
shortest_path_length[self.ids[i]] = {}
for key, value in nonempty_shortest_path[self.ids[i]].items():
length_path = dist_shared[self.ids_reversed[value[0]],
self.ids_reversed[value[-1]]]
shortest_path_length[self.ids[i]][key] = length_path
return nonempty_shortest_path, shortest_path_length
def dijkstra_iteration_parallel(self, out_queue, nodes):
"""
Parallel SSSP algorithm based on Dijkstra’s method.
:param multiprocessing.queues.Queue out_queue: multiprocessing queue
:param list nodes: list of starting nodes from which the SSSP should be
computed to every other target node in the graph
        .. note:: Edge weights are taken into account.
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
"""
for n in nodes:
ssspp = (n, nx.single_source_dijkstra(self, n, weight='weight'))
out_queue.put(ssspp)
def dijkstra_single_source_shortest_path(self):
"""
Wrapper for parallel SSSP algorithm based on Dijkstra’s method.
The nested dictionaries for shortest-path, length of the paths and
efficiency attributes are evaluated.
        .. note:: Edge weights are taken into account.
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
:return: nested dictionary with key corresponding to
source, while as value a dictionary keyed by target and valued
by the source-target shortest path;
nested dictionary with key corresponding to
source, while as value a dictionary keyed by target and valued
by the source-target shortest path length.
:rtype: dict, dict
"""
attribute_ssspp = []
out_queue = Queue()
node_chunks = chunk_it(list(self.nodes()), self.num)
processes = [
mp.Process( target=self.dijkstra_iteration_parallel,
args=( out_queue,node_chunks[p] ))
for p in range(self.num) ]
for proc in processes:
proc.start()
while 1:
running = any(p.is_alive() for p in processes)
while not out_queue.empty():
attribute_ssspp.append(out_queue.get())
if not running:
break
shortest_path = {}
shortest_path_length = {}
for ssspp in attribute_ssspp:
n = ssspp[0]
shortest_path[n] = ssspp[1][1]
shortest_path_length[n] = ssspp[1][0]
return shortest_path, shortest_path_length
def calculate_shortest_path(self):
"""
Choose the most appropriate way to compute the all-pairs shortest
path depending on graph size and density.
For a dense graph choose Floyd Warshall algorithm.
For a sparse graph choose SSSP algorithm based on Dijkstra's method.
.. note:: Edge weights of the graph are taken into account
in the computation.
:return: nested dictionary with key corresponding to
source, while as value a dictionary keyed by target and valued
by the source-target shortest path;
nested dictionary with key corresponding to
source, while as value a dictionary keyed by target and valued
by the source-target shortest path length.
:rtype: dict, dict
"""
n_of_nodes = self.order()
graph_density = nx.density(self)
logging.debug('Number of processors: %s', self.num)
        logging.debug('The graph contains %s nodes', n_of_nodes)
if graph_density <= 0.000001:
logging.debug('The graph is sparse, density = %s', graph_density)
shpath, shpath_len = self.dijkstra_single_source_shortest_path()
else:
logging.debug('The graph is dense, density = %s', graph_density)
shpath, shpath_len = self.floyd_warshall_predecessor_and_distance()
return shpath, shpath_len
def compute_efficiency(self):
"""
Efficiency calculation.
.. note:: The efficiency of a path connecting two nodes is defined
as the inverse of the path length, if the path has length non-zero,
and zero otherwise.
:return: efficiency computed for every node.
The keys correspond to source, while as value a dictionary keyed
by target and valued by the source-target efficiency.
:rtype: multiprocessing.managers.dict
"""
shortest_path_length = self.shortest_path_length
efficiency = self.manager.dict()
self.measure_processes(efficiency, self.efficiency_kernel,
shortest_path_length)
return efficiency
def compute_nodal_efficiency(self):
"""
Nodal efficiency calculation.
        .. note:: The nodal efficiency of a node is equal to zero for a node
            without any outgoing path and equal to one if every other node of
            the digraph can be reached from it.
:return: nodal efficiency computed for every node.
:rtype: multiprocessing.managers.dict
"""
graph_size = len(list(self))
efficiency = self.efficiency
nodal_efficiency = self.manager.dict()
self.measure_processes(nodal_efficiency, self.nodal_efficiency_kernel,
efficiency, graph_size)
return nodal_efficiency
def compute_local_efficiency(self):
"""
Local efficiency calculation.
.. note:: The local efficiency shows the efficiency of the connections
between the first-order outgoing neighbors of node v
when v is removed. Equivalently, local efficiency measures
the resilience of the digraph to the perturbation of node removal,
i.e. if we remove a node, how efficiently its first-order outgoing
neighbors can communicate.
It is in the range [0, 1].
:return: local efficiency computed for every node.
:rtype: multiprocessing.managers.dict
"""
nodal_efficiency = self.nodal_efficiency
local_efficiency = self.manager.dict()
self.measure_processes(local_efficiency, self.local_efficiency_kernel,
nodal_efficiency)
return local_efficiency
def shortest_path_list_iteration(self, nodes, shortest_path,
tot_shortest_paths_list):
"""
Inner iteration for parallel shortest path list calculation,
to update shared list.
:param list nodes: list of nodes for which to compute the
shortest path between them and all the other nodes
:param dict shortest_path: nested dictionary with key
corresponding to source, while as value a dictionary keyed by target
and valued by the source-target shortest path.
:param tot_shortest_paths_list: list of shortest paths
with at least two nodes
:type tot_shortest_paths_list: multiprocessing.managers.list
"""
partial_shortest_paths_list = self.shortest_path_list_kernel(nodes,
shortest_path)
tot_shortest_paths_list.extend(partial_shortest_paths_list)
def compute_betweenness_centrality(self):
"""
        Betweenness centrality calculation.
.. note:: Betweenness centrality is an index of the relative importance
of a node and it is defined by the number of shortest paths that run
through it.
            Nodes with the highest betweenness centrality hold the highest
            level of control over the information flowing between different
            nodes in the network, because more information will pass through
            them.
:return: betweenness centrality computed for every node.
:rtype: multiprocessing.managers.dict
"""
shortest_path = self.shortest_path
tot_shortest_paths_list = self.manager.list()
node_chunks = chunk_it(list(self.nodes()), self.num)
processes = [
mp.Process( target=self.shortest_path_list_iteration,
args=(node_chunks[p], shortest_path, tot_shortest_paths_list) )
for p in range(self.num) ]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
betweenness_centrality = self.manager.dict()
self.measure_processes(betweenness_centrality,
self.betweenness_centrality_kernel, tot_shortest_paths_list)
return betweenness_centrality
def compute_closeness_centrality(self):
"""
        Closeness centrality calculation.
.. note:: Closeness centrality measures the reciprocal of the
average shortest path distance from a node to all other reachable
nodes in the graph. Thus, the more central a node is, the closer
            it is to all other nodes. This measure allows us to identify good
            broadcasters, that is, key elements in a graph, showing how
            closely the nodes are connected with each other.
:return: closeness centrality computed for every node.
:rtype: multiprocessing.managers.dict
"""
graph_size = len(list(self))
shortest_path = self.shortest_path
shortest_path_length = self.shortest_path_length
tot_shortest_paths_list = self.manager.list()
node_chunks = chunk_it(list(self.nodes()), self.num)
processes = [
mp.Process( target=self.shortest_path_list_iteration,
args=(node_chunks[p], shortest_path, tot_shortest_paths_list) )
for p in range(self.num) ]
for proc in processes:
proc.start()
for proc in processes:
proc.join()
closeness_centrality = self.manager.dict()
self.measure_processes(closeness_centrality,
self.closeness_centrality_kernel, shortest_path_length,
tot_shortest_paths_list, graph_size)
return closeness_centrality
def compute_degree_centrality(self):
"""
Degree centrality calculation.
.. note:: Degree centrality is a simple centrality measure that counts
how many neighbors a node has in an undirected graph.
            The more neighbors a node has, the more important it is,
occupying a strategic position that serves as a source or conduit
for large volumes of flux transactions with other nodes.
A node with high degree centrality is a node with many dependencies.
:return: degree centrality computed for every node.
:rtype: multiprocessing.managers.dict
"""
graph_size = len(list(self))
degree_centrality = self.manager.dict()
self.measure_processes(degree_centrality,
self.degree_centrality_kernel, graph_size)
return degree_centrality
def compute_indegree_centrality(self):
"""
In-degree centrality calculation.
.. note:: In-degree centrality is measured by the number of edges
ending at the node in a directed graph. Nodes with high in-degree
centrality are called cascade resulting nodes.
:return: in-degree centrality computed for every node.
:rtype: multiprocessing.managers.dict
"""
graph_size = len(list(self))
indegree_centrality = self.manager.dict()
self.measure_processes(indegree_centrality,
self.indegree_centrality_kernel, graph_size)
return indegree_centrality
def compute_outdegree_centrality(self):
"""
Out-degree centrality calculation.
.. note:: Out-degree centrality is measured by the number of edges
starting from a node in a directed graph. Nodes with high out-degree
            centrality are called cascade initiating nodes.
:return: out-degree centrality computed for every node.
:rtype: multiprocessing.managers.dict
"""
graph_size = len(list(self))
outdegree_centrality = self.manager.dict()
self.measure_processes(outdegree_centrality,
self.outdegree_centrality_kernel, graph_size)
return outdegree_centrality
|
import operator
from typing import Type, Union, Callable, Any
from pydantic.utils import GetterDict
def bind_orm_fields(**fields: Union[str, Callable]) -> Type[GetterDict]:
"""GetterDict that binds ORM attributes to a Pydantic model.
A field is a key-value mapping where the key is the name of the
field in the Pydantic model, and the value is a string
representation of the attribute to map it to.
Example: bind_orm_fields(owner_username='owner.username')
This would bind the Pydantic model's owner_username field to the
database model's owner.username.
The value can also be a function with a parameter for the ORM
model. This function will be called and the returned value
will be applied to the Pydantic model.
Example: bind_orm_fields(votes=lambda obj: sum(v.vote for v in obj.votes))
This would bind the Pydantic model's votes field to the sum of all
elements in the database model's votes list.
"""
class FieldBinder(GetterDict):
"""GetterDict for resolving attributes from the ORM model.
Apply this to schemas that need to resolve inner attributes
        from ORM objects, or need to run a method to resolve any
given attribute.
"""
def __init__(self, obj):
self.custom_fields = {}
for field, value in fields.items():
if type(value) is str:
self.custom_fields[field] = operator.attrgetter(value)(obj)
else:
self.custom_fields[field] = value(obj)
super().__init__(obj)
def get(self, key: Any, default: Any = None) -> Any:
if key in self.custom_fields:
return self.custom_fields[key]
else:
return super().get(key, default)
def __getitem__(self, key: str) -> Any:
if key in self.custom_fields:
return self.custom_fields[key]
else:
return super().__getitem__(key)
return FieldBinder
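

# Illustrative usage: a minimal sketch of wiring the returned GetterDict into a
# Pydantic v1 schema via ``Config.getter_dict``. The ``PostOut`` schema and the
# ``owner.username`` / ``votes`` attributes it refers to are hypothetical and
# only show the intended call pattern.
if __name__ == "__main__":
    from pydantic import BaseModel

    class PostOut(BaseModel):
        owner_username: str
        votes: int = 0

        class Config:
            orm_mode = True
            getter_dict = bind_orm_fields(
                owner_username="owner.username",
                votes=lambda obj: sum(v.vote for v in obj.votes),
            )

    # Given an ORM object ``post`` exposing those attributes, one would call:
    #     PostOut.from_orm(post)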
|
from django.apps import AppConfig
class WiredriveConfig(AppConfig):
name = 'wiredrive'
|
# Copyright (c) 2011 - 2016, Zhenyu Wu, NEC Labs America Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ZWUtils-Java nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#=============================================================================
## Third-Party Module Collection
## Author: Zhenyu Wu
## Revision 1: Initial Implementation
#=============================================================================
import sys
# Module list for * loading
__all__ = [ "wmi" ]
def __package_init():
# For debugging only
#print >>sys.stderr, "Package init: %s"%__name__
pass
#=============================================================================
#----------------------------- Default Execution -----------------------------
#=============================================================================
try:
__package_init()
except Exception as e:
    print("Failed to initialize package '%s': %s" % (__name__, e), file=sys.stderr)
    import traceback
    print(traceback.format_exc().strip(), file=sys.stderr)
    raise Exception("Could not continue")
|
#coding=utf-8
# Compute the fraction of the total square area taken up by the pink region in
# the image file '正方形面积占比计算.JPG'.
# Idea: Monte Carlo method.
# Assume the square has side length 1.
# Use the bottom-left corner of the square as the origin of a Cartesian
# coordinate system, and call the origin A(0,0).
# The bottom-right corner of the square is B(1,0).
# The top-left corner of the square is C(0,1).
# The midpoint of the top edge of the square is D(0.5,1).
# Let P be the intersection of lines AD and BC, i.e. the apex of the pink triangle.
# The equation of line AD is y = 2*x.
# The equation of line BC is y = 1-x.
# The pink region is the feasible set satisfying y < 2*x and y < 1-x and y > 0,
# with x ranging over 0~1.
# The square region is the feasible set 0<x<1; 0<y<1.
import numpy as np
import matplotlib.pyplot as plt
def meng_num(n):
col_X=[]; col_y=[]
z=np.zeros(n)
    sum = 0  # at the start, no point has fallen into the pink region yet
for i in range(n):
        x = np.random.rand()  # draw a random number in 0~1 as the x coordinate
        y = np.random.rand()  # draw a random number in 0~1 as the y coordinate
        col_X.append(x)  # keep the generated x for plotting later
        col_y.append(y)  # keep the generated y for plotting later
        if y < 2*x and y < 1-x:  # if the point (x, y) falls into the pink region, count it
            sum += 1
            z[i] = 1
    area_rate = sum / n  # the pink share of the square is (points in the pink region) / (n points in total)
    print('with %i random points, the estimated area ratio is: %f' % (n, area_rate))
    # What follows is visualization only: project the generated points onto the region.
x1 = np.linspace(0, 1 / 3, 1000)
x2 = np.linspace(1 / 3, 1, 1000)
x3 = np.linspace(0, 1, 1000)
y1 = 2 * x1
y2 = 1 - x2
y3 = 0 * x1
plt.plot(x1, y1, x2, y2, x3, y3)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.scatter(col_X,col_y,c=z,cmap='rainbow')
    plt.title('with %i random points' % n)
plt.show()
for n in list([10,100,1000,10000,100000]):
meng_num(n)
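
# Analytic check: the pink region is the triangle with vertices A(0,0), B(1,0)
# and P(1/3, 2/3), where y = 2*x meets y = 1-x, so its exact share of the unit
# square is 0.5 * 1 * (2/3) = 1/3. The Monte Carlo estimates above should
# approach this value as n grows.
print('analytic area ratio: %f' % (1 / 3))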
|
import json
import logging
import uuid
from datetime import datetime
from datetime import time as dt_time
from datetime import timedelta
from functools import partial
from typing import Callable, Dict, Optional, Sequence, Tuple, List
import flask
import werkzeug.exceptions
from dateutil.tz import tz
from flask import abort, request
from werkzeug.datastructures import TypeConversionDict
from werkzeug.exceptions import HTTPException, BadRequest
from cubedash.summary._stores import DatasetItem
from datacube.model import Range, Dataset
from datacube.utils import DocReader, parse_time
from eodatasets3 import serialise
from eodatasets3 import stac as eo3stac
from eodatasets3.model import DatasetDoc, ProductDoc, MeasurementDoc, AccessoryDoc
from eodatasets3.properties import StacPropertyView
from eodatasets3.utils import is_doc_eo3
from . import _model, _utils
from .summary import ItemSort
_LOG = logging.getLogger(__name__)
bp = flask.Blueprint("stac", __name__, url_prefix="/stac")
PAGE_SIZE_LIMIT = _model.app.config.get("STAC_PAGE_SIZE_LIMIT", 1000)
DEFAULT_PAGE_SIZE = _model.app.config.get("STAC_DEFAULT_PAGE_SIZE", 20)
# Should we force all URLs to include the full hostname?
FORCE_ABSOLUTE_LINKS = _model.app.config.get("STAC_ABSOLUTE_HREFS", True)
# Should searches return the full properties for every stac item by default?
# These searches are much slower, as they force us to use ODC's own metadata table.
DEFAULT_RETURN_FULL_ITEMS = _model.app.config.get(
"STAC_DEFAULT_FULL_ITEM_INFORMATION", True
)
STAC_VERSION = "1.0.0"
def url_for(*args, **kwargs):
if FORCE_ABSOLUTE_LINKS:
kwargs["_external"] = True
return flask.url_for(*args, **kwargs)
def stac_endpoint_information() -> Dict:
config = _model.app.config
o = dict(
id=config.get("STAC_ENDPOINT_ID", "odc-explorer"),
title=config.get("STAC_ENDPOINT_TITLE", "Default ODC Explorer instance"),
)
description = config.get(
"STAC_ENDPOINT_DESCRIPTION",
"Configure stac endpoint information in your Explorer `settings.env.py` file",
)
if description:
o["description"] = description
return o
def utc(d: datetime):
if d.tzinfo is None:
return d.replace(tzinfo=tz.tzutc())
return d.astimezone(tz.tzutc())
def _stac_response(doc: Dict, content_type="application/json") -> flask.Response:
"""Return a stac document as the flask response"""
# Any response without a links array already is a coding problem.
doc["links"].append(dict(rel="root", href=url_for(".root")))
return _utils.as_json(
{
# Always put stac version at the beginning for readability.
"stac_version": STAC_VERSION,
# The given doc may override it too.
**doc,
},
content_type=content_type,
)
def _geojson_stac_response(doc: Dict) -> flask.Response:
"""Return a stac item"""
return _stac_response(doc, content_type="application/geo+json")
@bp.route("", strict_slashes=False)
def root():
"""
The root stac page links to each collection (product) catalog
"""
return _stac_response(
dict(
**stac_endpoint_information(),
links=[
dict(
title="Collections",
description="All product collections",
rel="children",
type="application/json",
href=url_for(".collections"),
),
dict(
title="Arrivals",
description="Most recently added items",
rel="child",
type="application/json",
href=url_for(".arrivals"),
),
dict(
title="Item Search",
rel="search",
type="application/json",
href=url_for(".stac_search"),
),
dict(rel="self", href=request.url),
# Individual Product Collections
*(
dict(
title=product.name,
description=product.definition.get("description"),
rel="child",
href=url_for(".collection", collection=product.name),
)
for product, product_summary in _model.get_products_with_summaries()
),
],
conformsTo=[
"https://api.stacspec.org/v1.0.0-beta.1/core",
"https://api.stacspec.org/v1.0.0-beta.1/item-search",
],
)
)
@bp.route("/search", methods=["GET", "POST"])
def stac_search():
"""
Search api for stac items.
"""
if request.method == "GET":
args = request.args
else:
args = TypeConversionDict(request.get_json())
products = args.get("collections", default=[], type=_array_arg)
if "collection" in args:
products.append(args.get("collection"))
# Fallback for legacy 'product' argument
elif "product" in args:
products.append(args.get("product"))
return _geojson_stac_response(_handle_search_request(args, products))
def _array_arg(arg: str, expect_type=str, expect_size=None) -> List:
"""
Parse an argument that should be a simple list.
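
    For example:

    >>> _array_arg('1, 2, 3', expect_type=int)
    [1, 2, 3]
    >>> _array_arg('["a", "b"]')
    ['a', 'b']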
"""
if isinstance(arg, list):
return arg
# Make invalid arguments loud. The default ValueError behaviour is to quietly forget the param.
try:
arg = arg.strip()
# Legacy json-like format. This is what sat-api seems to do too.
if arg.startswith("["):
value = json.loads(arg)
else:
# Otherwise OpenAPI non-exploded form style.
# Eg. "1, 2, 3" or "string1,string2" or "string1"
args = [a.strip() for a in arg.split(",")]
value = [expect_type(a.strip()) for a in args if a]
except ValueError:
raise BadRequest(
f"Invalid argument syntax. Expected comma-separated list, got: {arg!r}"
)
if not isinstance(value, list):
raise BadRequest(f"Invalid argument syntax. Expected json list, got: {value!r}")
if expect_size is not None and len(value) != expect_size:
raise BadRequest(
f"Expected size {expect_size}, got {len(value)} elements in {arg!r}"
)
return value
def _bool_argument(s: str):
"""
Parse an argument that should be a bool
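
    >>> _bool_argument('true'), _bool_argument('ON'), _bool_argument('0')
    (True, True, False)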
"""
if isinstance(s, bool):
return s
# Copying FastAPI booleans:
# https://fastapi.tiangolo.com/tutorial/query-params
return s.strip().lower() in ("1", "true", "on", "yes")
def _handle_search_request(
request_args: TypeConversionDict,
product_names: List[str],
require_geometry: bool = True,
include_total_count: bool = True,
) -> Dict:
bbox = request_args.get(
"bbox", type=partial(_array_arg, expect_size=4, expect_type=float)
)
# Stac-api <=0.7.0 used 'time', later versions use 'datetime'
time = request_args.get("datetime") or request_args.get("time")
limit = request_args.get("limit", default=DEFAULT_PAGE_SIZE, type=int)
ids = request_args.get(
"ids", default=None, type=partial(_array_arg, expect_type=uuid.UUID)
)
offset = request_args.get("_o", default=0, type=int)
# Request the full Item information. This forces us to go to the
# ODC dataset table for every record, which can be extremely slow.
full_information = request_args.get(
"_full", default=DEFAULT_RETURN_FULL_ITEMS, type=_bool_argument
)
if "intersects" in request_args:
raise werkzeug.exceptions.NotImplemented(
"'intersects' queries are not yet supported, sorry."
)
if limit > PAGE_SIZE_LIMIT:
abort(
400,
f"Max page size is {PAGE_SIZE_LIMIT}. "
f"Use the next links instead of a large limit.",
)
if bbox is not None and len(bbox) != 4:
abort(400, "Expected bbox of size 4. [min lon, min lat, max long, max lat]")
if time is not None:
time = _parse_time_range(time)
def next_page_url(next_offset):
return url_for(
".stac_search",
collections=product_names,
bbox="{},{},{},{}".format(*bbox) if bbox else None,
time=_unparse_time_range(time) if time else None,
ids=",".join(map(str, ids)) if ids else None,
limit=limit,
_o=next_offset,
_full=full_information,
)
feature_collection = search_stac_items(
product_names=product_names,
bbox=bbox,
time=time,
dataset_ids=ids,
limit=limit,
offset=offset,
get_next_url=next_page_url,
full_information=full_information,
require_geometry=require_geometry,
include_total_count=include_total_count,
)
feature_collection["links"].extend(
(
dict(
href=url_for(".stac_search"),
rel="search",
title="Search",
type="application/geo+json",
method="GET",
),
dict(
href=url_for(".stac_search"),
rel="search",
title="Search",
type="application/geo+json",
method="POST",
),
)
)
return feature_collection
def search_stac_items(
get_next_url: Callable[[int], str],
limit: int = DEFAULT_PAGE_SIZE,
offset: int = 0,
dataset_ids: Optional[str] = None,
product_names: Optional[List[str]] = None,
bbox: Optional[Tuple[float, float, float, float]] = None,
time: Optional[Tuple[datetime, datetime]] = None,
full_information: bool = False,
order: ItemSort = ItemSort.DEFAULT_SORT,
require_geometry: bool = True,
include_total_count: bool = False,
) -> Dict:
"""
Perform a search, returning a FeatureCollection of stac Item results.
:param get_next_url: A function that calculates a page url for the given offset.
"""
offset = offset or 0
items = list(
_model.STORE.search_items(
product_names=product_names,
time=time,
bbox=bbox,
limit=limit + 1,
dataset_ids=dataset_ids,
offset=offset,
full_dataset=full_information,
order=order,
require_geometry=require_geometry,
)
)
returned = items[:limit]
there_are_more = len(items) == limit + 1
page = 0
if limit != 0:
page = offset // limit
paging_properties = dict(
# Stac standard
numberReturned=len(returned),
# Compatibility with older implementation. Was removed from stac-api standard.
# (page numbers + limits are not ideal as they prevent some big db optimisations.)
context=dict(
page=page,
limit=limit,
returned=len(returned),
),
)
if include_total_count:
count_matching = _model.STORE.get_count(
product_names=product_names, time=time, bbox=bbox, dataset_ids=dataset_ids
)
paging_properties["numberMatched"] = count_matching
paging_properties["context"]["matched"] = count_matching
result = dict(
type="FeatureCollection",
features=[as_stac_item(f) for f in returned],
links=[],
**paging_properties,
)
if there_are_more:
result["links"].append(dict(rel="next", href=get_next_url(offset + limit)))
return result
@bp.route("/collections")
def collections():
"""
This is like the root "/", but has full information for each collection in
an array (instead of just a link to each collection).
"""
return _stac_response(
dict(
links=[],
collections=[
_stac_collection(product.name)
for product, product_summary in _model.get_products_with_summaries()
],
)
)
@bp.route("/arrivals")
def arrivals():
"""Collection of the items most recently indexed into this index"""
return _stac_response(
dict(
id="Arrivals",
title="Dataset Arrivals",
license="various",
description="The most recently added Items to this index",
properties={},
providers=[],
links=[
dict(
rel="items",
href=url_for(".arrivals_items"),
)
],
)
)
@bp.route("/arrivals/items")
def arrivals_items():
"""
Get the Items most recently indexed into this Open Data Cube instance.
This returns a Stac FeatureCollection of complete Stac Items, with paging links.
"""
limit = request.args.get("limit", default=DEFAULT_PAGE_SIZE, type=int)
offset = request.args.get("_o", default=0, type=int)
if limit > PAGE_SIZE_LIMIT:
abort(
400,
f"Max page size is {PAGE_SIZE_LIMIT}. "
f"Use the next links instead of a large limit.",
)
def next_page_url(next_offset):
return url_for(
".arrivals_items",
limit=limit,
_o=next_offset,
)
return _geojson_stac_response(
search_stac_items(
limit=limit,
offset=offset,
get_next_url=next_page_url,
full_information=True,
order=ItemSort.RECENTLY_ADDED,
require_geometry=False,
include_total_count=False,
)
)
@bp.route("/collections/<collection>")
def collection(collection: str):
"""
Overview of a WFS Collection (a datacube product)
"""
return _stac_response(_stac_collection(collection))
def _stac_collection(collection: str):
summary = _model.get_product_summary(collection)
try:
dataset_type = _model.STORE.get_dataset_type(collection)
except KeyError:
abort(404, f"Unknown collection {collection!r}")
all_time_summary = _model.get_time_summary(collection)
summary_props = {}
if summary and summary.time_earliest:
begin, end = utc(summary.time_earliest), utc(summary.time_latest)
extent = {"temporal": {"interval": [[begin, end]]}}
footprint = all_time_summary.footprint_wgs84
if footprint:
extent["spatial"] = {"bbox": [footprint.bounds]}
summary_props["extent"] = extent
stac_collection = dict(
id=summary.name,
title=summary.name,
type="Collection",
license=_utils.product_license(dataset_type),
description=dataset_type.definition.get("description"),
properties=dict(_build_properties(dataset_type.metadata)),
providers=[],
**summary_props,
links=[
dict(
rel="items",
href=url_for(".collection_items", collection=collection),
)
],
)
return stac_collection
@bp.route("/collections/<collection>/items")
def collection_items(collection: str):
"""
A geojson FeatureCollection of all items in a collection/product.
(with paging)
"""
all_time_summary = _model.get_time_summary(collection)
if not all_time_summary:
abort(404, f"Product {collection!r} not found among summaries.")
feature_collection = _handle_search_request(
request_args=request.args,
product_names=[collection],
)
# Maybe we shouldn't include total count, as it prevents some future optimisation?
if "numberMatched" not in feature_collection:
feature_collection["numberMatched"] = all_time_summary.dataset_count
# Backwards compatibility with older stac implementations.
feature_collection["context"]["matched"] = feature_collection["numberMatched"]
return _geojson_stac_response(feature_collection)
@bp.route("/collections/<collection>/items/<dataset_id>")
def item(collection: str, dataset_id: str):
dataset = _model.STORE.get_item(dataset_id)
if not dataset:
abort(404, f"No dataset found with id {dataset_id!r}")
actual_product_name = dataset.product_name
if collection != actual_product_name:
# We're not doing a redirect as we don't want people to rely on wrong urls
# (and we're unkind)
actual_url = url_for(
".item",
collection=actual_product_name,
dataset_id=dataset_id,
)
abort(
404,
f"No such dataset in collection.\n"
f"Perhaps you meant collection {actual_product_name}: {actual_url})",
)
return _geojson_stac_response(as_stac_item(dataset))
def _pick_remote_uri(uris: Sequence[str]) -> Optional[int]:
"""
Return the offset of the first uri with a remote path, if any.
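
    >>> _pick_remote_uri(['file:///tmp/ds.yaml', 's3://bucket/ds.yaml'])
    1
    >>> _pick_remote_uri(['file:///tmp/ds.yaml']) is None
    True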
"""
for i, uri in enumerate(uris):
scheme, *_ = uri.split(":")
if scheme in ("https", "http", "ftp", "s3", "gfs"):
return i
return None
def _parse_time_range(time: str) -> Optional[Tuple[datetime, datetime]]:
"""
>>> _parse_time_range('1986-04-16T01:12:16/2097-05-10T00:24:21')
(datetime.datetime(1986, 4, 16, 1, 12, 16), datetime.datetime(2097, 5, 10, 0, 24, 21))
>>> _parse_time_range('1986-04-16T01:12:16')
(datetime.datetime(1986, 4, 16, 1, 12, 16), datetime.datetime(1986, 4, 16, 1, 12, 17))
>>> # Time is optional:
>>> _parse_time_range('2019-01-01/2019-01-01')
(datetime.datetime(2019, 1, 1, 0, 0), datetime.datetime(2019, 1, 1, 0, 0))
>>> _parse_time_range('1986-04-16')
(datetime.datetime(1986, 4, 16, 0, 0), datetime.datetime(1986, 4, 17, 0, 0))
>>> # Open ranges:
>>> _parse_time_range('2019-01-01/..')[0]
datetime.datetime(2019, 1, 1, 0, 0)
>>> _parse_time_range('2019-01-01/..')[1] > datetime.now()
True
>>> _parse_time_range('../2019-01-01')
(datetime.datetime(1971, 1, 1, 0, 0), datetime.datetime(2019, 1, 1, 0, 0))
>>> # Unbounded time is the same as no time filter. ("None")
>>> _parse_time_range('../..')
>>>
"""
time_period = time.split("/")
if len(time_period) == 2:
start, end = time_period
if start == "..":
start = datetime(1971, 1, 1, 0, 0)
elif end == "..":
end = datetime.now() + timedelta(days=2)
# Were they both open? Treat it as no date filter.
if end == "..":
return None
return parse_time(start), parse_time(end)
elif len(time_period) == 1:
t: datetime = parse_time(time_period[0])
if t.time() == dt_time():
return t, t + timedelta(days=1)
else:
return t, t + timedelta(seconds=1)
def _unparse_time_range(time: Tuple[datetime, datetime]) -> str:
"""
>>> _unparse_time_range((
... datetime(1986, 4, 16, 1, 12, 16),
... datetime(2097, 5, 10, 0, 24, 21)
... ))
'1986-04-16T01:12:16/2097-05-10T00:24:21'
"""
start_time, end_time = time
return f"{start_time.isoformat()}/{end_time.isoformat()}"
def _band_to_measurement(band: Dict, dataset_location: str) -> MeasurementDoc:
"""Create EO3 measurement from an EO1 band dict"""
return MeasurementDoc(
path=band.get("path"),
band=band.get("band"),
layer=band.get("layer"),
name=band.get("name"),
alias=band.get("label"),
)
def as_stac_item(dataset: DatasetItem):
"""
Get a dict corresponding to a stac item
"""
ds: Dataset = dataset.odc_dataset
if ds is not None and is_doc_eo3(ds.metadata_doc):
dataset_doc = serialise.from_doc(ds.metadata_doc, skip_validation=True)
dataset_doc.locations = ds.uris
# Geometry is optional in eo3, and needs to be calculated from grids if missing.
# We can use ODC's own calculation that happens on index.
if dataset_doc.geometry is None:
fallback_extent = ds.extent
if fallback_extent is not None:
dataset_doc.geometry = fallback_extent.geom
dataset_doc.crs = str(ds.crs)
if ds.sources:
dataset_doc.lineage = {classifier: [d.id] for classifier, d in ds.sources}
# Does ODC still put legacy lineage into indexed documents?
elif ("source_datasets" in dataset_doc.lineage) and len(
dataset_doc.lineage
) == 1:
# From old to new lineage type.
dataset_doc.lineage = {
classifier: [dataset["id"]]
for classifier, dataset in dataset_doc.lineage["source_datasets"]
}
else:
# eo1 to eo3
dataset_doc = DatasetDoc(
id=dataset.dataset_id,
# Filled-in below.
label=None,
product=ProductDoc(dataset.product_name),
locations=ds.uris if ds is not None else None,
crs=str(dataset.geometry.crs),
geometry=dataset.geometry.geom,
grids=None,
# TODO: Convert these from stac to eo3
properties=StacPropertyView(
{
"datetime": utc(dataset.center_time),
**(dict(_build_properties(ds.metadata)) if ds else {}),
"odc:processing_datetime": utc(dataset.creation_time),
}
),
measurements={
name: _band_to_measurement(
b,
dataset_location=ds.uris[0] if ds is not None and ds.uris else None,
)
for name, b in ds.measurements.items()
}
if ds is not None
else {},
accessories=_accessories_from_eo1(ds.metadata_doc)
if ds is not None
else {},
# TODO: Fill in lineage. The datacube API only gives us full datasets, which is
# expensive. We only need a list of IDs here.
lineage={},
)
if dataset_doc.label is None and ds is not None:
dataset_doc.label = _utils.dataset_label(ds)
item_doc = eo3stac.to_stac_item(
dataset=dataset_doc,
stac_item_destination_url=url_for(
".item",
collection=dataset.product_name,
dataset_id=dataset.dataset_id,
),
odc_dataset_metadata_url=url_for("dataset.raw_doc", id_=dataset.dataset_id),
explorer_base_url=url_for("default_redirect"),
)
# Add the region code that Explorer inferred.
# (Explorer's region codes predate ODC's and support
    # many more products.)
item_doc["properties"]["cubedash:region_code"] = dataset.region_code
return item_doc
def _accessories_from_eo1(metadata_doc: Dict) -> Dict[str, AccessoryDoc]:
    """Create an EO3 accessories section from an EO1 document"""
accessories = {}
# Browse image -> thumbnail
if "browse" in metadata_doc:
for name, browse in metadata_doc["browse"].items():
accessories[f"thumbnail:{name}"] = AccessoryDoc(
path=browse["path"], name=name
)
# Checksum
if "checksum_path" in metadata_doc:
accessories["checksum:sha1"] = AccessoryDoc(
path=metadata_doc["checksum_path"], name="checksum:sha1"
)
return accessories
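# Sketch of the expected output (hypothetical EO1 metadata, for illustration only):
#
#   _accessories_from_eo1({
#       "browse": {"full": {"path": "browse.fr.jpg"}},
#       "checksum_path": "package.sha1",
#   })
#   # -> {"thumbnail:full": AccessoryDoc(path="browse.fr.jpg", name="full"),
#   #     "checksum:sha1": AccessoryDoc(path="package.sha1", name="checksum:sha1")}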
def field_platform(key, value):
yield "eo:platform", value.lower().replace("_", "-")
def field_instrument(key, value):
yield "eo:instrument", value
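# The field converters are small generators yielding (stac_property, value) pairs.
# Illustrative values only:
#
#   list(field_platform("platform", "LANDSAT_8"))     # -> [("eo:platform", "landsat-8")]
#   list(field_instrument("instrument", "OLI_TIRS"))  # -> [("eo:instrument", "OLI_TIRS")]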
def field_path_row(key, value):
# Path/Row fields are ranges in datacube but 99% of the time
# they are a single value
# (they are ranges in telemetry products)
# Stac doesn't accept a range here, so we'll skip it in those products,
# but we can handle the 99% case when lower==higher.
if key == "sat_path":
kind = "landsat:wrs_path"
elif key == "sat_row":
kind = "landsat:wrs_row"
else:
        raise ValueError(f"Unexpected path/row kind: {key!r}")
# If there's only one value in the range, return it.
if isinstance(value, Range):
if value.end is None or value.begin == value.end:
# Standard stac
yield kind, int(value.begin)
else:
# Our questionable output. Only present in telemetry products?
yield f"odc:{key}", f"{value.begin}/{value.end}"
# Other Property examples:
# collection "landsat-8-l1"
# eo:gsd 15
# eo:platform "landsat-8"
# eo:instrument "OLI_TIRS"
# eo:off_nadir 0
# datetime "2019-02-12T19:26:08.449265+00:00"
# eo:sun_azimuth -172.29462212
# eo:sun_elevation -6.62176054
# eo:cloud_cover -1
# eo:row "135"
# eo:column "044"
# landsat:product_id "LC08_L1GT_044135_20190212_20190212_01_RT"
# landsat:scene_id "LC80441352019043LGN00"
# landsat:processing_level "L1GT"
# landsat:tier "RT"
_STAC_PROPERTY_MAP = {
"platform": field_platform,
"instrument": field_instrument,
# "measurements": field_bands,
"sat_path": field_path_row,
"sat_row": field_path_row,
}
def _build_properties(d: DocReader):
for key, val in d.fields.items():
if val is None:
continue
converter = _STAC_PROPERTY_MAP.get(key)
if converter:
yield from converter(key, val)
@bp.errorhandler(HTTPException)
def handle_exception(e):
"""Return JSON instead of HTML for HTTP errors."""
response = e.get_response()
response.data = json.dumps(
{
"code": e.code,
"name": e.name,
"description": e.description,
}
)
response.content_type = "application/json"
return response
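# For example, an aborted 404 would be serialised roughly as (values illustrative):
#   {"code": 404, "name": "Not Found", "description": "..."}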
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import telegram
import airflow
from airflow.models import Connection
from airflow.providers.telegram.hooks.telegram import TelegramHook
from airflow.utils import db
TELEGRAM_TOKEN = "dummy token"
class TestTelegramHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='telegram-webhook-without-token',
conn_type='http',
)
)
db.merge_conn(
Connection(
conn_id='telegram_default',
conn_type='http',
password=TELEGRAM_TOKEN,
)
)
db.merge_conn(
Connection(
conn_id='telegram-webhook-with-chat_id',
conn_type='http',
password=TELEGRAM_TOKEN,
host="-420913222",
)
)
def test_should_raise_exception_if_both_connection_or_token_is_not_provided(self):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
TelegramHook()
self.assertEqual("Cannot get token: No valid Telegram connection supplied.", str(e.exception))
def test_should_raise_exception_if_conn_id_doesnt_exist(self):
with self.assertRaises(airflow.exceptions.AirflowNotFoundException) as e:
TelegramHook(telegram_conn_id='telegram-webhook-non-existent')
self.assertEqual("The conn_id `telegram-webhook-non-existent` isn't defined", str(e.exception))
def test_should_raise_exception_if_conn_id_doesnt_contain_token(self):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
TelegramHook(telegram_conn_id='telegram-webhook-without-token')
self.assertEqual("Missing token(password) in Telegram connection", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_chat_id_is_not_provided_anywhere(self, mock_get_conn):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"text": "test telegram message"})
self.assertEqual("'chat_id' must be provided for telegram message", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_message_text_is_not_provided(self, mock_get_conn):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222})
self.assertEqual("'text' must be provided for telegram message", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_all_parameters_are_correctly_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222, "text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_through_constructor(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram_default', chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_in_connection(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_retry_when_any_telegram_error_is_encountered(self, mock_get_conn):
        expected_retry_count = 5
mock_get_conn.return_value = mock.Mock(password="some_token")
def side_effect(*args, **kwargs):
raise telegram.error.TelegramError("cosmic rays caused bit flips")
mock_get_conn.return_value.send_message.side_effect = side_effect
with self.assertRaises(Exception) as e:
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
self.assertTrue("RetryError" in str(e.exception))
self.assertTrue("state=finished raised TelegramError" in str(e.exception))
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
        self.assertEqual(expected_retry_count, mock_get_conn.return_value.send_message.call_count)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_token_is_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(token=TELEGRAM_TOKEN, chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
|
""" Sensor platform for Alarm.com """
from datetime import timedelta
import logging
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.components.sensor import (
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_HUMIDITY,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import (
PERCENTAGE,
TEMP_FAHRENHEIT,
)
from .const import DOMAIN
from pyalarmdotcomredux import (
AlarmdotcomClient,
AlarmdotcomClientError,
AlarmdotcomClientAuthError,
)
_LOGGER = logging.getLogger(__name__)
SENSORS_DEFS = {
"ambientTemp": {
"label": "Ambient Temperature",
"type": DEVICE_CLASS_TEMPERATURE,
"unit": TEMP_FAHRENHEIT,
},
"humidityLevel": {
"label": "Humidity Level",
"type": DEVICE_CLASS_HUMIDITY,
"unit": PERCENTAGE,
},
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> bool:
"""Setup entities"""
alarm_client: AlarmdotcomClient = hass.data[DOMAIN][entry.entry_id]
async def async_update_data():
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
thermostats_data = await alarm_client.async_get_thermostats_data()
sensor_data = []
for thermostat in thermostats_data:
for sensor in ["ambientTemp", "humidityLevel"]:
sensor_data.append(
{
"id": "{}-{}".format(thermostat["id"], sensor),
"description": "{} {}".format(
thermostat["description"],
SENSORS_DEFS[sensor]["label"],
),
"device_class": SENSORS_DEFS[sensor]["type"],
"unit": SENSORS_DEFS[sensor]["unit"],
"value": thermostat[sensor],
}
)
_LOGGER.debug("Found %s sensors from Alarm.com", len(sensor_data))
return sensor_data
except AlarmdotcomClientAuthError as err:
# Raising ConfigEntryAuthFailed will cancel future updates
# and start a config flow with SOURCE_REAUTH (async_step_reauth)
raise ConfigEntryAuthFailed from err
except AlarmdotcomClientError as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="alarmdotcom sensors",
update_method=async_update_data,
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(seconds=30),
)
#
# Fetch initial data so we have data when entities subscribe
#
# If the refresh fails, async_config_entry_first_refresh will
# raise ConfigEntryNotReady and setup will try again later
#
# If you do not want to retry setup on failure, use
# coordinator.async_refresh() instead
#
await coordinator.async_config_entry_first_refresh()
async_add_entities(
ThermostatSensorEntity(coordinator, idx)
for idx, ent in enumerate(coordinator.data)
    )
    return True
class ThermostatSensorEntity(CoordinatorEntity, SensorEntity):
"""An entity using CoordinatorEntity.
The CoordinatorEntity class provides:
should_poll
async_update
async_added_to_hass
available
"""
_attr_state_class = STATE_CLASS_MEASUREMENT
def __init__(self, coordinator, idx):
"""Pass coordinator to CoordinatorEntity."""
super().__init__(coordinator)
self.idx = idx
@property
def name(self):
"""Name of the entity."""
return self.coordinator.data[self.idx]["description"]
@property
def unique_id(self):
"""Unique ID of the entity."""
return self.coordinator.data[self.idx]["id"]
@property
def native_value(self):
"""Return entity native value."""
return self.coordinator.data[self.idx]["value"]
@property
def native_unit_of_measurement(self):
"""Return entity native unit of measurement."""
return self.coordinator.data[self.idx]["unit"]
@property
def device_class(self):
return self.coordinator.data[self.idx]["device_class"]
|
import os
import json
readme = open('README.md', 'r')
contents = readme.read()
header = contents[:contents.find('##')+14]
readme.close()
readme = open('README.md', 'w')
readme.write(header)
url_stem = 'https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Finteresting-problems&branch=main&subPath=notebooks/'
for root, dirs, files in os.walk('.'):
for filename in files:
if filename.endswith('.ipynb'):
if not 'checkpoint' in filename:
notebook_name_and_path = os.path.join(root, filename)
notebook = json.load(open(notebook_name_and_path))
second_cell = notebook['cells'][1]['source'][0]
title = second_cell[2:second_cell.find('\n')]
link = url_stem+filename+'&depth=1'
code = '['+title+']('+link+')'
readme.write(code)
readme.write('\n\n')
readme.close()
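# Each notebook becomes one markdown link line in the README. For a hypothetical
# notebooks/example.ipynb whose second cell starts with "# Example Title", the
# line written would look like:
#   [Example Title](https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=...&subPath=notebooks/example.ipynb&depth=1)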
|
import sys
def main():
name = input("Enter your name: ")
age = input("Enter your age: ")
print(f"The user {name} of age {age} is learning Python programming")
return None
if __name__ == "__main__":
sys.exit(main())
|
""" Utility module for validating camera feeds. """
from __future__ import absolute_import, division, print_function
from .textformatter import TextFormatter
from .feed import CameraFeed
def view_valid_camera_feeds():
"""
Shows all valid feed views, one after another. The next feed shows when the current is closed.
"""
valid_feeds = []
TextFormatter.print_heading("Checking for valid feeds.")
    # xrange only exists on Python 2; fall back to range on Python 3.
    try:
        feed_indices = xrange(1, 5)
    except NameError:
        feed_indices = range(1, 5)
    for index in feed_indices:
        if check_feed(index):
            valid_feeds.append(index)
if len(valid_feeds) > 0:
TextFormatter.print_heading("Valid Feeds:")
for feed in valid_feeds:
show_camera_feed(feed)
else:
TextFormatter.print_info("No Valid Feeds")
def check_feed(feed_index):
"""
Checks if the provided index points to a valid camera feed.
"""
camera_feed = CameraFeed(feed_index)
return camera_feed.is_valid()
def show_camera_feed(feed_index):
"""
Shows the camera feed pointed to by the provided feed_index.
"""
camera_feed = CameraFeed(feed_index)
# Show the uncorrected feed.
camera_feed.show()
if __name__ == "__main__":
view_valid_camera_feeds()
|
import math
import torch
from torch import Tensor
from torch import nn
from .concepts import Conceptizator
class EntropyLinear(nn.Module):
"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
def __init__(self, in_features: int, out_features: int, n_classes: int, temperature: float = 0.6,
bias: bool = True, conceptizator: str = 'identity_bool') -> None:
super(EntropyLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.n_classes = n_classes
self.temperature = temperature
self.conceptizator = Conceptizator(conceptizator)
self.alpha = None
self.weight = nn.Parameter(torch.Tensor(n_classes, out_features, in_features))
if bias:
self.bias = nn.Parameter(torch.Tensor(n_classes, 1, out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
if len(input.shape) == 2:
input = input.unsqueeze(0)
self.conceptizator.concepts = input
# compute concept-awareness scores
gamma = self.weight.norm(dim=1, p=1)
self.alpha = torch.exp(gamma/self.temperature) / torch.sum(torch.exp(gamma/self.temperature), dim=1, keepdim=True)
# weight the input concepts by awareness scores
self.alpha_norm = self.alpha / self.alpha.max(dim=1)[0].unsqueeze(1)
self.concept_mask = self.alpha_norm > 0.5
x = input.multiply(self.alpha_norm.unsqueeze(1))
# compute linear map
        x = x.matmul(self.weight.permute(0, 2, 1))
        if self.bias is not None:
            x = x + self.bias
return x.permute(1, 0, 2)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, n_classes={}'.format(
self.in_features, self.out_features, self.n_classes
)
if __name__ == '__main__':
data = torch.rand((10, 5))
layer = EntropyLinear(5, 4, 2)
out = layer(data)
print(out.shape)
|
# python
import logging
# django
from django.conf import settings
from django.db import connections, DEFAULT_DB_ALIAS
from django.core.management.base import BaseCommand
# pyes
from pyes.exceptions import IndexAlreadyExistsException, ElasticSearchException
# django_elasticsearch
from django_elasticsearch.mapping import model_to_mapping
from django_elasticsearch.models import get_settings_by_meta
from django_elasticsearch import ENGINE, OPERATION_CREATE_INDEX
__author__ = 'jorgealegre'
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
engine = settings.DATABASES.get(DEFAULT_DB_ALIAS, {}).get('ENGINE', '')
global_index_name = settings.DATABASES.get(DEFAULT_DB_ALIAS, {}).get('NAME', '')
options = settings.DATABASES.get(DEFAULT_DB_ALIAS, {}).get('OPTIONS', {})
connection = connections[DEFAULT_DB_ALIAS]
es_connection = connection.connection
# Call regular migrate if engine is different from ours
if engine != ENGINE:
return super(Command, self).handle(**options)
else:
# project global index
has_alias = connection.ops.has_alias(global_index_name)
if not has_alias:
try:
index_name_final, alias = connection.ops.create_index(global_index_name, options,
skip_register=True)
self.stdout.write(u'index "{}" created with physical name "{}"'.format(alias, index_name_final))
connection.ops.build_django_engine_structure()
# register create index for global
connection.ops.register_index_operation(index_name_final, OPERATION_CREATE_INDEX,
connection.ops.build_es_settings_from_django(options))
except IndexAlreadyExistsException:
pass
except ElasticSearchException:
import traceback
logger.error(traceback.format_exc())
logger.debug(u'models: {}'.format(connection.introspection.models))
for app_name, app_models in connection.introspection.models.iteritems():
for model in app_models:
mapping = model_to_mapping(model, es_connection, global_index_name)
try:
mapping.save()
self.stdout.write(u'Mapping for model {}.{} updated'.format(app_name, model.__name__))
except Exception as e:
import traceback
logger.error(traceback.format_exc())
self.stderr.write(u'Could not update mapping, rebuilding global index...')
connection.ops.rebuild_index(global_index_name)
mapping.save()
if not hasattr(model._meta, 'indices'):
continue
for model_index in model._meta.indices:
model_index_name = model_index.keys()[0]
index_name = u'{}__{}'.format(model._meta.db_table, model_index_name)
logger.debug(u'model index name: {}'.format(index_name))
index_data = model_index[model_index_name]
logger.debug(u'index_data: {}'.format(index_data))
try:
index_physical, alias = connection.ops.create_index(index_name,
get_settings_by_meta(index_data))
self.stdout.write(u'index "{}" created with physical name "{}"'.format(alias,
index_physical))
except IndexAlreadyExistsException:
pass
mapping = model_to_mapping(model, es_connection, index_name)
try:
mapping.save()
self.stdout.write(u'Mapping for model {}.{} updated'
.format(app_name, index_name))
except Exception as e:
self.stderr.write(u'Could not update mapping, rebuilding index "{}" ...'
.format(index_name))
connection.ops.rebuild_index(index_name)
mapping.save()
|
"""
Label assignment
"""
|
import unittest
from models import sources
Source=sources.Sources
class SourcesTest(unittest.TestCase):
'''
    Test class to test the behavior of our source class
'''
def setUp(self):
'''
setUp method that is run before every test
'''
self.new_source=Source('1234','KBC','broadcasting','www.kbc.co.ke','general','Kenya')
def test_instance(self):
self.assertTrue(isinstance(self.new_source,Source))
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
plugin
~~~~
plugin demo for sampling
:entity: sampling()
:invoked: trend/ref/label rendering
:distinct: true
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
import numpy as np
from v1 import utils
logger = utils.getLogger(__name__)
def sampling(api, line, target_amount):
"""
:param api: plugin api object, select is not implement during init.
:param line: tuple-like ((timestamp, value)), the timestamp and value is const
:param target_amount: amount of points after sampling
:return: (plugin_name, [[timestamp, value]]), tuple is not recommended
"""
# # Assume timestamp, value, range is not nullable
# if len(line) > target_amount and len(line) > 2:
# period = api.get_abstract().period # timestamp is marked as the start time of a period
# start_time = line[0][0]
# end_time = line[-1][0]
# amount = (end_time - start_time) / period # point amount without sampling
# aggr_period = iceil(amount, target_amount) / target_amount * period
# start_time = ifloor(line[0][0], aggr_period)
# tmp = {timestamp: [] for timestamp in range(start_time, end_time + period, aggr_period)}
# for point in line:
# tmp[ifloor(point[0], aggr_period)].append(point)
# line = [
# [timestamp, mean(points, lambda x: x[1]), mean(points, lambda x: x[2])]
# for timestamp, points in sorted(tmp.items())
# ]
logger.debug("Line[:5]: {}".format(line[:5]))
return 'default', line
def num_filter(x):
return isinstance(x, (int, float, long))
def mean(data, key=None):
if key is not None:
data = [key(point) for point in data]
data = filter(num_filter, data)
if len(data) < 1:
return None
return np.mean(data)
def ifloor(data, base=None):
"""
floor of int
:param data:
:param base:
:return:
"""
if base is None or base == 0:
return data
return data - data % base
def iceil(data, base=None):
"""
ceil of int
:param data:
:param base:
:return:
"""
if base is None or base == 0:
return data
return data + -data % base
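# Minimal sketch of the integer helpers above (values illustrative):
#   ifloor(17, 5)         # -> 15, round down to a multiple of 5
#   iceil(17, 5)          # -> 20, round up to a multiple of 5
#   mean([1, 2, 'x', 3])  # -> 2.0, non-numeric entries are filtered out first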
|
class TempraryImageMixin:
@staticmethod
def temporary_image():
"""
        Temporary image file.
"""
import tempfile
from PIL import Image
image = Image.new('RGB', (1, 1))
tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')
image.save(tmp_file, 'jpeg')
tmp_file.seek(0)
return tmp_file
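# Illustrative usage in a hypothetical Django test case (names are assumptions,
# not part of this module):
#
#   class ImageUploadTest(TempraryImageMixin, TestCase):
#       def test_upload(self):
#           image = self.temporary_image()
#           response = self.client.post('/images/', {'media': image})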
|
import io
import mimetypes
import os
import unittest
from pyramid import testing
class TestResponse(unittest.TestCase):
def _getTargetClass(self):
from pyramid.response import Response
return Response
def test_implements_IResponse(self):
from pyramid.interfaces import IResponse
cls = self._getTargetClass()
self.assertTrue(IResponse.implementedBy(cls))
def test_provides_IResponse(self):
from pyramid.interfaces import IResponse
inst = self._getTargetClass()()
self.assertTrue(IResponse.providedBy(inst))
class TestFileResponse(unittest.TestCase):
def _makeOne(self, file, **kw):
from pyramid.response import FileResponse
return FileResponse(file, **kw)
def _getPath(self, suffix='txt'):
here = os.path.dirname(__file__)
return os.path.join(here, 'fixtures', 'minimal.%s' % (suffix,))
def test_with_image_content_type(self):
path = self._getPath('jpg')
r = self._makeOne(path, content_type='image/jpeg')
self.assertEqual(r.content_type, 'image/jpeg')
self.assertEqual(r.headers['content-type'], 'image/jpeg')
path = self._getPath()
r.app_iter.close()
def test_with_xml_content_type(self):
path = self._getPath('xml')
r = self._makeOne(path, content_type='application/xml')
self.assertEqual(r.content_type, 'application/xml')
self.assertEqual(r.headers['content-type'],
'application/xml; charset=UTF-8')
r.app_iter.close()
def test_with_pdf_content_type(self):
path = self._getPath('xml')
r = self._makeOne(path, content_type='application/pdf')
self.assertEqual(r.content_type, 'application/pdf')
self.assertEqual(r.headers['content-type'], 'application/pdf')
r.app_iter.close()
def test_without_content_type(self):
for suffix in ('txt', 'xml', 'pdf'):
path = self._getPath(suffix)
r = self._makeOne(path)
self.assertEqual(r.headers['content-type'].split(';')[0],
mimetypes.guess_type(path, strict=False)[0])
r.app_iter.close()
def test_python_277_bug_15207(self):
# python 2.7.7 on windows has a bug where its mimetypes.guess_type
# function returns Unicode for the content_type, unlike any previous
# version of Python. See https://github.com/Pylons/pyramid/issues/1360
# for more information.
from pyramid.compat import text_
import mimetypes as old_mimetypes
from pyramid import response
class FakeMimetypesModule(object):
def guess_type(self, *arg, **kw):
return text_('foo/bar'), None
fake_mimetypes = FakeMimetypesModule()
try:
response.mimetypes = fake_mimetypes
path = self._getPath('xml')
r = self._makeOne(path)
self.assertEqual(r.content_type, 'foo/bar')
self.assertEqual(type(r.content_type), str)
finally:
response.mimetypes = old_mimetypes
class TestFileIter(unittest.TestCase):
def _makeOne(self, file, block_size):
from pyramid.response import FileIter
return FileIter(file, block_size)
def test___iter__(self):
f = io.BytesIO(b'abc')
inst = self._makeOne(f, 1)
self.assertEqual(inst.__iter__(), inst)
def test_iteration(self):
data = b'abcdef'
f = io.BytesIO(b'abcdef')
inst = self._makeOne(f, 1)
r = b''
for x in inst:
self.assertEqual(len(x), 1)
r+=x
self.assertEqual(r, data)
def test_close(self):
f = io.BytesIO(b'abc')
inst = self._makeOne(f, 1)
inst.close()
self.assertTrue(f.closed)
class Test_patch_mimetypes(unittest.TestCase):
def _callFUT(self, module):
from pyramid.response import init_mimetypes
return init_mimetypes(module)
def test_has_init(self):
class DummyMimetypes(object):
def init(self):
self.initted = True
module = DummyMimetypes()
result = self._callFUT(module)
self.assertEqual(result, True)
self.assertEqual(module.initted, True)
def test_missing_init(self):
class DummyMimetypes(object):
pass
module = DummyMimetypes()
result = self._callFUT(module)
self.assertEqual(result, False)
class TestResponseAdapter(unittest.TestCase):
def setUp(self):
registry = Dummy()
self.config = testing.setUp(registry=registry)
def tearDown(self):
self.config.end()
def _makeOne(self, *types_or_ifaces, **kw):
from pyramid.response import response_adapter
return response_adapter(*types_or_ifaces, **kw)
def test_register_single(self):
from zope.interface import Interface
class IFoo(Interface): pass
dec = self._makeOne(IFoo)
def foo(): pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.adapters, [(foo, IFoo)])
def test_register_multi(self):
from zope.interface import Interface
class IFoo(Interface): pass
class IBar(Interface): pass
dec = self._makeOne(IFoo, IBar)
def foo(): pass
config = DummyConfigurator()
scanner = Dummy()
scanner.config = config
dec.register(scanner, None, foo)
self.assertEqual(config.adapters, [(foo, IFoo), (foo, IBar)])
def test___call__(self):
from zope.interface import Interface
class IFoo(Interface): pass
dec = self._makeOne(IFoo)
dummy_venusian = DummyVenusian()
dec.venusian = dummy_venusian
def foo(): pass
dec(foo)
self.assertEqual(dummy_venusian.attached,
[(foo, dec.register, 'pyramid', 1)])
def test___call___with_venusian_args(self):
from zope.interface import Interface
class IFoo(Interface): pass
dec = self._makeOne(IFoo, _category='foo', _depth=1)
dummy_venusian = DummyVenusian()
dec.venusian = dummy_venusian
def foo(): pass
dec(foo)
self.assertEqual(dummy_venusian.attached,
[(foo, dec.register, 'foo', 2)])
class TestGetResponseFactory(unittest.TestCase):
def test_get_factory(self):
from pyramid.registry import Registry
from pyramid.response import Response, _get_response_factory
registry = Registry()
response = _get_response_factory(registry)(None)
self.assertTrue(isinstance(response, Response))
class Dummy(object):
pass
class DummyConfigurator(object):
def __init__(self):
self.adapters = []
def add_response_adapter(self, wrapped, type_or_iface):
self.adapters.append((wrapped, type_or_iface))
class DummyVenusian(object):
def __init__(self):
self.attached = []
def attach(self, wrapped, fn, category=None, depth=None):
self.attached.append((wrapped, fn, category, depth))
|
from topology import node, edge
|
import pandas as pd
read_file = pd.read_csv(r'Path where the Text file is stored\File name.txt')
read_file.to_csv(r'Path where the CSV will be saved\File name.csv', index=None)
|
#
from .plugin import EdgeLBPlugin
__all__ = ["EdgeLBPlugin"]
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 Dr. Helder Marchetto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import socket
import time
import numpy as np
import struct
import sys
import os
import readUview as ru
import matplotlib.pyplot as plt
from skimage import exposure
class imgStackClass():
def __init__(self,fn) -> None:
"""Initializes the imgStackClass object"""
#fn must be a list of file names to load as a numpy array
if len(fn) < 1:
print("Failed. No file loaded")
return
try:
self.imageWidth = 0
self.imageHeight = 0
self.nImages = len(fn)
for f in fn:
ind = fn.index(f)
if ind == 0:
ruObj = ru.readUviewClass()
img = ruObj.getImage(f)
self.imgType = img.dtype.name
self.imageWidth = ruObj.imageWidth
self.imageHeight = ruObj.imageHeight
self.stack = np.zeros((self.nImages,self.imageWidth,self.imageHeight),dtype=np.ndarray)
self.stack[0] = img
self.limits = np.percentile(img,(2,98))
# print("Loading image nr=%04i" %ind, end='\r')
print(f"\rLoading image nr="+str("%04i" %ind),end="")
sys.stdout.flush()
else:
img = ruObj.getImage(f)
self.imageWidth = ruObj.imageWidth
self.imageHeight = ruObj.imageHeight
try:
self.stack[ind] = img
# print("Loading image nr=%04i" %ind, end='\r')
print(f"\rLoading image nr="+str("%04i" %ind),end="")
sys.stdout.flush()
except:
                        raise Exception("Error loading image nr {}".format(fn.index(f)))
print("\n")
self.current = 0
self.fn = fn
self.rawfn = []
for f in fn:
self.rawfn.append(os.path.basename(os.path.abspath(f))+'.dat')
self.dir = os.path.dirname(os.path.abspath(self.fn[0]))
except:
print("Loading of images failed")
return
def getImage(self,pos=-1) -> np.ndarray:
if pos < 0:
pos = self.current
try:
return self.stack[pos].astype(self.imgType)
except:
raise Exception('Index not valid or stack not yet defined')
def getLimits(self,pos=-1,clip=2) -> tuple:
if pos < 0:
pos = self.current
try:
self.limits = np.percentile(self.getImage(pos),(clip,100-clip))
return self.limits
except:
raise Exception('Index not valid or stack not yet defined')
def getDrawImage(self,pos=-1,clip=2) -> np.ndarray:
if pos < 0:
pos = self.current
if clip != 2:
limits = np.percentile(self.getImage(pos),(2,98))
else:
limits = self.limits
try:
img = exposure.rescale_intensity(self.stack[pos].astype(self.imgType), in_range=(limits[0], limits[1]))
return img
#.astype(self.imgType)
except:
raise Exception('Index not valid or stack not yet defined')
def __repr__(self):
try:
outStr = [str("nImages = %04i" %self.nImages)]
outStr.append(str("First image = %s" %self.rawfn[0]))
outStr.append(str("Last image = %s" %self.rawfn[-1]))
outStr.append(str("Directory = %s" %self.dir))
outStr.append(str("Image size = (%i,%i)" %(self.imageWidth, self.imageHeight)))
return '\n'.join(outStr)
except AttributeError:
return "Object not defined"
class elmitecAnalysisClass():
def __init__(self) -> None:
"""Initializes the elmitecAnalysisClass object"""
def __repr__(self):
try:
outStr = [str("nImages = %04i" %self.imgStack.nImages)]
outStr.append(str("First image = %s" %self.imgStack.fn[0]))
outStr.append(str("Last image = %s" %self.imgStack.fn[-1]))
outStr.append(str("Directory = %s" %self.imgStack.dir))
outStr.append(str("Image size = (%i,%i)" %(self.imageWidth, self.imageHeight)))
            return '\n'.join(outStr)
except AttributeError:
return "Object not defined"
def loadDir(self,dirName):
self.dir = dirName
import os
def getDatFilesInDir(mypath = r'K:\Data\TurningPointResolution'):
fileList = []
for file in os.listdir(mypath):
if file.endswith(".dat"):
fileList.append(os.path.join(mypath, file))
return fileList
#Run examples
fn = getDatFilesInDir()
stack = imgStackClass(fn)
#import pylab as pl
#from IPython import display
#from IPython import get_ipython
#get_ipython().run_line_magic('matplotlib', 'qt5')
#limits= stack.getLimits(pos=0,clip=2)
##imgObj = plt.imshow(stack.getImage(0), cmap=plt.cm.gray, vmin=limits[0], vmax=limits[1])
#fig,ax = plt.subplots(1,1)
#ax.imshow(stack.getImage(0), cmap=plt.cm.gray, vmin=limits[0], vmax=limits[1])
#fig.canvas.draw()
#ax.imshow(stack.getImage(1), cmap=plt.cm.gray, vmin=limits[0], vmax=limits[1])
#image = sitk.GetImageFromArray(stack.getImage(1), isVector=True)
import tkinter as tk
from tkinter import filedialog
import PIL.Image, PIL.ImageTk
import PIL.ImageOps
#import cv2
from skimage.transform import rescale
from skimage import exposure
class elmitecImageViewer():
def __init__(self,stack):
imgNr = 0
self.winSize = (1024,1024)
self.winPadding = (100,200)
self.stack = stack
self.root = tk.Tk()
self.mainFrame = tk.Frame(self.root, height=self.winSize[0]+self.winPadding[0], width=self.winSize[1]+self.winPadding[1], bg='white')
self.mainFrame.pack()
self.topFrame = tk.Frame(self.mainFrame, height=100, width=200, bg='white')
self.topFrame.pack(fill=tk.X)
self.listbox = tk.Listbox(self.mainFrame)
self.listbox.pack(side=tk.LEFT,fill=tk.Y)
self.listbox.bind('<<ListboxSelect>>', self.selectList)
self.imageCanvas = tk.Canvas(self.mainFrame, height=self.winSize[0], width=self.winSize[0], bg='orange')
self.imageCanvas.pack(side=tk.RIGHT,expand=True)
self.mainFrame.winfo_toplevel().title("Image number %04i" %imgNr)
for n, item in enumerate(self.stack.fn):
self.listbox.insert(tk.END, "{:04d} - {}".format(n,os.path.basename(item)))
openButton = tk.Button(self.topFrame, text="Open", command=self.openImageList)
openButton.grid(row=0, column=0, pady=20)
prevButton = tk.Button(self.topFrame, text="Previous")
prevButton.grid(row=0, column=2, pady=20)
nextButton = tk.Button(self.topFrame, text="Next")
nextButton.grid(row=0, column=1, pady=20)
img = self.stack.getImage(imgNr)
p2, p98 = np.percentile(img, (2, 98))
img_rescale_int = exposure.rescale_intensity(img, in_range=(p2, p98), out_range=(0,255))
intensityFixedImg = PIL.Image.fromarray(img_rescale_int)
self.photo = PIL.ImageTk.PhotoImage(intensityFixedImg, master=self.root)
self.imageOnCanvas = self.imageCanvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
self.listbox.selection_clear(0,last=self.stack.nImages-1)
self.listbox.selection_set(0)
self.windowRunning = True
self.root.mainloop()
self.windowRunning = False
def showImage(self, imgNr=-1):
if not self.windowRunning:
return
current = self.stack.current
if imgNr >= 0:
current = imgNr
img = self.stack.getImage(current)
if (self.stack.imageWidth > self.winSize[0]) or (self.stack.imageHeight > self.winSize[1]):
rescaleFactorX = self.winSize[0]/self.stack.imageWidth
rescaleFactorY = self.winSize[1]/self.stack.imageHeight
rescaleFactor = min(rescaleFactorX,rescaleFactorY)
img = rescale(img,rescaleFactor,preserve_range=True)
p2, p98 = np.percentile(img, (2, 98))
img_rescale_int = exposure.rescale_intensity(img, in_range=(p2, p98), out_range=(0,255))
intensityFixedImg = PIL.Image.fromarray(img_rescale_int)
self.photo = PIL.ImageTk.PhotoImage(intensityFixedImg, master=self.root)
self.imageCanvas.itemconfig(self.imageOnCanvas, image=self.photo)
def selectList(self, evt):
w = evt.widget
index = int(w.curselection()[0])
value = w.get(index)
self.stack.current = index
if self.stack.current < 0:
self.stack.current = 0
if self.stack.current > self.stack.nImages-1:
self.stack.current = self.stack.nImages-1
self.showImage()
self.updateTitle(value)
def updateTitle(self, value=''):
self.mainFrame.winfo_toplevel().title("Image number %04i (%s)" % (self.stack.current,os.path.basename(self.stack.fn[self.stack.current])))
def openImageList(self):
filenames = list(tk.filedialog.askopenfilenames(title="Select files", filetypes=(("uView","*.dat"),("Tiff files","*.tif"))))
print(type(filenames))
self.stack = imgStackClass(filenames)
self.listbox.delete(0,tk.END)
for n, item in enumerate(self.stack.fn):
self.listbox.insert(tk.END, "{:04d} - {}".format(n,os.path.basename(item)))
self.listbox.selection_set(0)
self.showImage(0)
imgViewer = elmitecImageViewer(stack)
imgViewer.showImage()
|
#!/usr/bin/env python
# Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Do not use this module
import tflite.Model
import tflite.SubGraph
from ir import graph_stats
from .subgraph_parser import SubgraphParser
class TFLiteParser(object):
def __init__(self, model_file):
self.model_file = model_file
def Parse(self):
# Generate Model: top structure of tflite model file
buf = self.model_file.read()
buf = bytearray(buf)
tf_model = tflite.Model.Model.GetRootAsModel(buf, 0)
stats = graph_stats.GraphStats()
# Model file can have many models
subg_list = list()
for subgraph_index in range(tf_model.SubgraphsLength()):
tf_subgraph = tf_model.Subgraphs(subgraph_index)
model_name = "#{0} {1}".format(subgraph_index, tf_subgraph.Name())
# 0th subgraph is main subgraph
if (subgraph_index == 0):
model_name += " (MAIN)"
# Parse Subgraphs
subg_parser = SubgraphParser(tf_model, tf_subgraph)
subg_parser.Parse()
stats += graph_stats.CalcGraphStats(subg_parser)
subg = (model_name, subg_parser)
subg_list.append(subg)
# Validate
assert subg_list is not None
assert len(subg_list) > 0
assert stats is not None
return (subg_list, stats)
|
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
        # A general way to approach this is Kadane's algorithm:
        # from index 1 to len(nums) - 1, fold the previous value into the current one
        # whenever the previous running sum is positive, then take the maximum.
for i in range(1, len(nums)):
if nums[i - 1] > 0:
nums[i] += nums[i - 1]
return max(nums)
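# Example, for illustration: Solution().maxSubArray([-2,1,-3,4,-1,2,1,-5,4]) == 6,
# the best run being [4,-1,2,1]. Note that nums is modified in place by this approach.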
|
# coding=utf-8
# Copyright 2021 RLDSCreator Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Robosuite environment."""
from absl.testing import absltest
import numpy as np
import numpy.testing as npt
from rlds_creator import environment
from rlds_creator import study_pb2
from rlds_creator.envs import robosuite_env
class RobosuiteTest(absltest.TestCase):
def test_create(self):
env = robosuite_env.RobosuiteEnvironment(
study_pb2.EnvironmentSpec(
robosuite=study_pb2.EnvironmentSpec.Robosuite(
id='Lift', robots=['Panda'], config='single-arm-opposed')))
self.assertDictEqual(
env.metadata(), {
'camera_heights': 256,
'camera_names': ['agentview'],
'camera_widths': 256,
'control_freq': 20,
'controller_configs': {
'type': 'OSC_POSE',
'input_max': 1,
'input_min': -1,
'output_max': [0.05, 0.05, 0.05, 0.5, 0.5, 0.5],
'output_min': [-0.05, -0.05, -0.05, -0.5, -0.5, -0.5],
'kp': 150,
'damping_ratio': 1,
'impedance_mode': 'fixed',
'kp_limits': [0, 300],
'damping_ratio_limits': [0, 10],
'position_limits': None,
'orientation_limits': None,
'uncouple_pos_ori': True,
'control_delta': True,
'interpolation': None,
'ramp_ratio': 0.2
},
'env_name': 'Lift',
'horizon': 1000,
'pos_sensitivity': 1.5,
'reward_shaping': True,
'robots': ['Panda'],
'rot_sensitivity': 1.5,
'use_camera_obs': False,
})
dm_env = env.env()
# Robosuite observation is a dictionary.
self.assertSetEqual(
set(dm_env.observation_spec().keys()), {
'robot0_joint_pos_cos', 'robot0_joint_pos_sin', 'robot0_joint_vel',
'robot0_eef_pos', 'robot0_eef_quat', 'robot0_gripper_qpos',
'robot0_gripper_qvel', 'robot0_proprio-state', 'cube_pos',
'cube_quat', 'gripper_to_cube_pos', 'object-state'
})
self.assertEqual(dm_env.action_spec().shape, (7,))
# Mapping from the keys to the action vectors.
actions = {
'w': [-15., 0., 0., 0., 0., 0., -1.],
's': [15., 0., 0., 0., 0., 0., -1.],
'a': [0., -15., 0., 0., 0., 0., -1.],
'd': [0., 15., 0., 0., 0., 0., -1.],
'f': [0., 0., -15., 0., 0., 0., -1.],
'r': [0., 0., 15., 0., 0., 0., -1.],
'z': [0., 0., 0., 5.625, 0., 0., -1.],
'x': [0., 0., 0., -5.625, 0., 0., -1.],
't': [0., 0., 0., 0., 5.625, 0., -1.],
'g': [0., 0., 0., 0., -5.625, 0., -1.],
'c': [0., 0., 0., 0., 0., -5.625, -1.],
'Button4': [0., 0., 0., 0., 0., -5.625, -1.],
'v': [0., 0., 0., 0., 0., 5.625, -1.],
'Button5': [0., 0., 0., 0., 0., 5.625, -1.],
}
for key, action in actions.items():
npt.assert_allclose(env.keys_to_action({key: 1}), action, atol=0.01)
analog_actions = {
('Axis0', -1.0): [0., -1.5, 0., 0., 0., 0., -1.],
('Axis0', -0.5): [0., -0.75, 0., 0., 0., 0., -1.],
('Axis0', 0.5): [0., 0.75, 0., 0., 0., 0., -1.],
('Axis0', 1.0): [0., 1.5, 0., 0., 0., 0., -1.],
('Axis1', -1.0): [-1.5, 0., 0., 0., 0., 0., -1.],
('Axis1', -0.5): [-0.75, 0., 0., 0., 0., 0., -1.],
('Axis1', 0.5): [0.75, 0., 0., 0., 0., 0., -1.],
('Axis1', 1.0): [1.5, 0., 0., 0., 0., 0., -1.],
('Axis2', -1.0): [0., 0., 0., 0.5625, 0., 0., -1.],
('Axis2', -0.5): [0., 0., 0., 0.28125, 0., 0., -1.],
('Axis2', 0.5): [0., 0., 0., -0.28125, 0., 0., -1.],
('Axis2', 1.0): [0., 0., 0., -0.5625, 0., 0., -1.],
('Axis3', -1.0): [0., 0., 0., 0., -0.5625, 0., -1.],
('Axis3', -0.5): [0., 0., 0., 0., -0.28125, 0., -1.],
('Axis3', 0.5): [0., 0., 0., 0., 0.28125, 0., -1.],
('Axis3', 1.0): [0., 0., 0., 0., 0.5625, 0., -1.],
}
for (key, value), action in analog_actions.items():
npt.assert_allclose(env.keys_to_action({key: value}), action, atol=0.01)
spacemouse_actions = {
# Direction.
('Axis0', -1.0): [0., -1.5, 0., 0., 0., 0., -1.],
('Axis0', 1.0): [0., 1.5, 0., 0., 0., 0., -1.],
('Axis1', -1.0): [-1.5, 0., 0., 0., 0., 0., -1.],
('Axis1', 1.0): [1.5, 0., 0., 0., 0., 0., -1.],
('Axis2', -1.0): [0, 0., 1.5, 0., 0., 0., -1.],
('Axis2', 1.0): [0, 0., -1.5, 0., 0., 0., -1.],
# Rotation.
('Axis3', -1.0): [0., 0., 0., 0., -0.5625, 0., -1.],
('Axis3', 1.0): [0., 0., 0., 0., 0.5625, 0., -1.],
('Axis4', -1.0): [0., 0., 0., -0.5625, 0., 0., -1.],
('Axis4', 1.0): [0., 0., 0., 0.5625, 0., 0., -1.],
('Axis5', -1.0): [0., 0., 0., 0., 0., 0.5625, -1.],
('Axis5', 1.0): [0., 0., 0., 0., 0., -0.5625, -1.],
}
for (key, value), action in spacemouse_actions.items():
npt.assert_allclose(
env.user_input_to_action(
environment.UserInput(
keys={key: value},
controller=environment.Controller.SPACEMOUSE)),
action,
atol=0.01)
dm_env.reset()
image = env.render()
self.assertEqual(image.shape, (256, 256, 3))
def test_use_camera_obs(self):
env = robosuite_env.RobosuiteEnvironment(
study_pb2.EnvironmentSpec(
robosuite=study_pb2.EnvironmentSpec.Robosuite(
id='Lift',
robots=['Panda'],
config='single-arm-opposed',
use_camera_obs=True,
cameras=['agentview', 'frontview'])))
dm_env = env.env()
self.assertIn('agentview_image', dm_env.observation_spec().keys())
self.assertIn('frontview_image', dm_env.observation_spec().keys())
timestep = dm_env.reset()
camera_obs = timestep.observation['agentview_image']
self.assertEqual((256, 256, 3), camera_obs.shape)
image = env.render()
# Observation and rendered image should be the same.
npt.assert_equal(camera_obs, image)
# Change the camera.
camera_obs = timestep.observation['frontview_image']
self.assertEqual((256, 256, 3), camera_obs.shape)
camera = env.set_camera(1)
self.assertEqual(environment.Camera(1, 'frontview'), camera)
image = env.render()
npt.assert_equal(camera_obs, image)
# Reset the image from observation to check that the inversion happens.
env._env._image = None
image = env.render()
# Rendering may not be deterministic, e.g. due to texture mapping. We allow
# small divergence less than 1%.
self.assertLess(
np.count_nonzero(image != camera_obs) / np.array(image).size, 0.01)
# Third camera is not present.
self.assertIsNone(env.set_camera(2))
def test_max_episode_steps(self):
max_episode_steps = 10
env = robosuite_env.RobosuiteEnvironment(
study_pb2.EnvironmentSpec(
max_episode_steps=max_episode_steps,
robosuite=study_pb2.EnvironmentSpec.Robosuite(
id='Lift', robots=['Panda'], config='single-arm-opposed')))
dm_env = env.env()
timestep = dm_env.reset()
steps = 0
while not timestep.last():
timestep = dm_env.step(env.keys_to_action({'W': 1}))
steps += 1
self.assertEqual(max_episode_steps, steps)
if __name__ == '__main__':
absltest.main()
|
import splinter
import time
import random
import requests
import re
from bs4 import BeautifulSoup
from splinter import Browser
from selenium import webdriver
from pymongo import MongoClient
class DataNode(object):
"""docstring for DataNode"""
def __init__(self, arg):
super(DataNode, self).__init__()
self.arg = arg
|
"""
This script plots the printout file that contains the convergence history
of the Jacobi-Davidson eigenvalue solver
"""
import enum
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
import matplotlib.pyplot as plt
import os
import argparse
import re
class JDHistoryPlotter:
def __init__(self, filename):
# Name string for stdout printout file
self.filename = filename
# List of info for each optimization iteration
self.history = []
# Regex patterns
self.pattern_header = re.compile(
r"Iter JD Residual Ritz value toler")
# This matches the following formatted string:
# "4d %15.5e %15.5e %10.2e"
self.pattern_content = re.compile(
r"[\s\d]{4}\s{4}[-\s]\d\.[\d]{5}e[+-]\d\d\s{4}"\
r"[-\s]\d\.[\d]{5}e[+-]\d\d\s{2}[\s-]\d\.[\d]{2}e[+-]\d\d")
return
def read_file(self):
"""
Get JD residual, Ritz value and tolerance
"""
with open(self.filename, 'r') as fp:
entry = None
for line in fp:
is_header = self.pattern_header.search(line)
is_content = self.pattern_content.search(line)
# If header line, append previous JD history to self.history
# and reset entry
if is_header:
if entry is not None:
self.history.append(entry)
entry = []
# If content line, append it to entry
if is_content:
itr, JDr, ritz, toler = line.split()
entry.append({
'itr': int(itr),
'JDr': float(JDr),
'ritz': float(ritz),
'toler': float(toler)
})
# Append last entry to self.history
self.history.append(entry)
return
def plot(self, plot_type='all', savefig=False,
savename='JD_history.png', dpi=800):
"""
Plot the JD residual, Ritz Value and tolerance
Args:
plot_type: 'all' or 'last', plotting entire history or
only the history for the last optimization step
"""
# Set up plotting environment
# try:
# mpl_style_path = os.path.dirname(os.path.realpath(__file__)) \
# + '/paper.mplstyle'
# plt.style.use(mpl_style_path)
# except:
        #     print("[Warning] cannot load matplotlib style: paper.mplstyle")
fig, ax = plt.subplots(1,1,figsize=(6.4, 4.8))
ax2 = ax.twinx()
if plot_type == 'last':
entry = self.history[-1]
itr = [d['itr'] for d in entry]
JDr = [d['JDr'] for d in entry]
ritz = [d['ritz'] for d in entry]
toler = [d['toler'] for d in entry]
elif plot_type == 'all':
offset = 0
vl_locs = []
itr = []
JDr = []
ritz = []
toler = []
for entry in self.history:
_itr = [d['itr'] + offset for d in entry]
_JDr = [d['JDr'] for d in entry]
_ritz = [d['ritz'] for d in entry]
_toler = [d['toler'] for d in entry]
itr.extend(_itr)
JDr.extend(_JDr)
ritz.extend(_ritz)
toler.extend(_toler)
offset = _itr[-1]
vl_locs.append(offset)
itr_converged = [itr[i] for i in range(len(itr)) if JDr[i] < toler[i]]
JDr_converged = [JDr[i] for i in range(len(itr)) if JDr[i] < toler[i]]
ritz_converged = [ritz[i] for i in range(len(itr)) if JDr[i] < toler[i]]
# Plot vertical lines
if plot_type == 'all':
iL = iR = 0
xtic = []
for vl_loc in vl_locs:
ax.axvline(x=vl_loc, linestyle='-', color='gray', lw=0.5)
iL = iR
iR = vl_loc
xtic.append(int(iL+iR)/2)
ax.set_xticks(xtic)
labels = [i+1 for i,_ in enumerate(xtic)]
ax.set_xticklabels(labels)
ax.tick_params(axis='x', length=0)
l1 = ax.semilogy(itr, JDr, '.', color='cornflowerblue',
label='JD residual')
l2 = ax.semilogy(itr_converged, JDr_converged, '.', color='red',
label='JD residual - converged')
l3 = ax.semilogy(itr, toler, lw=1.0, color='orange', label='toler')
l4 = ax2.plot(itr, ritz, '+', color='gray',
label='Ritz value')
l5 = ax2.plot(itr_converged, ritz_converged, '+', color='green',
label='Ritz value - converged')
ax2.axhline(y=0.0, linestyle='-', color='gray', lw=1.0)
ax.set_xlabel('JD iteration')
ax.set_ylabel('Residual')
ax2.set_ylabel('Ritz value')
ax2.set_ylim(np.min(ritz_converged),np.max(ritz_converged))
lns = l1 + l2 + l3 + l4 + l5
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc='upper right')
if not savefig:
plt.show()
return
else:
fig.savefig(savename, dpi=dpi)
return
def plot_JD_history(filename, plot_type, savefig=True,
savename="JD_history.png", dpi=800):
plotter = JDHistoryPlotter(filename)
plotter.read_file()
plotter.plot(plot_type=plot_type, savefig=savefig,
savename=savename, dpi=dpi)
return
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('filename', type=str)
p.add_argument('--plot-type', type=str, default='all',
choices=['all', 'last'])
p.add_argument('--savefig', action='store_true')
p.add_argument('--savename', type=str, default='JD_history.png')
p.add_argument('--dpi', type=int, default=800)
args = p.parse_args()
plot_JD_history(args.filename, args.plot_type, savefig=args.savefig,
savename=args.savename, dpi=args.dpi)
|
def success(request_type, properties={}):
return {
"success": True,
"type": request_type,
**properties
}
def fail(request_type, reason, properties={}):
return {
"success": False,
"reason": reason,
"type": request_type,
**properties
}
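# Minimal usage sketch (request_type and extra fields are hypothetical):
#
#   success("login", {"token": "abc"})
#   # -> {"success": True, "type": "login", "token": "abc"}
#   fail("login", "invalid credentials")
#   # -> {"success": False, "reason": "invalid credentials", "type": "login"}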
|
from django.db import models
from django.contrib.auth.models import User
class Image(models.Model):
"""A model of a Image."""
caption = models.TextField()
media = models.ImageField(upload_to='uploads/%Y/%m/%d/')
user = models.ForeignKey(User, on_delete=models.CASCADE)
# if published_on is null, this is a draft
published_on = models.DateTimeField(blank=True, null=True)
def __str__(self):
return self.caption
|
from lxml import etree
elemento = etree.Element("Teste")
elemento.text = "Este é o texto da tag Teste"
print(elemento.tag)
print()
root = etree.Element("clientes")
sub = etree.SubElement(root,"cliente")
print(root.tag)
print(sub.tag)
print()
root = etree.Element("clientes")
sub = etree.Element("cliente")
root.append(sub)
print(root.tag)
print(sub.tag)
print()
clientes = etree.Element("clientes", atributo="valor")
clientes.set("codigo","1248")
cliente1 = etree.Element("cliente")
nome1 = etree.Element("nome")
nome1.text = "Gabriel"
idade1 = etree.Element("idade")
idade1.text = "32"
sexo1 = etree.Element("sexo")
sexo1.text = "Masculino"
cpf1 = etree.Element("cpf")
cpf1.text = "012.345.678-90"
cliente1.append(nome1)
cliente1.append(idade1)
cliente1.append(sexo1)
cliente1.append(cpf1)
clientes.append(cliente1)
cliente2 = etree.Element("cliente")
nome2 = etree.Element("nome")
nome2.text = "Juliana"
idade2 = etree.Element("idade")
idade2.text = "24"
sexo2 = etree.Element("sexo")
sexo2.text = "Feminino"
cpf2 = etree.Element("cpf")
cpf2.text = "123.456.789-10"
cliente2.append(nome2)
cliente2.append(idade2)
cliente2.append(sexo2)
cliente2.append(cpf2)
clientes.append(cliente2)
clientes.insert(0, etree.Element("cliente0"))
clientes.insert(0, etree.Element("cliente1"))
print(etree.tostring(clientes,pretty_print=True).decode('utf-8'))
print(len(clientes))
print(clientes.get("codigo"))
print(clientes.keys())
print(clientes.attrib)
print(clientes.attrib["atributo"])
print()
for atributo, valor in sorted(clientes.items()) :
print(f"{atributo} = {valor}")
print()
clientes[0] = clientes[1]
for cliente_var in clientes[0:2] :
print(cliente_var.tag)
print(clientes is clientes[1].getparent())
|
from flask import Flask
from database import register_db
from flask_bootstrap import Bootstrap
from flask_debug import Debug
from nav import nav
from bundle import apply_assets
app = Flask(__name__)
app.config.from_object('config.DevConfig')
register_db(app)
nav.init_app(app)
Bootstrap(app)
apply_assets(app)
Debug(app)
from api import api_app
from frontend import frontend_app
app.register_blueprint(api_app, url_prefix='/api')
app.register_blueprint(frontend_app, url_prefix='/app')
@app.route('/')
def hello_world():
return 'Hello, world!'
if __name__ == '__main__':
app.run()
|
"""Preset views."""
|
from week_7.data.io import load_tweets
from utils.utils import get_data_path
import os.path
import pickle
tweets = load_tweets()
# Merge sentence lists.
for tweet in tweets:
text_concat = list()
if tweet.text_cleaned is not None:
for sentence in tweet.text_cleaned:
text_concat.extend(sentence)
tweet.text_cleaned = text_concat
russia_tweets = [t for t in tweets if t.text_cleaned is not None and 'russia' in t.text_cleaned and len(t.text_cleaned) > 10]
china_tweets = [t for t in tweets if t.text_cleaned is not None and 'china' in t.text_cleaned and len(t.text_cleaned) > 10]
mexico_tweets = [t for t in tweets if t.text_cleaned is not None and 'mexico' in t.text_cleaned and len(t.text_cleaned) > 10]
country_tweets = russia_tweets + china_tweets + mexico_tweets
file_path = os.path.join(os.path.dirname(__file__), os.path.pardir, 'notebooks', 'week_8')
file_name = 'country_tweets.pkl'
with open(os.path.join(file_path, file_name), 'wb') as f:
pickle.dump(country_tweets, f)
|
import numpy as np
import matplotlib
matplotlib.use("QT5Agg")
import qcodes as qc
from qcodes.instrument.parameter import ArrayParameter, MultiParameter
from qcodes.tests.instrument_mocks import DummyInstrument
from qcodes.utils.wrappers import do1d, do2d, do1dDiagonal, init
from qcodes.plots.qcmatplotlib_viewer_widget import *
from qcodes.data.data_array import DataArray
# importing QCoDeS-QtUI module
# for this example file the package path is added temporarily to the python path
# for use in Spyder add qcqtui to your search path with the package manager
import sys
import os
sys.path.append(os.path.join('../..',''))
from qcqtui.widgets.xsection import CrossSectionWidget
from qcqtui.app import ApplicationWindow, getImageResourcePath
# The DAC voltage source
dac = DummyInstrument(name="dac", gates=['ch1', 'ch2'])
# The DMM reader
dmm = DummyInstrument(name="dmm", gates=['voltage', 'current', 'voltage2'])
import random
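# Override the dummy parameters' getters so sweeps return (random) data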
dmm.voltage.get = lambda: random.randint(0, 100)
dmm.voltage2.get = lambda: random.randint(0, 100)
dmm.current.get = lambda: 1e-3*np.random.randn()
dmm.current.unit = 'A'
station = qc.Station(dac, dmm)
init(mainfolder='PlotTesting',
sample_name='plottestsample',
station=station,
annotate_image=False,
display_pdf=False)
plot, data = do2d(dac.ch1, 0, 10e-7, 50, 0.00,
dac.ch2, 0, 10, 55, 0.00, dmm.voltage, dmm.voltage2, do_plots=False)
dac.close()
dmm.close()
# from data_set import new_data
# new_data(name="testdata")
# generate test data
y, x = np.meshgrid(np.linspace(-3, 3, 55), np.linspace(-3, 3, 50))
z = (1 - x / 2. + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)
c = 1
a = 0.2
b = 0.3
z2 = z + c + a*x + b*y
# set the test data to the sample dataset
data.dmm_voltage.ndarray = z
data.dmm_voltage2.ndarray = z2
# create App
from PyQt5 import QtWidgets
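# Reuse an existing QApplication instance if one is already running (e.g. inside an interactive session)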
if not QtWidgets.QApplication.instance():
qApp = QtWidgets.QApplication(sys.argv)
else:
qApp = QtWidgets.QApplication.instance()
qApp.setStyle('fusion')
from PyQt5.QtGui import QIcon
qApp.setWindowIcon(QIcon('/home/domnik/qdev/qcodes/qcodes-qtui/data/qcodes.png'))
aw = ApplicationWindow(data, rotateCrossSection=True)
aw.show()
# sys.exit(qApp.exec_())
# qApp.exec_()
|
from keras import backend as K
from keras.models import load_model
from keras.optimizers import Adam
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from data_generator.object_detection_2d_data_generator import DataGenerator
from eval_utils.average_precision_evaluator import Evaluator
import os
# Set a few configuration parameters.
img_height = 300
img_width = 300
n_classes = 1
model_mode = 'inference'
classes = ['Eixo']
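# Single foreground class ('Eixo'); the background class is handled implicitly by the SSD model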
# 1: Build the Keras model
K.clear_session() # Clear previous models from memory.
model = ssd_300(image_size=(img_height, img_width, 3),
n_classes=n_classes,
mode=model_mode,
l2_regularization=0.0005,
scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]
aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5]],
two_boxes_for_ar1=True,
steps=[8, 16, 32, 64, 100, 300],
offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
normalize_coords=True,
subtract_mean=[123, 117, 104],
swap_channels=[2, 1, 0],
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400)
# 2: Load the trained weights into the model.
# TODO: Set the path of the trained weights.
weights_path = 'ssd300_pascal_07+12_epoch-29_loss-1.7706_val_loss-2.0828.h5'
model.load_weights(weights_path, by_name=True)
# 3: Compile the model so that Keras won't complain the next time you load it.
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
dataset = DataGenerator()
images_dir = r'Data\Test_Images\Vott-csv-export'
# TODO: Set the paths to the dataset here.
labels_filename = 'Test_csv.csv'
dataset.parse_csv(images_dir=images_dir,
labels_filename=labels_filename,
input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.
include_classes='all')
evaluator = Evaluator(model=model,
n_classes=n_classes,
data_generator=dataset,
model_mode=model_mode)
results = evaluator(img_height=img_height,
img_width=img_width,
batch_size=8,
data_generator_mode='resize',
round_confidences=False,
matching_iou_threshold=0.5,
border_pixels='include',
sorting_algorithm='quicksort',
average_precision_mode='sample',
num_recall_points=11,
ignore_neutral_boxes=True,
return_precisions=True,
return_recalls=True,
return_average_precisions=True,
verbose=True)
mean_average_precision, average_precisions, precisions, recalls = results
print("AP")
print(average_precisions)
print("mAP")
print(mean_average_precision)
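# Plot the precision-recall curve for class 1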
plt.plot(recalls[1], precisions[1], color='blue', linewidth=1.0)
plt.xlabel('recall', fontsize=14)
plt.ylabel('precision', fontsize=14)
plt.grid(True)
plt.xticks(np.linspace(0,1,11))
plt.yticks(np.linspace(0,1,11))
plt.title("Eixo, AP: {:.3f}".format(average_precisions[1]), fontsize=16)
plt.show()
|
# from napari_clio_test import napari_experimental_provide_dock_widget
# add your tests here...
|
#!/usr/bin/env python3
#
# Change json output to be human-readable
import argparse
import csv
import json
import re
import os
import sys
csv.field_size_limit(sys.maxsize)
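# Record fields are labelled 'ra', 'rb', ...; these helpers convert between the letter labels and field indices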
def int_of_label(s):
total = 0
for c in s:
total += ord(c) - ord('a') + 1
return total
def label_of_int(i):
s = ""
maxint = ord('z') - ord('a') + 1
while i > maxint:
s += 'z'
i -= maxint
s += chr(i + ord('a') - 1)
return s
def convert_dict(d):
# for addresses, options, records, etc, just dereference
if "type" in d and d["type"] in ["address", "option_or_ind", "record", "tuple", "Collection", "Map", "Seq", "Set", "VMap", "MapE", "SortedMapE", "SortedMap", "SortedSet", "Vector", "MultiIndexVMap"]:
return convert_any(d["value"])
# change record mapping back to tuple
elif "ra" in d:
res = []
max = 0
for key in d:
k = int_of_label(key[1:])
if k > max:
max = k
for i in range(max): # +1 for record fields
res.append(convert_any(d["r" + label_of_int(i+1)]))
return res
elif "key" in d and "value" in d:
res = []
if d["key"] != "()":
res.append(convert_any(d["key"]))
res.append(convert_any(d["value"]))
return res
elif "elem" in d:
res = []
res.append(convert_any(d["elem"]))
return res
elif "addr" in d:
res = []
res.append(convert_any(d["addr"]))
return res
# standard record
else:
res = {}
for key in d:
res[key] = convert_any(d[key])
return res
def convert_list(xs):
res = []
for x in xs:
res.append(convert_any(x))
return res
def convert_any(x):
if type(x) is dict:
return convert_dict(x)
elif type(x) is list:
return convert_list(x)
else:
return x
# Special conversion for first level of object
# So we have all array values
def convert_fst_level(x):
if type(x) is dict:
return convert_dict(x)
elif type(x) is list:
return convert_list(x)
elif x == "()":
return []
else:
return [x]
def convert_file(file_nm, writer, treat_as_ints):
with open(file_nm, 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter='|', quotechar="'")
i = 0
for row in reader:
res = []
j = 0
for x in row:
#print(x)
try:
obj = json.loads(x)
# print("obj: " + str(obj))
# keep certain fields as raw integers rather than wrapping in array
if j in treat_as_ints:
new_obj = convert_any(obj)
else:
new_obj = convert_fst_level(obj)
#print(new_obj)
res += [json.dumps(new_obj)]
                except Exception:
# if we can't convert, just add it as a string
res += [x]
j += 1
writer.writerow(res)
i += 1
def process_files(files, prefix_path):
with open(os.path.join(prefix_path, "messages.dsv"), "w", newline='') as msg_file:
with open(os.path.join(prefix_path, "globals.dsv"), "w", newline='') as glb_file:
msg_writer = csv.writer(msg_file, delimiter='|', quotechar="'")
glb_writer = csv.writer(glb_file, delimiter='|', quotechar="'")
for f in files:
# is it a messages or a globals?
if re.search("Globals", f):
convert_file(f, glb_writer, [0])
else:
convert_file(f, msg_writer, [0, 5])
# Translate all files in their respective directories
def inplace_process(prefix):
for root, dirs, files in os.walk(prefix):
for f in files:
name, ext = os.path.splitext(f)
new_name = name + "_clean" + ext
tests = [("Globals$", [0]), ("Messages$", [0, 5])]
for regex, indices in tests:
if re.search(regex, name):
with open(os.path.join(root, new_name), "w", newline='') as out_file:
writer = csv.writer(out_file, delimiter='|', quotechar="'")
convert_file(os.path.join(root, f), writer, indices)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("json_files", type=str, nargs='*', help="Specify path")
parser.add_argument("--prefix-path", type=str, help="Specify prefix to output", default="")
parser.add_argument("--inplace", type=str, help="Modify files in-place", default="")
args = parser.parse_args()
if args.inplace != "":
inplace_process(args.inplace)
else:
process_files(args.json_files, args.prefix_path)
if __name__ == '__main__':
    main()
|
n = int(input().strip())
c = [int(x) for x in input().strip().split(' ')]
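# For each distinct sock colour, count // 2 complete pairs can be formed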
count_ = []
socks = list(set(c))
for j in socks:
count_.append(c.count(j))
ans = 0
for k in count_:
ans += k//2
print(ans)
|
# -*- coding: utf-8 -*-
""" vsphere datastore expoter for prometheus init file """
from __future__ import absolute_import, unicode_literals, print_function
import os
import traceback
import logging
import logging.config
from flask import Flask
from vsphere_ds_exporter.views import metrics
from vsphere_ds_exporter.test import test
app = Flask(__name__)
app.register_blueprint(metrics)
app.register_blueprint(test, url_prefix='/test')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOG_DIR = os.path.join(BASE_DIR, 'logs')
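# Log to the console at DEBUG level and to a nightly-rotating file (30 backups) under logs/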
logging_setting = {
# logging configuration
"version": 1,
"formatters": {
"simple": {
"format": "[%(asctime)s] %(levelname)s: %(name)s: %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"exporter": {
"class": "logging.handlers.TimedRotatingFileHandler",
"level": "INFO",
"formatter": "simple",
"filename": os.path.join(LOG_DIR, 'vsphere_ds_exporter.log'),
"when": "midnight",
"backupCount": 30
},
},
"loggers": {
"console": {
"level": "DEBUG",
"handlers": [
"console"
],
"propagate": False
},
"ds_exporter": {
"level": "DEBUG",
"handlers": [
"exporter",
],
"propagate": False
}
},
"root": {
"level": "DEBUG",
"handlers": [
"console",
"exporter"
],
}
}
logging.config.dictConfig(logging_setting)
|