#!/usr/bin/env python
#
# This script generates the compile expansions file used by MCI as part of the push/
# release process.
#
# You can invoke it either with git describe:
# $ git describe | python generate_compile_expansions.py > compile_expansions.yml
# or with the version.json file
# $ python generate_compile_expansions.py version.json > compile_expansions.yml
#
import fileinput
import json
import re
import os
import sys
# This function matches a version string and captures the "extra" part
# If the version is a release like "2.3.4" or "2.3.4-rc0", this will return
# ( None )
# If the version is a pre-release like "2.3.4-325-githash" or "2.3.4-pre-", this will return
# ( "-pre-" ) or ( "-325-githash" )
# If the version begins with the letter 'r', it will also match, e.g.
# r2.3.4, r2.3.4-rc0, r2.3.4-git234, r2.3.4-rc0-234-githash
# If the version is invalid (i.e. doesn't start with "2.3.4" or "2.3.4-rc0"), this will return
# False
def match_verstr(verstr):
res = re.match(r'^r?(?:\d+\.\d+\.\d+(?:-rc\d+)?)(-.*)?', verstr)
if not res:
return False
return res.groups()
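# Illustrative results for match_verstr, following directly from the regex above
# (not part of the original script):
#   match_verstr("2.3.4")               -> (None,)
#   match_verstr("r2.3.4-rc0")          -> (None,)
#   match_verstr("2.3.4-325-githash")   -> ("-325-githash",)
#   match_verstr("not-a-version")       -> False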
input_obj = fileinput.input()
version_line = input_obj.readline()
version_parts = match_verstr(version_line)
if not version_parts:
if input_obj.filename().endswith(".json"):
version_data_buf = "".join([ version_line ] + [ l for l in input_obj ])
try:
version_data = json.loads(version_data_buf)
except Exception as e:
print "Unable to load json file: %s" % e
exit(1)
version_parts = match_verstr(version_data['version'])
version_line = version_data['version']
else:
version_line = version_line.lstrip("r").rstrip()
# If the version still doesn't match, give up and print an error message!
if not version_parts:
print "Unable to parse version data!"
exit(1)
if version_parts[0]:
print "suffix: latest"
print "src_suffix: latest"
else:
print "suffix: {0}".format(version_line)
print "src_suffix: r{0}".format(version_line)
print "version: {0}".format(version_line)
# configuration for scons cache.
#
if sys.platform.startswith("win"):
system_id_path = r"c:\mongodb-build-system-id"
default_cache_path_base = r"z:\data\scons-cache"
else:
system_id_path = "/etc/mongodb-build-system-id"
default_cache_path_base = "/data/scons-cache"
if os.path.isfile(system_id_path):
with open(system_id_path, "r") as f:
default_cache_path = os.path.join(default_cache_path_base, f.readline().strip())
print "scons_cache_path: {0}".format(default_cache_path)
if os.getenv("USE_SCONS_CACHE") not in (None, False, "false", ""):
print "scons_cache_args: --cache=nolinked --cache-dir='{0}'".format(default_cache_path)
|
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def process_test_df():
return pd.DataFrame(
{"text": ["a_b_c", "c_d_e", np.nan, "f_g_h"], "numbers": range(1, 5)}
)
@pytest.fixture
def test_returns_dataframe():
return pd.DataFrame(
{"text": ["a1a2", "b1", "c1"], "numbers": [1, 2, 3]},
index=["A", "B", "C"],
)
def test_column_name_type(process_test_df):
"""Raise TypeError if `column_name` type is not `str`."""
with pytest.raises(TypeError):
process_test_df.process_text(["text"])
def test_new_column_names_type(process_test_df):
"""Raise TypeError if `new_column_names` type is not string or list."""
with pytest.raises(TypeError):
process_test_df.process_text(
column_name="text", new_column_names={"nutext": "rar"}
)
def test_column_name_presence(process_test_df):
"""Raise ValueError if `column_name` is not in dataframe."""
with pytest.raises(ValueError):
process_test_df.process_text(column_name="Test")
def test_new_column_names_presence_str(test_returns_dataframe):
"""
Raise ValueError if `new_column_names` is a str
and is in the dataframe.
"""
with pytest.raises(ValueError):
test_returns_dataframe.process_text(
column_name="text",
new_column_names="text",
string_function="extractall",
pat=r"([ab])?(\d)",
)
def test_new_column_names_presence_list(test_returns_dataframe):
"""
Raise ValueError if `new_column_names` is a list and at least
one of the new names is in the dataframe.
"""
with pytest.raises(ValueError):
test_returns_dataframe.process_text(
column_name="text",
new_column_names=["numbers", "newtext"],
string_function="extractall",
pat=r"([ab])?(\d)",
)
def test_merge_frame_type(test_returns_dataframe):
"""
Raise TypeError if `merge_frame` type is not bool."""
with pytest.raises(TypeError):
test_returns_dataframe.process_text(
column_name="text",
new_column_names=["number", "newtext"],
string_function="extractall",
pat=r"([ab])?(\d)",
merge_frame="True",
)
def test_string_function_is_None(process_test_df):
"""Test that dataframe is returned if string_function is None."""
result = process_test_df.process_text(column_name="text")
assert_frame_equal(result, process_test_df)
def test_str_split(process_test_df):
"""Test wrapper for Pandas ``str.split()`` method."""
expected = process_test_df.assign(
text=process_test_df["text"].str.split("_")
)
result = process_test_df.process_text(
column_name="text", string_function="split", pat="_"
)
assert_frame_equal(result, expected)
def test_new_column_names(process_test_df):
"""
Test that a new column name is created when
    `new_column_names` is not None.
"""
result = process_test_df.process_text(
column_name="text",
new_column_names="new_text",
string_function="slice",
start=2,
)
expected = process_test_df.assign(
new_text=process_test_df["text"].str.slice(start=2)
)
assert_frame_equal(result, expected)
@pytest.fixture
def no_nulls_df():
return pd.DataFrame({"text": ["a", "b", "c", "d"], "numbers": range(1, 5)})
def test_str_cat(no_nulls_df):
"""Test outcome for Pandas ``.str.cat()`` method."""
result = no_nulls_df.process_text(
column_name="text",
string_function="cat",
others=["A", "B", "C", "D"],
)
expected = no_nulls_df.assign(
text=no_nulls_df["text"].str.cat(others=["A", "B", "C", "D"])
)
assert_frame_equal(result, expected)
def test_str_cat_result_is_a_string(no_nulls_df):
"""
Test wrapper for Pandas ``.str.cat()`` method
when the outcome is a string.
"""
result = no_nulls_df.process_text(
column_name="text",
string_function="cat",
)
expected = no_nulls_df.assign(text=no_nulls_df["text"].str.cat())
assert_frame_equal(result, expected)
def test_str_cat_result_is_a_string_and_new_column_names(no_nulls_df):
"""
Test wrapper for Pandas ``.str.cat()`` method when the outcome is a string,
and `new_column_names` is not None.
"""
result = no_nulls_df.process_text(
column_name="text", string_function="cat", new_column_names="combined"
)
expected = no_nulls_df.assign(combined=no_nulls_df["text"].str.cat())
assert_frame_equal(result, expected)
def test_str_get():
"""Test outcome for Pandas ``.str.get()`` method."""
df = pd.DataFrame(
{"text": ["aA", "bB", "cC", "dD"], "numbers": range(1, 5)}
)
expected = df.assign(text=df["text"].str.get(1))
result = df.process_text(column_name="text", string_function="get", i=-1)
assert_frame_equal(result, expected)
def test_str_lower():
"""Test string conversion to lowercase using ``.str.lower()``."""
df = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"Graham Chapman",
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Terry Jones",
"Michael Palin",
],
}
)
expected = df.assign(names=df["names"].str.lower())
result = df.process_text(column_name="names", string_function="lower")
assert_frame_equal(result, expected)
def test_str_wrong(process_test_df):
"""Test that an invalid Pandas string method raises an exception."""
with pytest.raises(KeyError):
process_test_df.process_text(
column_name="text", string_function="invalid_function"
)
def test_str_wrong_parameters(process_test_df):
"""Test that invalid argument for Pandas string method raises an error."""
with pytest.raises(TypeError):
process_test_df.process_text(
column_name="text", string_function="split", pattern="_"
)
@pytest.fixture
def returns_frame_1():
return pd.DataFrame(
{
"ticker": [
"spx 5/25/2001 p500",
"spx 5/25/2001 p600",
"spx 5/25/2001 p700",
]
}
)
def test_return_dataframe_merge_is_None(returns_frame_1):
"""
Test that the dataframe returned when `merge_frame` is None
is the result of the text processing, and is not merged to
the original dataframe.
"""
expected_output = returns_frame_1["ticker"].str.split(" ", expand=True)
result = returns_frame_1.process_text(
column_name="ticker", string_function="split", expand=True, pat=" "
)
assert_frame_equal(result, expected_output)
def test_return_dataframe_merge_is_not_None(returns_frame_1):
"""
Test that the dataframe returned when `merge_frame` is not None
is a merger of the original dataframe, and the dataframe
generated from the text processing.
"""
expected_output = pd.concat(
[
returns_frame_1,
returns_frame_1["ticker"]
.str.split(" ", expand=True)
.add_prefix("new_"),
],
axis="columns",
)
result = returns_frame_1.process_text(
column_name="ticker",
new_column_names="new_",
merge_frame=True,
string_function="split",
expand=True,
pat=" ",
)
assert_frame_equal(result, expected_output)
def test_return_dataframe_merge_is_not_None_new_column_names_is_a_list(
returns_frame_1,
):
"""
Test that the dataframe returned when `merge_frame` is not None
is a merger of the original dataframe, and the dataframe
generated from the text processing. Also, the `new_column_names`
is a list.
"""
expected_output = pd.concat(
[
returns_frame_1,
returns_frame_1["ticker"]
.str.split(" ", expand=True)
.set_axis(["header1", "header2", "header3"], axis="columns"),
],
axis="columns",
)
result = returns_frame_1.process_text(
column_name="ticker",
new_column_names=["header1", "header2", "header3"],
merge_frame=True,
string_function="split",
expand=True,
pat=" ",
)
assert_frame_equal(result, expected_output)
def test_return_dataframe_new_column_names_is_a_list_len_unequal(
returns_frame_1,
):
"""
Raise error if text processing returns a dataframe,
`new_column_names` is not None, and the length of
`new_column_names` is not equal to the length of the
new dataframe's columns.
"""
with pytest.raises(ValueError):
returns_frame_1.process_text(
column_name="ticker",
new_column_names=["header1", "header2"],
merge_frame=True,
string_function="split",
expand=True,
pat=" ",
)
def test_output_extractall(test_returns_dataframe):
"""
Test output when `string_function` is "extractall"
and `merge_frame` is None.
"""
expected_output = test_returns_dataframe["text"].str.extractall(
r"(?P<letter>[ab])?(?P<digit>\d)"
)
result = test_returns_dataframe.process_text(
column_name="text",
string_function="extractall",
pat=r"(?P<letter>[ab])?(?P<digit>\d)",
)
assert_frame_equal(result, expected_output)
def test_output_extractall_merge_frame_is_not_None(test_returns_dataframe):
"""
Test output when `string_function` is "extractall"
and `merge_frame` is not None.
"""
expected_output = test_returns_dataframe["text"].str.extractall(
r"(?P<letter>[ab])?(?P<digit>\d)"
)
expected_output = test_returns_dataframe.join(
expected_output.reset_index("match"), how="outer"
).set_index("match", append=True)
result = test_returns_dataframe.process_text(
column_name="text",
merge_frame=True,
string_function="extractall",
pat=r"(?P<letter>[ab])?(?P<digit>\d)",
)
assert_frame_equal(result, expected_output)
|
def re_complete(path, path_re, embedding_size=128, value=0.0001):
    """Sort the indexed embedding lines in `path` by index and write them to
    `path_re`, inserting a default vector (`value` repeated `embedding_size`
    times) for every missing index. 128-dim files are space-separated; other
    files are tab-separated."""
    pre_line = 0
    new_line = [value for r in range(embedding_size)]
    line_dic = {}
with open(path) as f,\
open(path_re, "w") as fw:
for line in f:
line = line.strip()
if embedding_size == 128:
idx, _ = line.split(" ", 1)
else:
idx, _ = line.split("\t", 1)
if idx == "</s>":
print(line, file=fw)
continue
idx = int(idx)
line_dic[idx] = line
line_list = sorted(line_dic.items(), key=lambda x:x[0])
for idx, line in line_list:
for x in range(pre_line+1, idx):
if embedding_size == 128:
print(" ".join([str(s) for s in ([x]+new_line)]), file=fw)
else:
print("\t".join([str(s) for s in ([x]+new_line)]), file=fw)
print(line, file=fw)
pre_line = idx
if __name__ == "__main__":
re_complete("pre_data/Reddit_st_vectors", "pre_data/Reddit_st_vectors_re")
re_complete("pre_data/Reddit_pw_vectors", "pre_data/Reddit_pw_vectors_re")
re_complete("pre_data/Reddit_st_tf_df", "pre_data/Reddit_st_tf_df_re", 2, 1)
re_complete("pre_data/Reddit_pw_tf_df", "pre_data/Reddit_pw_tf_df_re", 2, 1)
|
import warnings
class NoMatplotlib:
def plot(self, *args, **kwargs):
raise NotImplementedError(
"Matplotlib was not loaded successfully, plotting not supported."
)
try:
import matplotlib
import matplotlib.pyplot as plt
except Exception as e:
warnings.warn(str(e))
plt = NoMatplotlib()
class Action:
def __init__(self, **kwargs):
self.kwargs = kwargs
@property
def action(self):
return self.__class__.__name__
class mmap(Action):
def page_ratio(self):
# TODO: Use projection
south = self.kwargs.get("subpage_lower_left_latitude", -90.0)
west = self.kwargs.get("subpage_lower_left_longitude", -180)
north = self.kwargs.get("subpage_upper_right_latitude", 90.0)
east = self.kwargs.get("subpage_upper_right_longitude", 180.0)
return (north - south) / (east - west)
def plot(*args, **kwargs):
return plt.plot(*args, **kwargs)
|
from collections import namedtuple
import numpy as np
import tensorflow as tf
#from util_conv import *
from CVAE_general import CVAE
class CVAE_type1(CVAE):
def _create_input_nodes(self, input_nodes):
if input_nodes is not None: # pair of (full, partial) data expected
self.use_placeholders = False
self.inputs, self.targets, self.zadd = input_nodes[1], input_nodes[0], None
else:
self.use_placeholders = True
self.inputs, self.targets, self.zadd = self._create_placeholders()
# boolean value indicating whether it is for training or testing
self.is_training = tf.placeholder(tf.bool, name='is_training') # for batch_norm
def _check_hps(self, encoder, decoder): # collection of assertions, called in hps_processing()
assert encoder.hps.latent_size == decoder.hps.latent_size
########################
# Functions for training
########################
def _get_feed_dict(self, targets, data):
feed_dict = {self.is_training:True}
#data -> encoder
if targets is not None:
feed_dict[self.targets] = targets
if data is not None:
feed_dict[self.inputs] = data
return feed_dict
########################
# Functions for testing
########################
# Return posterior mean, log(var.), and reconstruction result
def reconstruct(self, sess, X, z = None, get_add_info=False):
if z is None:
feed_dict={self.inputs: X, self.is_training:False}
else:
feed_dict={self.z: z, self.is_training: False}
output_nodes = [self.z_mean, self.z_log_sigma_sq, self.z, self.recon_result]
if get_add_info is True and self.additional_test_node is not None:
            if not isinstance(self.additional_test_node, list):
output_nodes += [self.additional_test_node]
else:
output_nodes += self.additional_test_node
return sess.run(output_nodes, feed_dict=feed_dict)
# Return posterior mean, log(var.), and the reconstruction at the posterior mean
def reconstruct_best(self, sess, X):
z_mean, z_log_var = self.encode(sess, X)
feed_dict={self.z: z_mean, self.is_training:False}
reconstructed = sess.run(self.recon_result, feed_dict=feed_dict)
return z_mean, z_log_var, reconstructed
# Return the Reconstruction with Y (truth) to check it can actually generate Y
    # In this model, Y (truth) is not utilised to generate reconstructions
def reconstruct_with_full_data(self, sess, Y, X):
return self.reconstruct_best(sess, X)
def encode(self, sess, X):
feed_dict={self.inputs: X, self.is_training:False}
return sess.run([self.z_mean, self.z_log_sigma_sq], feed_dict=feed_dict)
def decode(self, sess, z, zadd = None):
feed_dict={self.z: z, self.is_training:False}
return sess.run(self.recon_result, feed_dict=feed_dict)
def generate_z(self, sess, X):
feed_dict={self.inputs: X, self.is_training:False}
return sess.run([self.z, self.z_mean, self.z_log_sigma_sq], feed_dict=feed_dict)
########################
# Other functions
########################
def create_name(self):
name = "CVAE_type1_{}_latent{}".format(self.shape_in[0], self.latent_size)
self.name = name + self.add_name
|
import datetime
import os
from scripts.ilapfuncs import timeline, open_sqlite_db_readonly
from scripts.plugin_base import ArtefactPlugin
from scripts.ilapfuncs import logfunc, tsv
from scripts import artifact_report
class SkypeCallLogsPlugin(ArtefactPlugin):
"""
"""
def __init__(self):
super().__init__()
self.author = 'Unknown'
self.author_email = ''
self.author_url = ''
self.category = 'Skype'
self.name = 'Call Logs'
self.description = ''
        self.artefact_reference = ''  # Description of what the artefact is.
self.path_filters = ['**/com.skype.raider/databases/live*'] # Collection of regex search filters to locate an artefact.
self.icon = 'phone' # feathricon for report.
def _processor(self) -> bool:
source_file = ''
for file_found in self.files_found:
file_name = str(file_found)
if (('live' in file_name.lower()) and ('db-journal' not in file_name.lower())):
skype_db = str(file_found)
                # The file name has the "live:" prefix, which cannot be written to the file
                # system as-is, so restore the original name from what was actually written out.
(head, tail) = os.path.split(file_found.replace(self.seeker.directory, ''))
source_file = os.path.join(head, "live:" + tail[5:])
else:
continue
db = open_sqlite_db_readonly(skype_db)
cursor = db.cursor()
try:
cursor.execute('''
SELECT
contact_book_w_groups.conversation_id,
contact_book_w_groups.participant_ids,
messages.time/1000 as start_date,
messages.time/1000 + messages.duration as end_date,
case messages.is_sender_me when 0 then "Incoming" else "Outgoing"
end is_sender_me,
messages.person_id AS sender_id
FROM (SELECT conversation_id,
Group_concat(person_id) AS participant_ids
FROM particiapnt
GROUP BY conversation_id
UNION
SELECT entry_id AS conversation_id,
NULL
FROM person) AS contact_book_w_groups
join chatitem AS messages
ON messages.conversation_link = contact_book_w_groups.conversation_id
WHERE message_type == 3
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
except:
usageentries = 0
if usageentries > 0:
data_headers = ('Start Time', 'End Time', 'From ID', 'To ID', 'Call Direction')
data_list = []
for row in all_rows:
to_id = None
if row[4] == "Outgoing":
if ',' in row[1]:
to_id = row[1]
else:
to_id = row[0]
starttime = datetime.datetime.fromtimestamp(int(row[2])).strftime('%Y-%m-%d %H:%M:%S')
endtime = datetime.datetime.fromtimestamp(int(row[3])).strftime('%Y-%m-%d %H:%M:%S')
data_list.append((starttime, endtime, row[5], to_id, row[4]))
artifact_report.GenerateHtmlReport(self, file_found, data_headers, data_list)
tsv(self.report_folder, data_headers, data_list, self.full_name(), source_file)
timeline(self.report_folder, self.full_name(), data_list, data_headers)
else:
logfunc('No Skype Call Log available')
db.close()
return True
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .at_loss import ATLoss
from .model_docred import LukeModelDoc, LukeEntityAwareAttentionModelDoc
from .process_long_seq import process_long_input
class LukeForDocRED(LukeEntityAwareAttentionModelDoc):
def __init__(self, args, num_labels):
super(LukeForDocRED, self).__init__(args.model_config)
self.args = args
self.block_size = 64
self.num_labels = num_labels
self.dropout = nn.Dropout(args.model_config.hidden_dropout_prob)
if self.args.classifier == 'linear':
self.classifier = nn.Linear(args.model_config.hidden_size * 2, num_labels, False)
elif self.args.classifier == 'bilinear':
self.classifier = nn.Linear(args.model_config.hidden_size * self.block_size, num_labels)
if self.args.lop:
self.head_extractor = nn.Linear(args.model_config.hidden_size * 2, args.model_config.hidden_size)
self.tail_extractor = nn.Linear(args.model_config.hidden_size * 2, args.model_config.hidden_size)
elif not self.args.lop:
self.head_extractor = nn.Linear(args.model_config.hidden_size, args.model_config.hidden_size)
self.tail_extractor = nn.Linear(args.model_config.hidden_size, args.model_config.hidden_size)
self.apply(self.init_weights)
self.at_loss = ATLoss()
def get_labels(self, logits, k):
threshold_logit = logits[:, 0].unsqueeze(1)
output = torch.zeros_like(logits).to(logits)
logit_mask = (logits > threshold_logit)
if k > 0:
top_k, _ = torch.topk(input=logits, k=k, dim=1)
top_k = top_k[:, -1]
logit_mask = (logits >= top_k.unsqueeze(1)) & logit_mask
output[logit_mask] = 1.0
output[:, 0] = (output.sum(1) == 0.0).to(logits)
return output
def get_head_tail_representations(self, sequence_output, attention, head_tail_idxs, entity_position_ids):
"""
Get representations for (head, tail) pairs. You should end up with (batch_size * total_num_head_tail_pairs) samples.
"""
all_head_representations = []
all_tail_representations = []
all_local_attentions = []
for batch_idx, _ in enumerate(head_tail_idxs):
head_representations = []
tail_representations = []
local_attentions = []
encoded_text = sequence_output[batch_idx]
attention_output = attention[batch_idx]
head_tail_pairs = head_tail_idxs[batch_idx]
entities = entity_position_ids[batch_idx]
for pair in head_tail_pairs:
head_embeddings = []
tail_embeddings = []
head_attentions = []
tail_attentions = []
head_idx = pair[0]
tail_idx = pair[1]
head_entity_positions = entities[head_idx]
tail_entity_positions = entities[tail_idx]
for head_entity_position in head_entity_positions:
valid_position = [idx for idx in head_entity_position if idx != -1]
head_embeddings_ = encoded_text[valid_position]
head_attentions_ = attention_output[:, valid_position]
# if torch.any(torch.isnan(head_attentions_)):
# import pdb; pdb.set_trace()
head_embeddings.append(torch.sum(head_embeddings_, dim=0, keepdim=True))
head_attentions.append(head_attentions_)
for tail_entity_position in tail_entity_positions:
valid_position = [idx for idx in tail_entity_position if idx != -1]
tail_embeddings_ = encoded_text[valid_position]
tail_attentions_ = attention_output[:, valid_position]
# if torch.any(torch.isnan(tail_attentions_)):
# import pdb; pdb.set_trace()
tail_embeddings.append(torch.sum(tail_embeddings_, dim=0, keepdim=True))
tail_attentions.append(tail_attentions_)
head_embeddings = torch.cat(head_embeddings, dim=0)
tail_embeddings = torch.cat(tail_embeddings, dim=0)
head_entity_embedding = torch.sum(head_embeddings, dim=0, keepdim=True)
tail_entity_embedding = torch.sum(tail_embeddings, dim=0, keepdim=True)
head_attentions = torch.cat(head_attentions, dim=1).mean(1)
tail_attentions = torch.cat(tail_attentions, dim=1).mean(1)
head_tail_attentions = (head_attentions * tail_attentions).mean(0, keepdim=True)
head_tail_attentions = head_tail_attentions / (head_tail_attentions.sum(1, keepdim=True) + 1e-10)
local_attention = torch.matmul(head_tail_attentions, encoded_text)
head_representations.append(head_entity_embedding)
tail_representations.append(tail_entity_embedding)
local_attentions.append(local_attention)
all_head_representations.append(head_representations)
all_tail_representations.append(tail_representations)
all_local_attentions.append(local_attentions)
return all_head_representations, all_tail_representations, all_local_attentions
def forward(
self,
word_ids,
word_segment_ids,
word_attention_mask,
entity_ids,
entity_position_ids,
entity_segment_ids,
entity_attention_mask,
label=None,
head_tail_idxs=None
):
word_ids = word_ids.to(self.args.device)
word_segment_ids = word_segment_ids.to(self.args.device)
word_attention_mask = word_attention_mask.to(self.args.device)
entity_segment_ids = entity_segment_ids.to(self.args.device)
entity_attention_mask = entity_attention_mask.to(self.args.device)
encoder_outputs = super(LukeForDocRED, self).forward(
word_ids=word_ids,
word_segment_ids=word_segment_ids,
word_attention_mask=word_attention_mask,
entity_ids=entity_ids,
entity_position_ids=entity_position_ids,
entity_segment_ids=entity_segment_ids,
entity_attention_mask=entity_attention_mask,
token_type_ids=torch.tensor([[0, 0]]),
head_tail_idxs=head_tail_idxs
)
sequence_output = encoder_outputs[0]
attention_output = encoder_outputs[-1]
heads, tails, attentions = self.get_head_tail_representations(sequence_output, attention_output, head_tail_idxs, entity_position_ids)
all_heads = torch.cat(sum(heads, []), dim=0) # [batch_size * num_head_tails, hidden_dim]
all_tails = torch.cat(sum(tails, []), dim=0) # [batch_size * num_head_tails, hidden_dim]
all_attentions = torch.cat(sum(attentions, []), dim=0) # [batch_size * num_head_tails, hidden_dim]
# if torch.any(torch.isnan(all_attentions)):
# import pdb; pdb.set_trace()
if self.args.classifier == 'linear':
feature_vector = self.dropout(torch.cat([all_heads, all_tails], dim=1))
elif self.args.classifier == 'bilinear':
if self.args.lop:
all_heads = torch.cat([all_heads, all_attentions], dim=1)
all_tails = torch.cat([all_tails, all_attentions], dim=1)
z_s = torch.tanh(self.head_extractor(all_heads)) # [batch_size * num_head_tails, hidden_dim]
z_o = torch.tanh(self.tail_extractor(all_tails)) # [batch_size * num_head_tails, hidden_dim]
b1 = z_s.view(-1, self.args.model_config.hidden_size // self.block_size, self.block_size) # [batch_size * num_head_tails, hidden_dim / block_size, block_size]
b2 = z_o.view(-1, self.args.model_config.hidden_size // self.block_size, self.block_size) # [batch_size * num_head_tails, hidden_dim / block_size, block_size]
bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.args.model_config.hidden_size * self.block_size) # [batch_size * num_head_tails, hidden_dim * block_size]
feature_vector = bl
logits = self.classifier(feature_vector)
outputs = (self.get_labels(logits=logits, k=self.args.top_k),)
if label:
labels = torch.tensor(sum(label, [])).to(self.args.device)
if self.args.at_loss:
one_hot_labels = torch.zeros(size=(labels.shape[0], self.num_labels)).to(self.args.device)
for idx, label in enumerate(labels):
label_value = label.cpu().item()
one_hot_labels[idx][label_value] = 1
loss = self.at_loss(logits, one_hot_labels)
else:
loss = F.cross_entropy(logits, labels)
outputs = (loss,) + outputs
return outputs
|
"""
9 / 9 test cases passed.
Status: Accepted
Runtime: 652 ms
Memory Usage: 37.4 MB
"""
from typing import List


class WordFilter:
def _posibility(self, word, idx):
h = len(word)
for i in range(h):
prefix = word[:i+1]
for j in range(h):
suffix = word[h-j-1:]
self.cache[f'{prefix}#{suffix}'] = idx
def __init__(self, words: List[str]):
self.cache = {}
for idx, word in enumerate(words):
self._posibility(word, idx)
def f(self, prefix: str, suffix: str) -> int:
return self.cache.get(f'{prefix}#{suffix}', -1)
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(prefix,suffix)
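# Example (illustrative, matching LeetCode 745's sample case):
#   wf = WordFilter(["apple"])
#   wf.f("a", "e")   # -> 0  ("apple" has prefix "a" and suffix "e")
#   wf.f("b", "e")   # -> -1 (no stored word has prefix "b")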
|
from typing import Set
from dataclasses import dataclass
from datetime import datetime
@dataclass
class Nav:
"""Class for store the NAV value with references"""
value: float
updated: datetime
tags: Set[str]
fund: str
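# Illustrative usage (not part of the original module; the field values below are
# hypothetical examples):
#   nav = Nav(value=103.27, updated=datetime(2021, 5, 1), tags={"equity"}, fund="EXAMPLE-FUND")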
|
# Sum the odd multiples of 3 between 1 and 499.
soma = 0
for c in range(1, 500):
if(c % 3 == 0):
if(c % 2 > 0):
soma += c
print(soma)
print('FIM')
|
from app.writers import prottable as writers
from app.drivers.base import BaseDriver
class PepProttableDriver(BaseDriver):
"""Base class for prottable.py"""
def __init__(self):
super().__init__()
self.oldheader = False
self.probability = False
self.poolnames = False
self.group_by_field = False
def run(self):
self.initialize_input()
self.create_header()
self.set_feature_generator()
self.write()
self.finish()
def write(self):
outfn = self.create_outfilepath(self.fn, self.outsuffix)
writers.write_prottable(self.header, self.features, outfn)
|
from typing import Tuple
import torchvision.transforms as transforms
import numpy as np
from torchvision.transforms.transforms import Compose
def get_train_transform() -> transforms.Compose:
"""Data augmentation pipeline for training consists in:
- cast to a PIL object (better handling by torchvision)
- random horizontal flipping (50% chance)
- random rotation in the range of [0, pi/5]
- cast back to tensor object
:return: Data augmentation pipeline for training
:rtype: transforms.Compose
"""
train_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(np.degrees(0.2 * np.pi)),  # RandomRotation expects degrees, not radians
# transforms.RandomPerspective(distortion_scale=0.2),
transforms.ToTensor(),
# transforms.Normalize((0.5,),(0.5,))
])
return train_transform
def get_test_transform() -> transforms.Compose:
"""Transform consists in same steps of train transform without augmentation
:return: Data transform pipeline for evaluation
:rtype: transforms.Compose
"""
test_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
# transforms.Normalize((0.5,),(0.5,))
])
return test_transform
def get_augmentation_transforms() -> Tuple[Compose, Compose]:
"""Get transforms for training and evaluation
:return: Train and test transforms
:rtype: Tuple[Compose, Compose]
"""
train_transform = get_train_transform()
test_transform = get_test_transform()
return train_transform, test_transform
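# Illustrative usage (not part of the original module; the 28x28 grayscale shape is
# a hypothetical example):
#   train_tf, test_tf = get_augmentation_transforms()
#   image = np.random.randint(0, 256, size=(28, 28, 1), dtype=np.uint8)  # H x W x C uint8 array
#   augmented = train_tf(image)  # float tensor of shape (1, 28, 28) scaled to [0, 1]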
|
#fetchallrecs.py
#example 9.14
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
from myclasses import Product, base, Customers
engine = create_engine('sqlite:///mydb.sqlite', echo=True)
base.metadata.create_all(engine)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
sessionobj = Session()
q = sessionobj.query(Product)
rows = q.all()
for row in rows:
print (row)
|
# encoding: utf8
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import date as dtdate, timedelta
from decimal import Decimal
import itertools
import json
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
BaseUserManager)
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.core.validators import (MaxValueValidator, MinValueValidator,
RegexValidator)
from django.dispatch import receiver
from django.db import models
from django.db.models import signals, Sum
from django.template.defaultfilters import date
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django_countries.fields import CountryField
import floppyforms.__future__ as forms
import pytz
from brambling.payment.core import TEST, LIVE
from brambling.payment.stripe.api import stripe_refund
from brambling.payment.stripe.core import (
stripe_test_settings_valid,
stripe_live_settings_valid,
)
DEFAULT_DANCE_STYLES = (
"Alt Blues",
"Trad Blues",
"Fusion",
"Swing",
"Balboa",
"Contra",
"West Coast Swing",
"Argentine Tango",
"Ballroom",
"Folk",
"Contact Improv",
)
DEFAULT_ENVIRONMENTAL_FACTORS = (
"Dogs",
"Cats",
"Birds",
"Bees",
"Peanuts",
"Children",
"Tobacco smoke",
"Other smoke",
"Alcohol",
"Recreational drugs",
)
DEFAULT_HOUSING_CATEGORIES = (
"Quiet",
"Noisy",
"All-dancer",
"Party",
"Substance-free",
"Early bird",
"Night owl",
"Co-op",
"Apartment",
"House",
)
UNAMBIGUOUS_CHARS = 'abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'
class AbstractNamedModel(models.Model):
"A base model for any model which needs a human name."
NAME_ORDER_CHOICES = (
('FML', "First Middle Last"),
('LFM', "Last First Middle"),
('FL', "First Last"),
('LF', "Last First"),
)
NAME_ORDER_PATTERNS = {
'FML': "{first} {middle} {last}",
'LFM': "{last} {first} {middle}",
'FL': "{first} {last}",
'LF': "{last} {first}",
}
first_name = models.CharField(max_length=50)
middle_name = models.CharField(max_length=50, blank=True)
last_name = models.CharField(max_length=50)
name_order = models.CharField(max_length=3, choices=NAME_ORDER_CHOICES, default="FML")
def get_full_name(self):
name_dict = {
'first': self.first_name,
'middle': self.middle_name,
'last': self.last_name,
}
name_order = self.name_order
if not self.middle_name:
if name_order == 'FML':
name_order = 'FL'
elif name_order == 'LFM':
name_order = 'LF'
return self.NAME_ORDER_PATTERNS[name_order].format(**name_dict)
get_full_name.short_description = 'Name'
def get_short_name(self):
return self.first_name
class Meta:
abstract = True
class DanceStyle(models.Model):
name = models.CharField(max_length=30, unique=True)
class Meta:
ordering = ('name',)
def __unicode__(self):
return smart_text(self.name)
class EnvironmentalFactor(models.Model):
name = models.CharField(max_length=30, unique=True)
class Meta:
ordering = ('name',)
def __unicode__(self):
return smart_text(self.name)
class HousingCategory(models.Model):
name = models.CharField(max_length=20, unique=True)
class Meta:
ordering = ('name',)
verbose_name_plural = 'housing categories'
def __unicode__(self):
return smart_text(self.name)
@receiver(signals.post_migrate)
def create_defaults(app_config, **kwargs):
if app_config.name == 'brambling':
if not DanceStyle.objects.exists():
if kwargs.get('verbosity') >= 2:
print("Creating default dance styles")
DanceStyle.objects.bulk_create([
DanceStyle(name=name)
for name in DEFAULT_DANCE_STYLES
])
if not EnvironmentalFactor.objects.exists():
if kwargs.get('verbosity') >= 2:
print("Creating default environmental factors")
EnvironmentalFactor.objects.bulk_create([
EnvironmentalFactor(name=name)
for name in DEFAULT_ENVIRONMENTAL_FACTORS
])
if not HousingCategory.objects.exists():
if kwargs.get('verbosity') >= 2:
print("Creating default housing categories")
HousingCategory.objects.bulk_create([
HousingCategory(name=name)
for name in DEFAULT_HOUSING_CATEGORIES
])
class OrganizationMember(models.Model):
EDIT = '1-edit'
VIEW = '2-view'
OWNER = '0-owner'
ROLE_CHOICES = (
(OWNER, 'Is organization owner'),
(EDIT, 'Can edit organization'),
(VIEW, 'Can view organization'),
)
organization = models.ForeignKey('Organization')
person = models.ForeignKey('Person')
role = models.CharField(max_length=7, choices=ROLE_CHOICES)
# Internal tracking fields.
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('organization', 'person')
def __unicode__(self):
return u"{}: {}".format(self.organization, self.person)
class Organization(models.Model):
DEMO_SLUG = 'demo'
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=50,
validators=[RegexValidator("^[a-z0-9-]+$")],
help_text="URL-friendly version of the event name."
" Dashes, 0-9, and lower-case a-z only.",
unique=True)
description = models.TextField(blank=True)
website_url = models.URLField(blank=True, verbose_name="website URL")
facebook_url = models.URLField(blank=True, verbose_name="facebook URL")
banner_image = models.ImageField(blank=True)
city = models.CharField(max_length=50, blank=True)
state_or_province = models.CharField(max_length=50, verbose_name='state / province', blank=True)
country = CountryField(default='US', blank=True)
dance_styles = models.ManyToManyField(DanceStyle, blank=True)
members = models.ManyToManyField(
'Person',
through=OrganizationMember,
related_name='organizations',
blank=True,
)
# This is a secret value set by admins. It will be cached on the event model.
    default_application_fee_percent = models.DecimalField(max_digits=5, decimal_places=2, default=Decimal('2.5'),
validators=[MaxValueValidator(100), MinValueValidator(0)])
# These are obtained with Stripe Connect via Oauth.
stripe_user_id = models.CharField(max_length=255, blank=True, default='')
stripe_access_token = models.CharField(max_length=255, blank=True, default='')
stripe_refresh_token = models.CharField(max_length=255, blank=True, default='')
stripe_publishable_key = models.CharField(max_length=255, blank=True, default='')
stripe_test_user_id = models.CharField(max_length=255, blank=True, default='')
stripe_test_access_token = models.CharField(max_length=255, blank=True, default='')
stripe_test_refresh_token = models.CharField(max_length=255, blank=True, default='')
stripe_test_publishable_key = models.CharField(max_length=255, blank=True, default='')
check_payment_allowed = models.BooleanField(default=False)
check_payable_to = models.CharField(max_length=50, blank=True)
check_recipient = models.CharField(max_length=50, blank=True)
check_address = models.CharField(max_length=200, blank=True)
check_address_2 = models.CharField(max_length=200, blank=True)
check_city = models.CharField(max_length=50, blank=True)
check_state_or_province = models.CharField(max_length=50, blank=True, verbose_name='state / province')
check_zip = models.CharField(max_length=12, blank=True, verbose_name="zip / postal code")
check_country = CountryField(default='US')
# Internal tracking fields.
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return smart_text(self.name)
def get_absolute_url(self):
return reverse('brambling_organization_detail', kwargs={
'organization_slug': self.slug,
})
def get_permissions(self, person):
if person.is_superuser:
return ('view', 'edit', 'change_permissions')
try:
member = OrganizationMember.objects.get(
organization=self,
person=person,
)
except OrganizationMember.DoesNotExist:
return ()
if member.role == OrganizationMember.OWNER:
return ('view', 'edit', 'change_permissions')
if member.role == OrganizationMember.EDIT:
return ('view', 'edit')
if member.role == OrganizationMember.VIEW:
return ('view',)
return ()
def stripe_live_connected(self):
return bool(stripe_live_settings_valid() and self.stripe_user_id)
def stripe_test_connected(self):
return bool(stripe_test_settings_valid() and self.stripe_test_user_id)
def stripe_live_can_connect(self):
return bool(stripe_live_settings_valid() and not self.stripe_user_id)
def stripe_test_can_connect(self):
return bool(stripe_test_settings_valid() and not self.stripe_test_user_id)
def is_demo(self):
return self.slug == Organization.DEMO_SLUG
class EventMember(models.Model):
EDIT = '1-edit'
VIEW = '2-view'
ROLE_CHOICES = (
(EDIT, 'Can edit event'),
(VIEW, 'Can view event'),
)
event = models.ForeignKey('Event')
person = models.ForeignKey('Person')
role = models.CharField(max_length=6, choices=ROLE_CHOICES)
# Internal tracking fields.
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('event', 'person')
def __unicode__(self):
return u"{}: {}: {}".format(self.event.organization, self.event, self.person)
class Event(models.Model):
PUBLIC = 'public'
LINK = 'link'
HALF_PUBLIC = 'half-public'
INVITED = 'invited'
PRIVACY_CHOICES = (
(PUBLIC, _("Anyone can find and view the event")),
(LINK, _("Anyone with a direct link can view the event")),
(HALF_PUBLIC, _("Anyone can find and view the event, but only people who are invited can register")),
(INVITED, _("Only people invited to the event can see the event and register")),
)
LIVE = LIVE
TEST = TEST
API_CHOICES = (
(LIVE, _('Live')),
(TEST, _('Test')),
)
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=50,
validators=[RegexValidator("^[a-z0-9-]+$")],
help_text="URL-friendly version of the event name."
" Dashes, 0-9, and lower-case a-z only.")
description = models.TextField(blank=True)
website_url = models.URLField(blank=True, verbose_name="website URL")
facebook_url = models.URLField(blank=True, verbose_name="facebook event URL")
banner_image = models.ImageField(blank=True)
city = models.CharField(max_length=50, blank=True)
state_or_province = models.CharField(max_length=50, verbose_name='state / province', blank=True)
country = CountryField(default='US', blank=True)
timezone = models.CharField(max_length=40, default='America/New_York', choices=((tz, tz) for tz in pytz.common_timezones))
currency = models.CharField(max_length=10, default='USD')
start_date = models.DateField()
end_date = models.DateField()
start_time = models.TimeField(blank=True, null=True)
end_time = models.TimeField(blank=True, null=True)
dance_styles = models.ManyToManyField(DanceStyle, blank=True)
has_dances = models.BooleanField(verbose_name="Is a dance / Has dance(s)", default=False)
has_classes = models.BooleanField(verbose_name="Is a class / Has class(es)", default=False)
liability_waiver = models.TextField(default=_("I hereby release {organization}, its officers, and its employees from all "
"liability of injury, loss, or damage to personal property associated "
"with this event. I acknowledge that I understand the content of this "
"document. I am aware that it is legally binding and I accept it out "
"of my own free will."), help_text=_("'{event}' and '{organization}' will be automatically replaced with your event and organization names respectively when users are presented with the waiver."))
transfers_allowed = models.BooleanField(default=True, help_text="Whether users can transfer items directly to other users.")
privacy = models.CharField(max_length=11, choices=PRIVACY_CHOICES,
default=PUBLIC)
is_published = models.BooleanField(default=False)
# If an event is "frozen", it can no longer be edited or unpublished.
is_frozen = models.BooleanField(default=False)
# Unpublished events can use test APIs, so that event organizers
# and developers can easily run through things without accidentally
# charging actual money.
api_type = models.CharField(max_length=4, choices=API_CHOICES, default=LIVE)
organization = models.ForeignKey(Organization)
members = models.ManyToManyField(
'Person',
through=EventMember,
related_name='events',
blank=True,
)
collect_housing_data = models.BooleanField(default=True)
collect_survey_data = models.BooleanField(default=True)
check_postmark_cutoff = models.DateField(blank=True, null=True)
# Time in minutes.
cart_timeout = models.PositiveSmallIntegerField(default=15,
help_text="Minutes before a user's cart expires.")
# This is a secret value set by admins
application_fee_percent = models.DecimalField(max_digits=5, decimal_places=2, default=Decimal('2.5'),
validators=[MaxValueValidator(100), MinValueValidator(0)])
# Internal tracking fields
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
class Meta:
unique_together = ('slug', 'organization')
def __unicode__(self):
return smart_text(self.name)
def get_absolute_url(self):
return reverse('brambling_event_root', kwargs={
'event_slug': self.slug,
'organization_slug': self.organization.slug,
})
def get_liability_waiver(self):
return (self.liability_waiver
.replace('{event}', self.name)
.replace('{organization}', self.organization.name))
def get_permissions(self, person):
if person.is_superuser:
return ('view', 'edit', 'change_permissions')
default_perms = ()
try:
member = OrganizationMember.objects.get(
organization=self.organization,
person=person,
)
except OrganizationMember.DoesNotExist:
pass
else:
if member.role in (OrganizationMember.OWNER, OrganizationMember.EDIT):
# Return here because event perms can't give more.
return ('view', 'edit', 'change_permissions')
if member.role == OrganizationMember.VIEW:
default_perms = ('view',)
try:
member = EventMember.objects.get(
event=self,
person=person,
)
except EventMember.DoesNotExist:
return default_perms
if member.role == EventMember.EDIT:
return ('view', 'edit')
if member.role == EventMember.VIEW:
return ('view',)
return default_perms
def viewable_by(self, user):
if user.has_perm('view', self):
return True
if not self.is_published:
return False
if self.privacy == self.INVITED:
if not user.is_authenticated():
return False
if not Order.objects.filter(person=user, event=self).exists():
return False
return True
def can_be_published(self):
return ItemOption.objects.filter(item__event=self).exists()
def stripe_connected(self):
if self.api_type == Event.LIVE:
return self.organization.stripe_live_connected()
return self.organization.stripe_test_connected()
def stripe_can_connect(self):
if self.api_type == Event.LIVE:
return self.organization.stripe_live_can_connect()
return self.organization.stripe_test_can_connect()
def is_demo(self):
return self.organization.is_demo()
def get_housing_dates(self):
return [
self.start_date + timedelta(n - 1)
for n in xrange((self.end_date - self.start_date).days + 2)
]
def check_postmark_past(self):
"""
If there is a check postmark cutoff,
returns whether that date is past or not.
If there isn't a cutoff yet, returns false.
"""
if self.check_postmark_cutoff:
return self.check_postmark_cutoff < dtdate.today()
return False
class Item(models.Model):
name = models.CharField(max_length=30, help_text="Full pass, dance-only pass, T-shirt, socks, etc.")
description = models.TextField(blank=True)
event = models.ForeignKey(Event, related_name='items')
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return smart_text(self.name)
class ItemImage(models.Model):
item = models.ForeignKey(Item, related_name='images')
order = models.PositiveSmallIntegerField()
image = models.ImageField()
class ItemOption(models.Model):
TOTAL_AND_REMAINING = 'both'
TOTAL = 'total'
REMAINING = 'remaining'
HIDDEN = 'hidden'
REMAINING_DISPLAY_CHOICES = (
(TOTAL_AND_REMAINING, _('Remaining / Total')),
(TOTAL, _('Total only')),
(REMAINING, _('Remaining only')),
(HIDDEN, _("Don't display")),
)
item = models.ForeignKey(Item, related_name='options')
name = models.CharField(max_length=30)
price = models.DecimalField(max_digits=6, decimal_places=2, validators=[MinValueValidator(0)])
total_number = models.PositiveSmallIntegerField(blank=True, null=True, help_text="Leave blank for unlimited.")
available_start = models.DateTimeField(default=timezone.now)
available_end = models.DateTimeField()
remaining_display = models.CharField(max_length=9, default=TOTAL_AND_REMAINING, choices=REMAINING_DISPLAY_CHOICES)
order = models.PositiveSmallIntegerField()
class Meta:
ordering = ('order',)
def __unicode__(self):
return smart_text(self.name)
@property
def remaining(self):
if not hasattr(self, 'taken'):
self.taken = self.boughtitem_set.exclude(status__in=(BoughtItem.REFUNDED, BoughtItem.TRANSFERRED)).count()
return self.total_number - self.taken
class Discount(models.Model):
CODE_REGEX = '[0-9A-Za-z \'"~+=]+'
PERCENT = 'percent'
FLAT = 'flat'
TYPE_CHOICES = (
(FLAT, _('Flat')),
(PERCENT, _('Percent')),
)
name = models.CharField(max_length=40)
code = models.CharField(max_length=20, validators=[RegexValidator("^{}$".format(CODE_REGEX))],
help_text="Allowed characters: 0-9, a-z, A-Z, space, and '\"~+=")
item_options = models.ManyToManyField(ItemOption)
available_start = models.DateTimeField(default=timezone.now)
available_end = models.DateTimeField()
discount_type = models.CharField(max_length=7,
choices=TYPE_CHOICES,
default=FLAT)
amount = models.DecimalField(max_digits=6, decimal_places=2,
validators=[MinValueValidator(0)],
verbose_name="discount value")
event = models.ForeignKey(Event)
class Meta:
unique_together = ('code', 'event')
def __unicode__(self):
return self.name
class PersonManager(BaseUserManager):
def _create_user(self, email, password, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not email:
raise ValueError('Email must be given')
email = self.normalize_email(email)
person = self.model(email=email, is_superuser=is_superuser,
last_login=now, **extra_fields)
person.set_password(password)
person.save(using=self._db)
return person
def create_user(self, email, password=None, **extra_fields):
return self._create_user(email, password, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, **extra_fields)
class Person(AbstractNamedModel, AbstractBaseUser, PermissionsMixin):
NOTIFY_NEVER = 'never'
NOTIFY_EACH = 'each'
NOTIFY_DAILY = 'daily'
NOTIFY_NEW_PURCHASES_CHOICES = (
(NOTIFY_NEVER, "Don't email me about new purchases"),
(NOTIFY_EACH, "Email me about every new purchase"),
(NOTIFY_DAILY, "Email me a daily report of new purchases"),
)
email = models.EmailField(unique=True)
confirmed_email = models.EmailField()
home = models.ForeignKey('Home', blank=True, null=True,
related_name='residents')
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
# Notification settings
last_new_purchases_digest_sent = models.DateTimeField(default=None, blank=True, null=True)
notify_new_purchases = models.CharField(max_length=5, default=NOTIFY_EACH,
choices=NOTIFY_NEW_PURCHASES_CHOICES)
notify_product_updates = models.BooleanField(default=True)
# Start custom user requirements
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name']
@property
def is_staff(self):
return self.is_superuser
is_active = models.BooleanField(default=True)
objects = PersonManager()
# End custom user requirements
# Stripe-related fields
stripe_customer_id = models.CharField(max_length=36, blank=True)
stripe_test_customer_id = models.CharField(max_length=36, blank=True, default='')
class Meta:
verbose_name = _('person')
verbose_name_plural = _('people')
def __unicode__(self):
return self.get_full_name()
def get_claimable_orders(self):
if self.email != self.confirmed_email:
return Order.objects.none()
event_pks = Event.objects.filter(order__person=self).values_list('pk', flat=True).distinct()
return Order.objects.filter(
person__isnull=True,
email=self.email,
).exclude(
event__in=event_pks,
)
def get_mergeable_orders(self):
if self.email != self.confirmed_email:
return Order.objects.none()
event_pks = Event.objects.filter(order__person=self).values_list('pk', flat=True).distinct()
return Order.objects.filter(
person__isnull=True,
email=self.email,
event__in=event_pks,
)
class CreditCard(models.Model):
BRAND_CHOICES = (
('Visa', 'Visa'),
('American Express', 'American Express'),
('MasterCard', 'MasterCard'),
('Discover', 'Discover'),
('JCB', 'JCB'),
('Diners Club', 'Diners Club'),
('Unknown', 'Unknown'),
)
LIVE = LIVE
TEST = TEST
API_CHOICES = (
(LIVE, 'Live'),
(TEST, 'Test'),
)
ICONS = {
'Visa': 'cc-visa',
'American Express': 'cc-amex',
'Discover': 'cc-discover',
'MasterCard': 'cc-mastercard',
}
stripe_card_id = models.CharField(max_length=40)
api_type = models.CharField(max_length=4, choices=API_CHOICES, default=LIVE)
person = models.ForeignKey(Person, related_name='cards', blank=True, null=True)
added = models.DateTimeField(auto_now_add=True)
exp_month = models.PositiveSmallIntegerField()
exp_year = models.PositiveSmallIntegerField()
fingerprint = models.CharField(max_length=32)
last4 = models.CharField(max_length=4)
brand = models.CharField(max_length=16)
is_saved = models.BooleanField(default=False)
def __unicode__(self):
return (u"{} " + u"\u2022" * 4 + u"{}").format(self.brand, self.last4)
def get_icon(self):
return self.ICONS.get(self.brand, 'credit-card')
class OrderManager(models.Manager):
_session_key = '_brambling_order_code'
def _get_session(self, request):
return request.session.get(self._session_key, {})
def _set_session_code(self, request, event, code):
session_orders = self._get_session(request)
session_orders[str(event.pk)] = code
request.session[self._session_key] = session_orders
def _get_session_code(self, request, event):
session_orders = self._get_session(request)
if str(event.pk) in session_orders:
return session_orders[str(event.pk)]
return None
def _delete_session_code(self, request, event):
session_orders = self._get_session(request)
if str(event.pk) in session_orders:
del session_orders[str(event.pk)]
request.session[self._session_key] = session_orders
def _can_claim(self, order, user):
# An order can be auto-claimed if:
# 1. It doesn't have a person.
# 2. User is authenticated
# 3. User doesn't have an order for the event yet.
# 4. Order hasn't checked out yet.
if order.person_id is not None:
return False
if not user.is_authenticated():
return False
if Order.objects.filter(person=user, event=order.event_id).exists():
return False
if order.bought_items.filter(status__in=(BoughtItem.BOUGHT, BoughtItem.REFUNDED, BoughtItem.TRANSFERRED)).exists():
return False
return True
def for_request(self, event, request, create=True):
order = None
created = False
# Check if the user is authenticated and has an order for this
# event.
if request.user.is_authenticated():
try:
order = Order.objects.get(
event=event,
person=request.user,
)
except Order.DoesNotExist:
pass
# Next, check if there's a session-stored order. Assign it
# if the order hasn't checked out yet and the user is authenticated.
if order is None:
code = self._get_session_code(request, event)
if code:
try:
order = Order.objects.get(
event=event,
person__isnull=True,
code=code,
)
except Order.DoesNotExist:
pass
else:
if self._can_claim(order, request.user):
order.person = request.user
order.save()
elif request.user.is_authenticated():
order = None
if order is None and create:
# Okay, then create for this user.
created = True
person = request.user if request.user and request.user.is_authenticated() else None
while True:
code = get_random_string(8, UNAMBIGUOUS_CHARS)
if not Order.objects.filter(event=event, code=code).exists():
break
order = Order.objects.create(event=event, code=code, person=person)
if not request.user.is_authenticated():
self._set_session_code(request, event, order.code)
if order is None:
raise Order.DoesNotExist
if order.cart_is_expired():
order.delete_cart()
return order, created
class Order(models.Model):
"""
This model represents metadata connecting an event and a person.
For example, it links to the items that a person has bought. It
also contains denormalized metadata - for example, the person's
current balance.
"""
FLYER = 'flyer'
FACEBOOK = 'facebook'
WEBSITE = 'website'
INTERNET = 'internet'
FRIEND = 'friend'
ATTENDEE = 'attendee'
DANCER = 'dancer'
OTHER = 'other'
HEARD_THROUGH_CHOICES = (
(FLYER, "Flyer"),
(FACEBOOK, 'Facebook'),
(WEBSITE, 'Event website'),
(INTERNET, 'Other website'),
(FRIEND, 'Friend'),
(ATTENDEE, 'Former attendee'),
(DANCER, 'Other dancer'),
(OTHER, 'Other'),
)
event = models.ForeignKey(Event)
person = models.ForeignKey(Person, blank=True, null=True)
email = models.EmailField(blank=True)
code = models.CharField(max_length=8, db_index=True)
cart_start_time = models.DateTimeField(blank=True, null=True)
# "Survey" questions for Order
survey_completed = models.BooleanField(default=False)
heard_through = models.CharField(max_length=8,
choices=HEARD_THROUGH_CHOICES,
blank=True)
heard_through_other = models.CharField(max_length=128, blank=True)
send_flyers = models.BooleanField(default=False)
send_flyers_address = models.CharField(max_length=200, verbose_name='address', blank=True)
send_flyers_address_2 = models.CharField(max_length=200, verbose_name='address line 2', blank=True)
send_flyers_city = models.CharField(max_length=50, verbose_name='city', blank=True)
send_flyers_state_or_province = models.CharField(max_length=50, verbose_name='state / province', blank=True)
send_flyers_zip = models.CharField(max_length=12, verbose_name="zip / postal code", blank=True)
send_flyers_country = CountryField(verbose_name='country', blank=True)
providing_housing = models.BooleanField(default=False)
custom_data = GenericRelation('CustomFormEntry', content_type_field='related_ct', object_id_field='related_id')
# Organizer-only data
notes = models.TextField(blank=True)
objects = OrderManager()
class Meta:
unique_together = ('event', 'code')
def add_discount(self, discount):
"""
Add a discount to all items in the order that don't already have that discount.
Return True if any discounts are added and False otherwise.
"""
if discount.event_id != self.event_id:
raise ValueError("Discount is not for the correct event")
bought_items = BoughtItem.objects.filter(
order=self,
item_option__discount=discount,
status__in=(
BoughtItem.UNPAID,
BoughtItem.RESERVED,
),
).exclude(
discounts__discount=discount,
).distinct()
created = bool(bought_items)
if created:
BoughtItemDiscount.objects.bulk_create([
BoughtItemDiscount(
discount=discount,
bought_item=bought_item,
name=discount.name,
code=discount.code,
discount_type=discount.discount_type,
amount=discount.amount,
)
for bought_item in bought_items
])
return created
def add_to_cart(self, item_option):
if self.cart_is_expired():
self.delete_cart()
BoughtItem.objects.create(
item_option=item_option,
order=self,
status=BoughtItem.RESERVED,
item_name=item_option.item.name,
item_description=item_option.item.description,
item_option_name=item_option.name,
price=item_option.price,
)
if self.cart_start_time is None:
self.cart_start_time = timezone.now()
self.save()
def remove_from_cart(self, bought_item):
if bought_item.order.id == self.id:
bought_item.delete()
if not self.has_cart():
self.cart_start_time = None
self.save()
def mark_cart_paid(self, payment):
bought_items = self.bought_items.filter(
status__in=(BoughtItem.RESERVED, BoughtItem.UNPAID)
)
payment.bought_items = bought_items
bought_items.update(status=BoughtItem.BOUGHT)
if self.cart_start_time is not None:
self.cart_start_time = None
self.save()
def cart_expire_time(self):
if self.cart_start_time is None:
return None
return self.cart_start_time + timedelta(minutes=self.event.cart_timeout)
def cart_is_expired(self):
return (self.cart_start_time is not None and
timezone.now() > self.cart_expire_time())
def has_cart(self):
if self.cart_is_expired():
self.delete_cart()
return (self.cart_start_time is not None and
self.bought_items.filter(status=BoughtItem.RESERVED).exists())
def delete_cart(self):
self.bought_items.filter(status=BoughtItem.RESERVED).delete()
if self.cart_start_time is not None:
self.cart_start_time = None
self.save()
def get_groupable_cart(self):
return self.bought_items.filter(
status=BoughtItem.RESERVED
).order_by('item_name', 'item_option_name', '-added')
def get_summary_data(self):
if self.cart_is_expired():
self.delete_cart()
# First, fetch all transactions
transactions_qs = self.transactions.order_by('-timestamp')
# First fetch BoughtItems and group by transaction.
bought_items_qs = self.bought_items.prefetch_related(
'discounts',
'transactions',
).order_by('-added')
transactions = OrderedDict()
# Prepopulate transactions dictionary.
for txn in itertools.chain([None], transactions_qs):
transactions[txn] = {
'items': [],
'discounts': [],
'gross_cost': 0,
'total_savings': 0,
'net_cost': 0,
}
def add_item(txn, item):
txn_dict = transactions[txn]
txn_dict['items'].append(item)
multiplier = -1 if txn and txn.transaction_type == Transaction.REFUND else 1
if not txn or txn.transaction_type != Transaction.TRANSFER:
txn_dict['gross_cost'] += multiplier * item.price
for discount in item.discounts.all():
txn_dict['discounts'].append(discount)
txn_dict["total_savings"] -= multiplier * discount.savings()
txn_dict['net_cost'] = txn_dict['gross_cost'] + txn_dict['total_savings']
for item in bought_items_qs:
if not item.transactions.all():
add_item(None, item)
else:
for txn in item.transactions.all():
add_item(txn, item)
if not transactions[None]['items']:
del transactions[None]
gross_cost = 0
total_savings = 0
net_cost = 0
total_payments = 0
total_refunds = 0
unconfirmed_check_payments = False
for txn, txn_dict in transactions.iteritems():
gross_cost += txn_dict['gross_cost']
total_savings += txn_dict['total_savings']
net_cost += txn_dict['net_cost']
if txn:
if txn.transaction_type == Transaction.REFUND:
total_refunds += txn.amount
else:
total_payments += txn.amount
if not unconfirmed_check_payments and txn and txn.method == Transaction.CHECK and not txn.is_confirmed:
unconfirmed_check_payments = True
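# Note: refund transactions are stored with negative amounts, so
# total_payments + total_refunds below is the net amount actually received;
# a positive net_balance therefore means the order still owes money.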
return {
'transactions': transactions,
'gross_cost': gross_cost,
'total_savings': total_savings,
'total_refunds': total_refunds,
'total_payments': total_payments,
'net_cost': net_cost,
'net_balance': net_cost - (total_payments + total_refunds),
'unconfirmed_check_payments': unconfirmed_check_payments
}
def get_eventhousing(self):
# Workaround for DNE exceptions on nonexistent reverse relations.
if not hasattr(self, '_eventhousing'):
try:
self._eventhousing = self.eventhousing
except EventHousing.DoesNotExist:
self._eventhousing = None
return self._eventhousing
class Transaction(models.Model):
STRIPE = 'stripe'
DWOLLA = 'dwolla'
CASH = 'cash'
CHECK = 'check'
FAKE = 'fake'
NONE = 'none'
METHOD_CHOICES = (
(STRIPE, 'Stripe'),
(DWOLLA, 'Dwolla'),
(CASH, 'Cash'),
(CHECK, 'Check'),
(FAKE, 'Fake'),
(NONE, 'No balance change'),
)
LIVE = LIVE
TEST = TEST
API_CHOICES = (
(LIVE, _('Live')),
(TEST, _('Test')),
)
PURCHASE = 'purchase'
REFUND = 'refund'
TRANSFER = 'transfer'
OTHER = 'other'
TRANSACTION_TYPE_CHOICES = (
(PURCHASE, _('Purchase')),
(REFUND, _('Refunded purchase')),
(TRANSFER, _('Transfer')),
(OTHER, _('Other')),
)
REMOTE_URLS = {
(STRIPE, PURCHASE, LIVE): 'https://dashboard.stripe.com/payments/{remote_id}',
(STRIPE, PURCHASE, TEST): 'https://dashboard.stripe.com/test/payments/{remote_id}',
(STRIPE, REFUND, LIVE): 'https://dashboard.stripe.com/payments/{related_remote_id}',
(STRIPE, REFUND, TEST): 'https://dashboard.stripe.com/test/payments/{related_remote_id}',
}
amount = models.DecimalField(max_digits=9, decimal_places=2, default=0)
application_fee = models.DecimalField(max_digits=9, decimal_places=2, default=0)
processing_fee = models.DecimalField(max_digits=9, decimal_places=2, default=0)
timestamp = models.DateTimeField(default=timezone.now)
created_by = models.ForeignKey(Person, blank=True, null=True)
method = models.CharField(max_length=7, choices=METHOD_CHOICES)
transaction_type = models.CharField(max_length=8, choices=TRANSACTION_TYPE_CHOICES)
is_confirmed = models.BooleanField(default=False)
api_type = models.CharField(max_length=4, choices=API_CHOICES, default=LIVE)
event = models.ForeignKey(Event)
related_transaction = models.ForeignKey('self', blank=True, null=True, related_name='related_transaction_set')
order = models.ForeignKey('Order', related_name='transactions', blank=True, null=True)
remote_id = models.CharField(max_length=40, blank=True)
card = models.ForeignKey('CreditCard', blank=True, null=True, on_delete=models.SET_NULL)
bought_items = models.ManyToManyField('BoughtItem', related_name='transactions', blank=True)
class Meta:
get_latest_by = 'timestamp'
def get_remote_url(self):
key = (self.method, self.transaction_type, self.api_type)
if self.remote_id and key in Transaction.REMOTE_URLS:
return Transaction.REMOTE_URLS[key].format(
remote_id=self.remote_id,
related_remote_id=self.related_transaction.remote_id if self.related_transaction else ''
)
return None
@classmethod
def from_stripe_charge(cls, charge, **kwargs):
# charge is expected to be a stripe charge with
# balance_transaction expanded.
application_fee = 0
processing_fee = 0
for fee in charge.balance_transaction.fee_details:
if fee.type == 'application_fee':
application_fee = Decimal(fee.amount) / 100
elif fee.type == 'stripe_fee':
processing_fee = Decimal(fee.amount) / 100
return Transaction.objects.create(
transaction_type=Transaction.PURCHASE,
amount=Decimal(charge.amount) / 100,
method=Transaction.STRIPE,
remote_id=charge.id,
is_confirmed=True,
application_fee=application_fee,
processing_fee=processing_fee,
**kwargs
)
@classmethod
def from_stripe_refund(cls, refund, related_transaction, **kwargs):
application_fee_refund = refund['application_fee_refund']
refund = refund['refund']
processing_fee = 0
for fee in refund.balance_transaction.fee_details:
if fee.type == 'stripe_fee':
processing_fee = Decimal(fee.amount) / 100
return Transaction.objects.create(
transaction_type=Transaction.REFUND,
method=Transaction.STRIPE,
amount=-1 * Decimal(refund.amount) / 100,
is_confirmed=True,
related_transaction=related_transaction,
remote_id=refund.id,
application_fee=-1 * Decimal(application_fee_refund.amount) / 100,
processing_fee=processing_fee,
**kwargs
)
def is_unconfirmed_check(self):
return self.method == Transaction.CHECK and not self.is_confirmed
def get_returnable_items(self):
return self.bought_items.filter(status=BoughtItem.BOUGHT)
def get_refundable_amount(self):
refunded = self.related_transaction_set.filter(
transaction_type=Transaction.REFUND
).aggregate(refunded=Sum('amount'))['refunded']
# None means there are no refunds, which is relevant
# for 0-amount transactions.
return self.amount if refunded is None else self.amount + refunded
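# Illustrative example: a 50.00 purchase with one earlier -20.00 refund
# has 50.00 + (-20.00) = 30.00 still refundable.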
def refund(self, amount=None, bought_items=None, issuer=None):
refundable_amount = self.get_refundable_amount()
returnable_items = self.get_returnable_items()
if amount is None:
amount = refundable_amount
if bought_items is None:
bought_items = returnable_items
# Early return if there is no amount and no items to refund
if not amount and not bought_items:
return
# Refundable is the amount that hasn't been refunded from total.
# Amount is how much we're trying to refund. If we know amount is greater
# than what's left on the transaction, don't go through with it.
if amount > refundable_amount:
raise ValueError("Not enough money available")
if amount < 0:
raise ValueError("Refund cannot be negative")
# Make sure we're not returning items that aren't part of this transaction.
if any([item not in returnable_items for item in bought_items]):
raise ValueError("Encountered item not in transaction to refund")
if self.method == Transaction.DWOLLA:
raise ValueError('Dwolla transactions cannot be refunded through the Dancerfly website.')
refund_kwargs = {
'order': self.order,
'related_transaction': self,
'api_type': self.api_type,
'created_by': issuer,
'event': self.event,
}
if amount != 0:
# May raise an error
if self.method == Transaction.STRIPE:
refund = stripe_refund(
event=self.order.event,
order=self.order,
payment_id=self.remote_id,
amount=amount,
)
txn = Transaction.from_stripe_refund(refund, **refund_kwargs)
# If no payment processor was involved, just make a transaction
if amount == 0 or self.method != Transaction.STRIPE:
txn = Transaction.objects.create(
transaction_type=Transaction.REFUND,
amount=-1 * amount,
is_confirmed=True,
remote_id='',
method=self.method,
**refund_kwargs
)
txn.bought_items = bought_items
bought_items.update(status=BoughtItem.REFUNDED)
return txn
refund.alters_data = True
class BoughtItem(models.Model):
"""
Represents an item bought (or reserved) by a person.
"""
# These are essentially just sugar. They might be used
# for display, but they don't really guarantee anything.
RESERVED = 'reserved'
UNPAID = 'unpaid'
BOUGHT = 'bought'
REFUNDED = 'refunded'
TRANSFERRED = 'transferred'
STATUS_CHOICES = (
(RESERVED, _('Reserved')),
(UNPAID, _('Unpaid')),
(BOUGHT, _('Bought')),
(REFUNDED, _('Refunded')),
(TRANSFERRED, _('Transferred')),
)
item_option = models.ForeignKey(ItemOption, blank=True, null=True, on_delete=models.SET_NULL)
order = models.ForeignKey(Order, related_name='bought_items')
added = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=11,
choices=STATUS_CHOICES,
default=UNPAID)
# Values cached at creation time, in case the values change / the
# referenced items are deleted.
item_name = models.CharField(max_length=30)
item_description = models.TextField(blank=True)
item_option_name = models.CharField(max_length=30)
price = models.DecimalField(max_digits=6, decimal_places=2, validators=[MinValueValidator(0)])
# BoughtItem has a single attendee, but attendee can have
# more than one BoughtItem. Basic example: Attendee can
# have more than one class. Or, hypothetically, merch bought
# by a single person could be assigned to multiple attendees.
attendee = models.ForeignKey('Attendee', blank=True, null=True,
related_name='bought_items', on_delete=models.SET_NULL)
def can_transfer(self):
return (
self.status == self.BOUGHT and
not self.transactions.filter(is_confirmed=False).exists()
)
class Meta:
ordering = ('added',)
def __unicode__(self):
return u"{} – {} ({})".format(
self.item_option_name,
self.order.code,
self.pk,
)
class BoughtItemDiscount(models.Model):
""""Tracks whether an item has had a discount applied to it."""
PERCENT = 'percent'
FLAT = 'flat'
TYPE_CHOICES = (
(FLAT, _('Flat')),
(PERCENT, _('Percent')),
)
discount = models.ForeignKey(Discount, blank=True, null=True, on_delete=models.SET_NULL)
bought_item = models.ForeignKey(BoughtItem, related_name='discounts')
timestamp = models.DateTimeField(default=timezone.now)
# Values cached at creation time, in case the values change / the
# referenced items are deleted.
name = models.CharField(max_length=40)
code = models.CharField(max_length=20)
discount_type = models.CharField(max_length=7,
choices=TYPE_CHOICES,
default=FLAT)
amount = models.DecimalField(max_digits=6, decimal_places=2,
validators=[MinValueValidator(0)])
class Meta:
unique_together = ('bought_item', 'code')
def savings(self):
return min(self.amount
if self.discount_type == BoughtItemDiscount.FLAT
else self.amount / 100 * self.bought_item.price,
self.bought_item.price)
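# Illustrative example: a PERCENT discount with amount=10 on a 50.00 item
# saves min(10 / 100 * 50.00, 50.00) == 5.00, while a FLAT discount of 60.00
# on the same item is capped at the item price, 50.00.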
class HousingRequestNight(models.Model):
date = models.DateField()
class Meta:
ordering = ('date',)
def __unicode__(self):
return date(self.date, 'l, F jS')
@receiver(signals.post_save, sender=Event)
def create_request_nights(sender, instance, **kwargs):
"""
At some point in the future, we might want to switch this to
a ForeignKey relationship in the other direction, and let folks
annotate each night with specific information. For now, for simplicity,
we're sticking with the relationship that already exists.
"""
date_set = set(instance.get_housing_dates())
seen = set(HousingRequestNight.objects.filter(date__in=date_set).values_list('date', flat=True))
to_create = date_set - seen
if to_create:
HousingRequestNight.objects.bulk_create([
HousingRequestNight(date=date) for date in to_create
])
class Attendee(AbstractNamedModel):
"""
This model represents information about someone attending an event.
"""
NEED = 'need'
HAVE = 'have'
HOME = 'home'
HOUSING_STATUS_CHOICES = (
(NEED, 'Needs housing'),
(HAVE, 'Already arranged / hosting not required'),
(HOME, 'Staying at own home'),
)
# Internal tracking data
order = models.ForeignKey(Order, related_name='attendees')
saved_attendee = models.ForeignKey('SavedAttendee', blank=True, null=True, on_delete=models.SET_NULL)
# Basic data - always required for attendees.
basic_completed = models.BooleanField(default=False)
email = models.EmailField()
phone = models.CharField(max_length=50, blank=True)
liability_waiver = models.BooleanField(default=False, help_text="Must be agreed to by the attendee themselves.")
photo_consent = models.BooleanField(default=False, verbose_name='I consent to have my photo taken at this event.')
housing_status = models.CharField(max_length=4, choices=HOUSING_STATUS_CHOICES,
default=HAVE, verbose_name='housing status')
# Housing information - all optional.
housing_completed = models.BooleanField(default=False)
nights = models.ManyToManyField(HousingRequestNight, blank=True)
ef_cause = models.ManyToManyField(EnvironmentalFactor,
related_name='attendee_cause',
blank=True,
verbose_name="People around me may be exposed to")
ef_avoid = models.ManyToManyField(EnvironmentalFactor,
related_name='attendee_avoid',
blank=True,
verbose_name="I can't/don't want to be around")
person_prefer = models.TextField(blank=True,
verbose_name="I need to be placed with these people",
help_text="Provide a list of names, separated by line breaks.")
person_avoid = models.TextField(blank=True,
verbose_name="I do not want to be around these people",
help_text="Provide a list of names, separated by line breaks.")
housing_prefer = models.ManyToManyField(HousingCategory,
related_name='event_preferred_by',
blank=True,
verbose_name="I prefer to stay somewhere that is (a/an)")
other_needs = models.TextField(blank=True)
custom_data = GenericRelation('CustomFormEntry', content_type_field='related_ct', object_id_field='related_id')
# Organizer-only data
notes = models.TextField(blank=True)
def __unicode__(self):
return self.get_full_name()
def get_groupable_items(self):
return self.bought_items.order_by('item_name', 'item_option_name', '-added')
def needs_housing(self):
return self.housing_status == self.NEED
class SavedAttendee(AbstractNamedModel):
person = models.ForeignKey(Person)
email = models.EmailField()
phone = models.CharField(max_length=50, blank=True)
ef_cause = models.ManyToManyField(EnvironmentalFactor,
related_name='saved_attendee_cause',
blank=True,
verbose_name="People around me may be exposed to")
ef_avoid = models.ManyToManyField(EnvironmentalFactor,
related_name='saved_attendee_avoid',
blank=True,
verbose_name="I can't/don't want to be around")
person_prefer = models.TextField(blank=True,
verbose_name="I need to be placed with these people",
help_text="Provide a list of names, separated by line breaks.")
person_avoid = models.TextField(blank=True,
verbose_name="I do not want to be around these people",
help_text="Provide a list of names, separated by line breaks.")
housing_prefer = models.ManyToManyField(HousingCategory,
blank=True,
verbose_name="I prefer to stay somewhere that is (a/an)")
other_needs = models.TextField(blank=True)
# Internal tracking fields.
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.get_full_name()
class Home(models.Model):
address = models.CharField(max_length=200)
address_2 = models.CharField(max_length=200, blank=True)
city = models.CharField(max_length=50)
state_or_province = models.CharField(max_length=50, verbose_name='state / province')
zip_code = models.CharField(max_length=12, blank=True, verbose_name="zip / postal code")
country = CountryField()
public_transit_access = models.BooleanField(default=False,
verbose_name="My/Our house has easy access to public transit")
ef_present = models.ManyToManyField(EnvironmentalFactor,
related_name='home_present',
blank=True,
verbose_name="People in my/our home may be exposed to")
ef_avoid = models.ManyToManyField(EnvironmentalFactor,
related_name='home_avoid',
blank=True,
verbose_name="I/We don't want in my/our home")
person_prefer = models.TextField(blank=True,
verbose_name="I/We would love to host",
help_text="Provide a list of names, separated by line breaks.")
person_avoid = models.TextField(blank=True,
verbose_name="I/We don't want to host",
help_text="Provide a list of names, separated by line breaks.")
housing_categories = models.ManyToManyField(HousingCategory,
related_name='homes',
blank=True,
verbose_name="My/Our home is (a/an)")
class EventHousing(models.Model):
event = models.ForeignKey(Event)
home = models.ForeignKey(Home, blank=True, null=True, on_delete=models.SET_NULL)
order = models.OneToOneField(Order, related_name='eventhousing')
# Eventually add a contact_person field.
contact_name = models.CharField(max_length=100)
contact_email = models.EmailField(blank=True)
contact_phone = models.CharField(max_length=50)
# Duplicated data from Home, plus confirm fields.
address = models.CharField(max_length=200)
address_2 = models.CharField(max_length=200, blank=True)
city = models.CharField(max_length=50)
state_or_province = models.CharField(max_length=50, verbose_name='state / province')
zip_code = models.CharField(max_length=12, blank=True, verbose_name="zip / postal code")
country = CountryField()
public_transit_access = models.BooleanField(default=False,
verbose_name="My/Our house has easy access to public transit")
ef_present = models.ManyToManyField(EnvironmentalFactor,
related_name='eventhousing_present',
blank=True,
verbose_name="People in the home may be exposed to")
ef_avoid = models.ManyToManyField(EnvironmentalFactor,
related_name='eventhousing_avoid',
blank=True,
verbose_name="I/We don't want in my/our home")
person_prefer = models.TextField(blank=True,
verbose_name="I/We would love to host",
help_text="Provide a list of names, separated by line breaks.")
person_avoid = models.TextField(blank=True,
verbose_name="I/We don't want to host",
help_text="Provide a list of names, separated by line breaks.")
housing_categories = models.ManyToManyField(HousingCategory,
related_name='eventhousing',
blank=True,
verbose_name="Our home is (a/an)")
custom_data = GenericRelation('CustomFormEntry', content_type_field='related_ct', object_id_field='related_id')
class HousingSlot(models.Model):
eventhousing = models.ForeignKey(EventHousing)
date = models.DateField()
spaces = models.PositiveSmallIntegerField(default=0,
validators=[MaxValueValidator(100)])
spaces_max = models.PositiveSmallIntegerField(default=0,
validators=[MaxValueValidator(100)])
class HousingAssignment(models.Model):
# Home plans are ignored when checking against spaces.
AUTO = 'auto'
MANUAL = 'manual'
ASSIGNMENT_TYPE_CHOICES = (
(AUTO, _("Automatic")),
(MANUAL, _("Manual"))
)
attendee = models.ForeignKey(Attendee)
slot = models.ForeignKey(HousingSlot)
assignment_type = models.CharField(max_length=6, choices=ASSIGNMENT_TYPE_CHOICES)
class InviteManager(models.Manager):
def get_or_create_invite(self, email, user, kind, content_id):
while True:
code = get_random_string(
length=20,
allowed_chars='abcdefghijkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ23456789-~'
)
if not Invite.objects.filter(code=code).exists():
break
defaults = {
'user': user,
'code': code,
}
return self.get_or_create(email=email, content_id=content_id, kind=kind, defaults=defaults)
class Invite(models.Model):
objects = InviteManager()
code = models.CharField(max_length=20, unique=True)
email = models.EmailField()
#: User who sent the invitation.
user = models.ForeignKey(Person, blank=True, null=True)
is_sent = models.BooleanField(default=False)
kind = models.CharField(max_length=10)
content_id = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
unique_together = (('email', 'content_id', 'kind'),)
class CustomForm(models.Model):
ATTENDEE = 'attendee'
ORDER = 'order'
HOUSING = 'housing'
HOSTING = 'hosting'
FORM_TYPE_CHOICES = (
(ATTENDEE, _('Attendee')),
(ORDER, _('Order')),
(HOUSING, _('Housing')),
(HOSTING, _('Hosting')),
)
form_type = models.CharField(max_length=8, choices=FORM_TYPE_CHOICES)
event = models.ForeignKey(Event, related_name="forms")
# TODO: Add fk/m2m to BoughtItem to limit people the form is
# displayed to.
name = models.CharField(max_length=50,
help_text="For organization purposes. This will not be displayed to attendees.")
index = models.PositiveSmallIntegerField(default=0,
help_text="Defines display order if you have multiple forms.")
def __unicode__(self):
return self.name
class Meta:
ordering = ('index',)
def get_fields(self):
# Returns field definition dict that can be added to a form
return OrderedDict((
(field.key, field.formfield())
for field in self.fields.all()
))
def get_data(self, related_obj):
related_ct = ContentType.objects.get_for_model(related_obj)
related_id = related_obj.pk
entries = CustomFormEntry.objects.filter(
related_ct=related_ct,
related_id=related_id,
form_field__form=self,
)
raw_data = {
entry.form_field_id: entry.get_value()
for entry in entries
}
return {
field.key: raw_data[field.pk]
for field in self.fields.all()
if field.pk in raw_data
}
def save_data(self, cleaned_data, related_obj):
related_ct = ContentType.objects.get_for_model(related_obj)
related_id = related_obj.pk
for field in self.fields.all():
value = json.dumps(cleaned_data.get(field.key))
CustomFormEntry.objects.update_or_create(
related_ct=related_ct,
related_id=related_id,
form_field=field,
defaults={'value': value},
)
class CustomFormField(models.Model):
TEXT = 'text'
TEXTAREA = 'textarea'
BOOLEAN = 'boolean'
RADIO = 'radio'
SELECT = 'select'
CHECKBOXES = 'checkboxes'
SELECT_MULTIPLE = 'select_multiple'
CHOICE_TYPES = (RADIO, SELECT, CHECKBOXES, SELECT_MULTIPLE)
FIELD_TYPE_CHOICES = (
(TEXT, _('Text')),
(TEXTAREA, _('Paragraph text')),
(BOOLEAN, _('Checkbox')),
(RADIO, _('Radio buttons')),
(SELECT, _('Dropdown')),
(CHECKBOXES, _('Multiple checkboxes')),
(SELECT_MULTIPLE, _('Dropdown (Multiple)')),
)
field_type = models.CharField(max_length=15, choices=FIELD_TYPE_CHOICES, default=TEXT)
form = models.ForeignKey(CustomForm, related_name='fields')
name = models.CharField(max_length=255)
default = models.CharField(max_length=255, blank=True)
required = models.BooleanField(default=False)
index = models.PositiveSmallIntegerField(default=0)
# Choices are linebreak-separated values
choices = models.TextField(help_text='Put each choice on its own line', default='', blank=True)
help_text = models.CharField(max_length=255, blank=True)
class Meta:
ordering = ('index',)
@property
def key(self):
return "custom_{}_{}".format(self.form_id, self.pk)
def formfield(self):
kwargs = {
'required': self.required,
'initial': self.default,
'label': self.name,
'help_text': self.help_text,
}
if self.field_type in self.CHOICE_TYPES:
choices = self.choices.splitlines()
kwargs['choices'] = zip(choices, choices)
if self.field_type == self.TEXT:
field_class = forms.CharField
elif self.field_type == self.TEXTAREA:
field_class = forms.CharField
kwargs['widget'] = forms.Textarea
elif self.field_type == self.BOOLEAN:
field_class = forms.BooleanField
elif self.field_type == self.RADIO:
field_class = forms.ChoiceField
kwargs['widget'] = forms.RadioSelect
elif self.field_type == self.SELECT:
field_class = forms.ChoiceField
elif self.field_type == self.CHECKBOXES:
field_class = forms.MultipleChoiceField
kwargs['widget'] = forms.CheckboxSelectMultiple
elif self.field_type == self.SELECT_MULTIPLE:
field_class = forms.MultipleChoiceField
return field_class(**kwargs)
class CustomFormEntry(models.Model):
related_ct = models.ForeignKey(ContentType)
related_id = models.IntegerField()
related_obj = GenericForeignKey('related_ct', 'related_id')
form_field = models.ForeignKey(CustomFormField)
value = models.TextField(blank=True)
class Meta:
unique_together = (('related_ct', 'related_id', 'form_field'),)
def set_value(self, value):
self.value = json.dumps(value)
def get_value(self):
try:
return json.loads(self.value)
except Exception:
return ''
class SavedReport(models.Model):
ATTENDEE = 'attendee'
ORDER = 'order'
REPORT_TYPE_CHOICES = (
(ATTENDEE, _('Attendee')),
(ORDER, _('Order')),
)
report_type = models.CharField(max_length=8, choices=REPORT_TYPE_CHOICES)
event = models.ForeignKey(Event)
name = models.CharField(max_length=40)
querystring = models.TextField()
class ProcessedStripeEvent(models.Model):
LIVE = LIVE
TEST = TEST
API_CHOICES = (
(LIVE, _('Live')),
(TEST, _('Test')),
)
api_type = models.CharField(max_length=4, choices=API_CHOICES, default=LIVE)
stripe_event_id = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ('api_type', 'stripe_event_id')
# Update event / org last-modified stats on various changes
@receiver(signals.post_save, sender=Transaction)
@receiver(signals.post_save, sender=Item)
@receiver(signals.post_save, sender=Discount)
@receiver(signals.post_save, sender=CustomForm)
def update_event_and_org_last_modified(sender, instance, **kwargs):
now = timezone.now()
event_id = instance.event_id
Event.objects.filter(pk=event_id).update(last_modified=now)
Organization.objects.filter(event=event_id).update(last_modified=now)
@receiver(signals.post_save, sender=Event)
def update_org_last_modified(sender, instance, **kwargs):
now = timezone.now()
org_id = instance.organization_id
Organization.objects.filter(pk=org_id).update(last_modified=now)
|
from django.db import models
from mdeditor.fields import MDTextField
import markdown
import emoji
class Comment(models.Model):
# ID of the comment author
author_id = models.IntegerField(verbose_name='comment author id')
belong_article_id = models.IntegerField(verbose_name='id of the article this comment belongs to')
create_date = models.DateTimeField('creation time', auto_now_add=True)
content = MDTextField('comment content')
parent_id = models.IntegerField(verbose_name='parent comment id')
rep_to_id = models.IntegerField(verbose_name='replied-to comment id')
is_deleted = models.BooleanField('deleted', default=False)
class Meta:
'''Meta options: this model is abstract and meant to be inherited.'''
abstract = True
def __str__(self):
return self.content[:20]
def content_to_markdown(self):
# First expand emoji aliases, then render markdown; 'escape' means all raw HTML is escaped and included in the document.
to_emoji_content = emoji.emojize(self.content, use_aliases=True)
to_md = markdown.markdown(to_emoji_content,
safe_mode='escape',
extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
])
return to_md
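# Illustrative example (not from the original source): content of
# ":smile: **hi**" is first expanded to the emoji character and then
# rendered to HTML roughly as "<p>😄 <strong>hi</strong></p>".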
# Message board
class Message_Board(Comment):
author_id = models.IntegerField(verbose_name='message author id')
message_parent_id = models.IntegerField(verbose_name='parent comment id')
message_rep_id = models.IntegerField(verbose_name='replied-to comment id')
is_deleted = models.BooleanField('deleted', default=False)
class Meta:
verbose_name = 'site message'
verbose_name_plural = verbose_name
ordering = ['create_date']
class Comment_Notification(models.Model):
create_user_id = models.IntegerField(verbose_name='notifying user id')
recieve_user_id = models.IntegerField(verbose_name='receiving user id')
comment_id = models.IntegerField(verbose_name='related comment id')
create_date = models.DateTimeField('notification time', auto_now_add=True)
is_read = models.BooleanField('read', default=False)
is_deleted = models.BooleanField('deleted', default=False)
def mark_to_read(self):
self.is_read = True
self.save(update_fields=['is_read'])
class Meta:
verbose_name = 'notification'
verbose_name_plural = verbose_name
ordering = ['-create_date']
def __str__(self):
return '{} mentioned {}'.format(self.create_user_id, self.recieve_user_id)
|
#!/usr/bin/python
# coding:utf-8
from __future__ import print_function, unicode_literals
# import requests
from requests.adapters import HTTPAdapter
import crawspider
from config import *
from logger_router import LoggerRouter
logger = LoggerRouter().getLogger(__name__)
import cfscrape
def main():
"""
项目入口
"""
logger.info("已启动。。")
# request_client = requests.Session()
request_client = cfscrape.create_scraper()
request_client.mount('http://', HTTPAdapter(max_retries=3))
while True:
try:
task_content = request_client.get(TASK_API).content
logger.info(task_content)
except Exception as e:
logger.error("任务队列服务器崩溃, exception: '%s'" % e)
break
info_dict = eval(str(task_content).split("y")[1].split("<")[0])
logger.info(info_dict["id"])
ip_group_list = info_dict["ip"].split(".")
info_dict["ip"] = ip_group_list[0] + "." + ip_group_list[1] + "." + ip_group_list[2]
logger.info(info_dict["ip"])
crawspider.get_all(info_dict["ip"])
result_content = request_client.get(RESULT_API + info_dict["id"]).content
logger.info(result_content)
logger.info("所有任务均爬取完成,请核对数据库中可能存在的因中断而极个别漏爬的IP")
if __name__ == '__main__':
main()
|
import json
import pandas as pd
Q2file=json.load(open("Commenthour.json"))
Q2data = pd.DataFrame.from_dict(Q2file, orient='columns')
Q2data.columns = ['days', 'hour','comments']
Q2data.loc[Q2data['days'] == 0, ['days']] = 'Sun'
Q2data.loc[Q2data['days'] == 1, ['days']] = 'Mon'
Q2data.loc[Q2data['days'] == 2, ['days']] = 'Tue'
Q2data.loc[Q2data['days'] == 3, ['days']] = 'Wed'
Q2data.loc[Q2data['days'] == 4, ['days']] = 'Thurs'
Q2data.loc[Q2data['days'] == 5, ['days']] = 'Fri'
Q2data.loc[Q2data['days'] == 6, ['days']] = 'Sat'
Q2data['hour']=Q2data['hour'].astype(str)+".00 ~ "+(Q2data['hour']+1).astype(str)+".00"
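# Illustrative example: a row that started as (days=0, hour=13, comments=42)
# is now ("Sun", "13.00 ~ 14.00", 42) and is written out below.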
Q2data.to_csv("ProjectdataQ2.csv")
|
import math
from src.Simulator import *
from src.arms import *
'''This script illustrates the dynamic behavior of a robot model without control inputs '''
# Create robot
q_init = np.array([[-math.pi / 10], [0], [0]])
robot = ThreeDofArm()
robot.set_q_init(q_init)
# Create simulator without controller
sim = Simulator(robot)
# Simulate dynamics
sim.simulate()
# OR Step (10 times) through the dynamics
# dt = 0.1
# joint_torque = np.zeros((2, 1))
# for i in range(10):
# robot.step(joint_torque, dt)
# robot.plot()
# plt.show()
|
from unittest import mock
from unittest.mock import sentinel
import pytest
from h.streamer.db import get_session, read_only_transaction
from h.streamer.streamer import UnknownMessageType
class TestMakeSession:
def test_it(self, db):
session = get_session(sentinel.settings)
db.make_engine.assert_called_once_with(sentinel.settings)
db.Session.assert_called_once_with(bind=db.make_engine.return_value)
assert session == db.Session.return_value
@pytest.fixture
def db(self, patch):
return patch("h.streamer.db.db")
class TestReadOnlyTransaction:
def test_it_starts_a_read_only_transaction(self, session):
with read_only_transaction(session):
...
assert session.method_calls[0] == mock.call.execute(
"SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE"
)
def test_it_calls_closes_correctly(self, session):
with read_only_transaction(session):
...
assert session.method_calls[-2:] == [mock.call.commit(), mock.call.close()]
@pytest.mark.parametrize("exception", (UnknownMessageType, RuntimeError))
def test_it_rolls_back_on_handler_exception(self, session, exception):
with read_only_transaction(session):
raise exception()
self._assert_rollback_and_close(session)
@pytest.mark.parametrize("exception", (KeyboardInterrupt, SystemExit))
def test_it_reraises_certain_exceptions(self, session, exception):
with pytest.raises(exception):
with read_only_transaction(session):
raise exception
self._assert_rollback_and_close(session)
def _assert_rollback_and_close(self, session):
session.commit.assert_not_called()
assert session.method_calls[-2:] == [mock.call.rollback(), mock.call.close()]
@pytest.fixture
def session(self):
return mock.Mock(spec_set=["close", "commit", "execute", "rollback"])
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = "Amino",
version = "0.0.0.1",
description = "A collection of modules for performing simple PDB file analysis.",
long_description=long_description,
long_description_content_type="text/markdown",
author = "Njagi Mwaniki",
author_email= "njagi@urbanslug.com",
setup_requires=["pytest-runner"],
tests_require=["pytest"],
packages=find_packages(),
test_suite="pdb.tests"
)
|
"""
FIFO queue data structure.
"""
from __future__ import absolute_import
import abc
import tf_encrypted as tfe
class AbstractFIFOQueue(abc.ABC):
"""
FIFO queues mimicking `tf.queue.FIFOQueue`.
"""
@abc.abstractmethod
def enqueue(self, tensor):
"""
Push `tensor` onto queue.
Blocks if queue is full.
"""
@abc.abstractmethod
def dequeue(self):
"""
Pop tensor from queue.
Blocks if queue is empty.
"""
def FIFOQueue(capacity, shape, shared_name=None):
return tfe.fifo_queue(capacity=capacity, shape=shape, shared_name=shared_name,)
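# Minimal usage sketch (assumes a configured tf-encrypted protocol/session;
# the capacity and shape values below are illustrative):
#
#   queue = FIFOQueue(capacity=10, shape=(2, 2))
#   queue.enqueue(some_private_tensor)  # blocks if the queue is full
#   result = queue.dequeue()            # blocks if the queue is empty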
|
import tensorflow.keras.models
def model_skeleton_from_keras_file(filename):
with open(filename) as f:
return tensorflow.keras.models.model_from_json(f.read())
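# Usage sketch (assumes "model.json" was produced by model.to_json();
# only the architecture is restored, not the weights):
#   model = model_skeleton_from_keras_file("model.json")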
|
#!/usr/bin/env python3
# Welcome to the (portable) main file of carafe
# A tiny management tool for wine bottles/carafes/containers
# Program configuration is saved in "~/.carafe"
__author__ = "Jelmer van Arnhem"
# See README.md for more details and usage instructions
__license__ = "MIT"
# See LICENSE for more details and exact terms
__version__ = "1.3.0"
# See https://github.com/jelmerro/carafe for repo and updates
import argparse
import glob
import json
import os
import shutil
import subprocess
import sys
# MAIN CONFIG FOLDER LOCATION
# If you really want to, you can change the folder location here
CONFIG_FOLDER = os.path.join(os.path.expanduser("~"), ".carafe")
# CONFIG FILE LOCATION
# It's recommended to leave this path as is and only change the folder location
CONFIG_FILE = os.path.join(CONFIG_FOLDER, "config.json")
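# Example config.json layout (illustrative; the global tool paths are read
# further below and the per-carafe entries are written by modify_config):
# {
#   "wine": "/usr/bin/wine",
#   "winetricks": "winetricks",
#   "mygame": {"arch": "win32", "link": "Program Files/Game/game.exe"}
# }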
# UTIL methods for small/common tasks
def read_config():
if not os.path.isdir(CONFIG_FOLDER):
return {}
if not os.path.isfile(CONFIG_FILE):
return {}
with open(CONFIG_FILE) as f:
config = json.load(f)
if config == {}:
try:
os.remove(CONFIG_FILE)
except OSError:
pass
return config
def remove_config(name):
config = read_config()
config.pop(name, None)
if config == {}:
try:
os.remove(CONFIG_FILE)
except OSError:
pass
else:
with open(CONFIG_FILE, "w") as f:
json.dump(config, f)
def modify_config(name, field, value):
config = read_config()
if name not in config:
config[name] = {}
config[name][field] = value
os.makedirs(CONFIG_FOLDER, exist_ok=True)
with open(CONFIG_FILE, "w") as f:
json.dump(config, f)
def list_carafes():
carafes = []
if os.path.isdir(CONFIG_FOLDER):
for item in os.listdir(CONFIG_FOLDER):
if os.path.isdir(os.path.join(CONFIG_FOLDER, item)):
carafes.append(item)
if carafes:
print("The following carafes are currently configured:")
for carafe in carafes:
print(carafe)
print(f"Run '{sys.argv[0]} <carafe_name> info' for more information")
else:
print("There are currently no carafes configured")
print(f"Use '{sys.argv[0]} <carafe_name> create' to add a new carafe")
sys.exit(0)
def check_for_tool(name, location):
if shutil.which(location):
return
print(f"\nThe required tool '{name}' could not be found")
if location == name:
print("Please install it using your package manager")
print("(Most required tools will be installed with wine)\n")
print(f"Or set a custom location in '{CONFIG_FILE}'")
print("(You might need to create the file manually)")
print(f"(In the main object set '{name}' to the correct path)\n")
else:
print("The path was manually changed in the config file")
print(f"The location is set to '{location}'")
print(f"Please remove the custom location from '{CONFIG_FILE}'")
print(f"Or update the path to the correct '{name}' location\n")
# Wine command locations, optionally loaded from the config file
# It's recommended to change them manually in the config file and not here
conf = read_config()
WINE = conf.get("wine", "wine")
WINETRICKS = conf.get("winetricks", "winetricks")
check_for_tool("wine", WINE)
# Carafe class for managing and starting carafes
class Carafe:
def __init__(self, name):
self.name = name
self.forbidden_names = ["config.json", "wine", "winetricks"]
if not self.name:
print("The current name is not allowed because it appears empty")
sys.exit(1)
if self.name in self.forbidden_names:
print("The current name is not allowed because it is reserved")
sys.exit(1)
self.prefix = os.path.join(CONFIG_FOLDER, self.name)
self.arch = self.read_arch()
self.link_location = self.read_link()
self.wine = self.read_wine()
# Linked functions directly called from the parser
def create(self, args):
if os.path.isdir(self.prefix):
print(
f"{self.name} is already a carafe\n"
f"Please see the list with '{sys.argv[0]} list'")
sys.exit(1)
os.makedirs(self.prefix, exist_ok=True)
self.arch = args.arch
remove_config(self.name)
if self.arch:
modify_config(self.name, "arch", self.arch)
self.run_command(f"{self.wine} wineboot --init")
def install(self, args):
self.exists()
executable = args.executable
if not executable:
executable = input(
"To install a program to the carafe, enter the location: ")
executable = executable.strip()
for char in ["'", "\""]:
if executable.startswith(char) and executable.endswith(char):
executable = executable.replace(char, "", 1)
executable = executable[::-1].replace(char, "", 1)[::-1]
executable = executable.strip()
if not os.path.isfile(executable):
print("The specified executable could not be found")
sys.exit(1)
if executable.endswith(".msi"):
self.run_command(f"{self.wine} msiexec /i \"{executable}\"")
else:
self.run_command(f"{self.wine} \"{executable}\"")
def start(self, args):
self.exists()
if args.ask:
start = self.ask_for_executable(True)
if start == "link":
start = self.link_location
elif args.location:
start = self.try_to_sanitize_location(args.location)
elif not self.link_location:
print(
f"{self.name} has no default/linked program path\n"
f"Please add one with '{sys.argv[0]} {self.name} link'")
sys.exit(1)
else:
start = self.link_location
self.arch = self.read_arch()
path = os.path.join(self.prefix, "drive_c", start)
arg_string = " "
for arg in args.arguments:
arg_string += f"{arg} "
if args.keep_log:
self.run_command(
f"{self.wine} \"{path}\" {arg_string}",
os.path.dirname(path))
else:
env = os.environ
env["WINEPREFIX"] = self.prefix
if self.arch:
env["WINEARCH"] = self.arch
env["WINEDEBUG"] = "-all"
subprocess.run(
f"{self.wine} \"{path}\" {arg_string}", shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
cwd=os.path.dirname(path), env=env)
def rename(self, args):
self.copy(args)
self.remove(None)
def copy(self, args):
self.exists()
newname = args.newname.replace(" ", "").replace("/", "-")
if not newname:
print("The new name is not allowed because it appears empty")
sys.exit(1)
additional_reserved = ["-h", "--help", "list"]
if newname in self.forbidden_names or newname in additional_reserved:
print("The new name is not allowed because it is reserved")
sys.exit(1)
newpath = os.path.join(CONFIG_FOLDER, newname)
if os.path.isdir(newpath):
print(
f"{newname} is already a carafe\n"
f"Please see the list with '{sys.argv[0]} list'")
sys.exit(1)
shutil.copytree(self.prefix, newpath, symlinks=True)
if self.arch:
modify_config(newname, "arch", self.arch)
if self.link_location:
modify_config(newname, "link", self.link_location)
try:
os.remove(os.path.join(newpath, "log"))
except OSError:
pass
def remove(self, _args):
remove_config(self.name)
self.exists()
shutil.rmtree(self.prefix)
if not os.listdir(CONFIG_FOLDER):
shutil.rmtree(CONFIG_FOLDER)
def info(self, _args):
self.exists()
executables = self.list_executables()
print(f"All information about carafe '{self.name}':")
if self.arch:
print(f"Configured with custom arch: {self.arch}")
else:
print("Configured with default system arch")
if self.link_location:
print("A link for easy startup is configured to the following:")
print(self.link_location)
else:
print("No link is currently configured")
print(
"When a carafe is linked, you can start the program with "
f"'{sys.argv[0]} {self.name} start'")
print(f"To modify the link, use '{sys.argv[0]} {self.name} link'")
if executables:
print("\nThe current list of executables looks like this:")
for exe in executables:
print(f"C:/{exe}")
print(
f"You can add more with '{sys.argv[0]} {self.name} install'")
else:
print("\nThere are currently no executables found for this carafe")
print(
f"Please add them with '{sys.argv[0]} {self.name} install'")
def link(self, args):
self.exists()
if args.location:
loc = self.try_to_sanitize_location(args.location)
else:
loc = self.ask_for_executable(False)
modify_config(self.name, "link", loc)
def shortcut(self, args):
self.exists()
if not os.path.isdir(args.output_folder):
print("The output folder does not seem to exist")
sys.exit(1)
loc = args.location
if not loc:
loc = self.ask_for_executable(True)
elif loc != "link":
loc = self.try_to_sanitize_location(args.location)
if args.type:
shortcut_type = args.type
else:
shortcut_type = ""
print("carafe can make two types of shortcut")
print("One type needs carafe, but it can auto-update the link")
print("The other type is a pure wine shortcut, but is static")
while shortcut_type not in ["carafe", "wine"]:
shortcut_type = input("Choose the type of shortcut to make: ")
if shortcut_type == "carafe":
shortcut_contents = self.carafe_shortcut(loc)
else:
shortcut_contents = self.wine_shortcut(loc)
if args.name:
file_name = f"{args.name}.desktop"
else:
file_name = f"{self.name}.desktop"
output_file = os.path.join(args.output_folder, file_name)
with open(output_file, "w") as f:
f.write(shortcut_contents)
def log(self, _args):
self.exists()
log_file = os.path.join(self.prefix, "log")
if os.path.isfile(log_file):
with open(log_file) as f:
print(f.read())
else:
print(f"No logs for '{self.name}' carafe yet")
def regedit(self, _args):
self.exists()
self.run_command(f"{self.wine} regedit")
def winecfg(self, _args):
self.exists()
self.run_command(f"{self.wine} winecfg")
def winetricks(self, args):
self.exists()
check_for_tool("winetricks", WINETRICKS)
arg_string = " "
for arg in args.arguments:
arg_string += f"{arg} "
self.run_command(f"{WINETRICKS} {arg_string}")
# Class helper functions
def exists(self):
if not os.path.isdir(self.prefix):
print(
f"{self.name} is not a known carafe\n"
f"For a list of all carafes: '{sys.argv[0]} list'\n"
f"Or add a new one with '{sys.argv[0]} {self.name} create'")
sys.exit(1)
def read_link(self):
config = read_config()
if self.name in config:
if "link" in config[self.name]:
return config[self.name]["link"]
return None
def read_wine(self):
config = read_config()
if self.name in config:
if "wine" in config[self.name]:
return config[self.name]["wine"]
return WINE
def read_arch(self):
config = read_config()
if self.name in config:
if "arch" in config[self.name]:
return config[self.name]["arch"]
return None
def run_command(self, command, cwd=None):
env = os.environ
env["WINEPREFIX"] = self.prefix
if self.arch:
env["WINEARCH"] = self.arch
with open(os.path.join(self.prefix, "log"), "wb") as log_file:
subprocess.run(
command, shell=True, stderr=log_file, stdout=log_file,
cwd=cwd, env=env)
def try_to_sanitize_location(self, loc):
loc = loc.strip()
if loc.startswith("C:"):
loc = loc.replace("C:", "", 1)
if loc.startswith(os.path.join(self.prefix, "drive_c")):
loc = loc.replace(os.path.join(self.prefix, "drive_c"), "", 1)
if loc.startswith("/"):
loc = loc.replace("/", "", 1)
loc = loc.strip()
absolute = os.path.join(self.prefix, "drive_c", loc)
if not os.path.isfile(absolute):
print("Location provided could not be found")
sys.exit(1)
return loc
def list_executables(self):
drive_c = os.path.join(self.prefix, "drive_c")
windows = os.path.join(drive_c, "windows")
exe_pattern = os.path.join(drive_c, "**", "*.exe")
executables = []
exe_files = sorted(glob.glob(exe_pattern, recursive=True))
for exe in exe_files:
if not exe.startswith(windows):
exe = exe.replace(drive_c, "", 1)
if exe.startswith("/"):
exe = exe.replace("/", "", 1)
executables.append(exe)
return executables
def ask_for_executable(self, include_link):
executables = self.list_executables()
if not executables:
print(
"There are currently no executables found for this carafe")
print(
f"Please add them with '{sys.argv[0]} {self.name} install'")
sys.exit(1)
for index, exe in enumerate(executables):
print(f"{index}: C:/{exe}")
link_text = ""
if self.link_location and include_link:
print(f"link: C:/{self.link_location}")
link_text = " (or choose 'link')"
chosen_app = -1
while chosen_app < 0 or chosen_app >= len(executables):
chosen_app = input(
"Choose the number of the application "
f"location{link_text}: ").strip()
if chosen_app == "link" and link_text:
break
try:
chosen_app = int(chosen_app)
except ValueError:
chosen_app = -1
if chosen_app == "link":
return "link"
return executables[chosen_app]
def carafe_shortcut(self, loc):
carafe_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
carafe_exec = os.path.join(carafe_dir, os.path.basename(sys.argv[0]))
if loc == "link":
command = f"{carafe_exec} {self.name} start"
else:
command = f"{carafe_exec} {self.name} start -l \"{loc}\""
return "#!/usr/bin/env xdg-open\n" \
"[Desktop Entry]\n" \
f"Name={self.name}\n" \
"Type=Application\n" \
f"Exec={command}\n"
def wine_shortcut(self, loc):
if loc == "link":
loc = self.link_location
command = f"env WINEPREFIX=\"{self.prefix}\""
if self.arch:
command += " WINEARCH=\"{self.arch}\""
command += f" {self.wine} \"C:/{loc}\""
path = os.path.dirname(os.path.join(self.prefix, "drive_c", loc))
return "#!/usr/bin/env xdg-open\n" \
"[Desktop Entry]\n" \
f"Name={self.name}\n" \
"Type=Application\n" \
f"Exec={command}\n" \
f"Path={path}\n"
def main():
# Prepare the main parser
description = f"Welcome to carafe {__version__}\n" \
"carafe is a tiny management tool for wine bottles/carafes.\n"
usage = "carafe {<carafe_name>,list} <sub_command>"
parser = argparse.ArgumentParser(
usage=usage,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description,
epilog=f"carafe was made by {__author__} and is {__license__} licensed"
"\nFor documentation and other information, see the README.md")
# Sub commands parser
sub = parser.add_subparsers(
title="sub-commands", dest="sub",
description="All the valid sub-commands to manage the carafes")
# Create
sub_create = sub.add_parser(
"create", help="create a new carafe",
usage="carafe <carafe_name> create",
description="Use 'create' to make a new carafe, you should start here")
sub_create.add_argument(
"--arch", help="Change the default arch, e.g. to win32")
# Install
sub_install = sub.add_parser(
"install", help="install software to the carafe",
usage="carafe <carafe_name> install",
description="Use 'install' to run an ext"
"ernal exe/msi inside the carafe")
sub_install.add_argument(
"-e", "--executable",
help="Location of the external executable to run inside the carafe")
# Start
sub_start = sub.add_parser(
"start", help="start an installed program",
usage="carafe <carafe_name> start",
description="Use 'start' to start a program inside an existing carafe")
sub_start.add_argument(
"-k", "--keep-log", action="store_true",
help="Keep the wine log (can be multiple GBs and will slow down wine)")
sub_start.add_argument(
"-a", "--ask", action="store_true",
help="Instead of starting the link or --location, ask for the path")
sub_start.add_argument(
"-l", "--location",
help="Location of the executable inside the carafe to start")
sub_start.add_argument(
"arguments", nargs=argparse.REMAINDER,
help="Any arguments will directly be passed to the started executable")
# Rename
sub_rename = sub.add_parser(
"rename", help="rename an existing carafe",
usage="carafe <carafe_name> rename <new_name>",
description="Use 'rename' to change the name of an existing carafe")
sub_rename.add_argument("newname", help="New name of the carafe")
# Copy
sub_copy = sub.add_parser(
"copy", help="copy an existing carafe",
usage="carafe <carafe_name> copy <new_name>",
description="Use 'copy' to duplicate an existing carafe to a new one")
sub_copy.add_argument("newname", help="Name of the new carafe")
# Remove
sub.add_parser(
"remove", help="remove a carafe",
usage="carafe <carafe_name> remove",
description="Use 'remove' to delete an existing carafe")
# Info
sub.add_parser(
"info", help="all info about a carafe",
usage="carafe <carafe_name> info",
description="Use 'info' to print all information about a carafe")
# Link
sub_link = sub.add_parser(
"link", help="link a program to the carafe",
usage="carafe <carafe_name> link",
description="Use 'link' to connect the startup link (recommended)")
sub_link.add_argument(
"-l", "--location",
help="Location of the executable inside the carafe to link")
# Shortcut
sub_shortcut = sub.add_parser(
"shortcut", help="generate a desktop shortcut",
usage="carafe <carafe_name> shortcut",
description="Use 'shortcut' to create a .desktop shortcut to a carafe")
location_help = "Location of the executable inside the carafe to " \
"shortcut, normally a path, but can be set to 'link' as well"
sub_shortcut.add_argument(
"-l", "--location",
help=location_help)
sub_shortcut.add_argument(
"-o", "--output-folder",
default=os.path.join(os.path.expanduser("~"), "Desktop"),
help="Which folder to place the shortcut, default is the user desktop")
sub_shortcut.add_argument(
"-n", "--name",
help="Name of the new shortcut, default is the name of the carafe")
sub_shortcut.add_argument(
"-t", "--type", choices=["carafe", "wine"],
help="The type of shortcut to make")
# Log
sub.add_parser(
"log", help="show the last command output",
usage="carafe <carafe_name> log <new_name>",
description="Use 'log' to show the output of the last command")
# Regedit
sub.add_parser(
"regedit", help="run regedit",
usage="carafe <carafe_name> regedit",
description="Use 'regedit' to edit the windows registry")
# Winecfg
sub.add_parser(
"winecfg", help="run winecfg",
usage="carafe <carafe_name> winecfg",
description="Use 'winecfg' to configure all wine settings")
# Winetricks
sub_tricks = sub.add_parser(
"winetricks", help="run winetricks",
usage="carafe <carafe_name> winetricks <optional_arguments>",
description="Use 'winetricks' to install winetricks components")
sub_tricks.add_argument(
"arguments", nargs=argparse.REMAINDER,
help="Any arguments will directly be passed to winetricks")
# Actually handle all the arguments
args = sys.argv[1:]
if not args:
parser.print_help()
sys.exit(0)
carafe_name = args.pop(0).replace(" ", "").replace("/", "-")
if carafe_name == "list":
list_carafes()
subargs = parser.parse_args(args)
if not subargs.sub or carafe_name in ["-h", "--help"]:
parser.print_help()
sys.exit(0)
# Call the correct subcommand on the Carafe class
carafe = globals()["Carafe"](carafe_name)
getattr(carafe, subargs.sub)(subargs)
# Main startup steps
if __name__ == "__main__":
main()
|
# Python code
for n in range(10):
print(n + 1)
|
from __future__ import division
import numpy as np
import tensorflow as tf
''' This file aims to solve the end-to-end communication problem over a Rayleigh fading channel '''
''' The condition of the channel GAN is the encoded signal and the channel information h '''
''' We should compare against a baseline equalizer for the Rayleigh fading channel '''
def generator_conditional(z, conditioning): # Convolution Generator
with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
z_combine = tf.concat([z, conditioning], -1)
conv1_g = tf.layers.conv1d(inputs=z_combine, filters=256, kernel_size=5, padding='same')
# conv1_g_bn = tf.layers.batch_normalization(conv1_g, training=training)
conv1_g = tf.nn.leaky_relu(conv1_g)
conv2_g = tf.layers.conv1d(inputs=conv1_g, filters=128, kernel_size=3, padding='same')
conv2_g = tf.nn.leaky_relu(conv2_g)
conv3_g = tf.layers.conv1d(inputs=conv2_g, filters=64, kernel_size=3, padding='same')
conv3_g = tf.nn.leaky_relu(conv3_g)
conv4_g = tf.layers.conv1d(inputs=conv3_g, filters=2, kernel_size=3, padding='same')
return conv4_g
def discriminator_condintional(x, conditioning):
with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
z_combine = tf.concat([x, conditioning], -1)
conv1 = tf.layers.conv1d(inputs=z_combine, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv1 = tf.reduce_mean(conv1, axis=0, keep_dims=True)
conv2 = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=3, padding='same')
conv2 = tf.nn.relu(conv2)
conv3 = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=3, padding='same')
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=16, kernel_size=3, padding='same')
FC = tf.nn.relu(tf.layers.dense(conv4, 100, activation=None))
D_logit = tf.layers.dense(FC, 1, activation=None)
D_prob = tf.nn.sigmoid(D_logit)
return D_prob, D_logit
def encoding(x):
with tf.variable_scope("encoding", reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv1d(inputs=x, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv2 = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=3, padding='same')
conv2 = tf.nn.relu(conv2)
conv3 = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=3, padding='same')
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=2, kernel_size=3, padding='same')
layer_4_normalized = tf.scalar_mul(tf.sqrt(tf.cast(block_length/2, tf.float32)),
tf.nn.l2_normalize(conv4, dim=1)) # normalize the encoding.
return layer_4_normalized
def decoding(x, channel_info):
x_combine = tf.concat([x, channel_info], -1)
with tf.variable_scope("decoding", reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv1d(inputs=x_combine, filters=256, kernel_size=5, padding='same')
conv1 = tf.nn.relu(conv1)
conv2_ori = tf.layers.conv1d(inputs=conv1, filters=128, kernel_size=5, padding='same')
conv2 = tf.nn.relu(conv2_ori)
conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
conv2 = tf.nn.relu(conv2)
conv2 = tf.layers.conv1d(inputs=conv2, filters=128, kernel_size=5, padding='same')
conv2 += conv2_ori
conv2 = tf.nn.relu(conv2)
conv3_ori = tf.layers.conv1d(inputs=conv2, filters=64, kernel_size=5, padding='same')
conv3 = tf.nn.relu(conv3_ori)
conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=5, padding='same')
conv3 = tf.nn.relu(conv3)
conv3 = tf.layers.conv1d(inputs=conv3, filters=64, kernel_size=3, padding='same')
conv3 += conv3_ori
conv3 = tf.nn.relu(conv3)
conv4 = tf.layers.conv1d(inputs=conv3, filters=32, kernel_size=3, padding='same')
conv4 = tf.nn.relu(conv4)
Decoding_logit = tf.layers.conv1d(inputs=conv4, filters=1, kernel_size=3, padding='same')
Decoding_prob = tf.nn.sigmoid(Decoding_logit)
return Decoding_logit, Decoding_prob
def sample_Z(sample_size):
''' Sampling the generation noise Z from normal distribution '''
return np.random.normal(size=sample_size)
def sample_uniformly(sample_size):
return np.random.randint(size=sample_size, low=-15, high=15) / 10
def gaussian_noise_layer(input_layer, std):
noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=std, dtype=tf.float32)
return input_layer + noise
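# The Rayleigh layer below implements y = h * x + n on complex samples:
# the two real channels of `input_layer` are packed into a complex tensor,
# multiplied by the flat-fading complex gain h = h_r + j*h_i, and circular
# complex Gaussian noise with per-component std `std` is added.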
def Rayleigh_noise_layer(input_layer, h_r, h_i, std):
h_complex = tf.complex(real=h_r, imag=h_i)
input_layer_real = input_layer[:, :, 0]
input_layer_imag = input_layer[:, :, 1]
input_layer_complex = tf.complex(real=input_layer_real, imag=input_layer_imag)
# input_layer_complex = tf.reshape(input_layer_complex, [-1, block_length, 1])
noise = tf.complex(
real=tf.random_normal(shape=tf.shape(input_layer_complex), mean=0.0, stddev=std, dtype=tf.float32),
imag=tf.random_normal(shape=tf.shape(input_layer_complex), mean=0.0, stddev=std, dtype=tf.float32))
output_complex = tf.add(tf.multiply(h_complex, input_layer_complex), noise)
output_complex_reshape = tf.reshape(output_complex, [-1, block_length, 1])
print("Shape of the output complex", output_complex, output_complex_reshape)
# print("shape of the complex matrix", input_layer_complex, output_complex, tf.concat([tf.real(output_complex), tf.imag(output_complex)], -1))
return tf.concat([tf.real(output_complex_reshape), tf.imag(output_complex_reshape)], -1)
def sample_h(sample_size):
return np.random.normal(size=sample_size) / np.sqrt(2.)
""" Start of the Main function """
''' Building the Graph'''
batch_size = 512
block_length = 128
Z_dim_c = 16
learning_rate = 1e-4
X = tf.placeholder(tf.float32, shape=[None, block_length, 1])
E = encoding(X)
Z = tf.placeholder(tf.float32, shape=[None, block_length, Z_dim_c])
Noise_std = tf.placeholder(tf.float32, shape=[])
h_r = tf.placeholder(tf.float32, shape=[None, 1])
h_i = tf.placeholder(tf.float32, shape=[None, 1])
#h_r_noise = tf.add(h_r, tf.random_normal(shape=tf.shape(h_r), mean=0.0, stddev=Noise_std, dtype=tf.float32))
#h_i_noise = tf.add(h_i, tf.random_normal(shape=tf.shape(h_i), mean=0.0, stddev=Noise_std, dtype=tf.float32))
Channel_info = tf.tile(tf.concat([tf.reshape(h_r, [-1, 1, 1]), tf.reshape(h_i, [-1, 1, 1])], -1), [1, block_length, 1])
Conditions = tf.concat([E, Channel_info], axis=-1)
G_sample = generator_conditional(Z, Conditions)
R_sample = Rayleigh_noise_layer(E, h_r, h_i, Noise_std)
R_decodings_logit, R_decodings_prob = decoding(R_sample, Channel_info)
G_decodings_logit, G_decodings_prob = decoding(G_sample, Channel_info)
encodings_uniform_generated = tf.placeholder(tf.float32, shape=[None, block_length, 2])
Conditions_uniform = tf.concat([encodings_uniform_generated, Channel_info], axis=-1)
print("shapes G and R and channel info", G_sample, R_sample, encodings_uniform_generated)
G_sample_uniform = generator_conditional(Z, Conditions_uniform)
R_sample_uniform = Rayleigh_noise_layer(encodings_uniform_generated, h_r, h_i, Noise_std)
D_prob_real, D_logit_real = discriminator_condintional(R_sample_uniform, Conditions_uniform)
D_prob_fake, D_logit_fake = discriminator_condintional(G_sample_uniform, Conditions_uniform)
Disc_vars = [v for v in tf.trainable_variables() if v.name.startswith('discriminator')]
Gen_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator')]
Tx_vars = [v for v in tf.trainable_variables() if v.name.startswith('encoding')]
Rx_vars = [v for v in tf.trainable_variables() if v.name.startswith('decoding')]
''' Standard GAN '''
D_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))
# Set up solvers
D_solver = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(D_loss, var_list=Disc_vars)
G_solver = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(G_loss, var_list=Gen_vars)
loss_receiver_R = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=R_decodings_logit, labels=X))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
Rx_solver = optimizer.minimize(loss_receiver_R, var_list=Rx_vars)
loss_receiver_G = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=G_decodings_logit, labels=X))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
Tx_solver = optimizer.minimize(loss_receiver_G, var_list=Tx_vars)
# Note: despite their names, accuracy_R / accuracy_G measure the bit error rate
# (fraction of hard-decision bits differing from X); WER_R is the block (word) error rate.
accuracy_R = tf.reduce_mean(tf.cast((tf.abs(R_decodings_prob - X) > 0.5), tf.float32))
accuracy_G = tf.reduce_mean(tf.cast((tf.abs(G_decodings_prob - X) > 0.5), tf.float32))
WER_R = 1 - tf.reduce_mean(tf.cast(tf.reduce_all(tf.abs(R_decodings_prob - X) < 0.5, 1), tf.float32))
init = tf.global_variables_initializer()
number_steps_receiver = 5000
number_steps_channel = 5000
number_steps_transmitter = 5000
display_step = 100
batch_size = 320
number_iterations = 1000 # in each iteration, the receiver, the transmitter and the channel will be updated
EbNo_train = 20.
EbNo_train = 10. ** (EbNo_train / 10.)
EbNo_train_GAN = 35.
EbNo_train_GAN = 10. ** (EbNo_train_GAN / 10.)
EbNo_test = 15.
EbNo_test = 10. ** (EbNo_test / 10.)
R = 0.5
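# Interpretation of the Noise_std values used below (not stated in the original):
# for code rate R and a linear Eb/N0, the per-real-dimension AWGN standard deviation
# is sigma = sqrt(1 / (2 * R * Eb/N0)), assuming unit average symbol energy.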
def generate_batch_data(batch_size):
global start_idx, data
if start_idx + batch_size >= N_training:
start_idx = 0
data = np.random.binomial(1, 0.5, [N_training, block_length, 1])
batch_x = data[start_idx:start_idx + batch_size]
start_idx += batch_size
#print("start_idx", start_idx)
return batch_x
N_training = int(1e6)
data = np.random.binomial(1, 0.5, [N_training, block_length, 1])
N_val = int(1e4)
val_data = np.random.binomial(1, 0.5, [N_val, block_length, 1])
N_test = int(1e4)
test_data = np.random.binomial(1, 0.5, [N_test, block_length, 1])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
start_idx = 0
for iteration in range(number_iterations):
number_steps_transmitter += 5000
number_steps_receiver += 5000
number_steps_channel += 2000
print("iteration is ", iteration)
''' =========== Training the Channel Simulator ======== '''
for step in range(number_steps_channel):
if step % 100 == 0:
print("Training ChannelGAN, step is ", step)
batch_x = generate_batch_data(int(batch_size / 2))
encoded_data = sess.run([E], feed_dict={X: batch_x})
random_data = sample_uniformly([int(batch_size / 2), block_length, 2])
input_data = np.concatenate((np.asarray(encoded_data).reshape([int(batch_size / 2), block_length, 2])
+ np.random.normal(0, 0.1, size=([int(batch_size / 2), block_length, 2])),
random_data), axis=0)
_, D_loss_curr = sess.run([D_solver, D_loss],
feed_dict={encodings_uniform_generated: input_data,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Z: sample_Z([batch_size, block_length, Z_dim_c]),
Noise_std: (np.sqrt(1 / (2 * R * EbNo_train_GAN)))})
_, G_loss_curr = sess.run([G_solver, G_loss],
feed_dict={encodings_uniform_generated: input_data,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Z: sample_Z([batch_size, block_length, Z_dim_c]),
Noise_std: (np.sqrt(1 / (2 * R * EbNo_train_GAN)))})
''' =========== Training the Transmitter ==== '''
for step in range(number_steps_transmitter):
if step % 100 == 0:
print("Training transmitter, step is ", step)
batch_x = generate_batch_data(batch_size)
sess.run(Tx_solver, feed_dict={X: batch_x, Z: sample_Z([batch_size, block_length, Z_dim_c]),
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Noise_std: (np.sqrt(1 / (2 * R * EbNo_train)))
})
''' ========== Training the Receiver ============== '''
for step in range(number_steps_receiver):
if step % 100 == 0:
print("Training receiver, step is ", step)
batch_x = generate_batch_data(batch_size)
sess.run(Rx_solver, feed_dict={X: batch_x,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Noise_std: (np.sqrt(1 / (2 * R * EbNo_train)))})
''' ----- Testing ---- '''
loss, acc = sess.run([loss_receiver_R, accuracy_R],
feed_dict={X: batch_x,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Noise_std: np.sqrt(1 / (2 * R * EbNo_train))})
print("Real Channel Evaluation:", "Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
loss, acc = sess.run([loss_receiver_G, accuracy_G],
feed_dict={X: batch_x,
h_i: sample_h([batch_size, 1]),
h_r: sample_h([batch_size, 1]),
Z: sample_Z([batch_size, block_length, Z_dim_c]),
Noise_std: np.sqrt(1 / (2 * R * EbNo_train))
})
print("Generated Channel Evaluation:", "Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
EbNodB_range = np.arange(0, 30)
ber = np.ones(len(EbNodB_range))
wer = np.ones(len(EbNodB_range))
for n in range(0, len(EbNodB_range)):
EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
ber[n], wer[n] = sess.run([accuracy_R, WER_R],
feed_dict={X: test_data, Noise_std: (np.sqrt(1 / (2 * R * EbNo))),
h_i: sample_h([len(test_data), 1]),
h_r: sample_h([len(test_data), 1]),
})
print('SNR:', EbNodB_range[n], 'BER:', ber[n], 'WER:', wer[n])
print(ber)
print(wer)
|
import os
import tempfile
from pathlib import Path
from typing import Callable
from uuid import uuid1
import pytest
from faker import Faker
from flask.testing import FlaskClient
from overhave import OverhaveAdminApp, overhave_app
from overhave.base_settings import DataBaseSettings
from overhave.factory import IAdminFactory
from overhave.factory.context.base_context import BaseFactoryContext
from overhave.pytest_plugin import IProxyManager
@pytest.fixture()
def patched_app_admin_factory(
db_settings: DataBaseSettings,
database: None,
mocked_context: BaseFactoryContext,
clean_admin_factory: Callable[[], IAdminFactory],
) -> IAdminFactory:
db_settings.setup_db()
factory = clean_admin_factory()
factory.set_context(mocked_context)
return factory
@pytest.fixture()
def test_pullrequest_id(faker: Faker) -> int:
return faker.random_int()
@pytest.fixture()
def test_pullrequest_published_by() -> str:
return uuid1().hex
@pytest.fixture()
def test_report_without_index(patched_app_admin_factory: IAdminFactory) -> Path:
report_dir = patched_app_admin_factory.context.file_settings.tmp_reports_dir / uuid1().hex
report_dir.mkdir()
return report_dir
@pytest.fixture()
def test_report_with_index(test_report_without_index: Path, faker: Faker) -> Path:
report_index = test_report_without_index / "index.html"
report_index.write_text(faker.word())
yield test_report_without_index
report_index.unlink()
@pytest.fixture()
def test_app(
clean_proxy_manager: Callable[[], IProxyManager], patched_app_admin_factory: IAdminFactory
) -> OverhaveAdminApp:
return overhave_app(factory=patched_app_admin_factory)
@pytest.fixture()
def test_client(test_app: OverhaveAdminApp) -> FlaskClient:
db_fd, test_app.config["DATABASE"] = tempfile.mkstemp()
test_app.config["TESTING"] = True
with test_app.test_client() as client:
yield client
os.close(db_fd)
os.unlink(test_app.config["DATABASE"])
|
# https://www.codingame.com/training/easy/dead-mens-shot
def within_polygon(x, y, points, num_corners):
pos, neg = False, False
for i in range(num_corners):
x1, y1 = points[i]
x2, y2 = points[(i + 1) % num_corners]
d = (x - x1) * (y2 - y1) - (y - y1) * (x2 - x1)
if d > 0:
pos = True
else:
neg = True
if pos == neg: return False
return True
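# Worked example (comments only): for the CCW square (0,0)-(2,0)-(2,2)-(0,2), every
# edge cross product for the interior point (1,1) is negative, so the signs never mix
# and the function returns True; for (3,1) the signs mix and it returns False.
# A point exactly on an edge gives d == 0, which is counted on the "neg" side here.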
def solution():
points = []
num_corners = int(input())
for _ in range(num_corners):
points.append(list(map(int, input().split())))
num_shots = int(input())
for _ in range(num_shots):
x, y = map(int, input().split())
if within_polygon(x, y, points, num_corners):
print('hit')
else:
print('miss')
solution()
|
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
from keras import backend as K
from keras.layers import GlobalAveragePooling2D
from keras.layers import Reshape, Dense, multiply, Permute
def squeeze_excite_block(input, ratio=16):
    ''' Create a squeeze-excite block
    Args:
        input: input Keras tensor (4D feature map)
        ratio: reduction ratio of the bottleneck Dense layer
    Returns: a Keras tensor with channel-wise recalibration applied'''
init = input
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
filters = init._keras_shape[channel_axis]
se_shape = (1, 1, filters)
se = GlobalAveragePooling2D()(init)
se = Reshape(se_shape)(se)
# kernel_initializer='he_normal', use_bias=False
se = Dense(filters // ratio, activation='relu', kernel_initializer='he_uniform', use_bias=False)(se)
se = Dense(filters, activation='sigmoid', kernel_initializer='he_uniform', use_bias=False)(se)
if K.image_data_format() == 'channels_first':
se = Permute((3, 1, 2))(se)
x = multiply([init, se])
return x
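# A minimal usage sketch (not part of the original module): drop the block in after
# any convolution on a 4D feature map. Shapes and layer choices below are
# illustrative assumptions only.
if __name__ == '__main__':
    from keras.layers import Input, Conv2D
    from keras.models import Model
    inp = Input(shape=(32, 32, 3))
    feat = Conv2D(64, (3, 3), padding='same', activation='relu')(inp)
    feat = squeeze_excite_block(feat, ratio=16)  # channel-wise recalibration
    model = Model(inp, feat)
    model.summary()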
|
import discord
import random
import os
import json
import datetime
from datetime import timedelta
from discord.ext import commands
class qotd(commands.Cog):
client = commands.Bot(command_prefix="h!")
    def __init__(self, client):
        self.client = client  # assignment (was a no-op comparison)
    @commands.command()
    async def randnum(self, ctx):
        value = random.randint(1, 5)  # randint needs lower and upper bounds; 1-5 is a guess at the intent
        await ctx.send(value)
    with open('qotd_qafase.json') as f:
        qafase = json.load(f)  # keep the loaded questions instead of discarding the result
# end
def setup(client):
client.add_cog(qotd(client))
|
from subprocess import call
import sys
from os import listdir
from os.path import isfile, join
# Schedule conversion for the normal 32x32 cifar-10 dataset
vers_to_run = [1]
in_vers = [0]
for index in range(0,len(vers_to_run)):
call('python cifar-convert.py '+str(in_vers[index])+
' '+str(vers_to_run[index]), shell=True)
|
# Copyright 2020 BULL SAS All rights reserved
"""Command Line application."""
import typer
import uvicorn
cli = typer.Typer()
@cli.command()
def dev(host: str = "0.0.0.0", port: int = 5000):
"""Run the application in development mode."""
uvicorn.run("shaman_api:app",
host=host,
port=port,
reload=True,
access_log=False)
@cli.command()
def prod(
host: str = "0.0.0.0",
port: int = 8080,
access_log: bool = False,
workers: int = 2,
loop: str = "uvloop",
):
"""Run the application in production mode."""
uvicorn.run(
"shaman_api:app",
host=host,
port=port,
workers=workers,
access_log=access_log,
loop=loop,
reload=False,
)
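# Hypothetical invocation sketch (the actual console-script name is defined in the
# package metadata, not here):
#   $ python -m shaman_api.cli dev --port 5000
#   $ python -m shaman_api.cli prod --workers 4
if __name__ == "__main__":
    cli()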
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Carson Anderson <rcanderson23@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_optional_feature
version_added: "2.8"
short_description: Manage optional Windows features
description:
- Install or uninstall optional Windows features on non-Server Windows.
- This module uses the C(Enable-WindowsOptionalFeature) and C(Disable-WindowsOptionalFeature) cmdlets.
options:
name:
description:
- The name(s) of the feature to install.
- This relates to C(FeatureName) in the Powershell cmdlet.
- To list all available features use the PowerShell command C(Get-WindowsOptionalFeature).
type: list
required: yes
state:
description:
- Whether to ensure the feature is absent or present on the system.
type: str
choices: [ absent, present ]
default: present
include_parent:
description:
- Whether to enable the parent feature and the parent's dependencies.
type: bool
default: no
source:
description:
- Specify a source to install the feature from.
- Can either be C({driveletter}:\sources\sxs) or C(\\{IP}\share\sources\sxs).
type: str
seealso:
- module: win_chocolatey
- module: win_feature
- module: win_package
author:
- Carson Anderson (@rcanderson23)
'''
EXAMPLES = r'''
- name: Install .Net 3.5
win_optional_feature:
name: NetFx3
state: present
- name: Install .Net 3.5 from source
win_optional_feature:
name: NetFx3
source: \\share01\win10\sources\sxs
state: present
- name: Install Microsoft Subsystem for Linux
win_optional_feature:
name: Microsoft-Windows-Subsystem-Linux
state: present
register: wsl_status
- name: Reboot if installing Linux Subsystem as the feature requires it
win_reboot:
when: wsl_status.reboot_required
- name: Install multiple features in one task
win_optional_feature:
name:
- NetFx3
- Microsoft-Windows-Subsystem-Linux
state: present
'''
RETURN = r'''
reboot_required:
description: True when the target server requires a reboot to complete updates
returned: success
type: bool
sample: true
'''
|
'''
Holds some long-running (in milliseconds) test to demo the time chart a bit
'''
import time
import pytest
def test_50():
time.sleep(0.05)
@pytest.mark.parametrize('run', range(10))
def test_10(run):
time.sleep(0.01)
|
# Extended from: https://github.com/mlmed/torchxrayvision/blob/master/torchxrayvision/datasets.py
from skimage.io import imread
import os, os.path
import numpy as np
import pandas as pd
import torchxrayvision as xrv
from torchxrayvision.datasets import normalize, Dataset
import padchest_config
datapath = os.path.dirname(os.path.realpath(__file__))
class PC_Dataset_Custom(Dataset):
"""
PadChest dataset
Hospital San Juan de Alicante - University of Alicante
PadChest: A large chest x-ray image dataset with multi-label annotated reports.
Aurelia Bustos, Antonio Pertusa, Jose-Maria Salinas, and Maria de la Iglesia-Vayá.
arXiv preprint, 2019. https://arxiv.org/abs/1901.07441
Dataset website:
http://bimcv.cipf.es/bimcv-projects/padchest/
"""
def __init__(self, imgpath,
csvpath=os.path.join(datapath, "PADCHEST_chest_x_ray_images_labels_160K_01.02.19_DifferentialDiagnosis.csv"),
views=["PA"],
transform=None,
data_aug=None,
flat_dir=True,
seed=0,
unique_patients=True):
super(PC_Dataset_Custom, self).__init__()
np.random.seed(seed) # Reset the seed so all runs are the same.
self.pathologies = sorted(padchest_config.pathologies)
mapping = dict()
mapping["Infiltration"] = ["infiltrates",
"interstitial pattern",
"ground glass pattern",
"reticular interstitial pattern",
"reticulonodular interstitial pattern",
"alveolar pattern",
"consolidation",
"air bronchogram"]
mapping["Pleural_Thickening"] = ["pleural thickening"]
mapping["Consolidation"] = ["air bronchogram"]
mapping["Hilar Enlargement"] = ["adenopathy",
"pulmonary artery enlargement"]
mapping["Support Devices"] = ["device",
"pacemaker"]
mapping["Tube'"] = ["stent'"] ## the ' is to select findings which end in that word
self.imgpath = imgpath
self.transform = transform
self.data_aug = data_aug
self.flat_dir = flat_dir
self.csvpath = csvpath
self.check_paths_exist()
self.csv = pd.read_csv(self.csvpath, low_memory=False)
# standardize view names
self.csv.loc[self.csv["Projection"].isin(["AP_horizontal"]),"Projection"] = "AP Supine"
# Keep only the specified views
if type(views) is not list:
views = [views]
self.views = views
self.csv["view"] = self.csv['Projection']
self.csv = self.csv[self.csv["view"].isin(self.views)]
# remove null stuff
self.csv = self.csv[~self.csv["Labels"].isnull()]
self.csv = self.csv[~self.csv["ImageID"].isin(padchest_config.missing_files)]
if unique_patients:
self.csv = self.csv.groupby("PatientID").first().reset_index()
# filter out age < 10 (paper published 2019)
self.csv = self.csv[(2019-self.csv.PatientBirth > 10)]
# Get our classes.
self.labels = []
for pathology in self.pathologies:
mask = self.csv["Labels"].str.contains(pathology.lower())
if pathology in mapping:
for syn in mapping[pathology]:
#print("mapping", syn)
mask |= self.csv["Labels"].str.contains(syn.lower())
self.labels.append(mask.values)
self.labels = np.asarray(self.labels).T
self.labels = self.labels.astype(np.float32)
########## add consistent csv values
# patientid
self.csv["patientid"] = self.csv["PatientID"].astype(str)
print('Pathologies:', self.pathologies)
output_dir = './outputs'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_file = output_dir + os.sep + 'class_names.txt'
with open(output_file, 'w') as f:
f.writelines(str(self.pathologies))
def string(self):
return self.__class__.__name__ + " num_samples={} views={} data_aug={}".format(len(self), self.views, self.data_aug)
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
sample = {}
sample["idx"] = idx
sample["lab"] = self.labels[idx]
if self.flat_dir:
# Standard directory structure
imgid = self.csv['ImageID'].iloc[idx]
else:
# Custom directory structure is folder / filename
imgid = str(self.csv['ImageDir'].iloc[idx]) + os.sep + self.csv['ImageID'].iloc[idx]
img_path = os.path.join(self.imgpath, imgid)
img = imread(img_path)
sample["img"] = normalize(img, maxval=65535, reshape=True)
if self.transform is not None:
sample["img"] = self.transform(sample["img"])
if self.data_aug is not None:
sample["img"] = self.data_aug(sample["img"])
return sample
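# Usage sketch (paths are placeholders; requires the PadChest images and the CSV
# referenced above to be present):
if __name__ == '__main__':
    ds = PC_Dataset_Custom(imgpath='/path/to/PADCHEST/images', views=["PA"])
    print(ds.string())
    print('labels matrix shape:', ds.labels.shape)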
|
import math
def add(operand1, operand2):
    "Takes two integers or floats and returns their sum."
    return operand1 + operand2
def subtract(operand1, operand2):
    "Takes two integers or floats and returns the result of subtracting the second from the first."
    return operand1 - operand2
def multiply(operand1, operand2):
    "Takes two integers or floats and returns their product."
    return operand1 * operand2
def divide(operand1, operand2):
    "Takes two integers or floats and returns the result of dividing the first by the second."
    return operand1 / operand2
def sine(operand1):
    "Takes one argument in radians and returns its sine."
    return math.sin(operand1)
def cosine(operand1):
    "Takes one argument in radians and returns its cosine."
    return math.cos(operand1)
def power(operand1, operand2):
    "Takes two integers or floats and returns the first raised to the power of the second."
    return operand1 ** operand2
def squareroot(operand1):
    "Takes one argument and returns its square root."
    return math.sqrt(operand1)
|
#!/usr/bin/env python3
providers = [
"0x29e613b04125c16db3f3613563bfdd0ba24cb629", # A
"0x1926b36af775e1312fdebcc46303ecae50d945af", # B
"0x4934a70ba8c1c3acfa72e809118bdd9048563a24", # C
"0x51e2b36469cdbf58863db70cc38652da84d20c67", # D
]
requesters = [ # should not contain providers
"0x378181ce7b07e8dd749c6f42772574441b20e35f",
"0x4cd57387cc4414be8cece4e6ab84a7dd641eab25",
"0x02a07535bc88f180ecf9f7c6fd66c0c8941fd7ab",
"0x90b25485fcbde99f3ca4864792947cfcfb071de6",
"0x449ecd91143d77cfa9fbd4a5ba779a0911d21423",
"0x1397e8bf32d57b46f507ff2e912a29cd9aa78dcd",
"0xdce54cfd06e7ccf5f2e7640e0007ba667190e38e",
"0x5affc0638b7b311be40a0e27ed5cd7c133c16e64",
"0x904c343addd9f21510e711564dbf52d2a0daf7e3",
"0x17b4ec0bcd6a8f386b354becd44b3c4813448184",
"0x6e89235ddcc313a8184ffa4cea496d0f42f1f647",
"0x76f47f566845d7499c058c3a36ccd2fe5695c9f7",
"0xb0c24da05236caae4b2ee964052aa917eb3927ed",
"0x1176e979939e9a0ea65b9ece552fe413747243dc",
"0x24056c57e3b0933d2fa7d83fb9667d0efdfae64d",
"0x5c2719b9f74dba2feaefb872ffaf6a375c8e70f9",
"0x30f02cecf3e824f963cfa05270c8993a49703d55",
"0x44d85663b00117e38a9d6def322fb09dc40b6839",
"0x78bc08e70dce53f7823456e34610bc55828373af",
"0x7293d2089b6f6e814240d21dc869cc88a3471839",
"0x141c01e36d4e908d42437e203442cd3af40b4d79",
"0x782cf10b0c7393c0c98587277bfc26e73d3d0ca2",
"0x66b7bf743b17f05f8a5afff687f085dc88ed3515",
"0xbe2a4e39b6853086aab31d4003e2d1fa67561eae",
"0xbc28a0dab6eaef04574e2116059bb9102aa31e42",
"0x8d9fc9ad8d51647a12b52d74cfab6f5948687084",
"0x67613da8568ae0b0e76796ec8c45b50c469e3f30",
"0x71cef32e92caacee9bb3b020e2c1b2007d360c26",
"0x90018b8b5ccf4c291b64d85fdd3d9e179706da26",
"0xd3ff698232ac001ce663992b316510a50e93e460",
"0x4c2aebf67f8cfce387ad0ee3774bb193c4a62ef6",
"0x6d00e4661e8407f24b2942149ea4a9d445d10038",
"0x92086404471d0e5453f856363358967308b37cd5",
"0xf6261f330f75052c84022bf3c165153c36d0fcdc",
"0xd1e34cbda0d77407bbe76cce19d2e11257d00a1b",
"0x00ddbcfcee1e3222fa8abc2b2b9d319b61993e27",
"0x6f8d2420c6917bb5bb84db7bc615b9893aa30cb3",
"0xa0cadcbabd102b7966b82f3556c436e5d88daf07",
"0xada6c7bc6c6e0004b407733e3993f65397b321ab",
"0x740fcbc6d4e7e5102b8cba29370b93c6de4c786e",
"0x2cc6d89cda9db6592f36d548d5ced9ec27a80d5c",
"0x82b506dee5091b58a85b52bc31681f29d2c55584",
"0x12ba09353d5c8af8cb362d6ff1d782c1e195b571",
"0x53cf1a8d668ef8e0de626ceed58e1766c08bb625",
"0xcf1c9e9f33d3e439e51d065e0ebfccad6850cbd9",
"0x945ec7ca40720991f4a387e1b19217fbff62cbde",
"0xa61bb920ef738eab3d296c0c983a660f6492e1af",
"0xf4c8345baab83d92a88420b093a96dcdb08705de",
"0xd00440d20faba4a08b3f80f1596420ae16a9910b",
"0xe01eda38f7b5146463872f0c769ac14885dbf518",
"0x5ae3a2b79537f849eabb235401645982a2b1d7bd",
"0xc660aba0006e51cd79428f179467a8d7fbcf90f7",
"0x64b570f0e7c019dc750c4a75c33dca55bdc51845",
"0xf6d9f88fa98d4dc427ffdb1bdf01860fd12c98c7",
"0x865fdb0532b1579ee4eebf0096dbde06f1548a36",
"0x676b8d6e031a394c079fc8fee03ad2974ef126f5",
"0x77c0b42b5c358ff7c97e268794b9ff6a278a0f1e",
"0x6579dac76f0a009f996270bd1b7716ed72cdb2ce",
"0x7621e307269e5d9d4d27fd1c24425b210048f486",
"0xe72307313f8b8f96cfcd99ecef0f1ab03d28be5d",
"0xfe0f02d3d387eec745090c756a31a3c3c2bf32cf",
"0x831854a093f30acb032ab9eeaeb715b37ee1bb03",
"0x28fe7b65c3846541d6d17271585792805ae280f7",
"0xfce73328daf1ae024d0a724c595e1d5b2ac8aecb",
"0x805332ee379269b087c8c1b96adb0f398d53e46f",
"0xe953a99ff799e6da23948ca876fce3f264447de8",
"0x55414e26855c90056b3b54f797b5c7e6558146b3",
"0x1099f0f45f5f2040cc408b062557a31bfedd00d6",
"0x7553a853a45358103ac9650d50d4a15ade1038e3",
"0x5170a965650fc8704b748548c97edb80fec5efd3",
"0xc23aae5daa9e66d6db8496ada096b447297cbddd",
"0xc60409093861fe191ae851572a440deb42818a63",
"0xa47a5e1206e3b8c5fca11f5064d4c0d35e2fd240",
"0xf767b0567a2c4c6c4b4d071746b68198dddb7202",
"0x382f1e1fe7680315053ea1c489b4fc003ff9ad64",
"0x3d50f068445c457c0c38d52980a5e6442e780d89",
"0x8b7356a95c8ba1846eb963fd127741730f666ba8",
"0x4d96ba91f490eca2abd5a93f084ad779c54656aa",
"0x1b8a68bc837f5325bdc57804c26922d98a0332ab",
"0x4b419991c9b949b20556ab9ad13c5d54354f601f",
"0x13fe0b65bdd902d252d0b431aec6acf02a0b2f41",
"0x96963310153ec9a47a19312719a98cc08041134d",
"0x2c73a80956516ba9e6005030faed2f8212bc10a3",
"0xb9a498c3a76049bffc96c898605758620f459244",
"0xf8b8cac4f133b039d1990e9d578289c32ff774de",
"0xfb674750e02afa2f47f26a2d59ea1fe20444b250",
"0x6b781119b8ff1c7585f97caf779be6a80a88daf0",
"0xcaa482f269dd0926fdfd5c48e3a22b621f9d1a09",
"0x446aa04436809c130eab8e33ce9f5d3c80564fb7",
"0xd1806d39a8c2cd7469048f6d99c26cb66cd46f83",
"0xee516025bf14f7118aa9ced5eb2adacfd5827d14",
"0x1d16cfb236f5830875716c86f600dd2ee7456515",
"0xe2969f599cb904e9a808ec7218bc14fcfa346965",
"0x0636278cbd420368b1238ab204b1073df9cc1c5c",
"0x72c1a89ff3606aa29686ba8d29e28dccff06430a",
"0x168cb3240a429c899a74aacda4289f4658ec924b",
"0x08b003717bfab7a80b17b51c32223460fe9efe2a",
"0x4aae9220409e1c4d74ac95ba14edb0684a431379",
"0xab608a70d0b4a3a288dd68a1661cdb8b6c742672",
]
extra_requesters = [
"0xe2e146d6b456760150d78819af7d276a1223a6d4",
"0xa9fc23943e48a3efd35bbdd440932f123d05b697",
"0x5b235d87f3fab61f87d238c11f6790dec1cde736",
"0xe03914783115e807d8ea1660dbdcb4f5b2f969c0",
"0x85fa5e6dd9843cce8f67f4797a96a156c3c79c25",
]
for provider in providers:
if provider in requesters:
raise Exception(f"Provider {provider}, is in the requester list fix it")
if provider in extra_requesters:
raise Exception(f"Provider {provider}, is in the extra_requesters list fix it")
|
import aiohttp
import config
async def get_youtube_videos(query: str, res_num: int):
async with aiohttp.ClientSession() as session:
        youtube_url = f'https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults={res_num}&q={query}&type=video&key={config.YOUTUBE_API_KEY}'
async with session.get(youtube_url) as response:
yt_json = await response.json()
if not yt_json:
return None
return yt_json
async def get_touhouwiki_query(query: str):
async with aiohttp.ClientSession() as session:
touhouwiki_url = f'https://en.touhouwiki.net/api.php?action=query&generator=search&prop=info&inprop=url&format=json&gsrsearch={query}'
async with session.get(touhouwiki_url) as response:
thw_json = await response.json()
if not thw_json:
return None
return thw_json
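# Minimal usage sketch (assumes a valid config.YOUTUBE_API_KEY and network access);
# the coroutine names above are reused as-is.
if __name__ == '__main__':
    import asyncio
    async def _demo():
        videos = await get_youtube_videos("bad apple", 5)
        wiki = await get_touhouwiki_query("Hakurei Shrine")
        print(videos is not None, wiki is not None)
    asyncio.run(_demo())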
|
# -*- coding: UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''Chrome WebView implementation
'''
import time
import logging
from qt4c.webview.base import WebViewBase
from qt4c import qpath
from qt4w.webdriver.webkitwebdriver import WebkitWebDriver
from qt4c.webview.chromewebview.chromedriver import ChromeDriver
class ChromeWebView(WebViewBase):
    '''Chrome WebView implementation
    '''
def __init__(self, page_wnd, url, pid, port=9200):
self._port = port
self._url = url
self._pid = pid
self._window = page_wnd
super(ChromeWebView, self).__init__(self._window, WebkitWebDriver(self))
self._driver = ChromeDriver(self._port).get_debugger(self._url)
        self._frame_dict = {}  # cache the mapping from frame xpath to frame id
self._browser_type = 'chrome'
    def _get_frame(self, parent, name, url):
        '''Find the frameTree node that matches the given frame name and url
        :param parent: frameTree node to search under
        :type parent: dict
        :param name: id or name attribute of the frame
        :type name: string
        :param url: url of the frame
        :type url: string
        '''
if 'childFrames' in parent:
for child in parent['childFrames']:
if (name and child['frame']['name'] == name) or (url and child['frame']['url'] == url):
return child
else:
logging.info('[ChromeWebView] get_frame %s' % parent)
# raise RuntimeError('find frame of %s failed' % ((('name=%s' % name) if name else ('url=%s' % url))))
return None
    def _get_frame_id_by_xpath(self, frame_xpaths):
        '''Look up the frame id for the given frame XPath list
        :param frame_xpaths: list of frame xpaths
        :type frame_xpaths: list
        '''
        frame_xpaths_str = ''.join(frame_xpaths)
        if frame_xpaths_str in self._frame_dict: return self._frame_dict[frame_xpaths_str]
        # not in the cache yet
        timeout = 10
        time0 = time.time()
        while time.time() - time0 < timeout:
            frame_exist = True  # initialise here so the top-level branch cannot hit a NameError below
            frame_tree = self._driver.get_frame_tree()
            frame_id = frame_tree['frame']['id']
            if len(frame_xpaths) == 0:
                # top-level frame
                self._frame_dict[frame_xpaths_str] = frame_id
            else:
for i in range(len(frame_xpaths)):
try:
name, url = self._webdriver._get_frame_info(frame_xpaths[:i + 1])
except Exception as e:
logging.error('[ChromeWebView] _get_frame_info_error %s' % e)
frame_exist = False
break
frame_tree = self._get_frame(frame_tree, name, url)
if frame_tree == None:
                        # frame not found
frame_exist = False
break
self._frame_dict[frame_xpaths_str] = frame_tree['frame']['id']
if not frame_exist:
time.sleep(0.5)
continue
return self._frame_dict[frame_xpaths_str]
    def eval_script(self, frame_xpaths, script):
        '''Run JavaScript in the specified frame and return the result
        :param frame_xpaths: XPath list of the frame element; pass "[]" for the top-level page, or pass a frame id directly
        :type frame_xpaths: list or string
        :param script: JavaScript statement to execute
        :type script: string
        '''
from qt4c.webview.chromewebview.chromedriver import ChromeDriverError
timeout = 10
time0 = time.time()
while time.time() - time0 < timeout:
if isinstance(frame_xpaths, list):
frame_id = self._get_frame_id_by_xpath(frame_xpaths)
else:
frame_id = frame_xpaths
try:
result = self._driver.eval_script(frame_id, script)
break
except (ChromeDriverError, RuntimeError) as e:
if isinstance(e, RuntimeError):
del self._frame_dict[''.join(frame_xpaths)]
time.sleep(0.5)
elif e.code == -32000:
# Execution context with given id not found.
time.sleep(0.5)
else:
raise e
else:
            raise RuntimeError('failed to execute the JavaScript code')
return self._handle_result(result, frame_xpaths)
def click(self, x_offset, y_offset):
        '''Shift-click in Chrome so that the target page opens in a new window
        '''
from qt4c.keyboard import Key
js_tmpl = '''(function(x, y){
var getNodeByPos = function(tag, x, y){
var nodes = document.getElementsByTagName(tag);
for(var i=0;i<nodes.length;i++){
var rect = nodes[i].getBoundingClientRect();
if(rect.left <= x && rect.right >= x && rect.top <= y && rect.bottom >= y) return nodes[i];
}
return null;
};
var getTargetFrame = function(x, y){
var frame = getNodeByPos('iframe', x, y);
if(frame != null){
// target node is in iframe
var result = new Array();
var rect = frame.getBoundingClientRect();
var scale = qt4w_driver_lib.getScale();
result.push(rect.left*scale);
result.push(rect.top*scale);
var name = frame.getAttribute('name');
if(name != null){
result.push('//iframe[@name="'+name+'"]');
return result.toString();
}
var id = frame.getAttribute('id');
if(id != null){
result.push('//iframe[@id="'+id+'"]');
return result.toString();
}
var src = frame.getAttribute('src');
if(src == '' || src == 'about:blank') return '';
result.push('//iframe[@src="' + src + '"]');
return result.toString();
}
return '';
};
var isButton = function(x,y){
var button = getNodeByPos('button',x,y)
return (button != null)
};
var isNewTabLinkNode = function(x, y){
var frame_info = getTargetFrame(x, y);
if(frame_info != '') return frame_info;
var node = getNodeByPos('a', x, y);
if(node == null) return null;
//console.log(node.outerHTML);
return node.getAttribute('target') == '_blank';
};
var scale = qt4w_driver_lib.getScale();
return isNewTabLinkNode(x/scale, y/scale)||isButton(x/scale, y/scale);
})(%f, %f);'''
x = x_offset
y = y_offset
frame_xpaths = []
new_tab_flag = False
while True:
result = self.eval_script(frame_xpaths, js_tmpl % (x, y))
if ',' in result:
# in frame
result = result.split(',')
frame_xpaths.append(result[2])
x -= float(result[0])
y -= float(result[1])
else:
new_tab_flag = result == 'true'
break
shift_key = Key(16) # SHIFT
if new_tab_flag: shift_key._inputKey(False)
super(ChromeWebView, self).click(x_offset, y_offset)
if new_tab_flag: shift_key._inputKey(True)
def _get_pid_by_port(port):
    '''Get the pid of the process listening on the given port
    '''
import subprocess
import time
import re
timeout = 10
start = time.time()
cur = start
command = "netstat -aon | findstr %d" % port
sub_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
while 1:
ret = sub_proc.poll()
if ret is None:
time.sleep(0.2)
cur = time.time()
if(cur > start + timeout):
sub_proc.kill()
break
elif ret == 0:
break
else:
break
    content = sub_proc.stdout.read().decode(errors='ignore').rstrip()  # decode bytes before regex matching
    # print(content)
    result = re.search(r'LISTENING\s+(\d+)$', content)
    if result:
        content = result.group(0)
        result = re.search(r'\d+$', content)
        return int(result.group(0))
else:
raise RuntimeError("The specified port %d does not bind to a process" % port)
def get_pid_by_port(port):
    '''Retry with a delay so that slow network initialisation does not make the lookup fail
    '''
    import time
    try_count = 3
    for _ in range(try_count):
        try:
            return _get_pid_by_port(port)
        except RuntimeError:
            time.sleep(2)
if __name__ == '__main__':
pass
|
###########################
#
# #558 Irrational base - Project Euler
# https://projecteuler.net/problem=558
#
# Code by Kevin Marciniak
#
###########################
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT style dataset."""
import os
import time
import numpy as np
import torch
from megatron import mpu, print_rank_0
from megatron.data.blendable_dataset import BlendableDataset
from megatron.data.dataset_utils import get_datasets_weights_and_num_samples
from megatron.data.dataset_utils import get_train_valid_test_split_
from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
def build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup):
"""Build train, valid, and test datasets."""
# Single dataset.
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(data_prefix[0],
data_impl, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix,
train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
prefixes[i], data_impl, splits_string,
datasets_train_valid_test_num_samples[i],
seq_length, seed, skip_warmup)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights)
return (blending_train_dataset, blending_valid_dataset,
blending_test_dataset)
def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string,
train_valid_test_num_samples,
seq_length, seed, skip_warmup):
"""Build train, valid, and test datasets."""
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix,
data_impl,
skip_warmup)
total_num_of_documents = indexed_dataset.sizes.shape[0]
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
print_rank_0(' > dataset split:')
def print_split_stats(name, index):
print_rank_0(' {}:'.format(name))
print_rank_0(' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1],
splits[index + 1] - splits[index]))
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
documents = np.arange(start=splits[index], stop=splits[index + 1],
step=1, dtype=np.int32)
dataset = GPTDataset(name, data_prefix,
documents, indexed_dataset,
train_valid_test_num_samples[index],
seq_length, seed)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
"""Build indexed dataset."""
print_rank_0(' > building dataset index ...')
start_time = time.time()
indexed_dataset = make_indexed_dataset(data_prefix,
data_impl,
skip_warmup)
print_rank_0(' > finished creating indexed dataset in {:4f} '
'seconds'.format(time.time() - start_time))
print_rank_0(' number of documents: {}'.format(
indexed_dataset.sizes.shape[0]))
return indexed_dataset
class GPTDataset(torch.utils.data.Dataset):
def __init__(self, name, data_prefix, documents, indexed_dataset,
num_samples, seq_length, seed):
self.name = name
self.indexed_dataset = indexed_dataset
# Checks
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
# Build index mappings.
self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
self.name, data_prefix, documents, self.indexed_dataset.sizes,
num_samples, seq_length, seed)
def __len__(self):
        # -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
return self.sample_idx.shape[0] - 1
def __getitem__(self, idx):
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
if doc_index_f == doc_index_l:
sample = self.indexed_dataset.get(self.doc_idx[doc_index_f],
offset=offset_f,
length=offset_l - offset_f + 1)
else:
# Otherwise, get the rest of the initial document.
sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f],
offset=offset_f)]
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
# And finally add the relevant portion of last document.
sample_list.append(self.indexed_dataset.get(
self.doc_idx[doc_index_l],
length=offset_l + 1))
sample = np.concatenate(sample_list)
return {'text': np.array(sample, dtype=np.int64)}
def _build_index_mappings(name, data_prefix, documents, sizes,
num_samples, seq_length, seed):
"""Build doc-idx, sample-idx, and shuffle-idx.
doc-idx: is an array (ordered) of documents to be used in training.
sample-idx: is the start document index and document offset for each
training sample.
shuffle-idx: maps the sample index into a random index into sample-idx.
"""
# Number of tokens in each epoch and number of required epochs.
tokens_per_epoch = _num_tokens(documents, sizes)
num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples)
# rng state
np_rng = np.random.RandomState(seed=seed)
# Filename of the index mappings.
_filename = data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_length)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
# Build the indexed mapping if not exist.
if torch.distributed.get_rank() == 0:
if (not os.path.isfile(doc_idx_filename)) or \
(not os.path.isfile(sample_idx_filename)) or \
(not os.path.isfile(shuffle_idx_filename)):
print_rank_0(' > WARNING: could not find index map files, building '
'the indices on rank 0 ...')
# For the last epoch, decide whether include the entire epoch
# in the global shuffle or not.
# If we need only one epoch, then separating last epoch does
# not mean anything.
if num_epochs == 1:
separate_last_epoch = False
print(' > only one epoch required, setting '
'separate_last_epoch to False', flush=True)
else:
# Get the number of samples for the last epoch
num_samples_from_epochs_minus_one = (
(num_epochs - 1) * tokens_per_epoch - 1) // seq_length
last_epoch_num_samples = num_samples - \
num_samples_from_epochs_minus_one
assert last_epoch_num_samples >= 0, \
'last epoch number of samples should be non-negative.'
num_samples_per_epoch = (tokens_per_epoch - 1) // seq_length
assert last_epoch_num_samples < (num_samples_per_epoch + 1), \
'last epoch number of samples exceeded max value.'
# If we have less than 80% of the samples for the last epoch,
                # separate out the epoch and treat it differently.
# Note: the 80% number is just based on common sense and can
# be adjusted if needed.
separate_last_epoch = (last_epoch_num_samples <
int(0.80 * num_samples_per_epoch))
if separate_last_epoch:
string = ' > last epoch number of samples ({}) is smaller '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to True'
else:
string = ' > last epoch number of samples ({}) is larger '\
'than 80% of number of samples per epoch ({}), '\
'setting separate_last_epoch to False'
print(string.format(last_epoch_num_samples,
num_samples_per_epoch), flush=True)
# doc-idx.
start_time = time.time()
doc_idx = _build_doc_idx(documents, num_epochs, np_rng,
separate_last_epoch)
np.save(doc_idx_filename, doc_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save doc-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# sample-idx.
start_time = time.time()
# Use C++ implementation for speed.
# First compile and then import.
from megatron.data import helpers
assert doc_idx.dtype == np.int32
assert sizes.dtype == np.int32
sample_idx = helpers.build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch)
# sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
# num_epochs, tokens_per_epoch)
np.save(sample_idx_filename, sample_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save sample-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
# shuffle-idx.
start_time = time.time()
            # -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
if separate_last_epoch:
num_samples_ = num_samples_from_epochs_minus_one
else:
num_samples_ = sample_idx.shape[0] - 1
shuffle_idx = _build_shuffle_idx(num_samples_,
sample_idx.shape[0] - 1, np_rng)
np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
            print_rank_0(' > elapsed time to build and save shuffle-idx mapping'
' (seconds): {:4f}'.format(time.time() - start_time))
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
assert counts[0].item() == (
torch.distributed.get_world_size() //
torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group()))
# Load mappings.
start_time = time.time()
print_rank_0(' > loading doc-idx mapping from {}'.format(
doc_idx_filename))
doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading sample-idx mapping from {}'.format(
sample_idx_filename))
sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' > loading shuffle-idx mapping from {}'.format(
shuffle_idx_filename))
shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(
time.time() - start_time))
print_rank_0(' total number of samples: {}'.format(
sample_idx.shape[0]))
print_rank_0(' total number of epochs: {}'.format(num_epochs))
return doc_idx, sample_idx, shuffle_idx
def _num_tokens(documents, sizes):
"""Total number of tokens in the dataset."""
return np.sum(sizes[documents])
def _num_epochs(tokens_per_epoch, seq_length, num_samples):
"""Based on number of samples and sequence lenght, calculate how many
epochs will be needed."""
num_epochs = 0
total_tokens = 0
while True:
num_epochs += 1
total_tokens += tokens_per_epoch
# -1 is because we need to retrieve seq_length + 1 token each time
# but the last token will overlap with the first token of the next
# sample except for the last sample.
if ((total_tokens - 1) // seq_length) >= num_samples:
return num_epochs
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
"""Build an array with length = number-of-epochs * number-of-dcuments.
Each index is mapped to a corresponding document."""
if not separate_last_epoch or num_epochs == 1:
doc_idx = np.mgrid[0:num_epochs, 0:len(documents)][1]
doc_idx[:] = documents
doc_idx = doc_idx.reshape(-1)
doc_idx = doc_idx.astype(np.int32)
np_rng.shuffle(doc_idx)
return doc_idx
doc_idx_first = _build_doc_idx(documents, num_epochs-1, np_rng, False)
doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
return np.concatenate((doc_idx_first, doc_idx_last))
def _build_sample_idx(sizes, doc_idx, seq_length,
num_epochs, tokens_per_epoch):
"""Sample index mapping is a 2D array with sizes
[number-of-samples + 1, 2] where [..., 0] contains
the index into `doc_idx` and [..., 1] is the
starting offset in that document."""
# Total number of samples. For -1 see comments in `_num_epochs`.
num_samples = (num_epochs * tokens_per_epoch - 1) // seq_length
sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32)
# Index into sample_idx.
sample_index = 0
# Index into doc_idx.
doc_idx_index = 0
    # Beginning offset for each document.
doc_offset = 0
# Start with first document and no offset.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
while sample_index <= num_samples:
# Start with a fresh sequence.
remaining_seq_length = seq_length + 1
while remaining_seq_length != 0:
# Get the document length.
doc_id = doc_idx[doc_idx_index]
doc_length = sizes[doc_id] - doc_offset
# And add it to the current sequence.
remaining_seq_length -= doc_length
# If we have more than a full sequence, adjust offset and set
# remaining length to zero so we return from the while loop.
# Note that -1 here is for the same reason we have -1 in
# `_num_epochs` calculations.
if remaining_seq_length <= 0:
doc_offset += (remaining_seq_length + doc_length - 1)
remaining_seq_length = 0
else:
                # Otherwise, start from the beginning of the next document.
doc_idx_index += 1
doc_offset = 0
# Record the sequence.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
return sample_idx
def _build_shuffle_idx(num_samples, total_size, np_rng):
"""Build the range [0, size) and shuffle."""
print(' > building shuffle index with split [0, {}) and [{}, {}) '
'...'.format(num_samples, num_samples, total_size), flush=True)
dtype_ = np.uint32
if total_size >= (np.iinfo(np.uint32).max - 1):
dtype_ = np.int64
shuffle_idx_first = np.arange(start=0, stop=num_samples,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_first)
if num_samples == total_size:
return shuffle_idx_first
shuffle_idx_last = np.arange(start=num_samples, stop=total_size,
step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_last)
return np.concatenate((shuffle_idx_first, shuffle_idx_last))
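# Tiny illustrative check of the pure-Python sample index builder (hypothetical
# sizes, not part of Megatron): with documents of 5 and 4 tokens, one epoch and
# seq_length 3, each row of sample_idx is (document index, starting offset).
if __name__ == '__main__':
    _sizes = np.array([5, 4], dtype=np.int32)
    _doc_idx = np.array([0, 1], dtype=np.int32)
    print(_build_sample_idx(_sizes, _doc_idx, seq_length=3,
                            num_epochs=1, tokens_per_epoch=9))
    # expected: [[0 0], [0 3], [1 1]]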
|
from background_task import background
from logging import getLogger
logger = getLogger(__name__)
@background(schedule=60)
def demo_task():
print('This sentence should be printed per 10 seconds')
|
from math import pi
while True:
r, m, c = list(map(float, input().split()))
if r == 0:
break
a = pi * (r**2)
e = c / m * (4 * (r**2))
print("{0} {1}".format(a, e))
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_Classification.ipynb (unless otherwise specified).
__all__ = ['convert_str', 'scaler', 'comb', 'rf_colselector', 'corr_colselector', 'ColProcessor', 'interaction_feats',
'poly_feats', 'pca_feats', 'clubbed_feats', 'preprocess', 'final_preprocessor', 'combined_metrics',
'confusion_matrix_plot', 'to_excel', 'get_table_download_link', 'GNB', 'LogisticReg', 'KNN', 'SVM', 'DT',
'RF', 'GB', 'ERT', 'XGB', 'SGD', 'NN', 'data', 'test_data']
# Cell
import streamlit as st
import streamlit.components.v1 as components
from pdpbox import pdp
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import shap
# load JS visualization code to notebook
shap.initjs()
import base64
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris, load_digits
#Simple Classifiers
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
#Tree based Classifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from xgboost import XGBClassifier
#Gradient Based Classifier
from sklearn.linear_model import SGDClassifier
from sklearn.neural_network import MLPClassifier
#Preprocessing packages
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import *
from sklearn.decomposition import PCA
#Metrics
from sklearn import metrics
from sklearn.metrics import *
from sklearn.model_selection import GridSearchCV
import random
from sklearn.inspection import plot_partial_dependence
import os
import base64
from io import BytesIO
def convert_str(a):
a = str(a)
return a
def scaler(scaling_scheme='standard_scaler'):
if scaling_scheme == 'max_abs_scaler':
scal = MaxAbsScaler()
elif scaling_scheme == 'min_max_scaler':
scal = MinMaxScaler()
elif scaling_scheme == 'normalizer':
scal = Normalizer()
elif scaling_scheme == 'quantile_transformer':
scal = QuantileTransformer()
elif scaling_scheme == 'robust_scaler':
scal = RobustScaler()
elif scaling_scheme == 'power_transformer':
scal = PowerTransformer()
elif scaling_scheme == 'standard_scaler':
scal = StandardScaler()
return scal
def comb(X, pairwise_linear=False, pairwise_product=False):
from itertools import combinations
X_copy = X.copy()
columns = [str(i) for i in X.columns]
X.columns = columns
comb = combinations(columns, 2)
    # iterate over the obtained column pairs
if pairwise_linear:
for i in list(comb):
a = i[0]
b = i[1]
col_name_add = a+'+'+b
X_copy[col_name_add] = X[a]+X[b]
col_name_sub = a+'-'+b
X_copy[col_name_sub] = X[a]-X[b]
if pairwise_product:
comb = combinations(columns, 2)
        # iterate over the obtained column pairs
for i in list(comb):
a = i[0]
b = i[1]
col_name = a+'*'+b
X_copy[col_name] = X[a]*X[b]
return X_copy
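# Quick illustration (toy frame, comments only): with pairwise_linear=True the call
# adds 'a+b' and 'a-b' columns, and with pairwise_product=True it adds 'a*b'.
#   comb(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), pairwise_linear=True).columns
#   -> Index(['a', 'b', 'a+b', 'a-b'], dtype='object')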
def rf_colselector(X_train, y_train, no_of_cols, n_estimators=100):
rf = RandomForestClassifier(n_estimators=n_estimators)
rf.fit(X_train, y_train)
importance = rf.feature_importances_
df_importance = pd.DataFrame(importance, index = X_train.columns, columns = ['importance'])
importance_sorted = df_importance.sort_values(by=['importance'], ascending=False)
selected_columns = importance_sorted[:no_of_cols].index
return selected_columns
def corr_colselector(X_train, y_train, threshold):
d = pd.concat([X_train, y_train.reset_index(drop=True)], axis=1)
columns = d.corr().iloc[:, -1][np.logical_or((d.corr().iloc[:, -1] > threshold), (d.corr().iloc[:, -1] < -threshold))].index
return columns[:-1], d.corr()
class ColProcessor():
def __init__(self, cardinality, rf_col=False, corr_col=False, label_enc=False, interaction_only=False, poly_feat=False):
self.rf_col = rf_col
self.corr_col = corr_col
self.label_enc = label_enc
self.interaction_only = interaction_only
self.poly_feat = poly_feat
self.cardinality = cardinality
def fit(self, X, y=None):
categorical_cols = [cname for cname in X.columns if X[cname].nunique() < self.cardinality and
X[cname].dtype == "object"]
numerical_cols = [cname for cname in X.columns if X[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
self.categorical_cols = categorical_cols
self.numerical_cols = numerical_cols
self.my_cols = my_cols
X = X[my_cols].copy()
imputer_num = SimpleImputer(strategy='constant')
X_dum = imputer_num.fit_transform(X[self.numerical_cols])
self.imputer_num = imputer_num
if self.categorical_cols:
imputer_cat = SimpleImputer(strategy='most_frequent')
X_cat = imputer_cat.fit_transform(X[self.categorical_cols])
self.imputer_cat = imputer_cat
if not self.label_enc:
Ohe = OneHotEncoder(handle_unknown='ignore')
Ohe.fit(X_cat)
self.Ohe = Ohe
else:
                OrdEnc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)  # 'ignore' is not a valid option for OrdinalEncoder
                OrdEnc.fit(X_cat)
                self.OrdEnc = OrdEnc
return self
def transform(self, X, y=None):
X_num = pd.DataFrame(data=self.imputer_num.transform(X[self.numerical_cols]), columns=self.numerical_cols)
if self.categorical_cols:
if not self.label_enc:
X_cat = pd.DataFrame(data=self.Ohe.transform(self.imputer_cat.transform(X[self.categorical_cols])).toarray(),
columns=self.Ohe.get_feature_names(input_features=self.categorical_cols))
data = pd.concat([X_cat, X_num], axis = 1)
else:
X_cat = pd.DataFrame(self.OrdEnc.transform(self.imputer_cat.transform(X[self.categorical_cols])), columns=self.categorical_cols)
data = pd.concat([X_cat.reset_index(drop=True), X_num], axis = 1)
else:
data = X_num
return data, X_num
def interaction_feats(X):
interaction = PolynomialFeatures(2, interaction_only=True)
interaction.fit(X)
X_interaction = pd.DataFrame(data=interaction.transform(X), columns=interaction.get_feature_names(X.columns))
return X_interaction
def poly_feats(X):
poly = PolynomialFeatures(2)
poly.fit(X)
X_poly = pd.DataFrame(data=poly.transform(X), columns=poly.get_feature_names(X.columns))
return X_poly
def pca_feats(X, n_comp):
pca = PCA(n_components=n_comp)
pca.fit(X)
X_pca = pd.DataFrame(data=pca.transform(X))
return X_pca
def clubbed_feats(X, polynomial_features, interaction_only, pca_on):
if polynomial_features:
X = poly_feats(X)
elif interaction_only:
X = interaction_feats(X)
if pca_on:
X = pca_feats(X, 100)
return X
def preprocess(X_train,
y_train,
X_valid,
X_test=None,
rf_col_selection=False,
rf_no_of_cols=20,
rf_n_estimators=100,
corr_col_selection=False,
corr_threshold=0.01,
pairwise_linear=False,
pairwise_product=False):
X_train = comb(X=X_train, pairwise_linear=pairwise_linear, pairwise_product=pairwise_product)
X_valid = comb(X=X_valid, pairwise_linear=pairwise_linear, pairwise_product=pairwise_product)
if type(X_test)!=type(None):
X_test = comb(X=X_test, pairwise_linear=pairwise_linear, pairwise_product=pairwise_product)
return X_train, X_valid, X_test
def final_preprocessor(X_train,
y_train,
X_valid,
X_test=None,
rf_col_selection=False,
rf_no_of_cols=20,
rf_n_estimators=100,
corr_col_selection=False,
corr_threshold=0.01,
pairwise_linear=False,
pairwise_product=False,
cardinality=100,
polynomial_features=False,
interaction_only=False,
pca_on=False,
label_enc=False
):
    col = ColProcessor(cardinality=cardinality, label_enc=label_enc)  # use the cardinality argument instead of a hard-coded value
col.fit(X_train)
data_train, X_train_num = col.transform(X_train)
data_valid, X_valid_num = col.transform(X_valid)
if type(X_test)!=type(None):
data_test, X_test_num = col.transform(X_test)
else:
X_test_num = None
X_train_num = clubbed_feats(X_train_num,
polynomial_features=polynomial_features,
interaction_only=interaction_only,
pca_on=pca_on)
X_valid_num = clubbed_feats(X_valid_num,
polynomial_features=polynomial_features,
interaction_only=interaction_only,
pca_on=pca_on)
if type(X_test)!=type(None):
X_test_num = clubbed_feats(X_test_num,
polynomial_features=polynomial_features,
interaction_only=interaction_only,
pca_on=pca_on)
train, valid, test = preprocess(X_train_num,
y_train,
X_valid_num,
X_test_num,
rf_col_selection=rf_col_selection,
rf_no_of_cols=rf_no_of_cols,
rf_n_estimators=rf_n_estimators,
corr_col_selection=corr_col_selection,
corr_threshold=corr_threshold,
pairwise_linear=pairwise_linear,
pairwise_product=pairwise_product
)
if col.categorical_cols:
if not label_enc:
Ohe_cat_cols = col.Ohe.get_feature_names(col.categorical_cols)
train = pd.concat([train, data_train[Ohe_cat_cols]], axis=1)
valid = pd.concat([valid, data_valid[Ohe_cat_cols]], axis=1)
if type(X_test)!=type(None):
test = pd.concat([test, data_test[Ohe_cat_cols]], axis=1)
else:
train = data_train
valid = data_valid
if type(X_test)!=type(None):
test = data_test
if rf_col_selection:
columns_selected = rf_colselector(train,
y_train,
no_of_cols=rf_no_of_cols,
n_estimators=rf_n_estimators)
train = train[columns_selected]
valid = valid[columns_selected]
if type(X_test)!=type(None):
test = test[columns_selected]
if corr_col_selection:
corr_cols, df = corr_colselector(train, y_train, threshold=corr_threshold)
train = train[corr_cols]
valid = valid[corr_cols]
if type(X_test)!=type(None):
test = test[corr_cols]
return train, valid, test, col
def combined_metrics(X_test, y_test, clf):
    # One-hot encode the labels for ROC AUC / log loss.
    # NOTE: this relies on the module-level ``y_train`` created by the
    # train/test split below, since only the fitted classifier is passed in.
    ohe = OneHotEncoder(handle_unknown='ignore')
    ohe.fit(y_train.values.reshape(-1, 1))
y_test_ohe = ohe.transform(y_test.values.reshape(-1, 1)).toarray()
    y_pred = clf.predict(X_test)
    y_pred_ohe = ohe.transform(y_pred.reshape(-1, 1)).toarray()
    metrics_list = [[accuracy_score(y_test, y_pred)],
                    [precision_score(y_test, y_pred, average='micro')],
                    [recall_score(y_test, y_pred, average='micro')],
                    [f1_score(y_test, y_pred, average='micro')],
                    [roc_auc_score(y_test_ohe, y_pred_ohe, multi_class='ovr')],
                    [hamming_loss(y_test, y_pred)],
                    [log_loss(y_test_ohe, y_pred_ohe)]
                    ]
index = ['Accuracy', 'Precision', 'Recall', 'F1 Score', 'ROC AUC', 'Hamming Loss', 'Log Loss']
# index = ['Accuracy', 'Precision', 'Recall', 'F1 Score', 'Hamming Loss', 'Log Loss']
# index = ['Accuracy', 'Precision', 'Recall', 'F1 Score', 'ROC AUC', 'Hamming Loss']
df_metric = pd.DataFrame(metrics_list, index = index, columns = ['Value'])
return df_metric
def confusion_matrix_plot(cm, class_names, title = 'Confusion Matrix Plot'):
plt.clf()
plt.imshow(cm, interpolation='nearest', cmap = 'Blues_r')
plt.title(title)
plt.ylabel('True')
plt.xlabel('Predicted')
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
s = [['TN','FP'], ['FN', 'TP']]
for i in range(len(class_names)):
for j in range(len(class_names)):
plt.text(j,i, str(cm[i][j]))
plt.show()
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, index = False, sheet_name='Sheet1')
workbook = writer.book
worksheet = writer.sheets['Sheet1']
format1 = workbook.add_format({'num_format': '0.00'}) # Tried with '0%' and '#,##0.00' also.
worksheet.set_column('A:A', None, format1) # Say Data are in column A
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
val = to_excel(df)
b64 = base64.b64encode(val) # val looks like b'...'
return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="Your_File.xlsx">Download output file</a>' # decode b'abc' => abc
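# A typical usage (assumed) inside the Streamlit app is to render the link as
# raw HTML, e.g. st.markdown(get_table_download_link(df), unsafe_allow_html=True).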
def GNB():
gnb_params = {'clf__estimator':[GaussianNB()]
}
return gnb_params
def LogisticReg():
lr_params = {'clf__estimator': [LogisticRegression()]
}
st.subheader('Logistic Regression')
penalty = st.multiselect('Penalty', ['l1', 'l2'], ['l2'])
reg = st.multiselect('C', [0.1, 1.0, 2.0], [1.0])
solver = st.multiselect('Solver', ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga'], ['liblinear'])
lr_params['clf__estimator__penalty'] = penalty
lr_params['clf__estimator__C'] = reg
lr_params['clf__estimator__solver'] = solver
return lr_params
def KNN():
knn_params = {'clf__estimator': [KNeighborsClassifier()]
}
st.subheader('KNN')
n_neighbors = st.multiselect('Neighbors', list(range(1,30)), [5])
leaf_size = st.multiselect('Leaf Size', list(range(1,50)), [30])
p_distance = st.multiselect('Distance Metric', [1,2], [2])
knn_params['clf__estimator__n_neighbors'] = n_neighbors
knn_params['clf__estimator__leaf_size'] = leaf_size
knn_params['clf__estimator__p'] = p_distance
return knn_params
def SVM():
svm_params = {'clf__estimator': [SVC(probability=True)]
}
st.subheader('Support Vector Machines')
c = st.multiselect('C', [0.1, 1, 10, 100, 1000], [1])
gamma = st.multiselect('Gamma', ['scale', 'auto'], ['scale'])
kernel = st.multiselect('Kernel', ['linear', 'rbf', 'poly', 'sigmoid'], ['rbf'])
svm_params['clf__estimator__C'] = c
svm_params['clf__estimator__gamma'] = gamma
svm_params['clf__estimator__kernel'] = kernel
return svm_params
def DT():
dt_params = {'clf__estimator': [DecisionTreeClassifier()]}
st.subheader('Decision Tree')
criterion = st.multiselect('Criterion', ["gini", "entropy"], ['gini'])
min_samp_split = st.multiselect('Min Samples Split', [2, 10], [2])
max_depth = st.multiselect('Max Depth', [2, 5, 10], [10])
dt_params['clf__estimator__criterion'] = criterion
    dt_params['clf__estimator__min_samples_split'] = min_samp_split
dt_params['clf__estimator__max_depth'] = max_depth
return dt_params
def RF():
rf_params = {'clf__estimator': [RandomForestClassifier()]
}
st.subheader('Random Forest')
n_estimators = st.multiselect('Number of Trees', [100, 200, 500], [100])
max_features = st.multiselect('Max Features', [2, 10, 'auto', 'sqrt', 'log2'], ['auto'])
max_depth = st.multiselect('Max Depth', [4,5,6,7,8, None], [None])
criterion = st.multiselect('Criteria', ['gini', 'entropy'], ['gini'])
rf_params['clf__estimator__n_estimators'] = n_estimators
rf_params['clf__estimator__max_features'] = max_features
rf_params['clf__estimator__max_depth'] = max_depth
rf_params['clf__estimator__criterion'] = criterion
return rf_params
def GB():
gb_params = {'clf__estimator': [GradientBoostingClassifier()]
}
st.subheader('Gradient Booster')
loss = st.multiselect('Loss Function', ['deviance', 'exponential'], ['deviance'])
learning_rate = st.multiselect('Learning Rate', [0.001, 0.01, 0.1], [0.1])
min_samples_split = st.multiselect('Min Samples Split', list(range(1, 10)), [2])
min_samples_leaf = st.multiselect('Min Samples Leaf', list(range(1, 10)), [1])
max_depth = st.multiselect('Max Depth', [1, 2, 3, 4, 5, 6], [3])
max_features = st.multiselect('Max Features', ['auto', 'log2', 'sqrt', None], [None])
criterion = st.multiselect('Criterion', ['friedman_mse', 'mse', 'mae'], ['friedman_mse'])
subsample = st.multiselect('Subsample', [0.5, 0.618, 0.8, 0.85, 0.9, 0.95, 1.0], [1.0])
n_estimators = st.multiselect('Number of Trees', [50, 100, 150, 200, 250], [100])
gb_params['clf__estimator__loss'] = loss
gb_params['clf__estimator__learning_rate'] = learning_rate
gb_params['clf__estimator__min_samples_split'] = min_samples_split
gb_params['clf__estimator__min_samples_leaf'] = min_samples_leaf
gb_params['clf__estimator__max_depth'] = max_depth
gb_params['clf__estimator__max_features'] = max_features
gb_params['clf__estimator__criterion'] = criterion
gb_params['clf__estimator__subsample'] = subsample
gb_params['clf__estimator__n_estimators'] = n_estimators
return gb_params
def ERT():
ert_params = {'clf__estimator': [ExtraTreesClassifier()]
}
st.subheader('Extra Random Trees')
n_estimators = st.multiselect('Number of Trees', [100, 200, 500, 1000], [100]) #fix
max_depth = st.multiselect('Max Depth', [None, 4, 5, 6, 7, 8, 9], [None]) #fix
min_samples_leaf = st.multiselect('Min Sample per Leaf', [1, 2, 3, 4, 5], [1])
n_jobs = st.selectbox('Parallelism', [1, 2, 3, 4, -1], 4)
ert_params['clf__estimator__n_estimators'] = n_estimators
ert_params['clf__estimator__max_depth'] = max_depth
ert_params['clf__estimator__min_samples_leaf'] = min_samples_leaf
ert_params['clf__estimator__n_jobs'] = [n_jobs]
return ert_params
def XGB():
xgb_params ={'clf__estimator':[XGBClassifier()]
}
st.subheader('XGBoost')
n_estimators = st.multiselect('Number of Trees', list(range(50, 1000, 50)), [50]) #fix
max_depth = st.multiselect('Max Depth', list(range(1, 20)), [6]) #fix
min_child_weight = st.multiselect('Min Child Weight', list(range(1, 10, 1)), [1])
gamma = st.multiselect('Gamma', list(range(0, 10)), [1])
learning_rate = st.multiselect('Learning Rate', [0.01, 0.05, 0.1, 0.2, 0.3], [0.3])
subsample = st.multiselect('Subsample', list(np.divide(range(5, 11), 10)), [1.0])
booster = st.multiselect('Booster', ['gbtree', 'gblinear'], ['gbtree'])
xgb_params['clf__estimator__n_estimators'] = n_estimators
xgb_params['clf__estimator__max_depth'] = max_depth
xgb_params['clf__estimator__min_child_weight'] = min_child_weight
xgb_params['clf__estimator__gamma'] = gamma
xgb_params['clf__estimator__learning_rate'] = learning_rate
xgb_params['clf__estimator__subsample'] = subsample
xgb_params['clf__estimator__booster'] = booster
return xgb_params
def SGD():
sgd_params = {'clf__estimator': [SGDClassifier()]
}
st.subheader('SGD')
loss = st.multiselect('Loss Function', ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'], ['hinge']) #fix
max_iter = st.multiselect('Max Iterations', list(np.multiply(range(5, 16), 100)), [1000]) #fix
tol = st.multiselect('Tolerance', [0.0001, 0.001, 0.05, 0.1], [0.0001])
penalty = st.multiselect('Penalty', ['l2', 'l1', 'elasticnet'], ['l2'])
alpha = st.multiselect('Alpha', [0.0001, 0.001, 0.05, 0.1, 0.2, 0.3], [0.0001])
n_jobs = st.selectbox('Parallelization', [1, 2, 3, 4, -1], 4)
sgd_params['clf__estimator__loss'] = loss
sgd_params['clf__estimator__max_iter'] = max_iter
sgd_params['clf__estimator__tol'] = tol
sgd_params['clf__estimator__penalty'] = penalty
sgd_params['clf__estimator__alpha'] = alpha
sgd_params['clf__estimator__n_jobs'] = [n_jobs]
return sgd_params
def NN():
nn_params = {'clf__estimator': [MLPClassifier()]
}
st.subheader('Neural Network')
solver = st.multiselect('Solver', ['lbfgs', 'sgd', 'adam'], ['adam'])
max_iter = st.multiselect('Max Iterations', [1000,1100,1200,1300,1400], [1000])
alpha = st.multiselect('Alpha', list(10.0 ** -np.arange(1, 10)), [0.0001])
hidden_layer_sizes = st.multiselect('Hidden Layer Sizes', list(range(50, 500, 50)), [100])
# hidden_layer_sizes = st.multiselect('Hidden Layer Sizes', [50, 100, 150, 200, 250, 300, 350, 400, 450, 500] , [100])
nn_params['clf__estimator__solver'] = solver
nn_params['clf__estimator__max_iter'] = max_iter
nn_params['clf__estimator__alpha'] = alpha
nn_params['clf__estimator__hidden_layer_sizes'] = hidden_layer_sizes
return nn_params
data = st.file_uploader('Upload a csv')
test_data = st.file_uploader('Upload a csv for prediction:')
if (data is not None) and (test_data is not None):
df = pd.read_csv(data)
df_test = pd.read_csv(test_data)
# df = random.shuffle(data)
target_col =st.selectbox('Choose target variable', df.columns)
X = df.drop(target_col, axis = 1)
y = df[target_col]
test_ratio = st.number_input('Enter test split ratio, 0 < ratio < 1', min_value = 0.0,
max_value = 1.0, value = 0.2)
if test_ratio:
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y,
test_size=test_ratio,
random_state = 0)
selected_models = st.sidebar.multiselect(
'Choose Algorithms:',(
'Gaussian NB',
'Logistic Regression',
'KNN',
'Support Vector Machines',
'Decision Tree',
'Random Forest',
'Gradient Boosting',
'Extra Random Trees',
'XGBoost',
'Stochastic Gradient Descent',
'Neural Network'), ['KNN', 'Support Vector Machines', 'Decision Tree'])
if selected_models:
func_dict = {'Gaussian NB': GNB(),
'Logistic Regression':LogisticReg(),
'KNN': KNN(),
'Support Vector Machines': SVM(),
'Decision Tree': DT(),
'Random Forest': RF(),
'Gradient Boosting': GB(),
'Extra Random Trees': ERT(),
'XGBoost': XGB(),
'Stochastic Gradient Descent': SGD(),
'Neural Network': NN()
}
param_dict = {}
for i in selected_models:
param_dict[i] = func_dict[i]
from sklearn.base import BaseEstimator, ClassifierMixin
class MyClassifier(BaseEstimator, ClassifierMixin):
def __init__(
self,
estimator = XGBClassifier(),
):
"""
A Custom BaseEstimator that can switch between classifiers.
:param estimator: sklearn object - The classifier
"""
self.estimator = estimator
def fit(self, X, y=None, **kwargs):
self.estimator.fit(X, y)
return self
def predict(self, X, y=None):
return self.estimator.predict(X)
def predict_proba(self, X):
return self.estimator.predict_proba(X)
def score(self, X, y):
return self.estimator.score(X, y)
@property
def classes_(self):
return self.estimator.classes_
X_train, X_valid, df_test, col = final_preprocessor(X_train_full,
y_train,
X_valid_full,
df_test,
rf_col_selection=True,
rf_no_of_cols=20,
rf_n_estimators=100,
corr_col_selection=True,
corr_threshold=0.2,
pairwise_linear=False,
pairwise_product=False,
cardinality=100,
polynomial_features=False,
interaction_only=False,
pca_on=False
)
data_valid = pd.concat([X_valid, y_valid.reset_index(drop=True)], axis = 1)
my_pipeline = Pipeline([('scaler', scaler(scaling_scheme='power_transformer')),
('clf', MyClassifier())
])
parameters = []
for i in selected_models:
parameters.append(param_dict[i])
st.write(parameters)
train = st.button('Train Model')
if train:
with st.spinner('Training Model...'):
from sklearn.model_selection import GridSearchCV
gscv = GridSearchCV(my_pipeline, parameters, cv=3, n_jobs=-1, return_train_score=False, verbose=3)
gscv.fit(X_train, y_train)
st.text('Best Parameters')
st.write(gscv.best_params_)
st.text('Best Score')
st.write(gscv.best_score_)
st.text('Fit vs Time vs HyperParameters')
data = gscv.cv_results_.values()
columns = gscv.cv_results_.keys()
df_fit = pd.DataFrame(data, columns).T
df_fit['param_clf__estimator'] = df_fit['param_clf__estimator'].apply(convert_str)
st.write(df_fit)
st.text('Prediction on Validation Data')
data_valid['Predicted'] = gscv.predict(X_valid)
st.write(data_valid)
st.text('Confusion Matrix')
cm = confusion_matrix(y_valid, gscv.predict(X_valid))
fig1, ax1 = plt.subplots()
class_names = y_valid.unique()
confusion_matrix_plot(cm, class_names)
st.pyplot(fig1)
st.text('Performance Metrics')
st.write(combined_metrics(X_valid, y_valid, gscv))
st.text('Partial Dependence Plot')
features = [0, 1, (0, 1)]
fig, ax = plt.subplots(1,3, figsize = (15,9))
plot_partial_dependence(gscv, X_valid, features=features, target=0, ax=ax)
plt.tight_layout()
st.pyplot(fig)
st.text('ICE Plot')
features = [0, 1]
fig, ax = plt.subplots(figsize=(7, 6))
plot_partial_dependence(gscv, X_valid, features, kind='both', target=0, ax=ax)
plt.tight_layout()
st.pyplot(fig)
st.text('Prediction on Test file')
df_test['Predicted'] = gscv.predict(df_test)
st.write(df_test)
st.text('Shapley Explainer')
# X_test = df_test.drop('Predicted', axis = 1)
explainer = shap.KernelExplainer(gscv.predict_proba, X_valid)
shap_values = explainer.shap_values(X_valid.iloc[2,:])
st.pyplot(shap.force_plot(explainer.expected_value[0], shap_values[0], X_valid.iloc[2,:], matplotlib=True, text_rotation=8))
st.text('Shapley Explainer WaterFall Plot')
f = lambda x: gscv.predict_proba(x)[:,1]
med = X_train.median().values.reshape((1,X_train.shape[1]))
explainer = shap.Explainer(f, med)
shap_values = explainer(X_train.iloc[0:100,:])
st.pyplot(shap.plots.waterfall(shap_values[2], max_display=7))
st.text('Partial Dependence Plot from pdp_box')
pdp_ = pdp.pdp_isolate(model=gscv, dataset=X_valid,
model_features=X_valid.columns,
feature=X_train.columns[0])
fig, axes = pdp.pdp_plot(pdp_isolate_out=pdp_, feature_name=X_valid.columns[0], center = True, ncols=1, figsize = (15, 10))
st.pyplot(fig)
|
'''This example is from http://www.cs.ubc.ca/~murphyk/Bayes/Charniak_91.pdf'''
from bayesian.bbn import build_bbn
from bayesian.utils import make_key
'''
This problem is also sometimes referred to
as "the Dog Problem"
'''
def family_out(fo):
if fo:
return 0.15
return 0.85
def bowel_problem(bp):
if bp:
return 0.01
return 0.99
def light_on(fo, lo):
tt = dict(
tt=0.6,
tf=0.4,
ft=0.05,
        ff=0.95)  # P(lo|~fo) and P(~lo|~fo) must sum to 1
return tt[make_key(fo, lo)]
def dog_out(fo, bp, do):
tt = dict(
ttt=0.99,
tft=0.9,
ftt=0.97,
fft=0.3) # Note typo in article!
key = make_key(fo, bp, do)
if key in tt:
return tt[key]
key = make_key(fo, bp, not do)
return 1 - tt[key]
def hear_bark(do, hb):
tt = dict(
tt=0.7,
ft=0.01)
key = make_key(do, hb)
if key in tt:
return tt[key]
key = make_key(do, not hb)
return 1 - tt[key]
if __name__ == '__main__':
g = build_bbn(
family_out,
bowel_problem,
light_on,
dog_out,
hear_bark)
g.q()
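    # Evidence can also be passed as keyword arguments named after the variables
    # above (assuming the bayesian library's query API), e.g. g.q(hb=True) to
    # condition on having heard barking.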
|
# Sprite classes for platform game
import pygame as pg
from settings import *
import time
vec = pg.math.Vector2
class Player(pg.sprite.Sprite):
def __init__(self, game):
pg.sprite.Sprite.__init__(self)
self.game = game
touche_platform = 1
self.image = pg.image.load("perso.png").convert_alpha()
self.rect = self.image.get_rect()
self.rect.center = (300, HEIGHT / 2)
self.pos = vec(300, HEIGHT / 2)
self.vel = vec(0, 0)
self.acc = vec(0, 0)
self.stopped_by_wall = False
self.ground = True
self.can_move_up = True
self.left = False
def jump(self):
# jump only if standing on a platform
# self.rect.x += 1
hits = pg.sprite.spritecollide(self, self.game.platforms, False)
# self.rect.x -= 1
# print(self.vel.y, self.vel.x)
if hits:
self.vel.y = -20
def update(self):
keys = pg.key.get_pressed()
if keys[pg.K_LEFT]:
self.image = pg.image.load("perso_gauche.png").convert_alpha()
self.left = True
if keys[pg.K_RIGHT] and MovesWithKeys.can_move_right:
self.image = pg.image.load("perso_droite.png").convert_alpha()
self.left = False
hits = pg.sprite.spritecollide(self, self.game.platforms, False)
if self.pos.y == GROUND_y:
self.ground = True
if not self.can_move_up: # player bumps down if hits platform while jumping
self.vel.y = 0
## if self.ground and keys[pg.K_RIGHT] and not keys[pg.K_UP]:
## self.pos.x = hits[0].rect.left
# if hits and pg.sprite.collide_rect(self.pos.x,hits[0].rect.left) :
# if self.rect.right >= hits[0].rect.left:
# print('yayy')
## if self.vel.y == 0 and self.ground and keys[pg.K_LEFT]:
## self.pos.y = GROUND_y
## print('yo')
## if keys[pg.K_LEFT]:
## self.vel(0, 0)
# print('miauuu')
if self.vel.y > 0 and hits: # and
self.pos.y = hits[0].rect.top # that says it should stay on top
self.vel.y = 0
self.ground = True
self.acc = vec(0, PLAYER_GRAV)
## keys = pg.key.get_pressed()
## if keys[pg.K_LEFT]:
## self.acc.x = -PLAYER_ACC
## if keys[pg.K_RIGHT]:
## self.acc.x = PLAYER_ACC
##
# apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
# equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
# wrap around the sides of the screen
if self.pos.x > WIDTH:
self.pos.x = 0
if self.pos.x < 0:
self.pos.x = WIDTH
# if self.pos.y > 530:
# self.pos.y = 530
self.rect.midbottom = self.pos
class MovesWithKeys(pg.sprite.Sprite):
can_move_left = True
can_move_right = True
def process_player_movement(self):
keys = pg.key.get_pressed()
if keys[pg.K_LEFT] and MovesWithKeys.can_move_left:
self.acc.x = PLAYER_ACC
if keys[pg.K_RIGHT] and MovesWithKeys.can_move_right:
self.acc.x = -PLAYER_ACC
class Enemy(MovesWithKeys):
def __init__(self, game, x, y):
self.groups = game.all_sprites, game.enemy1_group
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = pg.image.load("enemy.png").convert_alpha()
self.rect = self.image.get_rect()
self.vel = vec(0, 0)
self.acc = vec(0, 0)
self.pos = vec(x, y)
self.rect.x = x
self.rect.y = y
self.time1 = time.time()
# print(self.time1)
self.counter = 1
def update(self):
self.acc = vec(0, 0)
self.process_player_movement()
if self.counter:
self.acc.x -= ENEMY_ACC
if time.time() > self.time1 + 3 and self.counter:
self.time1 = time.time()
self.counter = 0
if self.counter == 0:
self.acc.x += ENEMY_ACC
if time.time() > self.time1 + 3 and self.counter == 0:
self.time1 = time.time()
self.counter = 1
# apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
# equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
class Enemy2(MovesWithKeys):
def __init__(self, game, x, y):
self.groups = game.all_sprites, game.enemy1_group
pg.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = pg.image.load("enemy2.png").convert_alpha()
self.rect = self.image.get_rect()
self.vel = vec(0, 0)
self.acc = vec(0, 0)
self.pos = vec(x, y)
self.counter = 1
self.rect.x = x
self.rect.y = y
def update(self):
self.acc = vec(0, 0)
self.process_player_movement()
# apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
# equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
class Background(MovesWithKeys):
def __init__(self, game):
pg.sprite.Sprite.__init__(self)
self.game = game
self.image = pg.image.load("background.jpg").convert_alpha()
self.rect = self.image.get_rect()
self.rect.center = (WIDTH / 2, HEIGHT / 2)
self.pos = vec(WIDTH, HEIGHT)
self.vel = vec(0, 0)
self.acc = vec(0, 0)
def update(self):
self.acc = vec(0, 0)
self.process_player_movement()
# apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
# equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
# wrap around the sides of the screen
if self.pos.x > WIDTH:
self.pos.x = 0
if self.pos.x < 0:
self.pos.x = WIDTH
self.rect.midbottom = self.pos
class Panneau(MovesWithKeys):
def __init__(self, game):
pg.sprite.Sprite.__init__(self)
self.game = game
self.image = pg.image.load("panneau.png").convert_alpha()
self.rect = self.image.get_rect()
self.rect.x = 9600
self.rect.y = GROUND_y
self.vel = vec(0, 0)
self.acc = vec(0, 0)
# self.rect.center = (WIDTH / 2, HEIGHT / 2)
self.pos = vec(9600, GROUND_y)
def update(self):
self.acc = vec(0, 0)
self.process_player_movement()
# apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
# equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
class Platform(MovesWithKeys):
def __init__(self, x, y, type):
pg.sprite.Sprite.__init__(self)
if type == 1:
self.image = pg.image.load("block_1.png").convert_alpha()
elif type == 2:
self.image = pg.image.load("block_2.png").convert_alpha()
else:
self.image = pg.Surface((9800, 60))
self.image.fill(YELLOW)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.vel = vec(0, 0)
self.acc = vec(0, 0)
# self.rect.center = (WIDTH / 2, HEIGHT / 2)
self.pos = vec(x, y)
def update(self):
self.acc = vec(0, 0)
self.process_player_movement()
# apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
# equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
class Coin(MovesWithKeys):
def __init__(self, x, y):
pg.sprite.Sprite.__init__(self)
self.image = pg.image.load("coin.png").convert_alpha()
self.rect = self.image.get_rect()
self.rect.center = ((WIDTH / 2) - 100, 0)
self.pos = vec((WIDTH / 2) - 100, 0)
self.vel = vec(0, 0)
self.acc = vec(0, 0)
self.pos = vec(x, y)
# self.rect.center = (WIDTH / 2, HEIGHT / 2)
## def stop(self):
## self.vel = vec(0, 0)
## self.acc = vec(0, 0)
## print('miau')
def update(self):
## keys = pg.key.get_pressed()
keys = pg.key.get_pressed()
self.acc = vec(0, 0)
self.process_player_movement()
if keys[pg.K_DOWN]:
self.acc.y = PLAYER_GRAV
# print('miau')
## hits = pg.sprite.spritecollide(self, self.game.platforms, False)
# self.rect.x -= 1
# print(self.vel.y, self.vel.x)
## hits_cp = pg.sprite.groupcollide(self.game.platforms, self, False, False)
## if hits_cp:
## print('miau')
if self.pos.y > GROUND_y - 30: # or hits_cp: #hits or
self.vel.y = 0
self.acc.y = 0
# print('hi')
## # apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
## # equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
# print(self.pos)
class Fireball(pg.sprite.Sprite):
def __init__(self, x, y, a):
pg.sprite.Sprite.__init__(self)
self.image = pg.image.load("fireball.png").convert_alpha()
self.rect = self.image.get_rect()
self.rect.center = ((WIDTH / 2) - 100, 0)
self.pos = vec((WIDTH / 2) - 100, 0)
self.vel = vec(a, 0)
self.acc = vec(0, 0)
self.pos = vec(x, y)
self.acc = vec(0, PLAYER_GRAV)
def update(self):
self.acc = vec(0, PLAYER_GRAV)
## # apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
## # equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
self.rect.midbottom = self.pos
# print(self.pos)
class Enemy_fire(pg.sprite.Sprite):
def __init__(self):
pg.sprite.Sprite.__init__(self)
self.image = pg.image.load("fireball.png").convert_alpha()
self.rect = self.image.get_rect()
self.rect.center = (0, 0)
self.pos = vec(100, 100)
self.vel = vec(80, 0)
self.acc = vec(0, 0)
self.pos = vec(100, 100)
self.acc = vec(0, PLAYER_GRAV)
# self.velocity0 = 0
def update(self):
self.acc = vec(0, PLAYER_GRAV)
## # apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
## # equations of motion
self.vel += self.acc
self.pos += self.vel + 0.5 * self.acc
# self.velocity0 = self.vel
## keys = pg.key.get_pressed()
## if keys:
## if keys[pg.K_RIGHT]:
## self.vel = vec(0,0)
## self.acc.x = -PLAYER_ACC
##
## if keys[pg.K_LEFT]:
## self.acc.x = PLAYER_ACC
## else:
## self.vel = self.velocity0
## self.rect.midbottom = self.pos
# print(self.pos)
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'gradingStudents' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY grades as parameter.
#
def gradingStudents(grades):
    final_grades = list()
    for grade in grades:
        # Grades below 38 are never rounded.
        if grade >= 38:
            next_multiple = grade + (5 - grade % 5) % 5
            # Round up only when the next multiple of 5 is less than 3 away.
            if next_multiple - grade < 3:
                grade = next_multiple
        final_grades.append(grade)
    return final_grades
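# Example: gradingStudents([73, 67, 38, 33]) -> [75, 67, 40, 33]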
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
grades_count = int(input().strip())
grades = []
for _ in range(grades_count):
grades_item = int(input().strip())
grades.append(grades_item)
result = gradingStudents(grades)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
import threading
from time import sleep
from random import randint
l_platos = 5
platos = [threading.Semaphore(1) for i in range(l_platos)]
max_animales = threading.Semaphore(l_platos)
puerta = threading.Semaphore(1)
mutex_gatos_en_cuarto = threading.Semaphore(1)
gatos_en_cuarto = 0
ratones_en_cuarto = 0
mutex_ratones_en_cuarto = threading.Semaphore(1)
torniquete = threading.Semaphore(1)
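# Synchronization sketch (cats and mice sharing food bowls):
#   platos            - one semaphore per bowl, so each bowl is used by one animal at a time
#   max_animales      - caps how many animals can be eating at once (one per bowl)
#   puerta            - held by a cat for its whole visit, and by the mouse group
#                       (first mouse in acquires it, last mouse out releases it),
#                       so cats and mice are never in the room together
#   torniquete        - turnstile that serializes arrivals at the entrance
#   mutex_*_en_cuarto - protect the gatos_en_cuarto / ratones_en_cuarto counters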
def gato_entra_a_cuarto(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
torniquete.acquire()
torniquete.release()
puerta.acquire()
mutex_gatos_en_cuarto.acquire()
gatos_en_cuarto += 1
mutex_gatos_en_cuarto.release()
gato_obtine_plato(id)
gato_come(id)
gato_deja_plato(id)
mutex_gatos_en_cuarto.acquire()
gatos_en_cuarto -= 1
mutex_gatos_en_cuarto.release()
puerta.release()
print("Soy el gato " + str(id) + " y ya me fui")
def gato_obtine_plato(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
max_animales.acquire()
platos[(id)%l_platos].acquire()
print("Soy el gato " + str(id) + " y tengo el plato: " + str((id)%l_platos))
def gato_come(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
print("Soy el gato " + str(id) + " y estoy comiendo en el plato: " + str((id)%l_platos))
def gato_deja_plato(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
platos[(id)%l_platos].release()
max_animales.release()
def raton_entra_a_cuarto(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
torniquete.acquire()
torniquete.release()
mutex_gatos_en_cuarto.acquire()
if(gatos_en_cuarto > 0):
print("Soy el raton " + str(id),end=" ")
print("Ya me comio un gato x.x")
mutex_gatos_en_cuarto.release()
else:
mutex_gatos_en_cuarto.release()
mutex_ratones_en_cuarto.acquire()
ratones_en_cuarto += 1
if(ratones_en_cuarto == 1):
puerta.acquire()
mutex_ratones_en_cuarto.release()
raton_obtine_plato(id)
raton_come(id)
raton_deja_plato(id)
mutex_ratones_en_cuarto.acquire()
ratones_en_cuarto -= 1
if(ratones_en_cuarto == 0):
puerta.release()
print("Soy el raton " + str(id) + " y ya me fui")
mutex_ratones_en_cuarto.release()
def raton_obtine_plato(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
max_animales.acquire()
platos[(id)%l_platos].acquire()
def raton_come(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
print("Soy el raton " + str(id) + " y estoy comiendo en el plato: " + str((id)%l_platos))
def raton_deja_plato(id):
global platos,max_animales,puerta,mutex_gatos_en_cuarto,gatos_en_cuarto
global ratones_en_cuarto,mutex_ratones_en_cuarto,torniquete
platos[(id)%l_platos].release()
max_animales.release()
m = 0
k = 0
for i in range(20):
numero = randint(0,1)
if(numero==0):
m += 1
        threading.Thread(target=raton_entra_a_cuarto, args=[m]).start()
else:
k += 1
        threading.Thread(target=gato_entra_a_cuarto, args=[k]).start()
|
# Generated by Django 3.1.2 on 2020-10-31 11:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20201031_1050'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='gok_id',
field=models.PositiveIntegerField(blank=True, default=0),
),
]
|
import frappe
from frappe import _
@frappe.whitelist(allow_guest=True)
def get_categories(search=None):
# TODO: Remove this method after confirmation of get_category_tree
"""
returns group level 1 and 2 Item Groups/categories
:param search: search text
"""
try:
response = frappe._dict()
cond = " 1=1"
if search:
cond += " and name like '{0}' or parent_item_group like '{0}'".format("%{}%".format(search))
query = """
select name as category, parent_item_group as parent_category,
image from `tabItem Group`
where name not in ('All Item Groups', 'Products', 'Raw Material',
'Services', 'Sub Assemblies', 'Consumable') and {}
""".format(cond)
categories_data = frappe.db.sql(query, as_dict=True)
# grouping in {parent:childs}
categories = {}
for cat in categories_data:
			parent = cat.pop("parent_category")
			if parent not in categories:
				categories[parent] = [cat]
			else:
				categories[parent].append(cat)
category_tree = [ {"category_name": k, "sub_category": v} for k,v in categories.items() ]
response.update({"category": category_tree, "status_code": 200})
except Exception as e:
http_status_code = getattr(e, "http_status_code", 500)
response["status_code"] = http_status_code
frappe.local.response["http_status_code"] = http_status_code
response["message"] = "Unable to fetch categories: {}".format(str(e))
finally:
return response
@frappe.whitelist(allow_guest=True)
def get_child_categories(category):
	# return sub-categories from the item group hierarchy
def _get_child(category, data, child):
for d in data:
if d.get("parent") == category:
child.append(d.get("name"))
if d.get("expandable"):
_get_child(d.get("name"), data, child)
return child
filters_ = {"group_level": (">", 1)}
fields = ['name','parent_item_group as parent','is_group as expandable']
data = frappe.get_list("Item Group", fields=fields, filters=filters_, ignore_permissions=True)
categories = _get_child(category, data, [])
return categories
@frappe.whitelist(allow_guest=True)
def get_category_tree():
"""
return item group tree hierarchy of custom item groups(category)
in parent-child structure.
"""
fields = ["name as title", "group_level", "parent_item_group", "is_group as has_child","weightage"]
erp_item_group = ['All Item Groups', 'Products', 'Raw Material', 'Services', 'Sub Assemblies', 'Consumable']
filters = {
"group_level": [">", 0],
"name": ["not in", erp_item_group]
}
item_groups = frappe.get_all("Item Group", filters, fields,order_by="weightage", ignore_permissions=True)
group_tree = []
for idx, group in enumerate(item_groups):
if group.get("group_level") == 1:
childs, item_groups = get_children(group.get("title"), group.get("group_level"), item_groups)
if len(childs):
child_level = "level_" + str(group.get("group_level")+1)
group[child_level] = childs
else:
group["has_child"] = 0
group.pop("parent_item_group")
group_tree.append(group)
# sequential arrangement
sequence_req = ["מטפלים", "הורים", "מוסדות חינוכיים", "תינוקות 0-3",\
"משחקים", "מתקני חצר", "ריהוט", "SALE"]
# sequence_req = ["Therapist", "Parents", "School", "Baby (0-12months)",\
# "Toys", "Outdoor Toys", "Furniture", "Offers & Sale"]
result = [
g for seq in sequence_req for g in group_tree if g.get("title") == seq
]
return result
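# Illustrative shape of the returned tree (titles below are hypothetical):
# [{"title": "Toys", "group_level": 1, "has_child": 1, "weightage": 10,
#   "level_2": [{"title": "Puzzles", "group_level": 2, "has_child": 0, ...}, ...]}, ...]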
def get_children(category, group_level, data):
children = []
for idx, group in enumerate(data):
if group.get("parent_item_group") == category:
if group.get("has_child"):
childs, data = get_children(group.get("title"), group.get("group_level"), data)
if len(childs):
child_level = "level_" + str(group.get("group_level")+1)
group[child_level] = childs
else:
group["has_child"] = 0
group.pop("parent_item_group")
children.append(group)
#data.remove(group)
return children, data
@frappe.whitelist(allow_guest=True)
def age_list():
def sorting(val):
if "M" in val:
return int(val.split("M")[0])
elif "+" in val:
return len(val.split("+")[0])
response = frappe._dict()
age_records = frappe.get_all("Age", ignore_permissions=True)
age_list = [a.get("name") for a in age_records]
age_list.sort(key=sorting)
response["age_list"] = age_list
return response
@frappe.whitelist(allow_guest=True)
def manufacturer_list():
response = frappe._dict()
manufacturers = frappe.get_all("Brand", ignore_permissions=True)
response["manufacturer_list"] = [m.get("name") for m in manufacturers]
return response
|
from selenium import webdriver
import names,random,time,sys
kaçhesapyapalım = int(input("Kaç Hesap Oluşturalım : "))
print("Sistem Başlatılıyor")
print("Mail Hizmeti Başlatılıyor")
tempmailoptions = webdriver.ChromeOptions()
tempmailoptions.add_argument("--incognito")
tempmailoptions.add_argument("--headless")
tempchrome = webdriver.Chrome(options=tempmailoptions)
tempchrome.get("https://smailpro.com/tool/tempmail")
time.sleep(1)
tempchrome.find_element_by_xpath("/html/body/div/main/div/div[2]/div[1]/div[1]/fieldset/div[9]/input").click()
print("Mail Hizmeti Hazır")
for x in range(1,kaçhesapyapalım + 1):
print("------------", str(x) + ". Hesap Oluşturuluyor ---------------------")
ourname = names.get_full_name()
userid = names.get_first_name() + str(random.randint(100000, 99999999))
şifre = str(random.randint(1000000, 9999999))
mail = userid + "@stempmail.com"
print("Mail Ayarlanıyor")
tempchrome.find_element_by_xpath("/html/body/div/main/div/div[2]/div[1]/div[2]/fieldset/div/input").send_keys(
userid)
tempchrome.find_element_by_xpath("/html/body/div/main/div/div[3]/div[2]/div/button").click()
time.sleep(1)
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--proxy-server=%s' % PROXY)
chrome_options.add_argument("--incognito")
chrome_options.add_argument("--headless")
chrome = webdriver.Chrome(options=chrome_options)
print("Üyelik Formu Ayarlanıyor")
chrome.get("https://www.instagram.com/accounts/emailsignup/")
time.sleep(3)
print("Bilgiler Giriliyor")
try:
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/form/div[3]/div/label/input").send_keys(
mail) # MAİL ADRES
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/form/div[4]/div/label/input").send_keys(ourname)
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/form/div[5]/div/label/input").send_keys(userid)
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/form/div[6]/div/label/input").send_keys(şifre)
time.sleep(1)
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/form/div[7]/div/button").click()
time.sleep(2)
except:
print("Hata Oldu Bir Sonraki Hesaba Geçiliyor")
chrome.close()
chrome.quit()
continue
print("Yaş Doğrulaması Geçiliyor")
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/div[4]/div/div/span/span[3]/select/option[45]").click()
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/div[4]/div/div/span/span[2]/select/option[15]").click()
time.sleep(2)
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/div[5]/div[2]/button").click()
time.sleep(2)
print("Mail Onayı Bypass Ediliyor")
print("Mail Bekleniyor")
time.sleep(2)
elem = ""
def mailbekle():
global elem
print("Mail Bekleniyor")
try:
chrome.find_element_by_xpath("/html/body/div[1]/section/main/div/article/div/div[1]/div/div[2]/div/button").click()
time.sleep(40)
tempchrome.find_element_by_xpath("/html/body/div/main/div/div[4]/div/div/div[1]/div[2]/div[2]/button").click()
time.sleep(2)
elem = tempchrome.find_elements_by_xpath('.//span[@class = "font-weight-light"]')[0]
except:
mailbekle()
mailbekle()
print("Mail Geldi")
if str(elem.text).endswith("is your Instagram code"):
print("Mail Onay Kodu : " + str(elem.text)[0:6])
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/div[3]/form/div[1]/input").send_keys(
str(elem.text)[0:6])
time.sleep(2)
chrome.find_element_by_xpath(
"/html/body/div[1]/section/main/div/article/div/div[1]/div/div[3]/form/div[2]/button").click()
time.sleep(2)
print("Hesap Oluşturuldu")
print("Kalıntılar Temizleniyor")
print(userid + ":" + şifre + "\n")
print("hesaplar.txt ye Kaydediliyor")
tempchrome.refresh()
time.sleep(2)
open("hesaplar.txt", "a").write(userid + ":" + şifre + "\n")
chrome.close()
chrome.quit()
print("Sonraki İşleme Geçiliyor")
print("İşlemler Bitti")
print("Tüm Kalınıtılar Temizleniyor")
tempchrome.close()
tempchrome.quit()
print("Toplam Açılan Hesap Sayısı :" , str(kaçhesapyapalım))
print("Script By Furkan")
print("instagram : f.urkan7")
sys.exit()
|
import boto3
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate, COMMASPACE
def create_message(send_from, send_to, subject, plain_text_body, html_body):
message = MIMEMultipart('alternative')
message['From'] = send_from
message['To'] = COMMASPACE.join(send_to)
message['Date'] = formatdate(localtime=True)
message['Subject'] = subject
message.attach(MIMEText(plain_text_body, 'plain'))
message.attach(MIMEText(html_body, 'html'))
return message
def add_attachment_from_s3(message, bucket, key):
client = boto3.client("s3")
attachment = client.get_object(Bucket=bucket, Key=key)
part = MIMEApplication(attachment["Body"].read(), Name=basename(key))
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(key)
message.attach(part)
def add_attachment_from_local_disk(message, path):
with open(path, "rb") as file:
part = MIMEApplication(file.read(),Name=basename(path))
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(path)
message.attach(part)
def send_message(message):
client = boto3.client("ses")
response = client.send_raw_email(RawMessage = {'Data': message.as_string()})
message = create_message(
"sender@domain.com",
["recipient1@domain.com", "recipient2@domain.com"],
"Testing",
"Testing 123\nTesting 123\nTesting 123",
"<html><head></head><body><h1>Testing 123</h1><p>testing 123</p></body></html>")
add_attachment_from_local_disk(message, 'C:\\path\\to\\local\\file.pdf')
add_attachment_from_s3(message, 'bucket_name', 'prefix/file.pdf')
send_message(message)
|
import heapq
from typing import List
class Solution1:
def furthest_building(self, heights: List[int], bricks: int, ladders: int) -> int:
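        # Greedy idea: treat every upward jump as if it used a ladder (kept in a
        # min-heap). Once more than `ladders` jumps are held, pay for the smallest
        # one with bricks instead; when bricks run out, the previous building is
        # the furthest reachable.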
h = []
for i in range(1, len(heights)):
if heights[i] - heights[i-1] > 0:
heapq.heappush(h, heights[i] - heights[i-1])
if len(h) > ladders:
if h[0] <= bricks:
bricks -= heapq.heappop(h)
else:
return i - 1
return len(heights) - 1
|
#!/usr/bin/python3
# -*-: coding: utf-8 -*-
"""
:author: albert
:date: 03/08/2019
"""
import time
import logging as log
from urllib import parse as url_encoder
from util import httpclient
from db import mysql_connector as mydb
""" 天眼查搜索API """
SEARCH_API = 'https://api9.tianyancha.com/services/v3/search/sNorV3'
""" 企业详情API """
DETAIL_API = 'https://api9.tianyancha.com/services/v3/t/common/baseinfoV5'
""" ua """
UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
""" 请求验证头 """
AUTHORIZATION = '0###oo34J0WVDdeu_k1O-sWPxFpg9WJ4###1555940540033###028a568b0150721d810d5f4417e03650'
""" 请求token """
X_AUTH_TOKEN = "eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODg3NTg5MjA3NSIsImlhdCI6MTU1NTk0MDU3MiwiZXhwIjoxNTU4NTMyNTcyfQ.lCJNDWQK0gD3fp9ieIlnMEzwmi00zkBqyHShdvHnspFzZQmgPHhHJAUY7mVbKY_AFk2Xhk82jMP99Q6a0wlmEQ"
""" 天眼查头信息 """
REQUEST_HEADERS = {
"User-Agent": UA,
"version": "TYC-XCX-WX",
"Host": "api3.tianyancha.com",
"Authorization": AUTHORIZATION,
'x-auth-token': X_AUTH_TOKEN
}
def load_keys(keys: list):
globals().setdefault('keywords', keys)
def start():
""" 入口函数 """
keys = globals().get('keywords')
if not keys:
log.info('no keywords available')
return
for key in keys:
log.info('开始搜索关键字[%s]' % key)
companies = TycSearchApi.search(key)
log.info('开始解析')
TycDataBuilder.build_info4save(companies)
log.info('数据已保存')
log.info('结束')
class TycSearchApi:
@staticmethod
def search(key: str):
"""
根据关键字搜索相关企业信息
:param key: 关键字
:return:
"""
payload = {
"pageNum": 1,
"pageSize": 20,
"sortType": 0
}
url = SEARCH_API + "/" + url_encoder.quote(key)
http_result = httpclient.get(url=url, params=payload, headers=REQUEST_HEADERS)
time.sleep(2)
ok, message, code = http_result.ok, http_result.reason, http_result.status_code
if not ok or code != 200:
log.error('%s-%s-%s' %
(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), code, message))
return None
try:
            api_result = http_result.json()  # API response payload
except RuntimeError as error:
log.error('unboxing error, error: %s' % error)
return None
api_message, api_state = api_result.get('message'), api_result.get('state')
if api_state != 'ok':
log.error('[tyc]api error, %s-%s' % (api_state, api_message))
return None
        companies = api_result.get('data').get('companyList')  # JSON array of company search results
return companies
@staticmethod
def search_detail(company_id: int):
url = DETAIL_API + "/" + str(company_id)
http_result = httpclient.get(url=url, params=None, headers=REQUEST_HEADERS)
time.sleep(2)
ok, message, code = http_result.ok, http_result.reason, http_result.status_code
        if not ok or code != 200:
            log.error('%s-%s-%s' %
                      (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), code, message))
            return None
try:
            api_result = http_result.json()  # API response payload
except RuntimeError as error:
log.error('unboxing error, error: %s' % error)
return None
api_message, api_state = api_result.get('message'), api_result.get('state')
if api_state != 'ok':
log.error('[tyc]api error, %s-%s' % (api_state, api_message))
return None
company_detail = api_result.get('data')
return company_detail
class TycDataBuilder:
@classmethod
def build_info4save(cls, companies: list):
"""
解析api数据,解析一条保存一条,尽可能降低性能缓解被爬服务器压力
@ps 优化点
:param companies:
:return: 目标对象列表
"""
if not companies:
log.info('no companies available')
return
target = dict()
for src in companies:
target = cls.copy_properties(src, target)
log.info(target)
# mydb.insert(enterprise)
time.sleep(0.5)
target.clear()
@classmethod
def copy_properties(cls, source: dict, target: dict):
""" 构建存储对象 """
target['name'] = cls.get_company_name(source)
target['representative'] = cls.get_representative(source)
target['address'] = cls.get_address(source)
target['region'] = cls.get_region(source)
target['city'] = cls.get_city(source)
target['district'] = cls.get_district(source)
target['biz_status'] = cls.get_biz_status(source)
target['credit_code'] = cls.get_credit_code(source)
target['email'] = cls.get_email(source)
target['phone'] = cls.get_work_phone(source)
target['biz_scope'] = cls.get_biz_scope(source)
target['company_type'] = cls.get_company_type(source)
target['taxpayer_code'] = cls.get_taxpayer_code(source)
target['registered_capital'] = cls.get_registered_capital(source)
target['lat_long'] = cls.get_lat_long(source)
target['setup_time'] = cls.get_setup_time(source)
company_id = source.get('id')
detail = TycSearchApi.search_detail(company_id)
target['homepage'] = cls.get_homepage(detail)
target['register_code'] = cls.get_register_code(detail)
target['organization_code'] = cls.get_organization_code(detail)
target['english_name'] = cls.get_company_english(detail)
target['authorization'] = cls.get_register_organization(detail)
target['actual_capital'] = cls.get_real_capital(detail)
target['industry'] = cls.get_industry(detail)
target['used_name'] = cls.get_company_used_name(detail)
return target
@classmethod
def get_company_name(cls, company: dict) -> str:
name = company.get('name')
if not name:
return '-'
name = name.replace('<em>', '').replace('</em>', '')
return name.strip() if name else '-'
@classmethod
def get_representative(cls, company: dict) -> str:
representative = company.get('legalPersonName')
if not representative:
return '-'
representative = representative.replace('<em>', '').replace('</em>', '')
return representative.strip() if representative else '-'
@classmethod
def get_region(cls, company: dict) -> str:
region = company.get('base')
return region.strip() if region else '-'
@classmethod
def get_city(cls, company: dict) -> str:
city = company.get('city')
return city.strip() if city else '-'
@classmethod
def get_district(cls, company: dict) -> str:
district = company.get('district')
return district.strip() if district else '-'
@classmethod
def get_email(cls, company: dict) -> str:
emails = company.get('emails')
if not emails:
return '-'
email = emails.split(';')[0]
email = email.replace('\t', '')
return email.strip() if email else '-'
@classmethod
def get_work_phone(cls, company: dict) -> str:
phone = company.get('phoneNum')
return phone.strip() if phone else '-'
@classmethod
def get_address(cls, company: dict) -> str:
address = company.get('regLocation')
return address.strip() if address else '-'
@classmethod
def get_biz_status(cls, company: dict) -> str:
status = company.get('regStatus')
return status.strip() if status else '-'
@classmethod
def get_credit_code(cls, company: dict) -> str:
credit_code = company.get('creditCode')
return credit_code.strip() if credit_code else '-'
@classmethod
def get_register_code(cls, company: dict) -> str:
if not company:
return '-'
reg_code = company.get('regNumber')
return reg_code.strip() if reg_code else '-'
@classmethod
def get_biz_scope(cls, company: dict) -> str:
biz_scope = company.get('businessScope')
return biz_scope.strip() if biz_scope else '-'
@classmethod
def get_company_type(cls, company: dict) -> str:
company_type = company.get('companyOrgType')
return company_type.replace('\t', '').strip() if company_type else '-'
@classmethod
def get_taxpayer_code(cls, company: dict) -> str:
credit_code = company.get('creditCode')
return credit_code.strip() if credit_code else '-'
@classmethod
def get_organization_code(cls, company: dict) -> str:
if not company:
return '-'
org_code = company.get('orgNumber')
return org_code.strip() if org_code else '-'
@classmethod
def get_company_english(cls, company: dict) -> str:
if not company:
return '-'
english_name = company.get('property3')
return english_name.strip() if english_name else '-'
@classmethod
def get_register_organization(cls, company: dict) -> str:
if not company:
return '-'
reg_organ = company.get('regInstitute')
return reg_organ.strip() if reg_organ else '-'
@classmethod
def get_registered_capital(cls, company: dict) -> str:
reg_capital = company.get('regCapital')
return reg_capital.strip() if reg_capital else '-'
@classmethod
def get_homepage(cls, company: dict) -> str:
if not company:
return '-'
homepage = company.get('websiteList')
return homepage.strip() if homepage else '-'
@classmethod
def get_real_capital(cls, company: dict) -> str:
if not company:
return '-'
actual_capital = company.get('actualCapital')
return actual_capital.strip() if actual_capital else '-'
@classmethod
def get_industry(cls, company: dict) -> str:
if not company:
return '-'
industry = company.get('industry')
return industry.strip() if industry else '-'
@classmethod
def get_company_used_name(cls, company: dict) -> str:
if not company:
return '-'
used_name = company.get('historyNames')
return used_name if used_name else '-'
@classmethod
def get_setup_time(cls, company: dict) -> str:
setup_time = company.get('estiblishTime')
if not setup_time:
return '-'
setup_time = setup_time[0:10]
return setup_time if setup_time else '-'
@classmethod
def get_lat_long(cls, company: dict) -> str:
lat = company.get('latitude')
long = company.get('longitude')
temp = {
'lat': lat if lat else '-',
'long': long if long else '-'
}
return str(temp)
|
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import mean_absolute_error as mae
from model import *
def predict_time_to_erupt(seg_df):
seg_df = seg_df.fillna(0)
each_row = []
for each_column in seg_df.columns:
each_row.append(seg_df[each_column].std())
each_row.append(seg_df[each_column].min())
each_row.append(seg_df[each_column].max())
each_row.append(seg_df[each_column].quantile(.3))
each_row.append(seg_df[each_column].quantile(.6))
each_row.append(seg_df[each_column].quantile(.8))
each_row.append(seg_df[each_column].quantile(.9))
each_row.append(seg_df[each_column].kurt())
features = np.array(each_row).reshape(1,-1)
features = np.nan_to_num(features)
with open('custEnsemblexgb.pkl', 'rb') as f:
best_estimator = pickle.load(f)
preds = best_estimator.predict(features)
return preds[0]
def return_mae(seg_df, y):
seg_df = seg_df.fillna(0)
each_row = []
for each_column in seg_df.columns:
each_row.append(seg_df[each_column].std())
each_row.append(seg_df[each_column].min())
each_row.append(seg_df[each_column].max())
each_row.append(seg_df[each_column].quantile(.3))
each_row.append(seg_df[each_column].quantile(.6))
each_row.append(seg_df[each_column].quantile(.8))
each_row.append(seg_df[each_column].quantile(.9))
each_row.append(seg_df[each_column].kurt())
features = np.array(each_row).reshape(1,-1)
features = np.nan_to_num(features)
with open('custEnsemblexgb.pkl', 'rb') as f:
best_estimator = pickle.load(f)
preds = best_estimator.predict(features)
return mae(preds[0], y)
|
class NumArray:
def __init__(self, nums: [int]):
self.nums = nums
def update(self, i: int, val: int) -> None:
self.nums[i] = val
def sumRange(self, i: int, j: int) -> int:
return sum(self.nums[i: j + 1])
# Your NumArray object will be instantiated and called as such:
obj = NumArray([1, 3, 5])
print(obj.sumRange(0, 2))
obj.update(1, 2)
print(obj.sumRange(0, 2))
# obj.update(i,val)
# param_2 = obj.sumRange(i,j)
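# A minimal sketch (an assumed alternative, not part of the original solution) of
# a Fenwick / binary indexed tree variant with the same interface, trading the
# O(1) update / O(n) sumRange above for O(log n) on both operations.
class NumArrayBIT:
    def __init__(self, nums: [int]):
        self.n = len(nums)
        self.nums = [0] * self.n
        self.tree = [0] * (self.n + 1)  # 1-indexed Fenwick tree of prefix sums
        for i, val in enumerate(nums):
            self.update(i, val)
    def update(self, i: int, val: int) -> None:
        delta = val - self.nums[i]
        self.nums[i] = val
        j = i + 1
        while j <= self.n:
            self.tree[j] += delta
            j += j & (-j)
    def _prefix(self, i: int) -> int:
        # Sum of nums[0..i-1].
        total = 0
        while i > 0:
            total += self.tree[i]
            i -= i & (-i)
        return total
    def sumRange(self, i: int, j: int) -> int:
        return self._prefix(j + 1) - self._prefix(i)
# Usage mirrors NumArray: NumArrayBIT([1, 3, 5]).sumRange(0, 2) -> 9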
|
# Copyright 2019 Robert Bosch GmbH
# Copyright 2020 Christophe Bedard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for trace events processor and ROS 2 model creation."""
from typing import Dict
from typing import Set
from typing import Tuple
from tracetools_read import get_field
from . import EventHandler
from . import EventMetadata
from . import HandlerMap
from ..data_model.ros2 import Ros2DataModel
class Ros2Handler(EventHandler):
"""
ROS 2-aware event handling class implementation.
Handles a trace's events and builds a model with the data.
"""
def __init__(
self,
**kwargs,
) -> None:
"""Create a Ros2Handler."""
# Link a ROS trace event to its corresponding handling method
handler_map: HandlerMap = {
'ros2:rcl_init':
self._handle_rcl_init,
'ros2:rcl_node_init':
self._handle_rcl_node_init,
'ros2:rmw_publisher_init':
self._handle_rmw_publisher_init,
'ros2:rcl_publisher_init':
self._handle_rcl_publisher_init,
'ros2:rclcpp_publish':
self._handle_rclcpp_publish,
'ros2:rcl_publish':
self._handle_rcl_publish,
'ros2:rmw_publish':
self._handle_rmw_publish,
'ros2:rmw_subscription_init':
self._handle_rmw_subscription_init,
'ros2:rcl_subscription_init':
self._handle_rcl_subscription_init,
'ros2:rclcpp_subscription_init':
self._handle_rclcpp_subscription_init,
'ros2:rclcpp_subscription_callback_added':
self._handle_rclcpp_subscription_callback_added,
'ros2:rmw_take':
self._handle_rmw_take,
'ros2:rcl_take':
self._handle_rcl_take,
'ros2:rclcpp_take':
self._handle_rclcpp_take,
'ros2:rcl_service_init':
self._handle_rcl_service_init,
'ros2:rclcpp_service_callback_added':
self._handle_rclcpp_service_callback_added,
'ros2:rcl_client_init':
self._handle_rcl_client_init,
'ros2:rcl_timer_init':
self._handle_rcl_timer_init,
'ros2:rclcpp_timer_callback_added':
self._handle_rclcpp_timer_callback_added,
'ros2:rclcpp_timer_link_node':
self._handle_rclcpp_timer_link_node,
'ros2:rclcpp_callback_register':
self._handle_rclcpp_callback_register,
'ros2:callback_start':
self._handle_callback_start,
'ros2:callback_end':
self._handle_callback_end,
'ros2:rcl_lifecycle_state_machine_init':
self._handle_rcl_lifecycle_state_machine_init,
'ros2:rcl_lifecycle_transition':
self._handle_rcl_lifecycle_transition,
}
super().__init__(
handler_map=handler_map,
data_model=Ros2DataModel(),
**kwargs,
)
# Temporary buffers
self._callback_instances: Dict[int, Tuple[Dict, EventMetadata]] = {}
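        # Maps a callback object address (from ros2:callback_start) to its
        # (event, metadata) pair until the matching ros2:callback_end arrives.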
@staticmethod
def required_events() -> Set[str]:
return {
'ros2:rcl_init',
}
@property
def data(self) -> Ros2DataModel:
return super().data # type: ignore
def _handle_rcl_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
context_handle = get_field(event, 'context_handle')
timestamp = metadata.timestamp
pid = metadata.pid
version = get_field(event, 'version')
self.data.add_context(context_handle, timestamp, pid, version)
def _handle_rcl_node_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'node_handle')
timestamp = metadata.timestamp
tid = metadata.tid
rmw_handle = get_field(event, 'rmw_handle')
name = get_field(event, 'node_name')
namespace = get_field(event, 'namespace')
self.data.add_node(handle, timestamp, tid, rmw_handle, name, namespace)
def _handle_rmw_publisher_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'rmw_publisher_handle')
timestamp = metadata.timestamp
gid = get_field(event, 'gid')
self.data.add_rmw_publisher(handle, timestamp, gid)
def _handle_rcl_publisher_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'publisher_handle')
timestamp = metadata.timestamp
node_handle = get_field(event, 'node_handle')
rmw_handle = get_field(event, 'rmw_publisher_handle')
topic_name = get_field(event, 'topic_name')
depth = get_field(event, 'queue_depth')
self.data.add_rcl_publisher(handle, timestamp, node_handle, rmw_handle, topic_name, depth)
def _handle_rclcpp_publish(
self, event: Dict, metadata: EventMetadata,
) -> None:
timestamp = metadata.timestamp
message = get_field(event, 'message')
self.data.add_rclcpp_publish_instance(timestamp, message)
def _handle_rcl_publish(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'publisher_handle')
timestamp = metadata.timestamp
message = get_field(event, 'message')
self.data.add_rcl_publish_instance(handle, timestamp, message)
def _handle_rmw_publish(
self, event: Dict, metadata: EventMetadata,
) -> None:
timestamp = metadata.timestamp
message = get_field(event, 'message')
self.data.add_rmw_publish_instance(timestamp, message)
def _handle_rmw_subscription_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'rmw_subscription_handle')
timestamp = metadata.timestamp
gid = get_field(event, 'gid')
self.data.add_rmw_subscription(handle, timestamp, gid)
def _handle_rcl_subscription_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'subscription_handle')
timestamp = metadata.timestamp
node_handle = get_field(event, 'node_handle')
rmw_handle = get_field(event, 'rmw_subscription_handle')
topic_name = get_field(event, 'topic_name')
depth = get_field(event, 'queue_depth')
self.data.add_rcl_subscription(
handle, timestamp, node_handle, rmw_handle, topic_name, depth,
)
def _handle_rclcpp_subscription_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
subscription_pointer = get_field(event, 'subscription')
timestamp = metadata.timestamp
handle = get_field(event, 'subscription_handle')
self.data.add_rclcpp_subscription(subscription_pointer, timestamp, handle)
def _handle_rclcpp_subscription_callback_added(
self, event: Dict, metadata: EventMetadata,
) -> None:
subscription_pointer = get_field(event, 'subscription')
timestamp = metadata.timestamp
callback_object = get_field(event, 'callback')
self.data.add_callback_object(subscription_pointer, timestamp, callback_object)
def _handle_rmw_take(
self, event: Dict, metadata: EventMetadata,
) -> None:
subscription_handle = get_field(event, 'rmw_subscription_handle')
timestamp = metadata.timestamp
message = get_field(event, 'message')
source_timestamp = get_field(event, 'source_timestamp')
taken = bool(get_field(event, 'taken'))
self.data.add_rmw_take_instance(
subscription_handle, timestamp, message, source_timestamp, taken
)
def _handle_rcl_take(
self, event: Dict, metadata: EventMetadata,
) -> None:
timestamp = metadata.timestamp
message = get_field(event, 'message')
self.data.add_rcl_take_instance(timestamp, message)
def _handle_rclcpp_take(
self, event: Dict, metadata: EventMetadata,
) -> None:
timestamp = metadata.timestamp
message = get_field(event, 'message')
self.data.add_rclcpp_take_instance(timestamp, message)
def _handle_rcl_service_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'service_handle')
timestamp = metadata.timestamp
node_handle = get_field(event, 'node_handle')
rmw_handle = get_field(event, 'rmw_service_handle')
service_name = get_field(event, 'service_name')
self.data.add_service(handle, timestamp, node_handle, rmw_handle, service_name)
def _handle_rclcpp_service_callback_added(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'service_handle')
timestamp = metadata.timestamp
callback_object = get_field(event, 'callback')
self.data.add_callback_object(handle, timestamp, callback_object)
def _handle_rcl_client_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'client_handle')
timestamp = metadata.timestamp
node_handle = get_field(event, 'node_handle')
rmw_handle = get_field(event, 'rmw_client_handle')
service_name = get_field(event, 'service_name')
self.data.add_client(handle, timestamp, node_handle, rmw_handle, service_name)
def _handle_rcl_timer_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'timer_handle')
timestamp = metadata.timestamp
period = get_field(event, 'period')
tid = metadata.tid
self.data.add_timer(handle, timestamp, period, tid)
def _handle_rclcpp_timer_callback_added(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'timer_handle')
timestamp = metadata.timestamp
callback_object = get_field(event, 'callback')
self.data.add_callback_object(handle, timestamp, callback_object)
def _handle_rclcpp_timer_link_node(
self, event: Dict, metadata: EventMetadata,
) -> None:
handle = get_field(event, 'timer_handle')
timestamp = metadata.timestamp
node_handle = get_field(event, 'node_handle')
self.data.add_timer_node_link(handle, timestamp, node_handle)
def _handle_rclcpp_callback_register(
self, event: Dict, metadata: EventMetadata,
) -> None:
callback_object = get_field(event, 'callback')
timestamp = metadata.timestamp
symbol = get_field(event, 'symbol')
self.data.add_callback_symbol(callback_object, timestamp, symbol)
def _handle_callback_start(
self, event: Dict, metadata: EventMetadata,
) -> None:
# Add to dict
callback_addr = get_field(event, 'callback')
self._callback_instances[callback_addr] = (event, metadata)
def _handle_callback_end(
self, event: Dict, metadata: EventMetadata,
) -> None:
# Fetch from dict
callback_object = get_field(event, 'callback')
callback_instance_data = self._callback_instances.get(callback_object)
if callback_instance_data is not None:
(event_start, metadata_start) = callback_instance_data
del self._callback_instances[callback_object]
duration = metadata.timestamp - metadata_start.timestamp
is_intra_process = get_field(event_start, 'is_intra_process', raise_if_not_found=False)
self.data.add_callback_instance(
callback_object,
metadata_start.timestamp,
duration,
bool(is_intra_process))
else:
print(f'No matching callback start for callback object "{callback_object}"')
def _handle_rcl_lifecycle_state_machine_init(
self, event: Dict, metadata: EventMetadata,
) -> None:
node_handle = get_field(event, 'node_handle')
state_machine = get_field(event, 'state_machine')
self.data.add_lifecycle_state_machine(state_machine, node_handle)
def _handle_rcl_lifecycle_transition(
self, event: Dict, metadata: EventMetadata,
) -> None:
timestamp = metadata.timestamp
state_machine = get_field(event, 'state_machine')
start_label = get_field(event, 'start_label')
goal_label = get_field(event, 'goal_label')
self.data.add_lifecycle_state_transition(state_machine, start_label, goal_label, timestamp)
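# Illustrative sketch (hypothetical values, separate from the handlers above): the
# callback_start / callback_end handlers pair events through a plain dict keyed by the
# callback address, and the callback duration is the difference between the two timestamps.
#
#   pending = {}
#   pending[0xbeef] = 100                # callback_start observed at t=100
#   start = pending.pop(0xbeef, None)    # matching callback_end observed at t=250
#   duration = 250 - start if start is not None else None   # -> 150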
|
import os
import argparse
import json
from data_support.tfrecord_wrapper import TFRecordReader
from network import Network
import constants
if __name__ == "__main__":
parser = argparse.ArgumentParser()
#
parser.add_argument("parent_dir", type=str, default="../experiments", help="Parent experiment directory")
parser.add_argument("data_dir", type=str, default='../resources/tf_data',
help="Directory where tfrecord files are stored")
parser.add_argument("--json-data", type=str, default=None, help="JSON with conllu and languages for training")
parser.add_argument("--languages", nargs='*', default=['en'], type=str,
help="Languages to probe.")
parser.add_argument("--tasks", nargs='*', type=str,
help="Probing tasks (distance, lex-distance, depth or lex-depth)")
# Probe arguments
parser.add_argument("--probe-rank", default=None, type=int, help="Rank of the probe")
parser.add_argument("--no-ortho-probe", action="store_true", help="Resign from ortho probe (store false)")
parser.add_argument("--only-sv", action="store_true",
help="Probe with only Scaling Vector, this option will automatically diable Orthogonal Transformation")
parser.add_argument("--with-sv", action="store_true",
help="Probe with Scaling Vector, even without orthogonal constraint")
parser.add_argument("--layer-index", default=6, type=int, help="Index of BERT's layer to probe."
"If -1 all layers embeddings are averaged")
parser.add_argument("--norm", default='euclidean', type=str, help="Distance/Depth Norm calculation. Available options same as ord attribute for tf.norm ['euclidean, 1, 2, 3, tf.inf']")
# Train arguments
parser.add_argument("--seed", default=42, type=int, help="Seed for variable initialisation")
parser.add_argument("--batch-size", default=20, type=int, help="Batch size")
parser.add_argument("--epochs", default=40, type=int, help="Maximal number of training epochs")
parser.add_argument("--learning-rate", default=0.001, type=float, help="Initial learning rate")
parser.add_argument("--ortho", default=None, type=float,
help="Orthogonality reguralization (SRIP) for language map matrices.")
parser.add_argument("--l1", default=None, type=float, help="L1 reguralization of the weights.")
parser.add_argument("--clip-norm", default=None, type=float, help="Clip gradient norm to this value")
parser.add_argument("--subsample-train", default=None, type=int,
help="Size of subsample taken from a training set.")
parser.add_argument("--zs-dep-languages", nargs='*', default=[], type=str,
help="List of languages to disregard in dependency probing training (to evaluate 0 shot capability).")
parser.add_argument("--fs-dep-languages", nargs='*', default=[], type=str, help="List of few shot languages.")
parser.add_argument("--fewshot-size", default=10, type=int,
help="Number of trainining sentences for few shot languages")
# Specify Bert Model
parser.add_argument("--model",
default=f"bert-{constants.SIZE_BASE}-{constants.LANGUAGE_MULTILINGUAL}-{constants.CASING_CASED}",
help="Transformer model name (see: https://huggingface.co/transformers/pretrained_models.html)")
args = parser.parse_args()
args.ml_probe = not args.no_ortho_probe
if args.only_sv:
args.ml_probe = False
if not args.probe_rank:
args.probe_rank = constants.MODEL_DIMS[args.model]
do_lower_case = (constants.CASING_UNCASED in args.model)
if args.seed == 42:
experiment_name = f"task_{'_'.join(args.tasks)}-layer_{args.layer_index}-trainl_{'_'.join(args.languages)}"
else:
experiment_name = f"task_{'_'.join(args.tasks)}-layer_{args.layer_index}-trainl_{'_'.join(args.languages)}-seed_{args.seed}"
args.out_dir = os.path.join(args.parent_dir, experiment_name)
if not os.path.exists(args.out_dir):
os.mkdir(args.out_dir)
tf_reader = TFRecordReader(args.data_dir, args.model)
tf_reader.read(args.tasks, args.languages)
network = Network(args)
network.train(tf_reader, args)
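# Hypothetical invocation of this script (the script name and paths are placeholders,
# not taken from the original repository):
#   python probe.py ../experiments ../resources/tf_data \
#       --tasks distance depth --languages en de --layer-index 6
# With the default seed (42) the experiment directory created under the parent
# directory would be named "task_distance_depth-layer_6-trainl_en_de".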
|
from macropy.core.macros import *
from macropy.core.quotes import macros, ast, u
from ast import *
@Walker
def toParam(tree, **kw):
if isinstance(tree, Store):
return Param()
@Walker
def toLoad(tree, **kw):
if isinstance(tree, Store):
return Load()
def storeToParam(node):
return toParam.recurse(node)
def storeToLoad(node):
return toLoad.recurse(node)
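# Usage sketch for the helpers above (requires MacroPy; the variable name `demo` is a
# placeholder introduced only for illustration). The walkers rewrite every Store
# expression context in an AST, e.g. turning assignment targets into loadable expressions:
#
#   demo = parse("x = 1")
#   demo = storeToLoad(demo)   # the Name node for `x` now carries a Load context
#   print(dump(demo))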
|
from app import App, logger, DEFAULT_CARD_NAME_LEN
from telegram import ReplyKeyboardMarkup
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
RegexHandler,
ConversationHandler,
)
from config import TG_TOKEN_IDEAS, PROJECT_NAME_COLLECTOR
from trello import Trello
import re
_CONV_STATE_CHOOSE_LIST = 101
app = App(PROJECT_NAME_COLLECTOR)
def extract_commands_from_text(message):
if message is None:
return None, None, message
extract_commands_regex = r'((in #([^\s]+))$|(as \*([^\s]+))$)|(in #([^\s]+) as *([^\s]+))$'
command = ""
for finding in re.findall(extract_commands_regex, message):
if isinstance(finding, str):
if finding not in ('', '_', ' '):
command = finding
break
elif isinstance(finding, tuple):
for f in finding:
if f not in ('', '_', ' '):
command = f
break
if command == "":
return None, None, message
message = message.split(command)[0]
get_list_name_regex = r'in #([^\s]+)'
findings = []
for finding in re.findall(get_list_name_regex, command):
if isinstance(finding, str):
if finding not in ('', '_', ' '):
findings.append(finding)
elif isinstance(finding, tuple):
for f in finding:
if f not in ('', '_', ' '):
findings.append(f)
if len(findings) == 0:
chosen_list_name = None
else:
chosen_list_name = findings[-1]
get_card_name_regex = r'as \*([^\s]+)'
findings = []
for finding in re.findall(get_card_name_regex, command):
if isinstance(finding, str):
if finding not in ('', ' '):
findings.append(finding)
elif isinstance(finding, tuple):
for f in finding:
if f not in ('', '_', ' '):
findings.append(f)
if len(findings) == 0:
chosen_card_name = message[:DEFAULT_CARD_NAME_LEN]
if chosen_card_name == '':
chosen_card_name = None
else:
chosen_card_name = findings[-1].strip()
return chosen_list_name, chosen_card_name, message.strip()
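# Illustration of the parsing above: a call such as
#   extract_commands_from_text("buy milk in #groceries as *milk")
# returns ("groceries", "milk", "buy milk") -- the trailing command is stripped from the
# message, the list name comes from the "in #..." part and the card name from the
# "as *..." part.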
def start(bot, update):
logger.info("Got /start or /help")
update.message.reply_text(
"""
Hi there!\n{}
You can use the shortcut mode in this way:\n
- anything in #list_name as *card_name
- anything in #list_name
- anything as *card_name
""".format(
"First, using /setup you should authenticate your Trello account.\n"
if update.message.from_user.id not in app._USER_SETUPS
else ""
)
)
return ConversationHandler.END
def process_shortcut_mode(bot, update):
if not app.is_user_setup(update):
update.message.reply_text("You are not authenticated yet. Use /setup please.")
return
chosen_list_name, chosen_card_name, content = extract_commands_from_text(update.message.text)
content_type = 'text'
if len(update.message.entities) > 0:
if update.message.entities[0].type == 'url':
content_type = 'url'
kwargs = {
'content': content,
'content_type': content_type,
'card_name': chosen_card_name,
'update': update,
}
if chosen_list_name is None:
inbox_list_id = app._USER_SETUPS[app.get_tg_id(update)]['inbox_list_id']
if inbox_list_id:
kwargs['list_id'] = inbox_list_id
logger.info("I will insert the file {} in the list {} with the name {}.".format(
content, inbox_list_id, chosen_card_name
))
else:
update.message.reply_text("No default list provided! "
"Re-run the setup with at least a list please.")
else:
kwargs['list_name'] = chosen_list_name
logger.info("I will insert the file {} in the list {} with the name {}.".format(
content, chosen_list_name, chosen_card_name
))
app.append_card(**kwargs)
def process_anything_text(bot, update, user_data):
if not app.is_user_setup(update):
update.message.reply_text("You are not authenticated yet. Use /setup please.")
user_data.clear()
return
content = update.message.text
content_type = 'text'
if len(update.message.entities) > 0:
if update.message.entities[0].type == 'url':
content_type = 'url'
user_data['_content'] = content
user_data['_content_type'] = content_type
user_data['_card_name'] = str(content)[:DEFAULT_CARD_NAME_LEN] if content_type != 'url' else content
trello = Trello(app._USER_SETUPS[app.get_tg_id(update)]['trello_token'])
board_lists = trello.get_board_lists(app._USER_SETUPS[app.get_tg_id(update)]['board_id'])
if board_lists is None:
board_lists = {}
update.message.reply_text(
"Where do you want to save it?",
reply_markup=ReplyKeyboardMarkup(
[
[
'#{list_name}'.format(list_name=l['name']) for k, l in board_lists.items()
],
['/cancel']
],
one_time_keyboard=True,
),
)
return _CONV_STATE_CHOOSE_LIST
def process_trello_list_conv(bot, update, user_data):
if not app.is_user_setup(update):
update.message.reply_text("You are not authenticated yet. Use /setup please.")
user_data.clear()
return
content = user_data['_content']
content_type = user_data['_content_type']
choice = update.message.text
if choice == ".":
chosen_list_name = None
chosen_card_name = user_data['_card_name']
else:
if choice[0] == '#':
choice = 'in ' + choice
chosen_list_name, chosen_card_name, _ = extract_commands_from_text(choice)
if chosen_card_name is None:
chosen_card_name = user_data['_card_name']
kwargs = {
'content': content,
'content_type': content_type,
'card_name': chosen_card_name,
'update': update,
}
if chosen_list_name is None:
inbox_list_id = app._USER_SETUPS[app.get_tg_id(update)]['inbox_list_id']
if inbox_list_id:
kwargs['list_id'] = inbox_list_id
logger.info("I will insert the file {} in the list {} with the name {}.".format(
content, inbox_list_id, chosen_card_name
))
else:
update.message.reply_text("No default list provided! "
"Re-run the setup with at least a list please.")
else:
kwargs['list_name'] = chosen_list_name
logger.info("I will insert the file {} in the list {} with the name {}.".format(
content, chosen_list_name, chosen_card_name
))
app.append_card(**kwargs)
return ConversationHandler.END
def process_wrong_trello_list_conv(bot, update, user_data):
update.message.reply_text("Your choice is not valid. Please restart.")
user_data.clear()
return ConversationHandler.END
def process_anything_file(bot, update):
if not app.is_user_setup(update):
update.message.reply_text("You are not authenticated yet. Use /setup please.")
return
if len(update.message.photo) > 0:
photo = update.message.photo[-1]
file_id = photo.file_id
content = photo.get_file().file_path
content_type = 'image'
else:
file = update.message.document
file_id = file.file_name
# mime_type = file.mime_type
content = file.get_file().file_path
content_type = 'document'
command = update.message.caption
chosen_list_name, chosen_card_name = None, None
if command is not None:
command = ('in ' + command) if command[0] == "#" else command
chosen_list_name, chosen_card_name, _ = extract_commands_from_text(command)
if chosen_card_name is None:
chosen_card_name = file_id[:10]
kwargs = {
'content': content,
'content_type': content_type,
'card_name': chosen_card_name,
'update': update,
}
if chosen_list_name is None:
inbox_list_id = app._USER_SETUPS[app.get_tg_id(update)]['inbox_list_id']
if inbox_list_id:
kwargs['list_id'] = inbox_list_id
logger.info("I will insert the file {} in the list {} with the name {}.".format(
content, inbox_list_id, chosen_card_name
))
else:
update.message.reply_text("No default list provided! "
"Re-run the setup with at least a list please.")
else:
kwargs['list_name'] = chosen_list_name
logger.info("I will insert the file {} in the list {} with the name {}.".format(
content, chosen_list_name, chosen_card_name
))
app.append_card(**kwargs)
app.load_users()
# Telegram messages handler
updater = Updater(token=TG_TOKEN_IDEAS)
# Get the dispatcher to register handlers
dp = updater.dispatcher
debug = False
if debug:
dp.add_handler(MessageHandler(Filters.text, lambda b, update: print(update.message.text)))
dp.add_handler(MessageHandler(Filters.photo, lambda b, update: print(update.message.text)))
else:
dp.add_handler(CommandHandler("status", app.status))
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", start))
dp.add_handler(app.get_setup_handler())
dp.add_handler(RegexHandler(r'^(.*)(( in #((_)?)([^\s]+))|( as \*([^\s]+)))$',
process_shortcut_mode))
dp.add_handler(ConversationHandler(
entry_points=[MessageHandler(Filters.text,
process_anything_text,
pass_user_data=True)],
states={
_CONV_STATE_CHOOSE_LIST: [RegexHandler(r'(^(in )?((#((_)?)([^\s]+))( as \*([^\s]+))?)$)|(^\.$)',
process_trello_list_conv,
pass_user_data=True),
MessageHandler(Filters.text,
process_wrong_trello_list_conv,
pass_user_data=True)]
},
fallbacks=[
CommandHandler("cancel", app.cancel_conv, pass_user_data=True),
]
)
)
dp.add_handler(MessageHandler(Filters.photo,
process_anything_file))
dp.add_handler(MessageHandler(Filters.document,
process_anything_file))
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
logger.info("Bot is idle, listening")
updater.idle()
|
from . import init_profile_commen
import itertools
from opensearchpy.helpers import scan as os_scan
import time
from loguru import logger
OPEN_SEARCH_GITHUB_PROFILE_INDEX = "github_profile"
def load_github_profile_issues_timeline(github_tokens, opensearch_conn_infos, owner, repo):
"""Get GitHub user's profile from GitHub issues timeline and put it into opensearch if it is not in opensearch."""
github_tokens_iter = itertools.cycle(github_tokens)
opensearch_client = init_profile_commen.get_opensearch_client(opensearch_conn_infos)
# Query all GitHub issues records for this owner+repo, used to extract each issue's user
res = os_scan(client=opensearch_client, index='github_issues_timeline',
query={
"track_total_hits": True,
"query": {
"bool": {"must": [
{"term": {
"search_key.owner.keyword": {
"value": owner
}
}},
{"term": {
"search_key.repo.keyword": {
"value": repo
}
}}
]}
},
"size": 10
}, doc_type='_doc', timeout='10m')
print("========================20211225test=======================")
if res is None:
logger.info(f"There's no github issues' timeline in {repo}")
else:
all_issues_timeline_users = set([])
for issue_timeline in res:
issue_timeline_raw_data = issue_timeline["_source"]["raw_data"]
if issue_timeline_raw_data["event"] != "committed":
if "user" in issue_timeline_raw_data:
issue_timeline_user_login = issue_timeline_raw_data["user"]["login"]
all_issues_timeline_users.add(issue_timeline_user_login)
else:
issue_timeline_user_login = issue_timeline_raw_data["actor"]["login"]
all_issues_timeline_users.add(issue_timeline_user_login)
# Fetch the GitHub profiles
# init_profile_commen.put_profile_into_opensearch(opensearch_client, all_issues_timeline_users,
# OPEN_SEARCH_GITHUB_PROFILE_INDEX, github_tokens_iter,
# opensearch_conn_infos)
logger.info(load_github_profile_issues_timeline.__doc__)
return all_issues_timeline_users
|
import MNN
import torch
import copy
F = MNN.expr
def transform_mnn_to_tensor(mnn_path):
model_path = mnn_path
var_map = F.load_as_dict(model_path)
input_dicts, output_dicts = F.get_inputs_and_outputs(var_map)
input_names = [n for n in input_dicts.keys()]
output_names = [n for n in output_dicts.keys()]
input_vars = [input_dicts[n] for n in input_names]
output_vars = [output_dicts[n] for n in output_names]
module = MNN.nn.load_module(input_vars, output_vars, False)
tensor_params_dict = {}
for idx_layer in range(len(module.parameters)):
module.parameters[idx_layer].fix_as_const()
mnn_layer_weights_np_arr = copy.deepcopy(module.parameters[idx_layer].read())
tensor_params_dict[idx_layer] = torch.from_numpy(mnn_layer_weights_np_arr).detach()
return tensor_params_dict
def transform_tensor_to_mnn(mnn_path, tensor_dict, mnn_save_path):
model_path = mnn_path
var_map = F.load_as_dict(model_path)
input_dicts, output_dicts = F.get_inputs_and_outputs(var_map)
input_names = [n for n in input_dicts.keys()]
output_names = [n for n in output_dicts.keys()]
input_vars = [input_dicts[n] for n in input_names]
output_vars = [output_dicts[n] for n in output_names]
module = MNN.nn.load_module(input_vars, output_vars, False)
input_shape = F.shape(input_vars[0])
mnn_params_list = []
for idx_layer in range(len(tensor_dict)):
pt_layer_weights_np_arr = tensor_dict[idx_layer].numpy()
tmp = F.const(pt_layer_weights_np_arr, list(pt_layer_weights_np_arr.shape))
tmp.fix_as_trainable()
mnn_params_list.append(tmp)
module.load_parameters(mnn_params_list)
predict = module.forward(F.placeholder(input_shape.read(), F.NCHW))
F.save([predict], mnn_save_path)
if __name__ == "__main__":
mnn_path = "../model_test/lenet_cifar10.mnn"
save_path = "../model_test/agg.mnn"
save_an_path = "../model_test/test.mnn"
a = transform_mnn_to_tensor(mnn_path)
print(a[3])
transform_tensor_to_mnn(mnn_path, a, save_path)
c = transform_mnn_to_tensor(save_path)
print(c[3])
transform_tensor_to_mnn(mnn_path, c, save_an_path)
b = transform_mnn_to_tensor(save_an_path)
print(b[3])
|
from lshash import LSHash
lsh = LSHash(32, 8)
lsh = LSHash(6, 8, 1, {"redis":{"host": 'localhost', "port": 6379}})
lsh.index([1, 2, 3, 4, 5, 6, 7, 8])
lsh.index([2, 3, 4, 5, 6, 7, 8, 9])
lsh.index([10, 12, 99, 1, 5, 31, 2, 3])
lsh.query([1, 2, 3, 4, 5, 6, 7, 7])
print(lsh.query([1, 2, 3, 4, 5, 6, 7, 7]))
# # another method
# lsh.index([[1, 2, 3, 4, 5, 6, 7, 8],
# [2, 3, 4, 5, 6, 7, 8, 9],
# [4, 2, 3, 1, 5, 6, 7, 8],
# [1, 3, 4, 5, 6, 7, 8, 9],
# [10, 12, 99, 1, 5, 31, 2, 3]])
# print(lsh.query([1, 2, 3, 4, 5, 6, 7, 7], distance_func='jaccard'))
|
from app import join_room, leave_room, Rooms, app, render_template, Response, request, redirect, url_for, session, send, emit, socketio
@app.route("/")
def index_page():
return render_template('index.html')
@app.route("/chats", methods=['POST'])
def chats_page():
Rooms.reloadRooms()
session['user'] = {'userName': request.form['userName'], 'userColor': Rooms.setRandomColor(), 'currentChatroom': "None", 'sid': 0}
return render_template('chats.html', userName = session['user']['userName'], rooms = Rooms.getRoomList())
@socketio.on('connect')
def on_connect():
session['user']['sid'] = request.sid
print(session['user'])
@socketio.on('userJoinedChatroom')
def chatroom_join(data):
join_room(data['roomName'])
session['user']['currentChatroom'] = data['roomName']
Rooms.addUserToRoom(data['roomName'], data['userName'], session['user']['userColor'])
currentUsers = Rooms.readRoomInformation(data['roomName'])
print(currentUsers)
emit('getUsersList', {'userName': currentUsers['usersConnected']}, room = data['roomName'])
@socketio.on('roomMessage')
def room_message(data):
emit('showMessage', {'userName': data['userName'], 'userColor': session['user']['userColor'], 'userMessage': data['userMessage']}, room = data['roomName'], broadcast = True)
@socketio.on('command')
def get_command(command):
cmds = ["/createchatroom", "/deletechatroom", "/help"]
splitCommand = str(command['userCommand']).split(" ")
if len(splitCommand) >= 2:
if splitCommand[0] in cmds:
if splitCommand[0] == "/createchatroom":
splitCommand.remove("/createchatroom")
roomName = "".join([text + " " for text in splitCommand])[:-1]
Rooms.createRoom(roomName)
emit('commandResponse', {'response': f'Chatroom created: {roomName}. (click reload chatrooms button on chatrooms page to see it)'})
elif splitCommand[0] == "/deletechatroom":
splitCommand.remove("/deletechatroom")
roomName = "".join([text + " " for text in splitCommand])[:-1]
Rooms.deleteRoom(roomName)
emit('commandResponse', {'response': f'Chatroom deleted: {roomName}. (click reload chatrooms button on chatrooms page to see it)'})
else:
emit('commandResponse', {'response': f'Command {splitCommand[0]} not found. Try /help'})
else:
if splitCommand[0] == "/createchatroom":
emit('commandResponse', {'response': f'Command Usage {splitCommand[0]} <roomname>'})
elif splitCommand[0] == "/deletechatroom":
emit('commandResponse', {'response': f'Command Usage {splitCommand[0]} <roomname>'})
elif splitCommand[0] == "/help":
emit('commandResponse', {'response': f'Existing commands: /createchatroom <roomname>, /deletechatroom <roomname>'})
else:
emit('commandResponse', {'response': f'Command {splitCommand[0]} not found. Try /help'})
@socketio.on('userLeftChatroom')
def chatroom_left(data):
leave_room(data['roomName'])
session['user']['currentChatroom'] = "None"
Rooms.removeUserFromRoom(data['roomName'], data['userName'], session['user']['userColor'])
currentUsers = Rooms.readRoomInformation(data['roomName'])
print(currentUsers)
emit('getUsersList', {'userName': currentUsers['usersConnected']}, room = data['roomName'], broadcast = True)
@socketio.on('disconnect')
def on_disconnect():
if session['user']['currentChatroom'] != "None":
Rooms.removeUserFromRoom(session['user']['currentChatroom'], session['user']['userName'], session['user']['userColor'])
currentUsers = Rooms.readRoomInformation(session['user']['currentChatroom'])
print(currentUsers)
emit('getUsersList', {'userName': currentUsers['usersConnected']}, room = session['user']['currentChatroom'], broadcast = True)
session.pop('user', None)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import matplotlib.pyplot as plt
from utils import showImages
#
# Task 1
#
# Implement the Harris-Stephens Corner Detection for `imgGray1` without using an existing all-in-one function,
# e.g. do not use functions like `cv2.cornerHarris(..)`.
img1 = cv2.imread('img/building.jpeg')
img1Gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
# First, you need to create the Harris matrix.
# TODO: Use the Sobel filter (with `ksize`) to get x and y derivatives of `img1Gray`.
ksize = 5
gradientX = cv2.Sobel(img1Gray, -1, 1, 0, ksize=ksize, borderType=cv2.BORDER_DEFAULT)
gradientY = cv2.Sobel(img1Gray, -1, 0, 1, ksize=ksize, borderType=cv2.BORDER_DEFAULT)
# TODO: Create a simple box filter smoothing kernel (use `ksize` again).
smoothingKernel = np.ones(shape=(ksize, ksize), dtype=np.uint8) / (ksize * ksize)
# TODO: Compute and fill in the values of the Harris matrix from the Sobel gradients.
harrisMatrix = np.ones((2, 2) + img1Gray.shape)
# Hint: Each of the following 4 entries contains a full gradient image
harrisMatrix[0, 0] = cv2.multiply(gradientX, gradientX) # Gx^2
harrisMatrix[0, 1] = cv2.multiply(gradientX, gradientY) # Gx*Gy
harrisMatrix[1, 0] = cv2.multiply(gradientX, gradientY) # Gx*Gy
harrisMatrix[1, 1] = cv2.multiply(gradientY, gradientY) # Gy^2
# TODO: Use the created smoothing kernel to filter the 4 Harris matrix values assigned above.
# Tip: You can use `cv2.filter2D(..)` to apply a kernel to a whole image.
harrisMatrix[0, 0] = cv2.filter2D(harrisMatrix[0, 0], -1, smoothingKernel, borderType=cv2.BORDER_DEFAULT) # Gx^2
harrisMatrix[0, 1] = cv2.filter2D(harrisMatrix[0, 1], -1, smoothingKernel, borderType=cv2.BORDER_DEFAULT) # Gx*Gy
harrisMatrix[1, 0] = cv2.filter2D(harrisMatrix[1, 0], -1, smoothingKernel, borderType=cv2.BORDER_DEFAULT) # Gx*Gy
harrisMatrix[1, 1] = cv2.filter2D(harrisMatrix[1, 1], -1, smoothingKernel, borderType=cv2.BORDER_DEFAULT) # Gy^2
# TODO: Calculate the Harris-Stephens score (R) for each pixel.
# Tip: Make sure you find and use functions for the intermediate steps that are available in OpenCV.
harris_k = .05 # Empirical k value
R = np.ones(img1Gray.shape)
for x in range(R.shape[0]):
for y in range(R.shape[1]):
H = harrisMatrix[:, :, x, y] # Get H for the current pixel
det = np.linalg.det(H)
trace = H[0, 0] + H[1, 1]
R[x, y] = det - harris_k * (trace ** 2) # det(H) - harris_k * trace(H)^2
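# Optional vectorized cross-check of the loop above (R itself is left untouched;
# det_vec, trace_vec and R_vec are names introduced here only for illustration).
det_vec = harrisMatrix[0, 0] * harrisMatrix[1, 1] - harrisMatrix[0, 1] * harrisMatrix[1, 0]
trace_vec = harrisMatrix[0, 0] + harrisMatrix[1, 1]
R_vec = det_vec - harris_k * (trace_vec ** 2)  # det(H) - k * trace(H)^2, computed for all pixels at once
assert np.allclose(R, R_vec)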
harris_r_norm = cv2.normalize(R, None, 0, 1, norm_type=cv2.NORM_MINMAX) # Normalize to 0-1 for display and thresholding
# TODO: Select pixels with a relevant Harris-Stephens score and highlight these in `imgMarkers` using
# `cv2.drawMarker(..)`
harris_tau = .95 # Harris-Stephens score threshold
imgMarkers = img1.copy()
corner_x, corner_y = np.nonzero(harris_r_norm > harris_tau)
corners = list(zip(corner_x, corner_y))
keypoints = []
for corner in corners:
keypoints.append(cv2.KeyPoint(int(corner[1]), int(corner[0]), 1))
imgMarkers = cv2.drawKeypoints(imgMarkers, keypoints, None, color=(0, 255, 0))
plt.figure(figsize=(10, 3))
showImages([("Input", img1), ("Harris-Stephens score (R)", harris_r_norm), ("Corners", imgMarkers)],
show_window_now=True)
#
# Task 2
#
# Use the SIFT Feature detector to find matching features in two images, in order to create a combined panorama image.
img2 = cv2.imread('img/mountain1.png')
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
img3 = cv2.imread('img/mountain2.png')
img3gray = cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY)
# TODO: Extract SIFT keypoints (`kp1`, `kp2`) and feature descriptors (`fd1`, `fd2`) for both images (`img2`, `img3`).
# (https://docs.opencv.org/master/da/df5/tutorial_py_sift_intro.html)
sift = cv2.SIFT_create()
kp1, fd1 = sift.detectAndCompute(img2gray, None)
kp2, fd2 = sift.detectAndCompute(img3gray, None)
# TODO: For all features of `img2`, find the two closest matches from the features of `img3` using euclidean distances.
# Tip: Have a look at `knnMatch(..)` from `cv2.BFMatcher`.
matcher = cv2.BFMatcher()
matches = matcher.knnMatch(fd1, fd2, k=2)
# TODO: Use the ratio test (best vs. second-best match) to keep only the `good_matches`.
best_to_secondBest_ratio = .6
good_matches = []
for m, n in matches:
if m.distance < best_to_secondBest_ratio * n.distance:
good_matches.append([m])
# TODO: Create an image showing the matches between `img2` and `img3`.
imgMatches = cv2.drawMatchesKnn(img2, kp1, img3, kp2, good_matches, None)
# TODO: Change this, once you have completed task 2 to test your feature matches.
task2_complete = True
if not task2_complete:
plt.figure(figsize=(10, 2))
showImages([("img2", img2), ("img3", img3)])
else:
# Now let's try to stitch these two images together to see how good the features actually are.
src_pts = np.float32([kp1[m[0].queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m[0].trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
H, _ = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
# Apply transformation to transform `img3` onto `img2`.
h, w, _ = img2.shape
img23 = cv2.warpPerspective(img3, H, (w * 2, h))
# Fill in pixels from `img2` around transformed `img3`.
stitchempty = np.where(img23[:, :w, :] == [0, 0, 0])
img23[stitchempty] = img2[stitchempty]
plt.figure(figsize=(10, 5))
showImages([("img2", img2), ("img3", img3), ("Matches", imgMatches), ("Both images stiched together", img23)], 2)
|
eggs = int(input())
print(eggs // 12)
print(eggs % 12)
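# The two prints above report full dozens (integer division) and the leftover eggs
# (remainder); for example, an input of 30 prints 2 and then 6.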
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import tempfile
import zipfile
from lib.subcommand import SubCommand
from lib.symbol import SymbolDataSources
LOGGER = logging.getLogger('dmprof')
class UploadCommand(SubCommand):
def __init__(self):
super(UploadCommand, self).__init__(
'Usage: %prog upload [--gsutil path/to/gsutil] '
'<first-dump> <destination-gs-path>')
self._parser.add_option('--gsutil', default='gsutil',
help='path to GSUTIL', metavar='GSUTIL')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 2)
dump_path = args[1]
gs_path = args[2]
dump_files = SubCommand._find_all_dumps(dump_path)
bucket_files = SubCommand._find_all_buckets(dump_path)
prefix = SubCommand._find_prefix(dump_path)
symbol_data_sources = SymbolDataSources(prefix)
symbol_data_sources.prepare()
symbol_path = symbol_data_sources.path()
handle_zip, filename_zip = tempfile.mkstemp('.zip', 'dmprof')
os.close(handle_zip)
try:
file_zip = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
for filename in dump_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
for filename in bucket_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
symbol_basename = os.path.basename(os.path.abspath(symbol_path))
for filename in os.listdir(symbol_path):
if not filename.startswith('.'):
file_zip.write(os.path.join(symbol_path, filename),
os.path.join(symbol_basename, os.path.basename(
os.path.abspath(filename))))
file_zip.close()
returncode = UploadCommand._run_gsutil(
options.gsutil, 'cp', '-a', 'public-read', filename_zip, gs_path)
finally:
os.remove(filename_zip)
return returncode
@staticmethod
def _run_gsutil(gsutil, *args):
"""Run gsutil as a subprocess.
Args:
*args: Arguments to pass to gsutil. The first argument should be an
operation such as ls, cp or cat.
Returns:
The return code from the process.
"""
command = [gsutil] + list(args)
LOGGER.info("Running: %s", command)
try:
return subprocess.call(command)
except OSError as e:
LOGGER.error('Failed to run gsutil: %s', e)
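# Hypothetical invocation of this subcommand (the program name, paths and bucket are
# placeholders, not taken from the tool's documentation):
#   dmprof upload --gsutil /path/to/gsutil chrome.12345.0001.heap gs://my-bucket/dumps.zip
# It zips the dump, bucket and symbol files and copies the archive to the given
# gs:// path via `gsutil cp -a public-read`.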
|
#
# PySNMP MIB module MICOMBRGEXT (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MICOMBRGEXT
# Produced by pysmi-0.3.4 at Wed May 1 14:12:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
micom_oscar, = mibBuilder.importSymbols("MICOM-OSCAR-MIB", "micom-oscar")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Gauge32, Unsigned32, Bits, IpAddress, MibIdentifier, Counter32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, ObjectIdentity, Counter64, TimeTicks, iso = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Gauge32", "Unsigned32", "Bits", "IpAddress", "MibIdentifier", "Counter32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "ObjectIdentity", "Counter64", "TimeTicks", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class MacAddress(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
fixedLength = 6
class Timeout(Integer32):
pass
mcmBrg = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6))
mcmBrgGlobalParamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 1))
mcmBrgIPBridged = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("bridged", 1), ("routed", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgIPBridged.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgIPBridged.setDescription('NAME = ; DESC = IP is being bridged or Routed.; HELP = Is IP being Bridged or Routed?; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgNumInterfaces = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgNumInterfaces.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgNumInterfaces.setDescription('NAME = ; DESC = The number of ports(ethernet & wan) \\ that the access device has.; HELP = The number of ports this bridge has.; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgSpanEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgSpanEnable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSpanEnable.setDescription('NAME = ; DESC = The current state of the Spanning \\ tree Algorithm on the access device.; HELP = Spanning-Tree Algorithm Enabled?; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgSpoofEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgSpoofEnable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSpoofEnable.setDescription('NAME = ; DESC = IP-ARP spoofing -when enabled, access device \\ intercepts most ARP bcasts and handle them locally \\ to reduce the amount of bcast pkts fwded across WAN.; HELP = IP-ARP Spoofing Enabled?; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgAgeTime = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 1000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgAgeTime.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgAgeTime.setDescription('NAME = ; DESC = Aging time for dynamically learned devices \\ in forwarding Table.; HELP = The timeout period in seconds for aging \\ out dynamically learned forwarding \\ information. 802.1D-1990 recommends \\ a default of 300 seconds. Mirror object \\ of dot1dTpAgingTime.; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgMiscParamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 2))
mcmBrgDebugEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgDebugEnable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgDebugEnable.setDescription('NAME = ; DESC = The access device capability to generate controlled \\ number of debugging messages to CLI for \\ the purpose of debugging some bridge \\ status.; HELP = Bridge Debugging Enabled?; CAPABILITIES = NET_CFG, VPN_DISP;')
mcmBrgSpanDebugEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSpanDebugEnable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSpanDebugEnable.setDescription('NAME = ; DESC = The access device capability to debug spanning \\ tree algorithm.; HELP = Spanning-Tree Debugging Enabled?; CAPABILITIES = NET_CFG, VPN_DISP;')
mcmBrgSpoofCacheAge = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 1800))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSpoofCacheAge.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSpoofCacheAge.setDescription('NAME = ; DESC = The aging time for entries in the ARP Spoof\\ cache table to specified by the cache timeout \\ to make room for active entries.; HELP = The Aging Time for the ARP Spoof Cache.; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgSpoofThresholdAge = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 120))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSpoofThresholdAge.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSpoofThresholdAge.setDescription('NAME = ; DESC = The Aging Time for the ARP Spoofcache Threshold \\ in seconds.(60 .. 1800 sec).; HELP = The Aging Time for the ARP Spoof Threshold.; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgSpoofThresholdCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 2, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSpoofThresholdCount.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSpoofThresholdCount.setDescription('NAME = ; DESC = The ARP Spoof Threshold count- allows \\ the max number of consecutive outstanding \\ ARP request per destination node.; HELP = The ARP Spoof Threshold count.; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgConfPortTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3), )
if mibBuilder.loadTexts: mcmBrgConfPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortTable.setDescription('NAME = ; DESC = A config Table that contains port-specific \\ information for the access device Bridge \\ Configuration information.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgConfPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1), ).setIndexNames((0, "MICOMBRGEXT", "mcmBrgConfPortIndex"))
if mibBuilder.loadTexts: mcmBrgConfPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortEntry.setDescription('NAME = ; DESC = Configuration parameter information maintained by \\ each Port that are being used by the access \\ device.; HELP = ; CAPABILITIES = ;')
mcmBrgConfPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgConfPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortIndex.setDescription('NAME = ; DESC = The Port Index to which this entry pertains.; HELP = ; CAPABILITIES = ;')
mcmBrgConfPortPPA = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgConfPortPPA.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortPPA.setDescription('NAME = ; DESC = The Protocol Point of Access to which \\ this entry pertains.; HELP = ; CAPABILITIES = ;')
mcmBrgConfPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ethernet", 1), ("wan", 2), ("frameRelay", 3), ("internal", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mcmBrgConfPortType.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortType.setDescription('NAME = ; DESC = The Type of the interface this port \\ is attached to.; HELP = ; CAPABILITIES = ;')
mcmBrgConfPortMacFilterFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgConfPortMacFilterFlag.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortMacFilterFlag.setDescription('NAME = ; DESC = When MAC address filtering is needed on a \\ specific port, enable MAC filter Flag. \\ Otherwise, disable the MAC Filetr Flag.; HELP = Is MAC Filtering enabled?; CAPABILITIES = ;')
mcmBrgConfPortEtFilterFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgConfPortEtFilterFlag.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortEtFilterFlag.setDescription('NAME = ; DESC = When E-type filtering is needed on a specific \\ port, enable E-type filter Flag. Otherwise, \\ disable E-type Filter Flag.; HELP = Is E-type Filtering enabled?; CAPABILITIES = ;')
mcmBrgConfPortSapFilterFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgConfPortSapFilterFlag.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortSapFilterFlag.setDescription('NAME = ; DESC = When SAP filtering is needed on a specific \\ port, enable SAP filter flag. Otherwise, \\ disable the SAP filter flag.; HELP = Is SAP Filtering enabled?; CAPABILITIES = ;')
mcmBrgConfPortMacInclExcl = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inclusive", 1), ("exclusive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgConfPortMacInclExcl.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortMacInclExcl.setDescription('NAME = ; DESC = If Inclusive MAC filtering is set, only \\ addresses in the table are filtered/discarded.\\ If Exclusive, allow only addresses in the \\ table to be accepted/learned.; HELP = Inclusive or Exclusive Mac Filtering?; CAPABILITIES = ;')
mcmBrgConfPortEtInclExcl = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inclusive", 1), ("exclusive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgConfPortEtInclExcl.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortEtInclExcl.setDescription('NAME = ; DESC = If Inclusive E-Type filtering is set, only \\ E-Type in the table are filtered/discarded.\\ If Exclusive, allow only E-Type in the \\ table to be accepted/learned.; HELP = Inclusive or Exclusive Et Filtering?; CAPABILITIES = ;')
mcmBrgConfPortSapInclExcl = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inclusive", 1), ("exclusive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgConfPortSapInclExcl.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgConfPortSapInclExcl.setDescription('NAME = ; DESC = If Inclusive Sap Type filtering is set, only \\ Sap Types in the Table are filtered/discarded. \\ If Exclusive, allow only Sap Types in the \\ table to be accepted/learned.; HELP = Inclusive or Exclusive Sap Filtering?; CAPABILITIES = ;')
mcmBrgMacFilterTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 4), )
if mibBuilder.loadTexts: mcmBrgMacFilterTable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgMacFilterTable.setDescription('NAME = ; DESC = A table that contains port-specific \\ information for the access device MAC Filter \\ info including the source & destination.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgMacFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 4, 1), ).setIndexNames((0, "MICOMBRGEXT", "mcmBrgMacFilterPortIndex"), (0, "MICOMBRGEXT", "mcmBrgMacFilterNumber"))
if mibBuilder.loadTexts: mcmBrgMacFilterEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgMacFilterEntry.setDescription('NAME = ; DESC = MAC Filter parameter information maintained by \\ every port that the access device uses.; HELP = ; CAPABILITIES = ;')
mcmBrgMacFilterPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgMacFilterPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgMacFilterPortIndex.setDescription('NAME = ; DESC = The Port Index particular to this entry.; HELP = ; CAPABILITIES = ;')
mcmBrgMacFilterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgMacFilterNumber.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgMacFilterNumber.setDescription('NAME = ; DESC = The Filter number associated with the entry.; HELP = ; CAPABILITIES = ;')
mcmBrgMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 4, 1, 3), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgMacAddress.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgMacAddress.setDescription('NAME = ; DESC = The MAC address(HW address) that needs to be \\ filtered.; HELP = ; CAPABILITIES = ;')
mcmBrgMacType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("source", 1), ("destination", 2), ("invalid", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgMacType.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgMacType.setDescription('NAME = ; DESC = Filter based on source MAC address or \\ destination MAC address.; HELP = The type of the MAC Address. By setting \\ it to invalid one can invalidate the \\ entry.; CAPABILITIES = ;')
mcmBrgEtFilterTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5), )
if mibBuilder.loadTexts: mcmBrgEtFilterTable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterTable.setDescription('NAME = ; DESC = A table that contains port-specific \\ information for the access device \\ E-type Filter.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgEtFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5, 1), ).setIndexNames((0, "MICOMBRGEXT", "mcmBrgEtFilterPortIndex"), (0, "MICOMBRGEXT", "mcmBrgEtFilterNumber"))
if mibBuilder.loadTexts: mcmBrgEtFilterEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterEntry.setDescription('NAME = ; DESC = A list of information maintained by \\ each port about the access device \\ E-type Filter parameters for that port.; HELP = ; CAPABILITIES = ;')
mcmBrgEtFilterPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgEtFilterPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterPortIndex.setDescription('NAME = ; DESC = The port Index particular to this port.; HELP = ; CAPABILITIES = ;')
mcmBrgEtFilterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgEtFilterNumber.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterNumber.setDescription('NAME = ; DESC = The Filter number associated with the \\ port Index.; HELP = ; CAPABILITIES = ;')
mcmBrgEtFilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("singular", 1), ("range", 2), ("invalid", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgEtFilterStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterStatus.setDescription('NAME = ; DESC = An indication as to whether the filter \\ is specified as a single e-type or \\ as a range of e-types.; HELP = Setting the value to invalid has the \\ effect of deleting the entry.; CAPABILITIES = ;')
mcmBrgEtFilterEType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1536, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgEtFilterEType.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterEType.setDescription('NAME = ; DESC = The singular e-type to be filtered.; HELP = The value of this object is valid only \\ if the mcmBrgEtFilterStatus (above) \\ is singular.; CAPABILITIES = ;')
mcmBrgEtFilterUpperRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1536, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgEtFilterUpperRange.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterUpperRange.setDescription('NAME = ; DESC = The Upper Range of the E-type to be \\ filtered. Range is between 0 - 65535.; HELP = This object is valid only if the \\ mcmBrgEtFilterStatus (above) is range.; CAPABILITIES = ;')
mcmBrgEtFilterLowerRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 5, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1536, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgEtFilterLowerRange.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgEtFilterLowerRange.setDescription('NAME = ; DESC = The lower Range of the E-type to be \\ filtered. Range is between 0 - 65535.; HELP = This object is valid only \\ if the mcmBrgEtFilterStatus (above) \\ is range.; CAPABILITIES = ;')
mcmBrgSapFilterTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6), )
if mibBuilder.loadTexts: mcmBrgSapFilterTable.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterTable.setDescription('NAME = ; DESC = A table that contains port-specific \\ information for the access device E-type \\ Filter information. (Operational); HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgSapFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6, 1), ).setIndexNames((0, "MICOMBRGEXT", "mcmBrgSapFilterPortIndex"), (0, "MICOMBRGEXT", "mcmBrgSapFilterNumber"))
if mibBuilder.loadTexts: mcmBrgSapFilterEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterEntry.setDescription('NAME = ; DESC = A list of information maintained by \\ every port about the access device E-type Filter \\ parameters for that port. (Operational); HELP = ; CAPABILITIES = ;')
mcmBrgSapFilterPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSapFilterPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterPortIndex.setDescription('NAME = ; DESC = ; HELP = The Port Index to which this entry pertains.; CAPABILITIES = ;')
mcmBrgSapFilterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSapFilterNumber.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterNumber.setDescription('NAME = ; DESC = The Filter number associated with the \\ port Index.; HELP = ; CAPABILITIES = ;')
mcmBrgSapFilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("singular", 1), ("range", 2), ("invalid", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSapFilterStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterStatus.setDescription('NAME = ; DESC = An indication as to whether the filter \\ is specified as a single e-type or \\ as a range of e-types.; HELP = Setting the value to invalid has the \\ effect of deleting the entry.; CAPABILITIES = ;')
mcmBrgSapFilterEType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSapFilterEType.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterEType.setDescription('NAME = ; DESC = The singular e-type to be filtered.; HELP = The value of this object is valid only \\ if the mcmBrgSapFilterStatus (above) \\ is singular.; CAPABILITIES = ;')
mcmBrgSapFilterUpperRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSapFilterUpperRange.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterUpperRange.setDescription('NAME = ; DESC = The Upper Range of the E-type to be \\ filtered.; HELP = This object is valid only if the \\ mcmBrgSapFilterStatus (above) \\ is range.; CAPABILITIES = ;')
mcmBrgSapFilterLowerRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 6, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgSapFilterLowerRange.setStatus('mandatory')
if mibBuilder.loadTexts: mcmBrgSapFilterLowerRange.setDescription('NAME = ; DESC = The lower Range of the E-type to be \\ filtered.; HELP = This object is valid only if the \\ mcmBrgSapFilterStatus (above) \\ is range.; CAPABILITIES = ;')
nvmBrgGlobalParamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 7))
nvmBrgIPBridged = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 7, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("bridged", 1), ("routed", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgIPBridged.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgIPBridged.setDescription('NAME = ; DESC = IP is being bridged or Routed in the access device.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgNumInterfaces = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 7, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgNumInterfaces.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgNumInterfaces.setDescription('NAME = ; DESC = The number of ethernet and WAN ports that \\ the access device has.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgSpanEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 7, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSpanEnable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSpanEnable.setDescription('NAME = ; DESC = The current state of the Spanning \\ tree Algorithm on the access device is enabled \\ or disabled.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgSpoofEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 7, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSpoofEnable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSpoofEnable.setDescription('NAME = ; DESC = IP-ARP spoofing - when enabled, access device intercepts \\ most ARP bcasts and handles them locally to \\ reduce the amount of bcast pkts fwded across WAN.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgAgeTime = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 7, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 1000000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgAgeTime.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgAgeTime.setDescription('NAME = ; DESC = Aging time for dynamically learned devices \\ in forwarding Table.; HELP = The timeout period in seconds for aging \\ out dynamically learned forwarding \\ information. 802.1D-1990 recommends \\ a default of 300 seconds. Mirror object \\ of dot1dTpAgingTime.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgMiscParamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 8))
nvmBrgDebugEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgDebugEnable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgDebugEnable.setDescription("NAME = ; DESC = The access device's capability to generate controlled \\ number of debugging messages to CLI for \\ the purpose of debugging some bridge \\ status.; HELP = Bridge Debugging Enabled?; CAPABILITIES = NET_CFG, VPN_DISP;")
nvmBrgSpanDebugEnable = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 8, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSpanDebugEnable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSpanDebugEnable.setDescription("NAME = ; DESC = The access device's capability to debug spanning \\ tree algorithm.; HELP = Spanning-Tree Debugging Enabled?; CAPABILITIES = NET_CFG, VPN_DISP;")
nvmBrgSpoofCacheAge = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 8, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 1800))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSpoofCacheAge.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSpoofCacheAge.setDescription('NAME = ; DESC = The aging time for entries in the ARP Spoof\\ cache table to specified by the cache timeout \\ to make room for active entries.; HELP = The Aging Time for the ARP Spoof Cache.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgSpoofThresholdAge = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 8, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 120))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSpoofThresholdAge.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSpoofThresholdAge.setDescription('NAME = ; DESC = The Aging Time for the ARP Spoofcache Threshold \\ in seconds.(60 .. 1800 sec); HELP = The Aging Time for the ARP Spoof Threshold.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgSpoofThresholdCount = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 8, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSpoofThresholdCount.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSpoofThresholdCount.setDescription('NAME = ; DESC = The ARP Spoof Threshold count- allows \\ the max number of consecutive outstanding \\ ARP request per dest node.; HELP = The ARP Spoof Threshold count.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgStpParamGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 9))
nvmBrgPriority = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 9, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgPriority.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgPriority.setDescription('NAME = ; DESC = The value of the write-able portion \\ of the Bridge ID, i.e., the first two \\ octets of the (8 octet long) Bridge ID.; HELP = The other (last) 6 octets of the \\ Bridge ID are given by the value of \\ dot1dBaseBridgeAddress. This is Mirror \\ object to dot1dStpPriority.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgMaxAge = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 9, 2), Timeout().subtype(subtypeSpec=ValueRangeConstraint(600, 4000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgMaxAge.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgMaxAge.setDescription('NAME = ; DESC = The maximum age of Spanning Tree Protocol \\ information learned from the network \\ on any port before it is discarded, \\ in units of hundredths of a second.; HELP = This is the programmed value of the \\ dot1dStpBridgeMaxAge.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgFwdDelay = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 9, 3), Timeout().subtype(subtypeSpec=ValueRangeConstraint(400, 3000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgFwdDelay.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgFwdDelay.setDescription('NAME = ; DESC = The value that all bridges use for \\ ForwardDelay when this bridge is \\ acting as the root.; HELP = Note that 802.1D-1990 specifies that \\ the range for this parameter is related \\ to the value of dot1dStpBridgeMaxAge. \\ The granularity of this timer is specified \\ by 802.1D-1990 to be 1 second. An agent \\ may return a badValue error if a set \\ is attempted to a value which is not \\ a whole number of seconds. Mirror object \\ of dot1dStpBridgeForwardDelay; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgHelloTime = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 9, 4), Timeout().subtype(subtypeSpec=ValueRangeConstraint(100, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgHelloTime.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgHelloTime.setDescription('NAME = ; DESC = The value that all bridges use for HelloTime \\ when this bridge is acting as the root.; HELP = The granularity of this timer is specified \\ by 802.1D- 1990 to be 1 second. An \\ agent may return a badValue error if \\ a set is attempted to a value which \\ is not a whole number of seconds. Mirror \\ object of dot1dStpBridgeHelloTime.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgConfPortTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10), )
if mibBuilder.loadTexts: nvmBrgConfPortTable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortTable.setDescription('NAME = ; DESC = A config table that contains port-specific \\ information for the access device Bridge Configuration \\ information.; HELP = A table that contains port-specific \\ information for the access device Bridge Config \\ information.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgConfPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1), ).setIndexNames((0, "MICOMBRGEXT", "nvmBrgConfPortIndex"))
if mibBuilder.loadTexts: nvmBrgConfPortEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortEntry.setDescription('NAME = ; DESC = A list of information maintained by \\ each Port that are being used by the access device, \\ about conf. parameters for that port.; HELP = ; CAPABILITIES = ;')
nvmBrgConfPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortIndex.setDescription('NAME = ; DESC = The Port Index to which this entry pertains.; HELP = ; CAPABILITIES = ;')
nvmBrgConfPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ethernet", 1), ("wan", 2), ("frameRelay", 3), ("internal", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortType.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortType.setDescription('NAME = ; DESC = The Type of the interface this port \\ is attached to. \\ This object only supports enumerations of \\ ethernet and frameRelay.; HELP = ; CAPABILITIES = ;')
nvmBrgConfPortEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("invalid", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortEnable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortEnable.setDescription('NAME = ; DESC = The status of the Port is enabled/disabled.; HELP = Mirror object of dot1dStpPortEnable.; CAPABILITIES = ;')
nvmBrgConfPortPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortPriority.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortPriority.setDescription("NAME = ; DESC = The value of the priority field which \\ is contained in the first (in network \\ byte order) octet of the port ID.; HELP = It's the mirror object of dot1dStpPortPriority.; CAPABILITIES = ;")
nvmBrgConfPortPathCost = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortPathCost.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortPathCost.setDescription('NAME = ; DESC = The contribution of this port to the \\ path cost of paths towards the spanning \\ tree root which include this port.; HELP = 802.1D-1990 recommends that the default \\ value of this parameter be in inverse \\ proportion to the speed of the attached \\ LAN. Mirror object of dot1dStpPortPathCost.; CAPABILITIES = ;')
nvmBrgConfPortMacFilterFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortMacFilterFlag.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortMacFilterFlag.setDescription('NAME = ; DESC = When MAC address filtering is needed on a \\ specific port, enable MAC filter Flag. Otherwise, \\ disable the MAC Filter Flag.; HELP = Is MAC Filtering enabled?; CAPABILITIES = ;')
nvmBrgConfPortEtFilterFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortEtFilterFlag.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortEtFilterFlag.setDescription('NAME = ; DESC = When E-type filtering is needed on a specific \\ port, enable E-type filter Flag. Otherwise, \\ disable the E-type Filter Flag.; HELP = Is E-type Filtering enabled?; CAPABILITIES = ;')
nvmBrgConfPortSapFilterFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortSapFilterFlag.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortSapFilterFlag.setDescription('NAME = ; DESC = When SAP filtering is needed on a specific \\ port, enable SAP filter flag. Otherwise, \\ disable the SAP filter flag.; HELP = Is SAP Filtering enabled?; CAPABILITIES = ;')
nvmBrgConfPortMacInclExcl = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inclusive", 1), ("exclusive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortMacInclExcl.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortMacInclExcl.setDescription('NAME = ; DESC = ; HELP = Inclusive or Exclusive Mac Filtering?; CAPABILITIES = ;')
nvmBrgConfPortEtInclExcl = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inclusive", 1), ("exclusive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortEtInclExcl.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortEtInclExcl.setDescription('NAME = ; DESC = ; HELP = Inclusive or Exclusive Et Filtering?; CAPABILITIES = ;')
nvmBrgConfPortSapInclExcl = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 10, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inclusive", 1), ("exclusive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgConfPortSapInclExcl.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgConfPortSapInclExcl.setDescription('NAME = ; DESC = ; HELP = Inclusive or Exclusive Sap Filtering?; CAPABILITIES = ;')
nvmBrgMacFilterTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 11), )
if mibBuilder.loadTexts: nvmBrgMacFilterTable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgMacFilterTable.setDescription('NAME = ; DESC = ; HELP = A table that contains port-specific \\ information for the access device MAC Filter \\ information.; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgMacFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 11, 1), ).setIndexNames((0, "MICOMBRGEXT", "nvmBrgMacFilterPortIndex"), (0, "MICOMBRGEXT", "nvmBrgMacFilterNumber"))
if mibBuilder.loadTexts: nvmBrgMacFilterEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgMacFilterEntry.setDescription('NAME = ; DESC = ; HELP = A list of information maintained by \\ every port about the access device MAC Filter \\ parameters for that port.; CAPABILITIES = ;')
nvmBrgMacFilterPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgMacFilterPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgMacFilterPortIndex.setDescription('NAME = ; DESC = ; HELP = The Port Index to which this entry pertains.; CAPABILITIES = ;')
nvmBrgMacFilterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 11, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgMacFilterNumber.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgMacFilterNumber.setDescription('NAME = ; DESC = ; HELP = The Port Index to which this entry pertains.; CAPABILITIES = ;')
nvmBrgMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 11, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgMacAddress.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgMacAddress.setDescription('NAME = ; DESC = ; HELP = The MAC Address which needs to be filtered.; CAPABILITIES = ;')
nvmBrgMacType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 11, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("source", 1), ("destination", 2), ("invalid", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgMacType.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgMacType.setDescription('NAME = ; DESC = ; HELP = The type of the MAC Address. By setting \\ it to invalid one can invalidate the \\ entry.; CAPABILITIES = ;')
nvmBrgEtFilterTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12), )
if mibBuilder.loadTexts: nvmBrgEtFilterTable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterTable.setDescription('NAME = ; DESC = A table that contains port-specific \\ information for the access device E-type Filter.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgEtFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12, 1), ).setIndexNames((0, "MICOMBRGEXT", "nvmBrgEtFilterPortIndex"), (0, "MICOMBRGEXT", "nvmBrgEtFilterNumber"))
if mibBuilder.loadTexts: nvmBrgEtFilterEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterEntry.setDescription('NAME = ; DESC = A list of information maintained by \\ each port about the access device E-type Filter \\ parameters for that port.; HELP = ; CAPABILITIES = ;')
nvmBrgEtFilterPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgEtFilterPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterPortIndex.setDescription('NAME = ; DESC = The port Index particular to this port.; HELP = ; CAPABILITIES = ;')
nvmBrgEtFilterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgEtFilterNumber.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterNumber.setDescription('NAME = ; DESC = The Filter number associated with the \\ port Index.; HELP = ; CAPABILITIES = ;')
nvmBrgEtFilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("singular", 1), ("range", 2), ("invalid", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgEtFilterStatus.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterStatus.setDescription('NAME = ; DESC = An indication as to whether the filter \\ is specified as a single e-type or \\ as a range of e-types.; HELP = Setting the value to invalid has the \\ effect of deleting the entry.; CAPABILITIES = ;')
nvmBrgEtFilterEType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1536, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgEtFilterEType.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterEType.setDescription('NAME = ; DESC = The singular e-type to be filtered.; HELP = The value of this object is valid only \\ if the mcmBrgEtFilterStatus (above) \\ is singular.; CAPABILITIES = ;')
nvmBrgEtFilterUpperRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1536, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgEtFilterUpperRange.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterUpperRange.setDescription('NAME = ; DESC = The Upper Range of the E-type to be \\ filtered. Range is between 0 - 65535.; HELP = This object is valid only if the \\ mcmBrgEtFilterStatus (above) is range.; CAPABILITIES = ;')
nvmBrgEtFilterLowerRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 12, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1536, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nvmBrgEtFilterLowerRange.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgEtFilterLowerRange.setDescription('NAME = ; DESC = The lower Range of the E-type to be \\ filtered. Range is between 0 - 65535.; HELP = This object is valid only \\ if the mcmBrgEtFilterStatus (above) \\ is range.; CAPABILITIES = ;')
nvmBrgSapFilterTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13), )
if mibBuilder.loadTexts: nvmBrgSapFilterTable.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterTable.setDescription('NAME = ; DESC = A table that contains port-specific \\ information for the access device E-type \\ Filter information.; HELP = ; CAPABILITIES = NET_CFG, VPN_CFG;')
nvmBrgSapFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13, 1), ).setIndexNames((0, "MICOMBRGEXT", "nvmBrgSapFilterPortIndex"), (0, "MICOMBRGEXT", "nvmBrgSapFilterNumber"))
if mibBuilder.loadTexts: nvmBrgSapFilterEntry.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterEntry.setDescription('NAME = ; DESC = A list of information maintained by \\ every port about the access device E-type Filter \\ parameters for that port.; HELP = ; CAPABILITIES = ;')
nvmBrgSapFilterPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSapFilterPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterPortIndex.setDescription('NAME = ; DESC = The Port Index to which this entry pertains.; HELP = ; CAPABILITIES = ;')
nvmBrgSapFilterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSapFilterNumber.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterNumber.setDescription('NAME = ; DESC = The Filter number associated with the \\ port Index.; HELP = ; CAPABILITIES = ;')
nvmBrgSapFilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("singular", 1), ("range", 2), ("invalid", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSapFilterStatus.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterStatus.setDescription('NAME = ; DESC = An indication as to whether the filter \\ is specified as a single e-type or \\ as a range of e-types.; HELP = Setting the value to invalid has the \\ effect of deleting the entry.; CAPABILITIES = ;')
nvmBrgSapFilterEType = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSapFilterEType.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterEType.setDescription('NAME = ; DESC = The singular e-type to be filtered.; HELP = The value of this object is valid only \\ if the mcmBrgSapFilterStatus (above) \\ is singular.; CAPABILITIES = ;')
nvmBrgSapFilterUpperRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSapFilterUpperRange.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterUpperRange.setDescription('NAME = ; DESC = The Upper Range of the E-type to be \\ filtered.; HELP = This object is valid only if the \\ mcmBrgSapFilterStatus (above) \\ is range.; CAPABILITIES = ;')
nvmBrgSapFilterLowerRange = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 13, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nvmBrgSapFilterLowerRange.setStatus('mandatory')
if mibBuilder.loadTexts: nvmBrgSapFilterLowerRange.setDescription('NAME = ; DESC = The lower Range of the E-type to be \\ filtered.; HELP = This object is valid only if the \\ mcmBrgSapFilterStatus (above) \\ is range.; CAPABILITIES = ;')
mcmBrgCntr = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14))
mcmBrgPortCntrZeroTable = MibTable((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 1), )
if mibBuilder.loadTexts: mcmBrgPortCntrZeroTable.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgPortCntrZeroTable.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ A table that contains port-specific \\ information for access device Port Counter \\ Zeroing.; HELP =; CAPABILITIES = NET_CFG, VPN_CFG;')
mcmBrgPortCntrZeroEntry = MibTableRow((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 1, 1), ).setIndexNames((0, "MICOMBRGEXT", "mcmBrgPortCntrZeroIndex"))
if mibBuilder.loadTexts: mcmBrgPortCntrZeroEntry.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgPortCntrZeroEntry.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ A list of information maintained by \\ every port about the access device E-type Filter \\ parameters for that port.; HELP = ; CAPABILITIES = ;')
mcmBrgPortCntrZeroIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mcmBrgPortCntrZeroIndex.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgPortCntrZeroIndex.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ The Port Index to which this entry pertains.; HELP = ; CAPABILITIES = ;')
mcmBrgBasePortCounterZero = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("reset", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: mcmBrgBasePortCounterZero.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgBasePortCounterZero.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ When set to reset, the Base port Counters \\ should be zeroed out.; HELP = ; CAPABILITIES = ;')
mcmBrgStpPortCounterZero = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("reset", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: mcmBrgStpPortCounterZero.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgStpPortCounterZero.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ When set to reset, the spanning Tree port \\ counters will be zeroed out.; HELP = ; CAPABILITIES = ;')
mcmBrgTpPortCounterZero = MibTableColumn((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("reset", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: mcmBrgTpPortCounterZero.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgTpPortCounterZero.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ When set to reset, the Transparent port \\ counters will be zeroed out.; HELP = ; CAPABILITIES = ;')
mcmBrgCounterZero = MibIdentifier((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 2))
mcmBrgStpCounterZero = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("reset", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: mcmBrgStpCounterZero.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgStpCounterZero.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ When set to reset, the spanning Tree port \\ counters will be zeroed out.; HELP = ; CAPABILITIES = ;')
mcmBrgTpCounterZero = MibScalar((1, 3, 6, 1, 4, 1, 335, 1, 4, 6, 14, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("reset", 1)))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: mcmBrgTpCounterZero.setStatus('obsolete')
if mibBuilder.loadTexts: mcmBrgTpCounterZero.setDescription('NAME = ; DESC = This object was obsoleted as of release \\ 3.0; and is included here only to support \\ backwards compatibility for software \\ versions prior to release 3.0. \\ When set to reset, the Transparent port \\ counters will be zeroed out.; HELP = ; CAPABILITIES = ;')
mibBuilder.exportSymbols("MICOMBRGEXT", mcmBrgConfPortMacFilterFlag=mcmBrgConfPortMacFilterFlag, mcmBrgConfPortEntry=mcmBrgConfPortEntry, nvmBrgIPBridged=nvmBrgIPBridged, mcmBrgConfPortIndex=mcmBrgConfPortIndex, nvmBrgSapFilterTable=nvmBrgSapFilterTable, mcmBrgTpPortCounterZero=mcmBrgTpPortCounterZero, nvmBrgMacFilterTable=nvmBrgMacFilterTable, nvmBrgMacType=nvmBrgMacType, mcmBrgStpCounterZero=mcmBrgStpCounterZero, nvmBrgConfPortIndex=nvmBrgConfPortIndex, mcmBrgEtFilterLowerRange=mcmBrgEtFilterLowerRange, nvmBrgMacFilterEntry=nvmBrgMacFilterEntry, nvmBrgEtFilterPortIndex=nvmBrgEtFilterPortIndex, mcmBrgMacType=mcmBrgMacType, mcmBrgPortCntrZeroEntry=mcmBrgPortCntrZeroEntry, mcmBrgDebugEnable=mcmBrgDebugEnable, nvmBrgSapFilterEType=nvmBrgSapFilterEType, mcmBrgCounterZero=mcmBrgCounterZero, mcmBrgSpanDebugEnable=mcmBrgSpanDebugEnable, mcmBrgPortCntrZeroTable=mcmBrgPortCntrZeroTable, mcmBrgSapFilterEntry=mcmBrgSapFilterEntry, mcmBrgEtFilterPortIndex=mcmBrgEtFilterPortIndex, nvmBrgGlobalParamGroup=nvmBrgGlobalParamGroup, nvmBrgSpoofThresholdAge=nvmBrgSpoofThresholdAge, mcmBrgTpCounterZero=mcmBrgTpCounterZero, nvmBrgMaxAge=nvmBrgMaxAge, mcmBrgGlobalParamGroup=mcmBrgGlobalParamGroup, nvmBrgConfPortEntry=nvmBrgConfPortEntry, mcmBrgStpPortCounterZero=mcmBrgStpPortCounterZero, nvmBrgSpoofCacheAge=nvmBrgSpoofCacheAge, nvmBrgEtFilterEntry=nvmBrgEtFilterEntry, nvmBrgPriority=nvmBrgPriority, nvmBrgEtFilterEType=nvmBrgEtFilterEType, mcmBrgEtFilterTable=mcmBrgEtFilterTable, mcmBrgSapFilterEType=mcmBrgSapFilterEType, nvmBrgEtFilterNumber=nvmBrgEtFilterNumber, nvmBrgMacFilterPortIndex=nvmBrgMacFilterPortIndex, mcmBrgCntr=mcmBrgCntr, mcmBrgEtFilterNumber=mcmBrgEtFilterNumber, nvmBrgSapFilterLowerRange=nvmBrgSapFilterLowerRange, nvmBrgConfPortSapInclExcl=nvmBrgConfPortSapInclExcl, Timeout=Timeout, mcmBrgSpoofThresholdCount=mcmBrgSpoofThresholdCount, mcmBrgSapFilterStatus=mcmBrgSapFilterStatus, mcmBrgSapFilterLowerRange=mcmBrgSapFilterLowerRange, nvmBrgSapFilterStatus=nvmBrgSapFilterStatus, mcmBrgEtFilterUpperRange=mcmBrgEtFilterUpperRange, mcmBrgNumInterfaces=mcmBrgNumInterfaces, nvmBrgSapFilterEntry=nvmBrgSapFilterEntry, mcmBrgMacFilterNumber=mcmBrgMacFilterNumber, nvmBrgConfPortEtFilterFlag=nvmBrgConfPortEtFilterFlag, mcmBrgSapFilterUpperRange=mcmBrgSapFilterUpperRange, nvmBrgFwdDelay=nvmBrgFwdDelay, nvmBrgEtFilterStatus=nvmBrgEtFilterStatus, nvmBrgMiscParamGroup=nvmBrgMiscParamGroup, mcmBrgEtFilterStatus=mcmBrgEtFilterStatus, mcmBrgSapFilterPortIndex=mcmBrgSapFilterPortIndex, mcmBrgMacFilterEntry=mcmBrgMacFilterEntry, mcmBrgAgeTime=mcmBrgAgeTime, nvmBrgAgeTime=nvmBrgAgeTime, mcmBrgConfPortType=mcmBrgConfPortType, nvmBrgConfPortMacFilterFlag=nvmBrgConfPortMacFilterFlag, nvmBrgEtFilterTable=nvmBrgEtFilterTable, mcmBrgConfPortEtInclExcl=mcmBrgConfPortEtInclExcl, nvmBrgConfPortPriority=nvmBrgConfPortPriority, nvmBrgHelloTime=nvmBrgHelloTime, mcmBrgIPBridged=mcmBrgIPBridged, mcmBrg=mcmBrg, mcmBrgPortCntrZeroIndex=mcmBrgPortCntrZeroIndex, nvmBrgStpParamGroup=nvmBrgStpParamGroup, nvmBrgSapFilterPortIndex=nvmBrgSapFilterPortIndex, MacAddress=MacAddress, nvmBrgConfPortEnable=nvmBrgConfPortEnable, mcmBrgConfPortEtFilterFlag=mcmBrgConfPortEtFilterFlag, nvmBrgMacFilterNumber=nvmBrgMacFilterNumber, mcmBrgConfPortSapFilterFlag=mcmBrgConfPortSapFilterFlag, nvmBrgSpoofThresholdCount=nvmBrgSpoofThresholdCount, mcmBrgSapFilterNumber=mcmBrgSapFilterNumber, nvmBrgSpanEnable=nvmBrgSpanEnable, nvmBrgEtFilterUpperRange=nvmBrgEtFilterUpperRange, mcmBrgEtFilterEType=mcmBrgEtFilterEType, mcmBrgMacFilterTable=mcmBrgMacFilterTable, 
nvmBrgConfPortEtInclExcl=nvmBrgConfPortEtInclExcl, mcmBrgSapFilterTable=mcmBrgSapFilterTable, mcmBrgMacAddress=mcmBrgMacAddress, mcmBrgEtFilterEntry=mcmBrgEtFilterEntry, nvmBrgConfPortPathCost=nvmBrgConfPortPathCost, nvmBrgSapFilterUpperRange=nvmBrgSapFilterUpperRange, mcmBrgConfPortSapInclExcl=mcmBrgConfPortSapInclExcl, nvmBrgMacAddress=nvmBrgMacAddress, mcmBrgSpoofThresholdAge=mcmBrgSpoofThresholdAge, mcmBrgSpoofEnable=mcmBrgSpoofEnable, mcmBrgSpanEnable=mcmBrgSpanEnable, mcmBrgConfPortTable=mcmBrgConfPortTable, mcmBrgSpoofCacheAge=mcmBrgSpoofCacheAge, mcmBrgMacFilterPortIndex=mcmBrgMacFilterPortIndex, nvmBrgSpanDebugEnable=nvmBrgSpanDebugEnable, mcmBrgBasePortCounterZero=mcmBrgBasePortCounterZero, nvmBrgConfPortSapFilterFlag=nvmBrgConfPortSapFilterFlag, nvmBrgNumInterfaces=nvmBrgNumInterfaces, nvmBrgConfPortType=nvmBrgConfPortType, mcmBrgConfPortPPA=mcmBrgConfPortPPA, nvmBrgSpoofEnable=nvmBrgSpoofEnable, nvmBrgDebugEnable=nvmBrgDebugEnable, nvmBrgEtFilterLowerRange=nvmBrgEtFilterLowerRange, mcmBrgConfPortMacInclExcl=mcmBrgConfPortMacInclExcl, nvmBrgSapFilterNumber=nvmBrgSapFilterNumber, mcmBrgMiscParamGroup=mcmBrgMiscParamGroup, nvmBrgConfPortTable=nvmBrgConfPortTable, nvmBrgConfPortMacInclExcl=nvmBrgConfPortMacInclExcl)
|
on = ":fastparrot:"
off = ":meow_party:"
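# Each glyph below is a row-major bitmap, 5 rows tall (glyph_height), with a
# per-letter width of 3, 4 or 5 columns; 1 cells render as the "on" emoji and
# 0 cells as the "off" emoji.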
a = [
0, 1, 1, 0,
1, 0, 0, 1,
1, 1, 1, 1,
1, 0, 0, 1,
1, 0, 0, 1
]
b = [
1, 1, 1, 0,
1, 0, 0, 1,
1, 1, 1, 0,
1, 0, 0, 1,
1, 1, 1, 1
]
c = [
0, 1, 1, 0,
1, 0, 0, 1,
1, 0, 0, 0,
1, 0, 0, 1,
0, 1, 1, 0
]
d = [
1, 1, 1, 0,
1, 0, 0, 1,
1, 0, 0, 1,
1, 0, 0, 1,
1, 1, 1, 1
]
e = [
1, 1, 1, 1,
1, 0, 0, 0,
1, 1, 1, 1,
1, 0, 0, 0,
1, 1, 1, 1
]
f = [
1, 1, 1, 1,
1, 0, 0, 0,
1, 1, 1, 0,
1, 0, 0, 0,
1, 0, 0, 0
]
g = [
0, 1, 1, 0,
1, 0, 0, 0,
1, 0, 1, 1,
1, 0, 0, 1,
0, 1, 1, 0
]
h = [
1, 0, 0, 1,
1, 0, 0, 1,
1, 1, 1, 1,
1, 0, 0, 1,
1, 0, 0, 1
]
i = [
1, 1, 1,
0, 1, 0,
0, 1, 0,
0, 1, 0,
1, 1, 1,
]
j = [
1, 1, 1,
0, 1, 1,
0, 1, 1,
0, 1, 1,
1, 1, 0,
]
k = [
1, 0, 0, 1,
1, 0, 1, 0,
1, 1, 0, 0,
1, 0, 1, 0,
1, 0, 0, 1
]
l = [
1, 0, 0, 0,
1, 0, 0, 0,
1, 0, 0, 0,
1, 0, 0, 0,
1, 1, 1, 1
]
m = [
1, 0, 0, 0, 1,
1, 1, 0, 1, 1,
1, 0, 1, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
]
n = [
1, 0, 0, 0, 1,
1, 1, 0, 0, 1,
1, 0, 1, 0, 1,
1, 0, 0, 1, 1,
1, 0, 0, 0, 1,
]
o = [
0, 1, 1, 0,
1, 0, 0, 1,
1, 0, 0, 1,
1, 0, 0, 1,
0, 1, 1, 0
]
p = [
1, 1, 1, 0,
1, 0, 0, 1,
1, 1, 1, 1,
1, 0, 0, 0,
1, 0, 0, 0
]
q = [
0, 1, 1, 0,
1, 0, 0, 1,
1, 0, 0, 1,
1, 0, 1, 1,
0, 1, 1, 1
]
r = [
1, 1, 1, 0,
1, 0, 0, 1,
1, 1, 1, 0,
1, 0, 0, 1,
1, 0, 0, 1
]
s = [
0, 1, 1, 1,
1, 0, 0, 0,
0, 1, 1, 0,
0, 0, 0, 1,
1, 1, 1, 0
]
t = [
1, 1, 1,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
]
u = [
1, 0, 0, 1,
1, 0, 0, 1,
1, 0, 0, 1,
1, 0, 0, 1,
0, 1, 1, 0
]
v = [
1, 0, 0, 1,
1, 0, 0, 1,
1, 0, 0, 1,
1, 0, 0, 1,
0, 1, 1, 0
]
w = [
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 1, 0, 1,
1, 0, 1, 0, 1,
0, 1, 0, 1, 0,
]
x = [
1, 0, 0, 0, 1,
0, 1, 0, 1, 0,
0, 0, 1, 0, 0,
0, 1, 0, 1, 0,
1, 0, 0, 0, 1,
]
y = [
1, 0, 0, 1,
1, 0, 0, 1,
1, 1, 1, 1,
0, 0, 0, 1,
1, 1, 1, 1
]
z = [
1, 1, 1, 1,
0, 0, 1, 0,
0, 1, 0, 0,
1, 0, 0, 0,
1, 1, 1, 1
]
space = [
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
]
letter_spacing = 1
glyph_height = 5
chars = 'abcdefghijklmnopqrstuvwxyz '
glyphs = [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,space]
def get_glyph_for_char(char):
i = chars.index(char)
g = glyphs[i]
return g
def create_str_from_glyph(glyph):
    out = ''
    glyph_width = int(len(glyph) / glyph_height)
    for y in range(glyph_height):
        for x in range(glyph_width + letter_spacing):
            # spacing
            if x >= glyph_width:
                out += off
                continue
            # char content
            i = glyph_width * y + x
            if glyph[i] == 1:
                out += on
            else:
                out += off
        out += '\n'
    return out
input_text = input('give text: ')
lines = ['' for i in range(glyph_height)]
for c in input_text.lower():  # lower-case so capitalised input still maps to a glyph
g = get_glyph_for_char(c)
s = create_str_from_glyph(g)
str_lines = s.split('\n')
for l in range(glyph_height):
lines[l] += str_lines[l].rstrip()
output = '\n'.join(lines)
print(output)
input()
|
import itertools
def main():
    # Probability that at least one 'a' appears among k letters chosen from the
    # input, computed exactly by enumerating every k-combination.
    n = int(input())  # number of letters (read to consume the line; not used below)
    s = input().split()
    k = int(input())
    count = 0
    length = 0
    for combo in itertools.combinations(s, k):
        if 'a' in combo:
            count += 1
        length += 1
    print('{:.4f}'.format(count / length))
if __name__ == '__main__':
main()
|
from pprint import pprint
import hashlib
import sqlite3
import os
class MAuth:
def __init__(self, db_file:str, firstBoot:bool=False) -> None:
self.db_file = db_file
self.conn = sqlite3.connect(self.db_file)
self.cur = self.conn.cursor()
self.total_users = 0
self.userData = {}
if firstBoot:
self.MAuthFirstBoot()
self.total_users += 1
else:
self.setUserData()
pprint(self.userData)
def setUserData(self) -> None:
users = self.cur.execute("SELECT * FROM users").fetchall()
self.total_users = len(users)
for user in users:
self.userData[user[1]] = self.readConfig(user[3])
print(self.userData[user[1]])
def readConfig(self, config_path:str) -> dict:
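        # Parses a simple "key:value" per-line user config file (the format written by makeUser).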
with open(config_path, 'r') as f:
return {line.split(':')[0]:line.split(':')[1].strip() for line in f.readlines()}
def MAuthFirstBoot(self) -> None:
self.cur.execute("""CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, name TEXT, password TEXT ,userFile TEXT)""")
#self.cur.execute("DELETE FROM users WHERE name = 'admin'")
if not self.userExists('admin'):
self.makeUser('admin', 'SuperAdminPasssword6742344234!!')
self.conn.commit()
def makeUser(self, name:str, password:str) -> bool:
## don't allow duplicate users
if self.userExists(name):
return False
password = self.hashPassword(name, password)
userFile = f'users/{name}.udata'
userFolder = f'users/{name}.mfs'
self.cur.execute(f"""INSERT INTO users (name, password, userFile) VALUES (?, ?, ?)""", (name, password, userFile))
self.conn.commit()
        os.makedirs('users', exist_ok=True)  # ensure the users/ directory exists before writing
        with open(userFile, 'w+') as f:
f.write(f'activeToken:{name}_user_{self.total_users}\ndataFile:{userFile}\nauthLevel:1\nhome:{userFolder}\n')
self.userData[name] = self.readConfig(userFile)
self.total_users += 1
return True ## new user was created
def getUserHome(self, username:str) -> str:
return self.userData[username]['home']
def getUserAuthToken(self, name:str) -> str:
return self.userData[name]['activeToken']
def checkAuthToken(self, name:str, authToken:str) -> bool:
        return self.userData[name]['activeToken'] == authToken
def userExists(self, name:str) -> bool:
        return bool(name) and self.cur.execute("SELECT * FROM users WHERE name = ?", (name,)).fetchone() is not None
def getAuthLevel(self, name:str) -> int:
return int(self.userData[name]['authLevel'])
def checkPassword(self, name:str, password:str) -> bool:
## get the hashed password from the database
hashedPassword = self.cur.execute("SELECT password FROM users WHERE name = ?", (name,)).fetchone()[0]
        return hashedPassword == self.hashPassword(name, password)
def getUserFile(self, name:str) -> str:
return self.cur.execute("SELECT userFile FROM users WHERE name = ?", (name,)).fetchone()[0]
@staticmethod
def hashPassword(userName:str, password:str) -> str:
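        # Iteratively SHA-256 hashes the password, appending one character of the
        # username after each round as a simple home-grown salting scheme.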
rounds = len(userName)
for i in range(rounds):
password = hashlib.sha256(password.encode()).hexdigest()
password += userName[i]
## TODO: do this yes
#password = hashlib.sha256(password.encode()).hexdigest()
return password
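if __name__ == '__main__':
    # Minimal usage sketch against a fresh database; the filename and credentials
    # below are illustrative assumptions, not part of the original module.
    auth = MAuth('mauth_example.db', firstBoot=True)
    auth.makeUser('alice', 'example-password-123')
    print(auth.checkPassword('alice', 'example-password-123'))  # expected: True
    print(auth.getAuthLevel('alice'))  # expected: 1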
|
from ..source import URLSource
from ..package import Package
from ..patch import LocalPatch
from ..util import target_arch
class GDBM(Package):
version = '1.18.1'
source = URLSource(f'https://ftp.gnu.org/gnu/gdbm/gdbm-{version}.tar.gz', sig_suffix='.sig')
validpgpkeys = ['325F650C4C2B6AD58807327A3602B07F55D0C732']
patches = [
LocalPatch('SIZE_T_MAX'),
LocalPatch('missing-return'),
]
def prepare(self):
self.run_with_env([
'./configure',
'--prefix=/usr',
'--host=' + target_arch().ANDROID_TARGET,
'--enable-libgdbm-compat',
'--disable-static',
])
def build(self):
self.run(['make', 'V=1'])
self.run(['make', 'install', f'DESTDIR={self.destdir()}'])
|
import subprocess
from Bio import Phylo
from covizu import clustering, treetime, beadplot
import sys
import json
def build_timetree(by_lineage, args, callback=None):
""" Generate time-scaled tree of Pangolin lineages """
if callback:
callback("Parsing Pango lineage designations")
handle = open(args.lineages)
header = next(handle)
if header != 'taxon,lineage\n':
if callback:
callback("Error: {} does not contain expected header row 'taxon,lineage'".format(args.lineages))
sys.exit()
lineages = {}
for line in handle:
taxon, lineage = line.strip().split(',')
lineages.update({taxon: lineage})
if callback:
callback("Identifying lineage representative genomes")
fasta = treetime.retrieve_genomes(by_lineage, known_seqs=lineages, ref_file=args.ref,
earliest=True)
if callback:
callback("Reconstructing tree with {}".format(args.ft2bin))
nwk = treetime.fasttree(fasta, binpath=args.ft2bin)
if callback:
callback("Reconstructing time-scaled tree with {}".format(args.ttbin))
nexus_file = treetime.treetime(nwk, fasta, outdir=args.outdir, binpath=args.ttbin,
clock=args.clock, verbosity=0)
# writes output to treetime.nwk at `nexus_file` path
return treetime.parse_nexus(nexus_file, fasta)
def beadplot_serial(lineage, features, args, callback=None):
""" Compute distance matrices and reconstruct NJ trees """
# bootstrap sampling and NJ tree reconstruction, serial mode
trees, labels = clustering.build_trees(features, args, callback=callback)
if trees is None:
# lineage only has one variant, no meaningful tree
beaddict = {'lineage': lineage, 'nodes': {}, 'edges': []}
# use earliest sample as variant label
intermed = [label.split('|')[::-1] for label in labels['0']]
intermed.sort()
variant = intermed[0][1]
beaddict.update({'sampled_variants': len(labels)})
beaddict['nodes'].update({variant: []})
for coldate, accn, label1 in intermed:
beaddict['nodes'][variant].append([coldate, accn, label1])
return beaddict
# generate majority consensus tree
ctree = clustering.consensus(iter(trees), cutoff=args.boot_cutoff)
# collapse polytomies and label internal nodes
label_dict = dict([(str(idx), lst) for idx, lst in enumerate(labels)])
ctree = beadplot.annotate_tree(ctree, label_dict, callback=callback)
# convert to JSON format
beaddict = beadplot.serialize_tree(ctree)
beaddict.update({'lineage': lineage})
beaddict.update({'sampled_variants': len(labels)})
return beaddict
def import_labels(handle, callback=None):
""" Load map of genome labels to tip indices from CSV file """
result = {}
_ = next(handle) # skip header line
for line in handle:
try:
qname, idx = line.strip('\n').split(',')
except ValueError:
if callback:
callback("import_labels() failed to parse line {}".format(line), level="ERROR")
raise # issue #206, sequence label contains delimiter
if idx not in result:
result.update({idx: []})
result[idx].append(qname)
return result
def make_beadplots(by_lineage, args, callback=None, t0=None, txtfile='minor_lineages.txt',
recode_file="recoded.json"):
"""
Wrapper for beadplot_serial - divert to clustering.py in MPI mode if
lineage has too many genomes.
:param by_lineage: dict, feature vectors stratified by lineage
:param args: Namespace, from argparse.ArgumentParser()
:param t0: float, datetime.timestamp.
:param txtfile: str, path to file to write minor lineage names
:param recode_file: str, path to JSON file to write recoded lineage data
:return: list, beadplot data by lineage
"""
# recode data into variants and serialize
if callback:
callback("Recoding features, compressing variants..")
recoded = {}
for lineage, records in by_lineage.items():
union, labels, indexed = clustering.recode_features(records, limit=args.max_variants)
# serialize tuple keys (features of union), #335
union = dict([("{0}|{1}|{2}".format(*feat), idx) for feat, idx in union.items()])
indexed = [list(s) for s in indexed] # sets cannot be serialized to JSON, #335
recoded.update({lineage: {'union': union, 'labels': labels,
'indexed': indexed}})
with open(recode_file, 'w') as handle:
json.dump(recoded, handle)
# partition lineages into major and minor categories
intermed = [(len(features), lineage) for lineage, features in by_lineage.items()
if len(features) < args.mincount]
intermed.sort(reverse=True) # descending order
minor = dict([(lineage, None) for _, lineage in intermed if lineage is not None])
# export minor lineages to text file
with open(txtfile, 'w') as handle:
for lineage in minor:
handle.write('{}\n'.format(lineage))
# launch MPI job across minor lineages
if callback:
callback("start MPI on minor lineages")
cmd = ["mpirun", "--machinefile", args.machine_file, "python3", "covizu/clustering.py",
recode_file, txtfile, # positional arguments <JSON file>, <str>
"--mode", "flat",
"--max-variants", str(args.max_variants),
"--nboot", str(args.nboot),
"--outdir", args.outdir,
"--binpath", args.binpath # RapidNJ
]
if t0:
cmd.extend(["--timestamp", str(t0)])
subprocess.check_call(cmd)
# process major lineages
for lineage, features in by_lineage.items():
if lineage in minor:
continue
if callback:
callback('start {}, {} entries'.format(lineage, len(features)))
cmd = [
"mpirun", "--machinefile", args.machine_file, "python3", "covizu/clustering.py",
recode_file, lineage, # positional arguments <JSON file>, <str>
"--mode", "deep",
"--max-variants", str(args.max_variants),
"--nboot", str(args.nboot),
"--outdir", args.outdir,
"--binpath", args.binpath
]
if t0:
cmd.extend(["--timestamp", str(t0)])
subprocess.check_call(cmd)
# parse output files
if callback:
callback("Parsing output files")
result = []
for lineage in recoded:
# import trees
lineage_name = lineage.replace('/', '_') # issue #297
outfile = open('data/{}.nwk'.format(lineage_name))
trees = Phylo.parse(outfile, 'newick') # note this returns a generator
label_dict = recoded[lineage]['labels']
if len(label_dict) == 1:
# handle case of only one variant
# lineage only has one variant, no meaningful tree
beaddict = {'lineage': lineage, 'nodes': {}, 'edges': []}
# use earliest sample as variant label
intermed = [label.split('|')[::-1] for label in label_dict['0']]
intermed.sort()
variant = intermed[0][1]
beaddict['nodes'].update({variant: []})
for coldate, accn, label1 in intermed:
beaddict['nodes'][variant].append([coldate, accn, label1])
else:
# generate beadplot data
ctree = clustering.consensus(trees, cutoff=args.boot_cutoff, callback=callback)
outfile.close() # done with Phylo.parse generator
ctree = beadplot.annotate_tree(ctree, label_dict)
beaddict = beadplot.serialize_tree(ctree)
beaddict.update({'lineage': lineage})
beaddict.update({'sampled_variants': len(label_dict)})
result.append(beaddict)
return result
def get_mutations(by_lineage):
"""
Extract common mutations from feature vectors for each lineage
:param by_lineage: dict, return value from process_feed()
:return: dict, common mutations by lineage
"""
result = {}
for lineage, samples in by_lineage.items():
# enumerate features
counts = {}
for sample in samples:
for diff in sample['diffs']:
feat = tuple(diff)
if feat not in counts:
counts.update({feat: 0})
counts[feat] += 1
# filter for mutations that occur in at least half of samples
common = [feat for feat, count in counts.items() if count/len(samples) >= 0.5]
result.update({lineage: common})
return result
|
import logging
from flask_restplus import Api
from werkzeug import exceptions
from app.define import status
from app.response import response as resp
logger = logging.getLogger(__name__)
api = Api(version="1.0.0", title="Flask-JWT-Auth Example")
@api.errorhandler
def default_error_handler(e):
message = "Unhandled exception occurred: {}".format(e)
logger.exception(message)
return resp.error(message, status=status.ERROR_BAD_REQUEST)
|
# from pipelitools.preprocessing.eda import test_eda
from pipelitools.preprocessing import eda, features, outliers
from importlib import reload
reload(eda)
reload(features)
reload(outliers)
|
import os
import glob
import pickle
from itertools import groupby
import argparse
from numpy.lib.format import open_memmap
from tqdm import tqdm
from .read_skeleton import read_xyz
max_body = 1
num_joint = 31
max_frame = 901
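# Output tensor layout: (num_samples, 3 xyz channels, max_frame, num_joint, max_body)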
def gendata(data_path,
out_path):
sample_name = []
sample_label = []
filenames = sorted(glob.glob(os.path.join(data_path, '*/*.mat')))
grouped_dict = {g[0]: list(g[1]) for g in groupby(filenames, key=lambda x: x.split('/')[-2])}
for idx, (k, v) in enumerate(grouped_dict.items()):
for filename in v:
action_class = idx
sample_name.append(filename)
sample_label.append(action_class)
with open('{}/{}_label.pkl'.format(out_path, 'full'), 'wb') as f:
pickle.dump((sample_name, list(sample_label)), f)
fp = open_memmap(
'{}/{}_data.npy'.format(out_path, 'full'),
dtype='float32',
mode='w+',
shape=(len(sample_label), 3, max_frame, num_joint, max_body))
for i in tqdm(range(len(sample_name))):
s = sample_name[i]
data = read_xyz(s, max_body=max_body, num_joint=num_joint)
fp[i, :, 0:data.shape[1], :, :] = data
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='HDM05 Data Converter.')
parser.add_argument(
'--data_path', default='/media/ssd_storage/HDM05/HDM05_mats')
parser.add_argument(
'--ignored_sample_path',
default='')
parser.add_argument('--out_folder', default='data/HDM05')
arg = parser.parse_args()
out_path = arg.out_folder
if not os.path.exists(out_path):
os.makedirs(out_path)
gendata(
arg.data_path,
out_path)
|
"""
Elections are in progress!
Given an array of the numbers of votes given to each of the candidates so far, and an integer k equal to the number of voters who haven't cast their vote yet, find the number of candidates who still have a chance to win the election.
The winner of the election must secure strictly more votes than any other candidate. If two or more candidates receive the same (maximum) number of votes, assume there is no winner at all.
Example
For votes = [2, 3, 5, 2] and k = 3, the output should be
electionsWinners(votes, k) = 2.
The first candidate got 2 votes. Even if all of the remaining 3 candidates vote for him, he will still have only 5 votes, i.e. the same number as the third candidate, so there will be no winner.
The second candidate can win if all the remaining candidates vote for him (3 + 3 = 6 > 5).
The third candidate can win even if none of the remaining candidates vote for him. For example, if each of the remaining voters cast their votes for each of his opponents, he will still be the winner (the votes array will thus be [3, 4, 5, 3]).
The last candidate can't win no matter what (for the same reason as the first candidate).
Thus, only 2 candidates can win (the second and the third), which is the answer.
Input/Output
[time limit] 4000ms (py)
[input] array.integer votes
A non-empty array of non-negative integers. Its ith element denotes the number of votes cast for the ith candidate.
Guaranteed constraints:
4 <= votes.length <= 10^5,
0 <= votes[i] <= 10^4.
[input] integer k
The number of voters who haven't cast their vote yet.
Guaranteed constraints:
0 <= k <= 10^5.
[output] integer
"""
def electionsWinners(votes, k):
if k == 0:
if votes.count(max(votes)) == 1:
return 1
else:
return 0
votes_to_beat = max(votes)
winners = 0
for candidate in votes:
if candidate + k > votes_to_beat:
winners += 1
return winners
if __name__ == '__main__':
    print(electionsWinners([5, 1, 3, 4, 1], 0))
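    # Quick check against the worked example from the problem statement above.
    assert electionsWinners([2, 3, 5, 2], 3) == 2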
|
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import argparse
from datumaro.util.scope import scope_add, scoped
from ..util.project import load_project
def build_parser(parser_ctor=argparse.ArgumentParser):
parser = parser_ctor(description="Prints project history.")
parser.add_argument('-n', '--max-count', default=10, type=int,
help="Count of last commits to print (default: %(default)s)")
parser.add_argument('-p', '--project', dest='project_dir', default='.',
help="Directory of the project to operate on (default: current dir)")
parser.set_defaults(command=log_command)
return parser
@scoped
def log_command(args):
project = scope_add(load_project(args.project_dir))
revisions = project.history(args.max_count)
if revisions:
for rev, message in revisions:
print('%s %s' % (rev, message))
else:
print("(Project history is empty)")
return 0
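# Typically invoked through the datum CLI once this subcommand is registered,
# e.g. (assuming the standard registration): datum log -n 5 -p ./my_project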
|
"""empty message
Revision ID: a4f17e7db43b
Revises: None
Create Date: 2016-07-19 14:15:25.508645
"""
# revision identifiers, used by Alembic.
revision = 'a4f17e7db43b'
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('address',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('address_line', sa.String(), nullable=False),
sa.Column('suburb', sa.String(), nullable=False),
sa.Column('state', sa.String(), nullable=False),
sa.Column('postal_code', sa.String(length=8), nullable=False),
sa.Column('country', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('audit_event',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('user', sa.String(), nullable=True),
sa.Column('data', postgresql.JSON(), nullable=True),
sa.Column('object_type', sa.String(), nullable=True),
sa.Column('object_id', sa.BigInteger(), nullable=True),
sa.Column('acknowledged', sa.Boolean(), nullable=False),
sa.Column('acknowledged_by', sa.String(), nullable=True),
sa.Column('acknowledged_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('idx_audit_events_object_and_type', 'audit_event', ['object_type', 'object_id', 'type', 'created_at'], unique=False)
op.create_index('idx_audit_events_type_acknowledged', 'audit_event', ['type', 'acknowledged'], unique=False)
op.create_index(op.f('ix_audit_event_acknowledged'), 'audit_event', ['acknowledged'], unique=False)
op.create_index(op.f('ix_audit_event_created_at'), 'audit_event', ['created_at'], unique=False)
op.create_index(op.f('ix_audit_event_type'), 'audit_event', ['type'], unique=False)
op.create_table('contact',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('contact_for', sa.String(), nullable=True),
sa.Column('name', sa.String(), nullable=True),
sa.Column('role', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=True),
sa.Column('phone', sa.String(), nullable=True),
sa.Column('fax', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('framework',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('slug', sa.String(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('framework', sa.String(), nullable=False),
sa.Column('framework_agreement_version', sa.String(), nullable=True),
sa.Column('status', sa.String(), nullable=False),
sa.Column('clarification_questions_open', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_framework_framework'), 'framework', ['framework'], unique=False)
op.create_index(op.f('ix_framework_slug'), 'framework', ['slug'], unique=True)
op.create_index(op.f('ix_framework_status'), 'framework', ['status'], unique=False)
op.create_table('lot',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('slug', sa.String(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('one_service_limit', sa.Boolean(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_lot_slug'), 'lot', ['slug'], unique=False)
op.create_table('service_category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('framework_lot',
sa.Column('framework_id', sa.Integer(), nullable=False),
sa.Column('lot_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['framework_id'], ['framework.id'], ),
sa.ForeignKeyConstraint(['lot_id'], ['lot.id'], ),
sa.PrimaryKeyConstraint('framework_id', 'lot_id')
)
op.create_table('supplier',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('data_version', sa.Integer(), nullable=True),
sa.Column('code', sa.BigInteger(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('summary', sa.String(length=511), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.Column('address_id', sa.Integer(), nullable=False),
sa.Column('website', sa.String(length=255), nullable=True),
sa.Column('abn', sa.String(length=15), nullable=True),
sa.Column('acn', sa.String(length=15), nullable=True),
sa.ForeignKeyConstraint(['address_id'], ['address.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_supplier_code'), 'supplier', ['code'], unique=True)
op.create_table('archived_service',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=True),
sa.Column('status', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('service_id', sa.String(), nullable=False),
sa.Column('framework_id', sa.BigInteger(), nullable=False),
sa.Column('lot_id', sa.BigInteger(), nullable=False),
sa.Column('supplier_code', sa.BigInteger(), nullable=False),
sa.ForeignKeyConstraint(['framework_id', 'lot_id'], ['framework_lot.framework_id', 'framework_lot.lot_id'], ),
sa.ForeignKeyConstraint(['framework_id'], ['framework.id'], ),
sa.ForeignKeyConstraint(['lot_id'], ['lot.id'], ),
sa.ForeignKeyConstraint(['supplier_code'], ['supplier.code'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_archived_service_framework_id'), 'archived_service', ['framework_id'], unique=False)
op.create_index(op.f('ix_archived_service_lot_id'), 'archived_service', ['lot_id'], unique=False)
op.create_index(op.f('ix_archived_service_service_id'), 'archived_service', ['service_id'], unique=False)
op.create_index(op.f('ix_archived_service_supplier_code'), 'archived_service', ['supplier_code'], unique=False)
op.create_table('brief',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('framework_id', sa.Integer(), nullable=False),
sa.Column('lot_id', sa.Integer(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('published_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['framework_id', 'lot_id'], ['framework_lot.framework_id', 'framework_lot.lot_id'], ),
sa.ForeignKeyConstraint(['framework_id'], ['framework.id'], ),
sa.ForeignKeyConstraint(['lot_id'], ['lot.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_brief_created_at'), 'brief', ['created_at'], unique=False)
op.create_index(op.f('ix_brief_published_at'), 'brief', ['published_at'], unique=False)
op.create_index(op.f('ix_brief_updated_at'), 'brief', ['updated_at'], unique=False)
op.create_table('draft_service',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=True),
sa.Column('status', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('service_id', sa.String(), nullable=True),
sa.Column('framework_id', sa.BigInteger(), nullable=False),
sa.Column('lot_id', sa.BigInteger(), nullable=False),
sa.Column('supplier_code', sa.BigInteger(), nullable=False),
sa.ForeignKeyConstraint(['framework_id', 'lot_id'], ['framework_lot.framework_id', 'framework_lot.lot_id'], ),
sa.ForeignKeyConstraint(['framework_id'], ['framework.id'], ),
sa.ForeignKeyConstraint(['lot_id'], ['lot.id'], ),
sa.ForeignKeyConstraint(['supplier_code'], ['supplier.code'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_draft_service_framework_id'), 'draft_service', ['framework_id'], unique=False)
op.create_index(op.f('ix_draft_service_lot_id'), 'draft_service', ['lot_id'], unique=False)
op.create_index(op.f('ix_draft_service_service_id'), 'draft_service', ['service_id'], unique=False)
op.create_index(op.f('ix_draft_service_supplier_code'), 'draft_service', ['supplier_code'], unique=False)
op.create_table('service',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('service_id', sa.String(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=True),
sa.Column('status', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('framework_id', sa.BigInteger(), nullable=False),
sa.Column('lot_id', sa.BigInteger(), nullable=False),
sa.Column('supplier_code', sa.BigInteger(), nullable=False),
sa.ForeignKeyConstraint(['framework_id', 'lot_id'], ['framework_lot.framework_id', 'framework_lot.lot_id'], ),
sa.ForeignKeyConstraint(['framework_id'], ['framework.id'], ),
sa.ForeignKeyConstraint(['lot_id'], ['lot.id'], ),
sa.ForeignKeyConstraint(['supplier_code'], ['supplier.code'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_service_framework_id'), 'service', ['framework_id'], unique=False)
op.create_index(op.f('ix_service_lot_id'), 'service', ['lot_id'], unique=False)
op.create_index(op.f('ix_service_service_id'), 'service', ['service_id'], unique=True)
op.create_index(op.f('ix_service_supplier_code'), 'service', ['supplier_code'], unique=False)
op.create_table('supplier__contact',
sa.Column('supplier_id', sa.Integer(), nullable=False),
sa.Column('contact_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
sa.ForeignKeyConstraint(['supplier_id'], ['supplier.id'], ),
sa.PrimaryKeyConstraint('supplier_id', 'contact_id')
)
op.create_table('supplier__service_category',
sa.Column('supplier_id', sa.Integer(), nullable=False),
sa.Column('service_category_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['service_category_id'], ['service_category.id'], ),
sa.ForeignKeyConstraint(['supplier_id'], ['supplier.id'], ),
sa.PrimaryKeyConstraint('supplier_id', 'service_category_id')
)
op.create_table('supplier_framework',
sa.Column('supplier_code', sa.BigInteger(), nullable=False),
sa.Column('framework_id', sa.Integer(), nullable=False),
sa.Column('declaration', postgresql.JSON(), nullable=True),
sa.Column('on_framework', sa.Boolean(), nullable=True),
sa.Column('agreement_returned_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['framework_id'], ['framework.id'], ),
sa.ForeignKeyConstraint(['supplier_code'], ['supplier.code'], ),
sa.PrimaryKeyConstraint('supplier_code', 'framework_id')
)
op.create_table('supplier_reference',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('supplier_id', sa.Integer(), nullable=True),
sa.Column('name', sa.String(), nullable=True),
sa.Column('organisation', sa.String(), nullable=False),
sa.Column('role', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=False),
sa.ForeignKeyConstraint(['supplier_id'], ['supplier.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('email_address', sa.String(), nullable=False),
sa.Column('phone_number', sa.String(), nullable=True),
sa.Column('password', sa.String(), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('password_changed_at', sa.DateTime(), nullable=False),
sa.Column('logged_in_at', sa.DateTime(), nullable=True),
sa.Column('failed_login_count', sa.Integer(), nullable=False),
sa.Column('role', sa.Enum('buyer', 'supplier', 'admin', 'admin-ccs-category', 'admin-ccs-sourcing', name='user_roles_enum'), nullable=False),
sa.Column('supplier_code', sa.BigInteger(), nullable=True),
sa.ForeignKeyConstraint(['supplier_code'], ['supplier.code'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email_address'), 'user', ['email_address'], unique=True)
op.create_index(op.f('ix_user_supplier_code'), 'user', ['supplier_code'], unique=False)
op.create_table('brief_clarification_question',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('brief_id', sa.Integer(), nullable=False),
sa.Column('question', sa.String(), nullable=False),
sa.Column('answer', sa.String(), nullable=False),
sa.Column('published_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['brief_id'], ['brief.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_brief_clarification_question_published_at'), 'brief_clarification_question', ['published_at'], unique=False)
op.create_table('brief_response',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.Column('brief_id', sa.Integer(), nullable=False),
sa.Column('supplier_code', sa.BigInteger(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['brief_id'], ['brief.id'], ),
sa.ForeignKeyConstraint(['supplier_code'], ['supplier.code'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_brief_response_created_at'), 'brief_response', ['created_at'], unique=False)
op.create_table('brief_user',
sa.Column('brief_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['brief_id'], ['brief.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('brief_id', 'user_id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('brief_user')
op.drop_index(op.f('ix_brief_response_created_at'), table_name='brief_response')
op.drop_table('brief_response')
op.drop_index(op.f('ix_brief_clarification_question_published_at'), table_name='brief_clarification_question')
op.drop_table('brief_clarification_question')
op.drop_index(op.f('ix_user_supplier_code'), table_name='user')
op.drop_index(op.f('ix_user_email_address'), table_name='user')
op.drop_table('user')
op.drop_table('supplier_reference')
op.drop_table('supplier_framework')
op.drop_table('supplier__service_category')
op.drop_table('supplier__contact')
op.drop_index(op.f('ix_service_supplier_code'), table_name='service')
op.drop_index(op.f('ix_service_service_id'), table_name='service')
op.drop_index(op.f('ix_service_lot_id'), table_name='service')
op.drop_index(op.f('ix_service_framework_id'), table_name='service')
op.drop_table('service')
op.drop_index(op.f('ix_draft_service_supplier_code'), table_name='draft_service')
op.drop_index(op.f('ix_draft_service_service_id'), table_name='draft_service')
op.drop_index(op.f('ix_draft_service_lot_id'), table_name='draft_service')
op.drop_index(op.f('ix_draft_service_framework_id'), table_name='draft_service')
op.drop_table('draft_service')
op.drop_index(op.f('ix_brief_updated_at'), table_name='brief')
op.drop_index(op.f('ix_brief_published_at'), table_name='brief')
op.drop_index(op.f('ix_brief_created_at'), table_name='brief')
op.drop_table('brief')
op.drop_index(op.f('ix_archived_service_supplier_code'), table_name='archived_service')
op.drop_index(op.f('ix_archived_service_service_id'), table_name='archived_service')
op.drop_index(op.f('ix_archived_service_lot_id'), table_name='archived_service')
op.drop_index(op.f('ix_archived_service_framework_id'), table_name='archived_service')
op.drop_table('archived_service')
op.drop_index(op.f('ix_supplier_code'), table_name='supplier')
op.drop_table('supplier')
op.drop_table('framework_lot')
op.drop_table('service_category')
op.drop_index(op.f('ix_lot_slug'), table_name='lot')
op.drop_table('lot')
op.drop_index(op.f('ix_framework_status'), table_name='framework')
op.drop_index(op.f('ix_framework_slug'), table_name='framework')
op.drop_index(op.f('ix_framework_framework'), table_name='framework')
op.drop_table('framework')
op.drop_table('contact')
op.drop_index(op.f('ix_audit_event_type'), table_name='audit_event')
op.drop_index(op.f('ix_audit_event_created_at'), table_name='audit_event')
op.drop_index(op.f('ix_audit_event_acknowledged'), table_name='audit_event')
op.drop_index('idx_audit_events_type_acknowledged', table_name='audit_event')
op.drop_index('idx_audit_events_object_and_type', table_name='audit_event')
op.drop_table('audit_event')
op.drop_table('address')
### end Alembic commands ###
|
class Solution:
def solve(self, board: 'List[List[str]]') -> 'None':
"""
Do not return anything, modify board in-place instead.
"""
if not board:
return None
stack = []
        # collect 'O' cells on the border as DFS seeds
for row in [0,len(board)-1]:
for col in range(len(board[row])):
if board[row][col] == 'O':
stack.append((row,col))
for col in [0, len(board[0])-1]:
for row in range(1,len(board)-1,1):
if board[row][col] == 'O':
stack.append((row,col))
        # iterative DFS from the border, marking connected 'O' cells as 'F'
while stack:
row, col = stack.pop()
if 0 <= row < len(board) and 0 <= col < len(board[0]) and board[row][col] == 'O':
board[row][col] ='F'
stack.append((row,col+1))
stack.append((row,col-1))
stack.append((row+1,col))
stack.append((row-1,col))
        # flip: remaining 'O' cells are surrounded -> 'X'; 'F' cells are border-connected -> back to 'O'
for row in range(len(board)):
for col in range(len(board[0])):
if board[row][col] == 'O':
board[row][col] = 'X'
elif board[row][col] == 'F':
board[row][col] = 'O'
return None
|
from django.conf.urls import patterns, include, url
from users_api.common import UsersApi
from users_api.api.users import UsersResource
from users_api.api.groups import GroupsResource
from users_api.api.permissions import PermissionsResource, ContentTypesResource
django_users_api = UsersApi()
django_users_api.register(GroupsResource())
django_users_api.register(PermissionsResource())
django_users_api.register(ContentTypesResource())
django_users_api.register(UsersResource())
urlpatterns = patterns(
'',
url(r'', include(django_users_api.urls)),
)
|
from .fcl import CollisionObject, CollisionGeometry, Transform, TriangleP, Box, Sphere, Ellipsoid, Capsule, Cone, Cylinder, Halfspace, Plane, BVHModel, OcTree, DynamicAABBTreeCollisionManager, collide, continuousCollide, distance, defaultCollisionCallback, defaultDistanceCallback
from .collision_data import OBJECT_TYPE, NODE_TYPE, CCDMotionType, CCDSolverType, GJKSolverType, Contact, CostSource, CollisionRequest, CollisionResult, ContinuousCollisionRequest, ContinuousCollisionResult, DistanceRequest, DistanceResult, CollisionData, DistanceData
from .version import __version__
|
#!/usr/bin/env python3
import atexit
import time
import requests
import subprocess
import sys
def check_eq(expected, actual):
if expected != actual:
print(f'Check failed: expected {repr(expected)}, got {repr(actual)}')
sys.exit(1)
def main():
_, *server_cmd = sys.argv
assert server_cmd, 'Expected usage: ./server-test-health.py <command-to-run>'
port = 8080
    print(f'Booting server: {server_cmd}', flush=True)
server = subprocess.Popen(args=[*server_cmd, '-p', str(port)])
def kill_server():
try:
server.wait(timeout=0.1)
except subprocess.TimeoutExpired:
print('Server terminating...', flush=True)
server.kill()
atexit.register(kill_server)
time.sleep(2) # FIXME
print('Checks starting...', flush=True)
with requests.get(f'http://localhost:{port}/health') as r:
check_eq(200, r.status_code)
check_eq("OK", r.text)
print("Health ok.")
print('All ok.')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# coding: utf-8
# # Tarea 5
#
# _Tarea 5_ (Homework 5) by _Benjamín Rivera_ for the __Métodos Numéricos__ (Numerical Methods) course taught by _Joaquín Peña Acevedo_. Submission deadline: __October 4, 2020__.
# ### How to run
# ##### Requirements
#
# This program was run on my computer with __Python 3.8.2__ and these
# [requirements](https://github.com/BenchHPZ/UG-Compu/blob/master/MN/requerimientos.txt)
#
# #### Jupyter
#
# If you have access to a _Jupyter server_ with the requirements mentioned above, it is enough to run every cell of this _notebook_. Not every _markdown_ cell will necessarily render the same way because of the
# [_Nbextensions_](jupyter-contrib-nbextensions.readthedocs.io).
#
# #### Console
#
# There are files and instructions to run each of the exercises from the console.
#
# #### If everything goes wrong
#
# <a href="https://colab.research.google.com/gist/BenchHPZ/">
# <img src="../../../assets/colab-badge.svg"
# alt="Open In Colab"/>
# </a>
#
# If everything goes wrong, I will try to leave a copy available on __GoogleColab__ that can be run with the __Python__ version provided by _GoogleColab_.
# In[138]:
usage = """
Programa correspondiente a la Tarea 5 de Metodos Numericos.
Este programa espera leer los archivos de tipo npy
Alumno: Benjamin Rivera
Usage:
Tarea5.py ejercicio1 <matA> <vecB> <N>[--path=<path>]
Tarea5.py -h | --help
Options:
-h --help Show this screen.
-v --version Show version.
--path=<path> Directorio para buscar archivos [default: data/].
"""
import sys
import scipy
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve_triangular  # for backward and forward substitution
NOTEBOOK = False
# ## Exercise 1
# Consider the matrix
# \begin{equation*}
# A = \begin{pmatrix}
# a^2 & a & a/2 & 1 \\
# a & -9 & 1 & 0 \\
# a/2 & 1 & 10 & 0 \\
# 1 & 0 & 0 & a
# \end{pmatrix}
# \end{equation*}
#
# Give a range of values of $a$ that guarantees the convergence of the Jacobi method.
# ### Answer
# From the course notes (lecture 9 slides, slides 8-41), we know that the Jacobi method converges when the matrix $A$ is \textbf{strictly diagonally dominant}, which holds when
#
# \begin{equation*}
# \forall i \in [1,\dots,n] , |a_{i,i}| > \sum_{j=1, j\neq i}^{n} |a_{i,j}|
# \end{equation*}
#
# if we expand these inequalities for the matrix $A$ we get
#
# \begin{eqnarray}
# sol &=&
# \begin{cases}
# |a^2| &>& |a| + |a/2| + |1| \\
# |-9| &>& |a| + |1| + |0| \\
# |10| &>& |a/2| + |1| + |0| \\
# |a| &>& |1| + |0| + |0|
# \end{cases} \\
#     && \text{after simplifying this becomes} \\
# &=& \begin{cases}
# a^2 &>& |a| + |a/2| + 1 \\
# 8 &>& |a| \\
# 9 &>& |a/2| \\
# |a| &>& 1
# \end{cases} \\
# && \\
# &=& \begin{cases}
# a^2 &>& |a| + |a/2| + 1 \\
# 64 &>& a^2 \\
#             4*9^2 &>& a^2 \\
# a^2 &>& 1
# \end{cases} \label{eq: red}\\
#     && \text{we relate \ref{eq: red}.4 with \ref{eq: red}.3 and \ref{eq: red}.2} \\
# &=& \begin{cases}
# a^2 &>& |a| + |a/2| + 1 \\
# 8^2 &>& a^2 > 1\\
# 4*9^2 &>& a^2 > 1\\
# \end{cases} \label{eq: cuadrado}\\
#     && \text{of these we only need \ref{eq: cuadrado}.1 and \ref{eq: cuadrado}.2} \\
# &=& \begin{cases}
# a^2 &>& 3|a|/2 + 1 \\
# 8 &>& a > 1\\
# \end{cases} \label{eq: final}\\
# \end{eqnarray}
#
# We can now compute the solution intervals of \ref{eq: final}. They are
#
# \begin{equation}
# \begin{cases}
# a^2 > 3|a|/2 + 1 &\Rightarrow& (-\infty, -2) \cup(2, \infty) \\
# 8 > a > 1 &\Rightarrow& (1, 8)
# \end{cases}
# \label{eq: casi sol}
# \end{equation}
#
# And the solution we are looking for is the intersection of \ref{eq: casi sol}. Therefore, for the matrix $A$ to converge under the Jacobi method we need $a \in (2,8)$.
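# A quick numerical sanity check (my own addition, not part of the original
# write-up): the helper below tests strict diagonal dominance, the sufficient
# condition for Jacobi convergence used in the argument above. The helper name
# and the sample value a = 3.0 (an arbitrary point inside (2, 8)) are
# illustrative choices only.
def _is_strictly_diagonally_dominant(M):
    """Return True if |M[i,i]| > sum of |M[i,j]| over j != i, for every row i."""
    M = np.asarray(M, dtype=float)
    diag = np.abs(np.diag(M))
    off_diag = np.abs(M).sum(axis=1) - diag
    return bool(np.all(diag > off_diag))
if NOTEBOOK:
    a = 3.0
    A_ej1 = np.array([[a**2, a,  a/2, 1],
                      [a,   -9,  1,   0],
                      [a/2,  1,  10,  0],
                      [1,    0,  0,   a]])
    print('a =', a, 'strictly diagonally dominant:',
          _is_strictly_diagonally_dominant(A_ej1))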
# ## Exercise 2
# Let $A \in \mathbb R^{n\times n}$ be a tridiagonal matrix whose three diagonals of interest are stored in an array $B_{n \times 3}$.
#
# Write the expressions to compute the updates of the components of the vector $x^{i+1} = \left( x^{i+1}_0, \dots, x^{i+1}_{n-1} \right)$ according to \textit{Gauss-Seidel}. Specifically, write the expression to update $x^{i+1}_0$ and $x^{i+1}_i$ for $i = 1,2,\dots, n-2$, as well as $x^{i+1}_{n-1}$, using the coefficients $a_{i,j}$ of $A$ and $b_{ij}$ of $B$.
# ### Answer
# \par Let $A$ be a tridiagonal matrix with elements $a_{i,j}$, $B$ the array described above with elements $b'_{i,j}$, $b$ the vector of independent terms with elements $b_i$, and $x$ the solution vector with $x_i^{(t)}$ its element $i$ at iteration $t$.
#
# \par In the original \textit{Gauss-Seidel} method the components $x^{(t+1)}$ are computed following \verb|forwardSubstitution|
#
# \begin{equation}
#     x_{i}^{(t+1)} = \frac{1}{a_{i,i}}\left( b_i - \sum_{j=0}^{i-1} a_{i,j}x_{j}^{(t+1)} - \sum_{j=i+1}^{n-1} a_{i,j}x_j^{(t)}\right)
# \label{eq: GS original}
# \end{equation}
#
# \noindent but since in this exercise we are working with a tridiagonal matrix, which implies that only the three diagonals of interest contain nonzero elements, we can rewrite equation~\ref{eq: GS original} as
#
# \begin{eqnarray*}
#     x_{i}^{(t+1)} &=& \frac{1}{a_{i,i}}\left( b_i - a_{i, i-1}x_{i-1}^{(t+1)} - a_{i, i+1}x_{i+1}^{(t)}\right)
#         \label{eq: GS tridiagonal} \\
#      &=& \frac{1}{b'_{i,1}}\left( b_i - b'_{i,0}x_{i-1}^{(t+1)} - b'_{i,2}x_{i+1}^{(t)}\right)
#          \qquad\text{using the array } B \label{eq: GS tridiagonal con B}
# \end{eqnarray*}
#
# \noindent this can be used for $i = 1,\dots,n-2$ over the array $B$; the first and last components are handled separately below.
#
# \par Specifically, we can define the element $x_0^{(t+1)}$ as
# \begin{eqnarray*}
#     x_0^{(t+1)} &=& \frac{1}{a_{0,0}} \left( b_0 - a_{0, 1}x_1^{(t)} \right) \\
#     &=& \frac{1}{b'_{0,1}} \left( b_0 - b'_{0, 2}x_1^{(t)} \right)
# \end{eqnarray*}
#
# \noindent and for the element $x_{n-1}^{(t+1)}$ we get
# \begin{eqnarray*}
#     x_{n-1}^{(t+1)} &=& \frac{1}{a_{n-1,n-1}} \left( b_{n-1} - a_{n-1, n-2}x_{n-2}^{(t+1)} \right) \\
#     &=& \frac{1}{b'_{n-1,1}} \left( b_{n-1} - b'_{n-1, 0}x_{n-2}^{(t+1)} \right)
# \end{eqnarray*}
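# The formulas above map almost one-to-one onto code. The sketch below (my own
# illustration, not part of the original answer) shows a single Gauss-Seidel
# sweep over the n x 3 array B described above, assuming x and b are plain 1-D
# numpy arrays; the full routine used for the homework (which operates on
# (n,1) matrices) is implemented in Exercise 3 below.
def _gs_sweep_sketch(B, x, b):
    """One in-place Gauss-Seidel sweep for a tridiagonal system stored in B."""
    n = len(b)
    x[0] = (b[0] - B[0, 2]*x[1]) / B[0, 1]                        # x_0^{(t+1)}
    for i in range(1, n - 1):                                     # interior components
        x[i] = (b[i] - B[i, 0]*x[i-1] - B[i, 2]*x[i+1]) / B[i, 1]
    x[n-1] = (b[n-1] - B[n-1, 0]*x[n-2]) / B[n-1, 1]              # x_{n-1}^{(t+1)}
    return x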
# ## Exercise 3
# Implement the \textit{Gauss-Seidel} method to solve tridiagonal systems.
# In[65]:
# Extras
def data4mFile(n_file,/,path='datos/npy/', ext='npy', dtype=np.float64):
""" Cargar matrices y vectores de memoria
Funcion para cargar los archivos en memoria. El nombre del
archivo no espera path, ni la extension, solo el nombre. Por
default trata de leer los archivos .npy, pero numpy soporta
leer de otros formatos.
Input:
n_file := nombre del archivo sin extension
path := directorio para buscar el archivo
ext := extension del archivo a buscar (sin punto)
dtype := tipo de dato para guardar los valores
Output:
Regresa el una instancia np.matrix con los datos
obtenidos del archivo cargado.
"""
try:
return np.asmatrix(np.load(file=str(path+n_file+'.'+ext),
allow_pickle=False),
dtype=dtype)
    except Exception as exc:
        raise Exception("Error loading the data.") from exc
def show1D(vec,/, max_sz=8, show=True):
""" Implementacion para pprint vector 1D.
Funcion para generar string para poder imporimir un
vector de manera reducida, dando un maximo de elementos
a imprimir. Lo puede imprimir directamente si se quiere
Input:
vec := vector de informacion a imprimir.
[opcionales]
max_sz := Maximo de elementos a imprimir.
show := Imprimir vector
_Doctest:
>>> show1D([1,2,3,4], show=False)
'1, 2, 3, 4'
>>> show1D([1,2,3,4,5,6,7,8,9], show=False)
'1, 2, 3, 4, ..., 6, 7, 8, 9'
"""
    n = 0
    # In case vec is a numpy array/matrix instance
    try:
        shape = vec.shape
        if len(shape) < 2:
            # plain 1-D numpy array
            get = lambda i: vec[i]
            n = shape[0]
        elif shape[0] == 1:
            get = lambda i: vec[0, i]
            n = shape[1]
        elif shape[1] == 1:
            get = lambda i: vec[i, 0]
            n = shape[0]
        else:
            raise ValueError('not a 1D array')
    except AttributeError:
        # plain Python sequence (list, tuple, ...)
        get = lambda i: vec[i]
        n = len(vec)
    if n <= max_sz:
        parts = [str(get(i)) for i in range(n)]
    else:
        parts = [str(get(i)) for i in range(4)]
        parts.append('...')
        parts += [str(get(-(4 - i))) for i in range(4)]
    ret = ', '.join(parts)
if show: print(ret)
return ret
# In[143]:
# Part 1
def diagonalesRelevantes(A, dtype=np.float64):
""" Funcion que otiene las diagonales relevantes de A.
Esta funcion, con A una matriz tridiagonal cuadrada(n) extrae_
ra las diagonales relevantes y las pondra en un arreglo B de
3xn, donde la columna 0 correspondera a la diagonal -1, la col_
umna 1 a la diagonal de A y la columna 2 a la diagonal +1 de
la matriz A.
Se espera, y corrobora, que A sea instancia de np.matrix para
usar su metodos
Input:
A := Matriz tridiagonal cuadrada instancia de np.matrix
Output:
B := Arreglo de valores relevantes de A
"""
    if isinstance(A, np.matrix):  # check it is a numpy matrix instance
        n = A.shape[0] if A.shape[0] == A.shape[1] else 0  # check A is square
        B = np.zeros((n, 3), dtype=dtype)  # allocate B
        B[1:, 0] = A.diagonal(-1)  # lower diagonal
        B[ :, 1] = A.diagonal( 0)  # main diagonal
        B[:-1, 2] = A.diagonal( 1)  # upper diagonal
        return B
    else:
        raise Exception("A is not an np.matrix instance")
# In[190]:
# Part 2
def error_GS(B, xt, b,/, dtype=np.float64):
""" Funcion para calular el error || Ax^t - b|| desde B """
n = len(xt) # esperamos que las dimensiones coincidan
vec = np.asmatrix(np.zeros((n,1)),
dtype=dtype)
# En vec generaremos Ax^t
vec[0,0] = B[0,1]*xt[0,0] + B[0,2]*xt[1,0] # Calculamos el primer elemento
# Calculamos hasta el penultimo
for i in range(1, n-1):
vec[i,0] = B[i,0]*xt[i-1,0] + B[i,1]*xt[i,0] + B[i,2]*xt[i+1,0]
n = n-1 # Calculamos el ultimo
vec[n,0] = B[n,0]*xt[n-1,0] + B[n,1]*xt[n,0]
return np.linalg.norm(vec - b)
def GaussSeidel_tridiagonal(B, xt, b, N,/, t=None, dtype=np.float64):
""" Implementacion de GaussSeidel para matrices tridiagonales.
Esta funcion trata de resolver un sistema de ecuaciones Ax = b
con A una matriz (nxn) cuadrada tridiagonal y estas diagonales
almacenadas en el arreglo B (3xn).
Respecto a la tolerancia t del metodo, en caso de ser None, se
tomara el epsilon para el tipo de dato dtype que se le pase a
la funcion (calculado por numpy)
Input:
B := arreglo (3xn) de la diagonales relecantes para el metodo
x0 := vector (nx1) inicial de aproximacion de respuestas
b := vector (nx1) de terminos independientes
N := maximo numero de iteraciones del metodo
t := Tolerancia del metodo (default: None)
dtype := Tipop de dato para trabajar con el metodo
Output:
x, n, e
x := vector respuesta en la iteracion en que se detenga
n := iteracion en la que se detuvo el metodo
e := error al momento de detenerse
"""
    # Initialisation
    if t is None: t = np.finfo(dtype).eps  # default tolerance
    sz = len(b)
    e = float('inf')  # initial error is infinite
    n = 0             # initial iteration
    while n < N:
        # First component of the iteration
        i = 0
        xt[i, 0] = (b[i, 0] - B[i, 2]*xt[i+1, 0])/B[i, 1]
        # Interior components
        for i in range(1, sz-1):
            xt[i, 0] = (b[i, 0] - B[i, 0]*xt[i-1, 0] - B[i, 2]*xt[i+1, 0])/B[i, 1]
        # Last component of the iteration
        i = sz-1
        xt[i, 0] = (b[i, 0] - B[i, 0]*xt[i-1, 0])/B[i, 1]
        # loop bookkeeping and tolerance check
        e = error_GS(B, xt, b, dtype=dtype)
        if e < t:
            break
        n += 1
return xt, n, e
# In[191]:
# Part 3
def Ejercicio3(mat, vecb, N,/, path='datos/npy/', show=True):
""" Funcion para ejecutar la parte 3 de la tarea
Esta funcion usara las funciones diagonalesRelevantes, error_GS,
GaussSeidel_tridiagonal, data4mFile y show1D para tratar de
resolver un sistema Ax = b mediante la variante del metodo de
Gauss-Seidel para matrices tridiagonales cuadradas
Input:
mat := nombre del archivo que contiene una matriz
tridiagonal
vecb := nombre del archivo con el vector de terminos
independientes
N := numero maximo de iteraciones para el metodo
path := directorio para buscar los archivos
show := Indica si se desea imprimir los detalles
"""
dtype = np.float64
t = (np.finfo(dtype).eps)**(1/2)
    A = data4mFile(mat, path=path, dtype=dtype)
    b = data4mFile(vecb, path=path, dtype=dtype).transpose()
x0 = np.zeros(b.shape, dtype=dtype)
    # We assume A is indeed tridiagonal
B = diagonalesRelevantes(A, dtype=dtype)
xt, n, e = GaussSeidel_tridiagonal(B, x0, b, N, t=t, dtype=dtype)
conv = True if e < t else False
if show:
        # Reference solution computed with numpy for comparison
        x = np.linalg.solve(A, b)
        # Print
        __ = f'Matrix from "{mat}" with the vector from "{vecb}"'
        __ += f'\n\tIterations: {n}'
        __ += f'\n\tError: {e}'
        __ += f'\n\tSol: {show1D(xt,show=False)}\n'
        __ += ('The method converges' if e < t else 'The method does not converge')
        __ += f'\nThe difference between the two solutions is {np.linalg.norm(x - xt)}'
print(__)
return e, n, conv
# In[240]:
# Part 4
if NOTEBOOK:
sizes = ['6', '20', '500']
data = {}
for sz in sizes:
data[sz] = [[],[],[]]
itr = [0, 5, 10, 15, 20, 25, 35, 50]
for sz in sizes:
for N in itr:
e, n, conv = Ejercicio3('matrizA'+sz, 'vecb'+sz, N, show=True)
data[sz][0].append(e)
data[sz][1].append(n)
data[sz][2].append(conv)
# In[249]:
PLOT = True
if PLOT and NOTEBOOK:
rng = itr
fig, ax = plt.subplots(2, 2, figsize=(10,10))
ax[0,1].axis('off')
for sz in sizes:
if sz == '6': i,j = 0,0
elif sz == '20': i,j = 1,0
elif sz == '500': i,j = 1,1
ax[i,j].set_title(sz)
        a, = ax[i,j].plot(rng, data[sz][0], '-x')
        b, = ax[i,j].plot(rng, data[sz][1], '-o')
        c, = ax[i,j].plot(rng, data[sz][2], '*')
    labels = ['error', 'iterations', 'conv']
    fig.legend([a, b, c], # The line objects
labels=labels, # The labels for each line
loc="upper right", # Position of legend
borderaxespad=0.1, # Small spacing around legend box
fontsize='xx-large'
)
plt.show()
# In the figure above we can see whether or not the method converges for the different data sets provided. We plot the error directly; the number of iterations at which the method stopped; and whether it converged or not, where 0 means no and 1 means yes. All of these are plotted against the upper bound on iterations passed to the method.
# In[ ]:
if __name__ == "__main__" and not NOTEBOOK:
import doctest
from docopt import docopt
doctest.testmod()
    args = docopt(usage, version='Tarea5')
    if args['ejercicio3']:
        Ejercicio3(args['<matA>'], args['<vecB>'], int(args['<N>']),
                   args['--path'])
|
from django.core.management.base import BaseCommand, CommandError
from shortener.models import ShortURL
class Command(BaseCommand):
    help = 'Refreshes all ShortURL shortcodes'
def add_arguments(self, parser):
parser.add_argument('--items', type=int) # python manage.py refreshcodes --items n
def handle(self, *args, **options):
return ShortURL.objects.refresh_shortcodes(items=options['items'])
|
from resource import ResourceManager
from tuple_calculation import plus_i, mult_i
from pbrtwriter import PbrtWriter
class BlockSolver:
"""Write all solid block in the scene"""
def __init__(self, block):
self.block = block
self.Y = len(self.block)
self.Z = len(self.block[0])
self.X = len(self.block[0][0])
self.used_texture = set()
self._preloadUsedTexture()
def _inBlock(self, pt):
x, y, z = pt
return x >= 0 and x < self.X and y >= 0 and y < self.Y and z >= 0 and z < self.Z
def _preloadUsedTexture(self):
print("Preloading used texture...")
self.used_texture = set()
for x in range(self.X):
for y in range(self.Y):
for z in range(self.Z):
self.used_texture = self.used_texture | self.block[y][z][x].getUsedTexture(
)
def write(self, pbrtwriter: PbrtWriter, start_pt):
print("Writing solid blocks...")
for fn in self.used_texture:
pbrtwriter.texture("%s-color" % fn, "spectrum",
"imagemap", "string filename", "%s.png" % fn)
if ResourceManager().hasAlpha(fn + ".png"):
pbrtwriter.texture("%s-alpha" % fn, "float", "imagemap",
"bool alpha", "true", "string filename", "%s.png" % fn)
import queue
que = queue.Queue()
rendered = set()
deltas = [(1, 0, 0), (-1, 0, 0), (0, 1, 0),
(0, -1, 0), (0, 0, 1), (0, 0, -1)]
que.put(start_pt)
for delta in deltas:
next_pt = plus_i(delta, start_pt)
if not self._inBlock(next_pt):
continue
que.put(next_pt)
cnt = 0
while not que.empty():
pt = que.get()
if not self._inBlock(pt):
continue
if pt in rendered:
continue
rendered.add(pt)
x, y, z = pt
b = self.block[y][z][x]
if not b.empty():
pbrtwriter.translate(pt)
cnt += b.write(pbrtwriter)
pbrtwriter.translate(mult_i(pt, -1))
if b.canPass():
for delta in deltas:
next_pt = plus_i(delta, pt)
if not self._inBlock(next_pt):
continue
if next_pt in rendered:
continue
que.put(next_pt)
print("Render", cnt, "blocks")
|
# This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Unit tests for the Json file loader."""
import pytest
from refdata.dataset.base import DatasetHandle
from refdata.dataset.consumer import DataCollector
from refdata.dataset.json_loader import JQuery
from refdata.db import local_time
# -- Loader -------------------------------------------------------------------
DESCRIPTOR = {
"id": "countries",
"name": "REST Countries",
"description": "Information about countries in the world available from the restcountries.eu project.",
"url": "countries.json",
"checksum": "889c264f2ac4629b4998aa8b8b1d4de45890c39c10e24cfd8a017e9924e805c7",
"schema": [
{"id": "name"},
{"id": "alpha2Code"},
{"id": "alpha3Code"},
]
}
@pytest.mark.parametrize(
'parameters,columns,first_row',
[
(
{"type": "json", "parameters": {}},
['name', 'alpha2Code', 'alpha3Code'],
['Afghanistan', 'AF', 'AFG']
),
(
{"type": "json", "parameters": {}},
['alpha2Code', 'name', 'alpha3Code'],
['AF', 'Afghanistan', 'AFG']
),
(
{
"type": "json",
"parameters": {'sources': [
{'id': 'name', 'path': 'alpha2Code'},
{'id': 'alpha2Code', 'path': 'alpha3Code'},
{'id': 'alpha3Code', 'path': 'capital'}
]}
},
['name', 'alpha2Code', 'alpha3Code'],
['AF', 'AFG', 'Kabul']
)
]
)
def test_json_loader(parameters, columns, first_row, countries_file, mock_response):
descriptor = dict(DESCRIPTOR)
descriptor['format'] = parameters
dataset = DatasetHandle(
descriptor=descriptor,
package_name='test',
package_version='0',
created_at=local_time(),
datafile=countries_file
)
data = dataset.load(columns=columns, consumer=DataCollector()).data
assert len(data) == 2
assert data[0] == first_row
# -- JQuery -------------------------------------------------------------------
# Input document for JQuery tests.
DOC = {
'a': {
'b': {
'c': 1
},
'c': 2,
'd': [
{'c': 3}
]
}
}
@pytest.mark.parametrize(
'path,result',
[
('a/b', {'c': 1}),
('a/b/c', 1),
('a/b/c/', 1),
('a/c', 2),
('a/d/c', None),
('a/e', None),
('e', None),
('e/', None),
('', DOC),
('///', DOC)
]
)
def test_json_query(path, result):
"""Test evaluating different path expressions on a nested dictionary."""
assert JQuery(path=path).find(DOC) == result
|
""""
This is a saved model run from natcap.invest.habitat_quality.
Generated: 11/06/17 11:07:35
InVEST version: 3.3.3
"""
import natcap.invest.habitat_quality
import os
args = {
u'access_uri': u'~/workspace/data/HabitatQuality/access_samp.shp',
u'half_saturation_constant': u'0.5',
u'landuse_cur_uri': u'~/workspace/data/HabitatQuality/lc_samp_cur_b.tif',
u'sensitivity_uri': u'~/workspace/data/HabitatQuality/sensitivity_samp.csv',
u'threat_raster_folder': u'~/workspace/data/HabitatQuality',
u'threats_uri': u'~/workspace/data/HabitatQuality/threats_samp.csv',
u'workspace_dir': u'/tmp/habitat_quality_workspace',
}
if __name__ == '__main__':
for k in args.keys():
try:
args[k] = os.path.expanduser(args[k])
except AttributeError:
continue
natcap.invest.habitat_quality.execute(args)
|
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
params = {
'hostname': 'localhost',
'port': 5672,
'virtual_host': '/',
}
connection = kombu.connection.BrokerConnection(**params)
connection.connect()
exchange = kombu.entity.Exchange(name='direct-test',
type='direct',
durable=False,
auto_delete=False)
queue1 = kombu.Queue(name='queue1', exchange=exchange, routing_key='black')
queue1.maybe_bind(connection)
queue1.declare()
def process_message(body, message):
print("The body is {}".format(body))
print("The message is {}".format(message))
message.ack()
consumer = kombu.Consumer(connection, queues=queue1, callbacks=[process_message], accept=[])
consumer.consume()
connection.drain_events(timeout=1)
|
# Generated by Django 3.2.9 on 2021-11-15 19:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wiki', '0052_alter_recommendbeatmap_comment'),
]
operations = [
migrations.AddField(
model_name='rulesetstatus',
name='pre_release',
field=models.BooleanField(default=False),
),
]
|
from django.contrib import admin
from django.urls import include, path
from django.contrib.auth import views as auth_views
urlpatterns = [
path('admin/', admin.site.urls,),
path('api/', include(('api.urls', 'api'), namespace="api")),
path('password-reset/<uidb64>/<token>',
auth_views.PasswordResetConfirmView.as_view(),
name='password_reset_confirm'),
path('password-reset-complete/',
auth_views.PasswordResetCompleteView.as_view(
template_name='password_reset_complete.html'
),
name="password_reset_complete"),
]
|
# Support for isometric zones and characters
#
import copy
import pyglet
import xml.etree.ElementTree as ET
class AnimationSet:
def __init__(self, source):
tree = ET.parse(source)
root = tree.getroot()
self.data = {}
self.img_path = root.attrib['path']
raw = pyglet.image.load(root.attrib['path'])
        grid_width = int(root.attrib['img_width']) // int(root.attrib['frame_width'])
        grid_height = int(root.attrib['img_height']) // int(root.attrib['frame_height'])
grid = pyglet.image.ImageGrid(raw, grid_width, grid_height)
for direction in root:
self.data[direction.attrib['name']] = {}
for action in direction:
first = int(action.attrib['first'])
last = int(action.attrib['last'])
speed = 0.15
loop = True
animation = pyglet.image.Animation.from_image_sequence(grid[first:last], speed, loop)
self.data[direction.attrib['name']][action.attrib['name']] = animation
def __getitem__(self, direction):
return self.data[direction]
class Character:
def __init__(self, x, y, body_animation):
self.x = x
self.y = y
        # the on-screen draw position starts at the logical position
        self.draw_x = x
        self.draw_y = y
        self._body = copy.copy(body_animation)
        self._weapon = None
        self._shield = None
        self._action = 'action'
        self._direction = 'south'
    def set_body(self, body):
        self._body = copy.copy(body)
    def set_weapon(self, weapon):
        self._weapon = copy.copy(weapon)
    def set_shield(self, shield):
        self._shield = copy.copy(shield)
    def draw(self, offset):
        # AnimationSet is indexed by direction first, then action
        if self._body:
            self._body[self._direction][self._action].draw(offset)
        if self._weapon:
            self._weapon[self._direction][self._action].draw(offset)
        if self._shield:
            self._shield[self._direction][self._action].draw(offset)
if __name__ == '__main__':
anim = AnimationSet('data/flare_resources/animationsets/male_clothes.xml')
|
# _*_ encoding:utf-8 _*_
__author__ = 'sunzhaohui'
__date__ = '2019-08-05 17:21'
from django.shortcuts import render
from django.http import HttpResponse,QueryDict,HttpResponseRedirect,JsonResponse
from django.urls import reverse
from django.conf import settings
from users.models import UserProfile
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.hashers import make_password
from django.views.generic import View,ListView,DetailView
from django.contrib.auth import authenticate, login, logout
# Create your views here.
# Imports for user authentication and permission management
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage, InvalidPage
from pure_pagination.mixins import PaginationMixin
from django.db.models import Q
from users.forms import PowerForm
class PowerListView(LoginRequiredMixin,PermissionRequiredMixin,PaginationMixin,ListView):
    login_url = '/login/'  # URL to redirect to when the user is not logged in or lacks permission; defaults to settings.LOGIN_URL.
    # When redirecting users who fail the check to a non-login page without a "next page", set this to None so it is removed from the URL.
    redirect_field_name = 'redirect_to'
permission_required = ('users.add_permission','users.change_permission','users.delete_permission','users.view_permission')
model = Permission
template_name = "users/power_list.html"
context_object_name = "powerlist"
paginate_by = 10
keyword = ''
    # search
def get_queryset(self):
queryset = super(PowerListView, self).get_queryset()
self.keyword = self.request.GET.get('keyword','').strip()
print(self.keyword)
if self.keyword:
queryset = queryset.filter(Q(codename__icontains=self.keyword)| Q(name__icontains=self.keyword))
return queryset
    # expose the search keyword in the template context
def get_context_data(self, **kwargs):
context = super(PowerListView,self).get_context_data(**kwargs)
context['keyword'] = self.keyword
context['user'] = self.request.user.username
ContentType_object_list = ContentType.objects.all()
context['ContentType_object_list'] = ContentType_object_list
#print(context)
return context
"""
    Create a permission
"""
def post(self, request):
data = QueryDict(self.request.body).dict()
print(data)
try:
# _userForm.cleaned_data['password'] = make_password("12345678")
# _userForm.cleaned_data['is_active'] = True
#data = _userForm.cleaned_data
self.model.objects.create(**data)
            res = {'code': 0, 'result': 'Permission added successfully'}
except Exception as e:
#logger.error("create user error: %s" % traceback.format_exc())
print(e)
            res = {'code': 1, 'errmsg': 'Failed to add permission'}
print(JsonResponse(res))
return JsonResponse(res, safe=True)
def delete(self,request,**kwargs):
print(kwargs)
data = QueryDict(request.body).dict()
id = data['id']
try:
self.model.objects.get(id=id).delete()
            res = {'code': 0, 'result': 'Deleted successfully'}
except:
# print(id)
            res = {'code': 1, 'errmsg': 'Delete failed'}
return JsonResponse(res, safe=True)
class PowerView(LoginRequiredMixin,PermissionRequiredMixin,DetailView):
    login_url = '/login/'  # URL to redirect to when the user is not logged in or lacks permission; defaults to settings.LOGIN_URL.
    # When redirecting users who fail the check to a non-login page without a "next page", set this to None so it is removed from the URL.
    redirect_field_name = 'redirect_to'
permission_required = (
'users.add_permission', 'users.change_permission', 'users.delete_permission', 'users.view_permission')
"""
    Update a permission
"""
template_name = 'users/modify_power.html'
model = Permission
context_object_name = 'power'
def get_context_data(self, **kwargs):
context = super(PowerView,self).get_context_data(**kwargs)
# context['keyword'] = self.keyword
# context['user'] = self.request.user.username
ContentType_object_list = ContentType.objects.all()
context['ContentType_object_list'] = ContentType_object_list
#print(context)
return context
def post(self, request, **kwargs):
print(request.POST) # <QueryDict: {'id': ['7'], 'username': ['aa'], 'name_cn': ['bb'], 'phone': ['13305779168']}>
print(kwargs) # {'pk': '7'}
print(request.body) # b'id=7&username=aa&name_cn=bb&phone=13305779168'
pk = kwargs.get("pk")
data = QueryDict(request.body).dict()
print(data) # {'id': '7', 'username': 'aa', 'name_cn': 'bb', 'phone': '13305779168'}
#_userForm = UserUpdateForm(request.POST)
#if _userForm.is_valid():
try:
self.model.objects.filter(pk=pk).update(**data)
            res = {'code': 0, "next_url": reverse("users:power_list"), 'result': 'Permission updated successfully'}
except Exception as e:
print(e)
            res = {'code': 1, "next_url": reverse("users:power_list"), 'errmsg': 'Failed to update permission'}
# else:
        # #     # collect all the form errors
# print(_userForm.errors)
# res = {'code': 1, "next_url": reverse("users:user_list"), 'errmsg': _userForm.errors}
return render(request, settings.JUMP_PAGE, res)
|
from django.conf.urls import patterns, include, url
from django.conf import settings
import session_csrf
session_csrf.monkeypatch()
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^blog/', include('blog.urls')),
url(r'^_ah/', include('djangae.urls')),
# Note that by default this is also locked down with login:admin in app.yaml
url(r'^admin/', include(admin.site.urls)),
url(r'^csp/', include('cspreports.urls')),
)
if settings.DEBUG:
# static files (images, css, javascript, etc.)
urlpatterns += patterns('',
(
r'^static/(?P<path>.*)$',
'django.views.static.serve',
{
'document_root': settings.STATIC_ROOT
}
),
(
r'^media/(?P<path>.*)$',
'django.views.static.serve',
{
'document_root': settings.MEDIA_ROOT
}
),
)
|
# paiza POH! vol.2
# result:
# http://paiza.jp/poh/paizen/result/5921f33c8a94fee3caa44fe179df053f
# author: Leonardone @ NEETSDKASU
h, w = map(int, raw_input().split())
sp = [0 for j in xrange(w)]
tb = [[0 for j in xrange(w + 1)] for i in xrange(h + 1)]
for y in xrange(h) :
str = raw_input()
for x in xrange(w) :
if str[x] == '0' :
sp[x] = sp[x] + 1
s = sp[x]
t = 1
i = x
while i >= 0 and sp[i] > 0 :
if sp[i] < s :
s = sp[i]
tb[s][t] = tb[s][t] + 1
t = t + 1
i = i - 1
else :
sp[x] = 0
for x in xrange(1, w + 1) :
for y in xrange(h - 1, 0, -1) :
tb[y][x] = tb[y][x] + tb[y + 1][x]
n = int(raw_input())
for i in xrange(n) :
s, t = map(int, raw_input().split())
if s <= h and t <= w :
print tb[s][t]
else :
print "0"
|
from __future__ import absolute_import
from __future__ import print_function
import ujson
from django.http import HttpResponse
from mock import patch
from typing import Any, Dict, List, Text, Union
from zerver.lib.actions import (
do_change_is_admin,
do_set_realm_property,
do_deactivate_realm,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.models import get_realm, get_user_profile_by_email, Realm
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, email, new_realm_name):
# type: (Text, Text) -> None
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_do_set_realm_name_caching(self):
# type: () -> None
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip')
new_name = u'Zed You Elle Eye Pea'
do_set_realm_property(realm, 'name', new_name)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name('hamlet@zulip.com', new_name)
def test_update_realm_name_events(self):
# type: () -> None
realm = get_realm('zulip')
new_name = u'Puliz'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'name', new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='name',
value=new_name,
))
def test_update_realm_description_events(self):
# type: () -> None
realm = get_realm('zulip')
new_description = u'zulip dev group'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'description', new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_update_realm_description(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
realm = get_realm('zulip')
new_description = u'zulip dev group'
data = dict(description=ujson.dumps(new_description))
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_patch('/json/realm', data)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.description, new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_realm_description_length(self):
# type: () -> None
new_description = u'A' * 101
data = dict(description=ujson.dumps(new_description))
# create an admin user
email = 'iago@zulip.com'
self.login(email)
result = self.client_patch('/json/realm', data)
self.assert_json_error(result, 'Realm description cannot exceed 100 characters.')
realm = get_realm('zulip')
self.assertNotEqual(realm.description, new_description)
def test_admin_restrictions_for_changing_realm_name(self):
# type: () -> None
new_name = 'Mice will play while the cat is away'
email = 'othello@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, False)
req = dict(name=ujson.dumps(new_name))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be a realm administrator')
def test_unauthorized_name_change(self):
# type: () -> None
data = {'full_name': 'Sir Hamlet'}
email = 'hamlet@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
url = '/json/settings/change'
result = self.client_post(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
def test_do_deactivate_realm(self):
# type: () -> None
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip')
do_deactivate_realm(realm)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.realm.deactivated)
def test_change_realm_default_language(self):
# type: () -> None
new_lang = "de"
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
email = 'iago@zulip.com'
self.login(email)
req = dict(default_language=ujson.dumps(new_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.default_language, new_lang)
# Test to make sure that when invalid languages are passed
# as the default realm language, correct validation error is
# raised and the invalid language is not saved in db
invalid_lang = "invalid_lang"
req = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, invalid_lang)
class RealmAPITest(ZulipTestCase):
def setUp(self):
# type: () -> None
email = 'cordelia@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, True)
def set_up_db(self, attr, value):
# type: (str, Any) -> None
realm = get_realm('zulip')
setattr(realm, attr, value)
realm.save()
def update_with_api(self, name, value):
# type: (str, Union[Text, int, bool]) -> Realm
result = self.client_patch('/json/realm', {name: ujson.dumps(value)})
self.assert_json_success(result)
return get_realm('zulip') # refresh data
def do_test_realm_update_api(self, name):
# type: (str) -> None
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests = [False, True] # type: List[bool]
test_values = dict(
add_emoji_by_admins_only=bool_tests,
create_stream_by_admins_only=bool_tests,
default_language=[u'de', u'en'],
description=[u'Realm description', u'New description'],
email_changes_disabled=bool_tests,
invite_required=bool_tests,
invite_by_admins_only=bool_tests,
inline_image_preview=bool_tests,
inline_url_embed_preview=bool_tests,
message_retention_days=[10, 20],
name=[u'Zulip', u'New Name'],
name_changes_disabled=bool_tests,
restricted_to_domain=bool_tests,
waiting_period_threshold=[10, 20],
) # type: Dict[str, Any]
vals = test_values.get(name)
if vals is None:
raise AssertionError('No test created for %s' % (name))
self.set_up_db(name, vals[0])
realm = self.update_with_api(name, vals[1])
self.assertEqual(getattr(realm, name), vals[1])
realm = self.update_with_api(name, vals[0])
self.assertEqual(getattr(realm, name), vals[0])
def test_update_realm_properties(self):
# type: () -> None
for prop in Realm.property_types:
self.do_test_realm_update_api(prop)
def test_update_realm_allow_message_editing(self):
# type: () -> None
"""Tests updating the realm property 'allow_message_editing'."""
self.set_up_db('allow_message_editing', False)
self.set_up_db('message_content_edit_limit_seconds', 0)
realm = self.update_with_api('allow_message_editing', True)
realm = self.update_with_api('message_content_edit_limit_seconds', 100)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = self.update_with_api('allow_message_editing', False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = self.update_with_api('message_content_edit_limit_seconds', 200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
|
from sympy import symbols, sin, exp, cos
x, y, z = symbols('xyz')
def test_count_ops_non_symbolic():
assert x.count_ops(symbolic=True) == 0
assert y.count_ops(symbolic=True) == 0
assert (x+1).count_ops(symbolic=False) == 1
assert (y+x+1).count_ops(symbolic=False) == 2
assert (z+y+x+1).count_ops(symbolic=False) == 3
assert (2*z+y+x+1).count_ops(symbolic=False) == 4
assert (2*z+y**17+x+1).count_ops(symbolic=False) == 5
assert (2*z+y**17+x+sin(x)).count_ops(symbolic=False) == 6
assert (2*z+y**17+x+sin(x**2)).count_ops(symbolic=False) == 7
assert (2*z+y**17+x+sin(x**2)+exp(cos(x))).count_ops(symbolic=False) == 10
|
"""The tests for mqtt camera component."""
from http import HTTPStatus
import json
from unittest.mock import patch
import pytest
from homeassistant.components import camera
from homeassistant.components.mqtt.camera import MQTT_CAMERA_ATTRIBUTES_BLOCKED
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_reloadable,
help_test_reloadable_late,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
camera.DOMAIN: {"platform": "mqtt", "name": "test", "topic": "test_topic"}
}
async def test_run_camera_setup(hass, hass_client_no_auth, mqtt_mock):
"""Test that it fetches the given payload."""
topic = "test/camera"
await async_setup_component(
hass,
"camera",
{"camera": {"platform": "mqtt", "topic": topic, "name": "Test Camera"}},
)
await hass.async_block_till_done()
url = hass.states.get("camera.test_camera").attributes["entity_picture"]
async_fire_mqtt_message(hass, topic, "beer")
client = await hass_client_no_auth()
resp = await client.get(url)
assert resp.status == HTTPStatus.OK
body = await resp.text()
assert body == "beer"
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG, MQTT_CAMERA_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one camera per unique_id."""
config = {
camera.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, camera.DOMAIN, config)
async def test_discovery_removal_camera(hass, mqtt_mock, caplog):
"""Test removal of discovered camera."""
data = json.dumps(DEFAULT_CONFIG[camera.DOMAIN])
await help_test_discovery_removal(hass, mqtt_mock, caplog, camera.DOMAIN, data)
async def test_discovery_update_camera(hass, mqtt_mock, caplog):
"""Test update of discovered camera."""
config1 = {"name": "Beer", "topic": "test_topic"}
config2 = {"name": "Milk", "topic": "test_topic"}
await help_test_discovery_update(
hass, mqtt_mock, caplog, camera.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_camera(hass, mqtt_mock, caplog):
"""Test update of discovered camera."""
data1 = '{ "name": "Beer", "topic": "test_topic"}'
with patch(
"homeassistant.components.mqtt.camera.MqttCamera.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk", "topic": "test_topic"}'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT camera device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT camera device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG, ["test_topic"]
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass,
mqtt_mock,
camera.DOMAIN,
DEFAULT_CONFIG,
None,
state_topic="test_topic",
state_payload=b"ON",
)
async def test_reloadable(hass, mqtt_mock, caplog, tmp_path):
"""Test reloading the MQTT platform."""
domain = camera.DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable(hass, mqtt_mock, caplog, tmp_path, domain, config)
async def test_reloadable_late(hass, mqtt_client_mock, caplog, tmp_path):
"""Test reloading the MQTT platform with late entry setup."""
domain = camera.DOMAIN
config = DEFAULT_CONFIG[domain]
await help_test_reloadable_late(hass, caplog, tmp_path, domain, config)
|
from student import Student
class Course:
    # Class-level registry of every course that has been created
    numReg = []
    # Initialise a course with its number, name, professor and enrolled students
def __init__(self, number, name, profOfCourse=None, studentInCourse=None):
if studentInCourse is None:
studentInCourse = []
self.number = number
self.name = name
self.profOfCourse = profOfCourse
self.studentInCourse = studentInCourse
self.numReg.append(self)
# To display name and the number of the course
def displayCourses(self):
print('%d) %s' % (self.number, self.name))
    # Show the course name, the professor teaching it and the students enrolled in it
def displayCourseDetails(self, courseNumber):
if self.number == courseNumber:
print('Course name : ' + self.name)
if self.profOfCourse:
print('Professor name : ' + self.profOfCourse)
else:
print('No professor has chosen this particular course to teach!')
if len(self.studentInCourse) > 0:
                print('The attending student/s are: ' + str(self.studentInCourse))
else:
print('No students have taken up the course.')
    # Add a newly registered student (looked up by student number) to this course's student list
def newStudentGotACourse(self, numStudent, wantedCourse):
if wantedCourse == self.name:
for i in Student.numReg:
if numStudent == i.number:
nameAndFamily = i.name + ' ' + i.family
self.studentInCourse.append(nameAndFamily)
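
# Illustrative usage sketch (not part of the original module). It relies on the
# Student class registering instances in Student.numReg with `number`, `name`
# and `family` attributes, exactly as newStudentGotACourse above already assumes;
# the Student(number, name, family) constructor signature is itself an assumption.
if __name__ == '__main__':
    alice = Student(1, 'Alice', 'Smith')  # hypothetical constructor signature
    maths = Course(101, 'Mathematics', profOfCourse='Dr. Jones')
    maths.newStudentGotACourse(alice.number, 'Mathematics')
    maths.displayCourses()
    maths.displayCourseDetails(101)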
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# import Kivy modules first, otherwise Kivy may cause problems
import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.screenmanager import ScreenManager, Screen
kivy.require('1.10.0')
# common modules
import sys
import signal
from multiprocessing import Process
# Flask & similar modules
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import eventlet
from eventlet import wsgi
# async server setup
app = Flask(__name__)
api = Api(app)
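
# Illustrative example (not in the original snippet): flask_restful serves
# endpoints through Resource subclasses registered on the Api object. The
# resource name and route below are hypothetical placeholders.
class Ping(Resource):
    # respond to GET /ping with a small JSON payload
    def get(self):
        return {'status': 'ok'}

api.add_resource(Ping, '/ping')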
def start_Flask():
print("Starting server...")
# start an eventlet WSGI server on port 5000
wsgi.server(eventlet.listen(('', 5000)), app)
def signal_handler(sig, frame):
    # catch CTRL+C and related termination signals, then exit
    print(" CTRL + C detected, exiting ... ")
exit(1)
# Kivy screen class
class MainScreen(Screen):
    def __init__(self, **kwargs):
        super(MainScreen, self).__init__(**kwargs)
        self.name = "MAIN SCREEN"
# Kivy app class
class Kivy(App):
w_MessageBox10_1 = "MAIN SCREEN"
w_MessageBox10_2 = "One golden glance of what should be"
w_MessageBox30_2 = "CHORUS"
w_MessageBox30_3 = "EXIT"
# exit button action
def exit(self):
print ("exiting... one shaft of light will show the way...")
        p1.terminate()  # terminate the Flask server process when EXIT is pressed
exit(1)
# do magic button action
def do_magic(self):
# your code goes here or maybe not
print ("***** it's a kind of magic *************************")
# Kivy UI builder file
def build(self):
sm = Builder.load_string("""
ScreenManager:
MainScreen:
size_hint: 1, .7
auto_dismiss: False
title: app.w_MessageBox10_1
title_align: "center"
BoxLayout:
orientation: "vertical"
Label:
text: app.w_MessageBox10_2
BoxLayout:
orientation: "horizontal"
spacing: 10
size_hint: 1, .5
Button:
text: app.w_MessageBox30_2 # DO MAGIC
on_press:
app.do_magic()
Button:
text: app.w_MessageBox30_3 # EXIT
on_press:
app.exit()
""")
return sm
if __name__ == '__main__':
# #CTRL+C signal handler
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
global p1
p1 = Process(target=start_Flask) # assign Flask to a process
p1.start() # run Flask as process
Kivy().run() # run Kivy UI
|
from pylab import *
import numpy as np
import scipy.signal as sps
import os
import pandas as pd
def vtoint (vec): return [int(x) for x in vec]
def index2ms (idx, sampr): return 1e3*idx/sampr
def ms2index (ms, sampr): return int(sampr*ms/1e3)
def calPosThresh(dat, sigmathresh):
  # sigma-based rule (dat.mean() + sigmathresh * dat.std()) is disabled; a fixed +100 threshold is used
  return 100
def calNegThresh(dat, sigmathresh):
  # sigma-based rule (dat.mean() - sigmathresh * dat.std()) is disabled; a fixed -100 threshold is used
  return -100
# an epoch counts as noise if any sample falls at/below the negative threshold or at/above the positive threshold
def badEpoch (dat, sigmathresh):
  badValues = len(np.where(dat <= calNegThresh(dat, sigmathresh))[0]) + \
              len(np.where(dat >= calPosThresh(dat, sigmathresh))[0])
  return badValues > 0
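
# Quick illustration of the rejection rule above (synthetic samples; the
# thresholds are the hardcoded +/-100 returned by calPosThresh/calNegThresh):
#   badEpoch(np.array([0.0, 5.0, -3.0]), 3)    -> False (all samples within (-100, 100))
#   badEpoch(np.array([0.0, 250.0, -3.0]), 3)  -> True  (250 exceeds the positive threshold)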
def removeBadEpochs (dat, sampr, trigtimes, swindowms, ewindowms, sigmathresh):
nrow = dat.shape[0]
swindowidx = ms2index(swindowms,sampr) # could be negative
ewindowidx = ms2index(ewindowms,sampr)
# trigByChannel could be returned for removing different epochs on each channel
trigByChannel = [x for x in range(nrow)]
badEpochs = []
for chan in range(nrow): # go through channels
trigByChannel[chan] = []
for trigidx in trigtimes: # go through stimuli
sidx = max(0,trigidx+swindowidx)
eidx = min(dat.shape[1],trigidx+ewindowidx)
if not badEpoch(dat[chan, sidx:eidx], sigmathresh):
trigByChannel[chan].append(trigidx)
else:
badEpochs.append(trigidx)
print('Found %d bad epochs in channel %d. Range: [%.2f, %.2f]'%
(len(trigtimes) - len(trigByChannel[chan]), chan,
calNegThresh(dat[chan, sidx:eidx], sigmathresh),
calPosThresh(dat[chan, sidx:eidx], sigmathresh)))
# combine bad epochs into a single sorted list (without duplicates)
badEpochs = sort(list(set(badEpochs)))
print('%d bad epochs:'%len(badEpochs),[x for x in badEpochs])
# remove the associated trigger times before returning
trigtimes = np.delete(trigtimes,[trigtimes.index(x) for x in badEpochs])
return trigtimes
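
# Sketch of the intended call (illustrative values): trigtimes holds stimulus-onset
# sample indices; any onset whose [swindowms, ewindowms] epoch contains out-of-range
# samples on some channel is dropped, and the cleaned list of onsets is returned, e.g.
#   clean_trigs = removeBadEpochs(dat, sampr, trigtimes, -50, 150, 3)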
# get the average ERP (dat should be either LFP or CSD)
def getERPOnChan (dat, sampr, chan, trigtimes, swindowms, ewindowms):
nrow = dat.shape[0]
tt = np.linspace(swindowms, ewindowms,ms2index(ewindowms - swindowms,sampr))
swindowidx = ms2index(swindowms,sampr) # could be negative
ewindowidx = ms2index(ewindowms,sampr)
lERP = np.zeros((len(trigtimes),len(tt)))
for i,trigidx in enumerate(trigtimes): # go through stimuli
sidx = max(0,trigidx+swindowidx)
eidx = min(dat.shape[1],trigidx+ewindowidx)
lERP[i,:] = dat[chan, sidx:eidx]
return tt,lERP
# get the average ERP (dat should be either LFP or CSD)
def getAvgERP (dat, sampr, trigtimes, swindowms, ewindowms):
nrow = dat.shape[0]
tt = np.linspace(swindowms, ewindowms,ms2index(ewindowms - swindowms,sampr))
swindowidx = ms2index(swindowms,sampr) # could be negative
ewindowidx = ms2index(ewindowms,sampr)
avgERP = np.zeros((nrow,len(tt)))
for chan in range(nrow): # go through channels
for trigidx in trigtimes: # go through stimuli
sidx = max(0,trigidx+swindowidx)
eidx = min(dat.shape[1],trigidx+ewindowidx)
avgERP[chan,:] += dat[chan, sidx:eidx]
avgERP[chan,:] /= float(len(trigtimes))
return tt,avgERP
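
# Illustrative call (synthetic numbers, for orientation only): with data sampled at
# 1000 Hz and stimulus onsets at samples 1000 and 3000,
#   tt, avgERP = getAvgERP(dat, 1000.0, [1000, 3000], 0, 150)
# returns tt in ms relative to stimulus onset and avgERP of shape
# (n_channels, len(tt)), i.e. the per-channel average over the two epochs.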
# draw the average ERP (dat should be either LFP or CSD)
def drawAvgERP (dat, sampr, trigtimes, swindowms, ewindowms, whichchan=None, yl=None, clr=None, lw=1):
  ttavg,avgERP = getAvgERP(dat,sampr,trigtimes,swindowms,ewindowms)
  nrow = avgERP.shape[0]
  for chan in range(nrow): # go through channels
    if whichchan is None:
      subplot(nrow,1,chan+1)
      plot(ttavg,avgERP[chan,:],color=clr,linewidth=lw)
    elif chan==whichchan:
      plot(ttavg,avgERP[chan,:],color=clr,linewidth=lw)
    xlim((swindowms,ewindowms)) # tt from getAvgERP spans [swindowms, ewindowms]
    if yl is not None: ylim(yl)
# draw the event related potential (or associated CSD signal), centered around stimulus start (aligned to t=0)
def drawERP (dat, sampr, trigtimes, windowms, whichchan=None, yl=None,clr=None,lw=1):
if clr is None: clr = 'gray'
nrow = dat.shape[0]
tt = np.linspace(-windowms,windowms,ms2index(windowms*2,sampr))
windowidx = ms2index(windowms,sampr)
for trigidx in trigtimes: # go through stimuli
for chan in range(nrow): # go through channels
sidx = max(0,trigidx-windowidx)
eidx = min(dat.shape[1],trigidx+windowidx)
if whichchan is None:
subplot(nrow,1,chan+1)
plot(tt,dat[chan, sidx:eidx],color=clr,linewidth=lw)
elif chan==whichchan:
plot(tt,dat[chan, sidx:eidx],color=clr,linewidth=lw)
xlim((-windowms,windowms))
if yl is not None: ylim(yl)
#xlabel('Time (ms)')
# normalized cross-correlation between x and y
def normcorr (x, y):
# Pad shorter array if signals are different lengths
if x.size > y.size:
pad_amount = x.size - y.size
y = np.append(y, np.repeat(0, pad_amount))
elif y.size > x.size:
pad_amount = y.size - x.size
x = np.append(x, np.repeat(0, pad_amount))
corr = np.correlate(x, y, mode='full') # scale = 'none'
lags = np.arange(-(x.size - 1), x.size)
corr /= np.sqrt(np.dot(x, x) * np.dot(y, y))
return lags, corr
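
# Illustrative use (synthetic signal): correlating a signal with itself gives a
# normalized value of ~1.0 at zero lag.
#   x = np.sin(np.linspace(0, 10, 200))
#   lags, corr = normcorr(x, x)
#   corr[lags == 0]   # -> array([1.])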
# x is longer signal; y is short pattern; nsamp is moving window size (in samples) for finding pattern
def windowcorr (x, y, nsamp, verbose=False):
sz = len(x)
lsidx,leidx=[],[]
llag, lc = [],[]
for sidx in range(0,sz,nsamp):
lsidx.append(sidx)
eidx = min(sidx + nsamp, sz-1)
leidx.append(eidx)
if verbose: print(sidx,eidx)
sig = sps.detrend(x[sidx:eidx])
lags,c = normcorr(sig,y)
llag.append(lags[int(len(lags)/2):])
lc.append(c[int(len(lags)/2):])
return llag, lc, lsidx, leidx
#
def maxnormcorr (x, y):
lags, corr = normcorr(x,y)
return max(corr)
#
def findpeakERPtimes (sig, erp, winsz, sampr, dfctr=2, thresh=0.05):
llag, lc, lsidx, leidx = windowcorr(sig, erp, int(winsz*sampr))
d = int(dfctr * len(erp)) # minimum distance between peaks and troughs (in samples)
lpkpos,lpkprop = [],[]
lT = []
for i,C in enumerate(lc):
pkpos, pkprop = sps.find_peaks(C, height = thresh, threshold = None, distance=d)
lpkpos.append(pkpos)
lpkprop.append(pkprop)
for t in pkpos: lT.append(index2ms(lsidx[i] + t, sampr))
return {'lT':lT, 'llag':llag, 'lc':lc, 'lsidx':lsidx,'leidx':leidx, 'lpkpos':lpkpos,'lpkprop':lpkprop}
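
# Sketch of the intended call (hypothetical inputs): given a long signal `sig`, a
# template ERP waveform `erp`, a 1-second search window and sampling rate `sampr`,
#   res = findpeakERPtimes(sig, erp, 1.0, sampr)
#   res['lT']   # times (ms) where the windowed correlation with the template peaks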
# add ERP score to pdf; ddx must have average s2,g,i1 ERPs
def addERPscore (ddx, lschan, pdf):
lchan = list(set(pdf['chan']))
lchan.sort()
pdf['ERPscore'] = pd.Series([-2 for i in range(len(pdf))], index=pdf.index) # -2 is invalid value
lERPAvg = [ddx[s] for s in lschan]
for ERPAvg,chan in zip(lERPAvg,lchan):
s = pdf[pdf.chan==chan]
for idx in s.index:
sig0 = pdf.at[idx,'CSDwvf']
pdf.loc[idx,'ERPscore'] = maxnormcorr(sig0,ERPAvg)
#
def getAvgERPInDir (based, stimIntensity, needBBN, needCX, needThal,\
swindowms=0, ewindowms=150,
dbpath='data/nhpdat/spont/A1/19apr4_A1_spont_LayersForSam.csv',
useBIP=False):
  from nhpdat import getflayers, getdownsampr, getorigsampr, getTriggerIDs, closestfile, hasBBNStim,IsCortex,IsThal
  from nhpdat import getStimIntensity, getTriggerTimes
  # NOTE: loadfile (called below) is not imported here; it is assumed to come from the project's data-loading module
dd = {}
for fn in os.listdir(based):
if not fn.endswith('.mat'): continue
FN = os.path.join(based,fn)
if stimIntensity > 0 and getStimIntensity(FN) != stimIntensity: continue
if needBBN and not hasBBNStim(FN): continue
if needCX and not IsCortex(FN): continue
if needThal and not IsThal(FN): continue
s2,g,i1=-1,-1,-1; lchan = []
if IsCortex(FN):
s2,g,i1=getflayers(closestfile(fn,dbpath=dbpath)[0],abbrev=True)
if s2 < 0: continue # no layer/channel information
lchan = [s2,g,i1]
samprds = getdownsampr(FN)
divby = getorigsampr(FN) / samprds
trigtimes = [int(round(x)) for x in np.array(getTriggerTimes(FN)) / divby]
trigIDs = getTriggerIDs(FN)
if useBIP:
sampr,dat,dt,tt,CSD,MUA,BIP = loadfile(FN,samprds,getbipolar=True)
else:
sampr,dat,dt,tt,CSD,MUA = loadfile(FN,samprds)
ttrigtimes = [index2ms(t,sampr) for t in trigtimes]
if useBIP:
ttavg,avgBIP = getAvgERP(BIP, sampr, trigtimes, swindowms, ewindowms)
ddf = {'fn':fn,'ttavg':ttavg,'avgBIP':avgBIP,'sampr':sampr}
else:
ttavg,avgCSD = getAvgERP(CSD, sampr, trigtimes, swindowms, ewindowms)
ddf = {'fn':fn,'ttavg':ttavg,'avgCSD':avgCSD,'sampr':sampr}
print(fn,lchan)
if s2 >= 0:
if useBIP:
s2+=1; g+=1; i1+=1;
ddf['s2']=s2; ddf['g']=g; ddf['i1']=i1
else:
th=int(CSD.shape[0]/2)
lchan=[th]
if useBIP: th+=1
ddf['th']=th
if useBIP:
ddf['lchan'] = [x+1 for x in lchan]
else:
ddf['lchan'] = lchan
dd[fn] = ddf
return dd
#
def avgERPOverChan (dd, noiseth=0.75):
from nhpdat import getflayers, getdownsampr, getorigsampr, getTriggerIDs, closestfile, hasBBNStim,IsCortex,IsThal
from nhpdat import getStimIntensity, getTriggerTimes
ddx = {'s2':[],'g':[],'i1':[],'tt':None}
for k in dd:
if getorigsampr('data/nhpdat/bbn/'+k) != 44000.0: continue
ddf = dd[k]
lsc = ddf['lchan']
for idx,c in enumerate([ddf[lsc[0]],ddf[lsc[1]],ddf[lsc[2]]]):
if max(abs(ddf['avgCSD'][c,:])) > noiseth: continue
ddx[lsc[idx]].append(ddf['avgCSD'][c,:])
if ddx['tt'] is None: ddx['tt'] = ddf['ttavg']
for c in lsc:
ddx[c+'avg'] = mean(np.array(ddx[c]),axis=0)
s = std(np.array(ddx[c]),axis=0)
s /= sqrt(len(ddx[c]))
ddx[c+'stderr'] = s
return ddx
|