| repo_name (stringlengths 6 to 97) | path (stringlengths 3 to 341) | text (stringlengths 8 to 1.02M) |
|---|---|---|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/dataset/dialogue_zero_shot_intent_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Union
import numpy as np
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.glue_benchmark.data_processors import InputExample
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import GLUEDataset
from nemo.core.neural_types import CategoricalValuesType, ChannelType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['DialogueZeroShotIntentDataset']
class DialogueZeroShotIntentDataset(GLUEDataset):
"""
Dataset for training an NLI model for zero-shot intent recognition. Similar to the GLUE/MNLI
dataset, but allows the user to specify which columns in the data files contain the
premise, hypothesis, and gold label.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'labels': NeuralType(tuple('B'), CategoricalValuesType()),
}
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
"""
Args:
dataset_split: dataset split
dialogues_processor: Data generator for dialogues
tokenizer: tokenizer to split text into sub-word tokens
cfg: config dict for dataset
cfg.num_classes: number of classes in the data (should be either 2 or 3, corresponding to
labels ['entailment', 'not_entailment'] or ["contradiction", "entailment", "neutral"])
"""
self.cfg = cfg
self.tokenizer = tokenizer
if self.cfg.num_classes not in [2, 3]:
raise ValueError("num_classes must be either 2 or 3!")
self.label_list = (
["contradiction", "entailment", "neutral"]
if self.cfg.num_classes == 3
else ['not_entailment', 'entailment']
)
token_params = {
'bos_token': None,
'eos_token': tokenizer.eos_token,
'pad_token': tokenizer.pad_token,
'cls_token': tokenizer.cls_token,
'sep_token_extra': tokenizer.eos_token
if hasattr(tokenizer, 'name') and 'roberta' in tokenizer.name.lower()
else None,
}
self.raw_features = dialogues_processor.get_dialog_examples(dataset_split)
self.examples = self._create_examples(self.raw_features, dataset_split)
self.features = self.convert_examples_to_features(
self.examples,
[0, 1, 2, 3],
self.cfg.max_seq_length,
tokenizer,
output_mode="classification",
**token_params,
)
def _create_examples(self, raw_features, dataset_split: str):
"""Creates examples for the training and dev sets."""
examples = []
for idx in range(len(raw_features)):
ex = self.raw_features[idx].data
user_utterance = ex["utterance"]
intent = ex["labels"]["intent"]
for candidate_idx, candidate_intent in enumerate(ex["possible_labels"]["intent"]):
guid = "{}-{}-{}".format(dataset_split, idx, candidate_idx)
text_a = user_utterance
text_b = "{} {}".format(self.cfg.prompt_template, candidate_intent)
label = 1 if candidate_intent == intent else 0
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(
self,
examples: List[str],
label_list: List[int],
max_seq_length: int,
tokenizer: TokenizerSpec,
output_mode: str,
bos_token: str = None,
eos_token: str = '[SEP]',
pad_token: str = '[PAD]',
cls_token: str = '[CLS]',
sep_token_extra: str = None,
cls_token_at_end: bool = False,
cls_token_segment_id: int = 0,
pad_token_segment_id: int = 0,
pad_on_left: bool = False,
mask_padding_with_zero: bool = True,
sequence_a_segment_id: int = 0,
sequence_b_segment_id: int = 1,
):
"""
Loads a data file into a list of `InputBatch`s.
The `cls_token_at_end` defines the location of the CLS token:
* False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
* True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
The `cls_token_segment_id` defines the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
The convention in BERT is:
a. For sequence pairs:
* tokens: [CLS] is this jack ##ville ? [SEP] no it is not . [SEP]
* type_ids: 0 0 0 0 0 0 0 1 1 1 1 1 1
b. For single sequences:
* tokens: [CLS] the dog is hairy . [SEP]
* type_ids: 0 0 0 0 0 0 0
Where "type_ids" are used to indicate whether this is the first
sequence or the second sequence. The embedding vectors for `type=0`
and `type=1` were learned during pre-training and are added to the
wordpiece embedding vector (and position vector). This is
not *strictly* necessary since the [SEP] token unambiguously separates
the sequences, but it makes it easier for the model to learn
the concept of sequences.
For classification tasks, the first vector (corresponding to [CLS])
is used as the "sentence vector". Note that this only makes sense
because the entire model is fine-tuned.
The convention for NMT is:
a. For sequence pairs:
* tokens: <BOS> is this jack ##ville ? <EOS> <BOS> no it is not . <EOS>
* type_ids: 0 0 0 0 0 0 0 1 1 1 1 1 1 1
b. For single sequences:
* tokens: <BOS> the dog is hairy . <EOS>
* type_ids: 0 0 0 0 0 0 0
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
if example.label == "-": # skip examples without a consensus label (e.g. in SNLI data set)
continue
if ex_index % 10000 == 0:
logging.info("Writing example %d of %d" % (ex_index, len(examples)))
if hasattr(tokenizer, 'text_to_tokens'):
tokens_a = tokenizer.text_to_tokens(example.text_a)
else:
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
if hasattr(tokenizer, 'text_to_tokens'):
tokens_b = tokenizer.text_to_tokens(example.text_b)
else:
tokens_b = tokenizer.tokenize(example.text_b)
special_tokens_count = 2 if eos_token else 0
special_tokens_count += 1 if sep_token_extra else 0
special_tokens_count += 2 if bos_token else 0
special_tokens_count += 1 if cls_token else 0
self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
else:
special_tokens_count = 1 if eos_token else 0
special_tokens_count += 1 if sep_token_extra else 0
special_tokens_count += 1 if bos_token else 0
if len(tokens_a) > max_seq_length - special_tokens_count:
tokens_a = tokens_a[: max_seq_length - special_tokens_count]
# Add special tokens to sequence_a
tokens = tokens_a
if bos_token:
tokens = [bos_token] + tokens
if eos_token:
tokens += [eos_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
# Add sequence separator between sequences
if tokens_b and sep_token_extra:
tokens += [sep_token_extra]
segment_ids += [sequence_a_segment_id]
# Add special tokens to sequence_b
if tokens_b:
if bos_token:
tokens += [bos_token]
segment_ids += [sequence_b_segment_id]
tokens += tokens_b
segment_ids += [sequence_b_segment_id] * (len(tokens_b))
if eos_token:
tokens += [eos_token]
segment_ids += [sequence_b_segment_id]
# Add classification token - for BERT models
if cls_token:
if cls_token_at_end:
tokens += [cls_token]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
if hasattr(tokenizer, 'tokens_to_ids'):
input_ids = tokenizer.tokens_to_ids(tokens)
else:
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if hasattr(tokenizer, 'tokens_to_ids'):
pad_token_id = tokenizer.tokens_to_ids([pad_token])[0]
else:
pad_token_id = tokenizer.convert_tokens_to_ids([pad_token])[0]
if pad_on_left:
input_ids = ([pad_token_id] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token_id] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
if len(input_ids) != max_seq_length:
raise ValueError("input_ids must be of length max_seq_length")
if len(input_mask) != max_seq_length:
raise ValueError("input_mask must be of length max_seq_length")
if len(segment_ids) != max_seq_length:
raise ValueError("segment_ids must be of length max_seq_length")
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = np.float32(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s" % (example.guid))
logging.info("tokens: %s" % " ".join(list(map(str, tokens))))
logging.info("input_ids: %s" % " ".join(list(map(str, input_ids))))
logging.info("input_mask: %s" % " ".join(list(map(str, input_mask))))
logging.info("segment_ids: %s" % " ".join(list(map(str, segment_ids))))
logging.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)
)
return features
class InputFeatures(object):
"""A single set of features of data.
Args:
input_ids: input/token ids
input_mask: masks out subword tokens
segment_ids: distinguish one sentence from the other one (if present)
label_ids: label for the current example
"""
def __init__(
self, input_ids: List[int], input_mask: List[int], segment_ids: List[int], label_id: Union[float, int]
):
"""Initialized InputFeatures."""
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
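if __name__ == "__main__":
    # Illustrative sketch, not part of the original file. It shows (a) how a single
    # user utterance is expanded into one premise/hypothesis pair per candidate
    # intent, mirroring _create_examples, and (b) the BERT-style
    # [CLS] A [SEP] B [SEP] layout with segment ids documented in
    # convert_examples_to_features. The utterance, intents and prompt template
    # below are made-up values.
    utterance = "i want to book a table for two"
    candidate_intents = ["book_restaurant", "play_music"]
    gold_intent = "book_restaurant"
    prompt_template = "this example is"
    for intent in candidate_intents:
        premise, hypothesis = utterance, "{} {}".format(prompt_template, intent)
        label = 1 if intent == gold_intent else 0
        print(premise, "||", hypothesis, "-> label", label)
    tokens_a = ["is", "this", "jack", "##ville", "?"]
    tokens_b = ["no", "it", "is", "not", "."]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    print(list(zip(tokens, segment_ids)))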
|
gkucsko/NeMo
|
nemo/collections/asr/parts/submodules/ssl_quantizers.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from torch import nn
from nemo.collections.asr.parts.submodules.jasper import jasper_activations
from nemo.core import NeuralModule
from nemo.core.neural_types import EncodedRepresentation, LossType, NeuralType
class GumbelVectorQuantizer(NeuralModule):
def __init__(
self,
dim,
num_vars,
temp,
groups,
combine_groups,
vq_dim,
time_first,
activation="gelu",
weight_proj_depth=1,
weight_proj_factor=1,
):
"""Vector quantization using gumbel softmax
Args:
dim: input dimension (channels)
num_vars: number of quantized vectors per group
temp: temperature for training. this should be a tuple of 3 elements: (start, stop, decay factor)
groups: number of groups for vector quantization
combine_groups: whether to use the vectors for all groups
vq_dim: dimensionality of the resulting quantized vector
time_first: if true, expect input in BxTxC format, otherwise in BxCxT
activation: what activation to use (should be a module). this is only used if weight_proj_depth is > 1
weight_proj_depth: number of layers (with activation in between) to project input before computing logits
weight_proj_factor: this is used only if weight_proj_depth is > 1. scales the inner dimensionality of
projections by this factor
"""
super().__init__()
self.groups = groups
self.combine_groups = combine_groups
self.input_dim = dim
self.num_vars = num_vars
self.time_first = time_first
assert vq_dim % groups == 0, f"dim {vq_dim} must be divisible by groups {groups} for concatenation"
var_dim = vq_dim // groups
num_groups = groups if not combine_groups else 1
self.vars = nn.Parameter(torch.FloatTensor(1, num_groups * num_vars, var_dim))
nn.init.uniform_(self.vars)
if weight_proj_depth > 1:
activation = jasper_activations["gelu"]
def block(input_dim, output_dim):
return nn.Sequential(nn.Linear(input_dim, output_dim), activation)
inner_dim = self.input_dim * weight_proj_factor
self.weight_proj = nn.Sequential(
*[block(self.input_dim if i == 0 else inner_dim, inner_dim) for i in range(weight_proj_depth - 1)],
nn.Linear(inner_dim, groups * num_vars),
)
else:
self.weight_proj = nn.Linear(self.input_dim, groups * num_vars)
nn.init.normal_(self.weight_proj.weight, mean=0, std=1)
nn.init.zeros_(self.weight_proj.bias)
assert len(temp) == 3, "Quantize temperature should be a tuple of 3 elements: (start, stop, decay factor)"
self.max_temp, self.min_temp, self.temp_decay = temp
self.curr_temp = self.max_temp
self.codebook_indices = None
def set_num_updates(self, num_updates):
self.curr_temp = max(self.max_temp * self.temp_decay ** num_updates, self.min_temp)
def get_codebook_indices(self):
if self.codebook_indices is None:
from itertools import product
p = [range(self.num_vars)] * self.groups
inds = list(product(*p))
self.codebook_indices = torch.tensor(inds, dtype=torch.long, device=self.vars.device).flatten()
if not self.combine_groups:
self.codebook_indices = self.codebook_indices.view(self.num_vars ** self.groups, -1)
for b in range(1, self.groups):
self.codebook_indices[:, b] += self.num_vars * b
self.codebook_indices = self.codebook_indices.flatten()
return self.codebook_indices
def sample_from_codebook(self, b, n):
indices = self.get_codebook_indices()
indices = indices.view(-1, self.groups)
cb_size = indices.size(0)
assert n < cb_size, f"sample size {n} must be smaller than the codebook size {cb_size}"
sample_idx = torch.randint(low=0, high=cb_size, size=(b * n,))
indices = indices[sample_idx]
z = self.vars.squeeze(0).index_select(0, indices.flatten()).view(b, n, -1)
return z
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
if self.time_first:
return {"x": NeuralType(('B', 'T', 'D'), EncodedRepresentation())}
return {"x": NeuralType(('B', 'D', 'T'), EncodedRepresentation())}
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
if self.time_first:
return {
"x": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"quantize_prob_ppl": NeuralType(elements_type=LossType()),
}
return {
"x": NeuralType(('B', 'D', 'T'), EncodedRepresentation()),
"quantize_prob_ppl": NeuralType(elements_type=LossType()),
}
def forward(self, x, return_ids=False):
if not self.time_first:
x = x.transpose(1, 2)
bsz, tsz, fsz = x.shape
x = x.reshape(-1, fsz)
x = self.weight_proj(x)
x = x.view(bsz * tsz * self.groups, -1)
_, k = x.max(-1)
hard_x = x.new_zeros(*x.shape).scatter_(-1, k.view(-1, 1), 1.0).view(bsz * tsz, self.groups, -1)
# Calculate quantize prob perplexity
num_vars = self.num_vars * self.groups
avg_probs = torch.softmax(x.view(bsz * tsz, self.groups, -1).float(), dim=-1).mean(dim=0)
quantize_prob_ppl = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-7), dim=-1)).sum()
quantize_prob_ppl = (num_vars - quantize_prob_ppl) / num_vars
if self.training:
x = F.gumbel_softmax(x.float(), tau=self.curr_temp, hard=True).type_as(x)
else:
x = hard_x
x = x.view(bsz * tsz, -1)
vars = self.vars
if self.combine_groups:
vars = vars.repeat(1, self.groups, 1)
x = x.unsqueeze(-1) * vars
x = x.view(bsz * tsz, self.groups, self.num_vars, -1)
x = x.sum(-2)
x = x.view(bsz, tsz, -1)
cur_codebook_temp = self.curr_temp
if not self.time_first:
x = x.transpose(1, 2) # BTC -> BCT
if return_ids:
hard_x_max = hard_x.argmax(-1).reshape(bsz, tsz, -1)
# BxTxG
# create single id from multiple group ids
target_ids = hard_x.new_zeros(bsz, tsz).long()
for i in range(self.groups):
target_ids *= self.num_vars
target_ids += hard_x_max[:, :, i]
return x, quantize_prob_ppl, cur_codebook_temp, target_ids
else:
return x, quantize_prob_ppl, cur_codebook_temp
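if __name__ == "__main__":
    # Usage sketch, not part of the original file: quantize a random batch of
    # encoder features. All dimensions and temperature values below are
    # illustrative, not taken from any NeMo config.
    quantizer = GumbelVectorQuantizer(
        dim=256,                    # input channels
        num_vars=320,               # codebook entries per group
        temp=(2.0, 0.5, 0.999995),  # (start, stop, decay factor)
        groups=2,
        combine_groups=False,
        vq_dim=256,                 # must be divisible by groups
        time_first=True,            # expect B x T x C input
    )
    feats = torch.randn(4, 50, 256)  # batch of 4, 50 time steps, 256 channels
    quantized, prob_ppl, cur_temp = quantizer(feats)
    print(quantized.shape, float(prob_ppl), cur_temp)  # torch.Size([4, 50, 256]) ...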
|
gkucsko/NeMo
|
nemo/collections/nlp/modules/common/prompt_encoder.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import torch
from torch import nn
from nemo.core.classes import Exportable, NeuralModule
from nemo.core.classes.common import typecheck
from nemo.core.neural_types import ChannelType, NeuralType
__all__ = ['PromptEncoder']
class PromptEncoder(NeuralModule, Exportable):
"""
The prompt encoder network that is used to generate the virtual
token embeddings for p-tuning.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"taskname_embeddings": NeuralType(('B', 'T', 'C'), ChannelType(), optional=False),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"output_embeds": NeuralType(('B', 'T', 'C'), ChannelType())}
def __init__(self, total_virtual_tokens: int, hidden_size: int, lstm_dropout: float, num_layers: int):
"""
Initializes the PromptEncoder module.
Args:
total_virtual_tokens: the total number of virtual tokens
hidden_size: hidden dimension
lstm_dropout: the dropout used for the LSTM
num_layers: number of layers used in the LSTM
"""
super().__init__()
self.hidden_size = hidden_size
self.total_virtual_tokens = total_virtual_tokens
# Set fixed indices for forward pass
self.register_buffer('indices', torch.LongTensor(list(range(self.total_virtual_tokens))))
# embedding
self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.hidden_size)
# LSTM
self.lstm_head = torch.nn.LSTM(
input_size=self.hidden_size,
hidden_size=self.hidden_size // 2,
num_layers=num_layers,
dropout=lstm_dropout,
bidirectional=True,
batch_first=True,
)
self.mlp_head = nn.Sequential(
nn.Linear(self.hidden_size, self.hidden_size), nn.ReLU(), nn.Linear(self.hidden_size, self.hidden_size)
)
@typecheck()
def forward(self, taskname_embeddings) -> torch.Tensor:
input_embeds = self.embedding(self.indices).unsqueeze(0)
batch_size, task_seq_length, _ = taskname_embeddings.shape
input_embeds = input_embeds.expand(batch_size, self.total_virtual_tokens, self.hidden_size).clone()
length = min(task_seq_length, self.total_virtual_tokens)
# Replace general input with task specific embeddings to specify the correct task
input_embeds[:, 0:length, :] = taskname_embeddings[:, 0:length, :]
output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])
return output_embeds
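if __name__ == "__main__":
    # Usage sketch, not part of the original file: run the prompt encoder on a
    # random task-name embedding. Sizes are illustrative; note that the
    # @typecheck() decorator requires keyword arguments.
    encoder = PromptEncoder(total_virtual_tokens=10, hidden_size=512, lstm_dropout=0.0, num_layers=2)
    taskname_embeddings = torch.randn(2, 3, 512)  # batch of 2, 3 task-name tokens, hidden_size 512
    output_embeds = encoder(taskname_embeddings=taskname_embeddings)
    print(output_embeds.shape)  # expected: torch.Size([2, 10, 512]), one embedding per virtual token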
|
gkucsko/NeMo
|
nemo_text_processing/text_normalization/en/verbalizers/electronic.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
TO_UPPER,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.en.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
from pynini.examples import plurals
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class ElectronicFst(GraphFst):
"""
Finite state transducer for verbalizing electronic
e.g. tokens { electronic { username: "cdf1" domain: "abc.edu" } } -> c d f one at a b c dot e d u
Args:
deterministic: if True will provide a single transduction option,
if False, multiple transduction options are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="electronic", kind="verbalize", deterministic=deterministic)
graph_digit_no_zero = pynini.invert(pynini.string_file(get_abs_path("data/number/digit.tsv"))).optimize()
graph_zero = pynini.cross("0", "zero")
if not deterministic:
graph_zero |= pynini.cross("0", "o") | pynini.cross("0", "oh")
graph_digit = graph_digit_no_zero | graph_zero
graph_symbols = pynini.string_file(get_abs_path("data/electronic/symbol.tsv")).optimize()
default_chars_symbols = pynini.cdrewrite(
pynutil.insert(" ") + (graph_symbols | graph_digit) + pynutil.insert(" "), "", "", NEMO_SIGMA
)
user_name = (
pynutil.delete("username:")
+ delete_space
+ pynutil.delete("\"")
+ default_chars_symbols
+ pynutil.delete("\"")
)
domain_common = pynini.string_file(get_abs_path("data/electronic/domain.tsv"))
domain = (
default_chars_symbols
+ insert_space
+ plurals._priority_union(
domain_common, pynutil.add_weight(pynini.cross(".", "dot"), weight=0.0001), NEMO_SIGMA
)
+ pynini.closure(
insert_space + (pynini.cdrewrite(TO_UPPER, "", "", NEMO_SIGMA) @ default_chars_symbols), 0, 1
)
)
domain = (
pynutil.delete("domain:")
+ delete_space
+ pynutil.delete("\"")
+ domain
+ delete_space
+ pynutil.delete("\"")
).optimize()
protocol = pynutil.delete("protocol: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
graph = (
pynini.closure(protocol + delete_space, 0, 1)
+ pynini.closure(user_name + delete_space + pynutil.insert(" at ") + delete_space, 0, 1)
+ domain
+ delete_space
).optimize() @ pynini.cdrewrite(delete_extra_space, "", "", NEMO_SIGMA)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
|
gkucsko/NeMo
|
nemo/collections/nlp/metrics/dialogue_metrics.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from collections import Counter
import numpy as np
from sacrebleu import corpus_bleu
class DialogueGenerationMetrics(object):
@staticmethod
def save_predictions(
filename, generated_field, ground_truth_field, inputs,
):
"""
Save predictions as a jsonl file
Args:
Each arg is a list of strings (all args have the same length)
"""
docs = []
for i in range(len(inputs)):
docs.append(
{"input": inputs[i], "ground_truth": ground_truth_field[i], "generated": generated_field[i],}
)
with open(filename, 'w', encoding="UTF-8") as f:
for item in docs:
f.write(json.dumps(item) + "\n")
@staticmethod
def _get_one_f1(generated_field, ground_truth_field):
"""
Get precision, recall, f1 based on token overlap between generated and ground_truth sequence
"""
generated_tokens = generated_field.split()
ground_truth_tokens = ground_truth_field.split()
common = Counter(generated_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(generated_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return np.array([precision * 100, recall * 100, f1 * 100])
@staticmethod
def get_f1(generated_fields, ground_truth_fields):
total_p_r_f1 = np.array(
[
DialogueGenerationMetrics._get_one_f1(generated_fields[i], ground_truth_fields[i])
for i in range(len(ground_truth_fields))
]
)
avg_p_r_f1 = np.mean(total_p_r_f1, axis=0)
return avg_p_r_f1
@staticmethod
def get_bleu(generated_field, ground_truth_field):
"""
Referenced from NMT evaluation
Note 13a is the default tokenizer for English for WMT
Known issue: it doesn't handle the edge case of None or ''
https://github.com/mjpost/sacrebleu/issues/161
"""
valid_indices = [i for i in range(len(generated_field)) if generated_field[i] and ground_truth_field[i]]
generated_field = [generated_field[i] for i in valid_indices]
ground_truth_field = [ground_truth_field[i] for i in valid_indices]
sacre_bleu = corpus_bleu(generated_field, [ground_truth_field], tokenize="13a")
return sacre_bleu.score
class DialogueClassificationMetrics(object):
@staticmethod
def save_predictions(
filename,
generated_labels,
generated_slots,
ground_truth_labels,
ground_truth_slots,
generated_field,
ground_truth_field,
inputs,
):
"""
Save predictions as a jsonl file
Args:
Each arg is a list of strings (all args have the same length)
"""
docs = []
for i in range(len(inputs)):
docs.append(
{
"input": inputs[i],
"ground_truth": ground_truth_field[i],
"ground_truth_slots": ground_truth_slots[i],
"ground_truth_labels": ground_truth_labels[i],
"generated": generated_field[i],
"generated_slots": generated_slots[i],
"generated_labels": generated_labels[i],
}
)
with open(filename, 'w', encoding="UTF-8") as f:
for item in docs:
f.write(json.dumps(item) + "\n")
@staticmethod
def split_label_and_slots(fields, with_slots=False):
"""
Split target into label and slots when doing joint label (i.e. intent) classification and slot filling.
For instance, split "reserve_restaurant\nslots: time_of_day(7pm), number_of_people(3)" into
label = "reserve_restaurant" and slots = ["time_of_day(7pm)", "number_of_people(3)"]
Args:
fields: list of strings
"""
labels = []
slots_list = []
for field in fields:
if with_slots:
combo = [i.strip() for i in field.split('slots:', 1)]
label = 'none'
if len(combo) == 2:
label, slots = combo
elif len(combo) == 1:
slots = combo[0]
label = 'none'
if isinstance(slots, str):
# temporary patch for purnendu model output
if 'possible intents:' in slots:
slots = slots.split('possible intents:')[0]
slots = slots.split(', ')
else:
slots = ['None']
else:
label = field
slots = []
slots_list.append(slots)
labels.append(label)
return labels, slots_list
@staticmethod
def get_slot_filling_metrics(generated_slots, ground_truth_slots):
"""
Args:
generated_slots: list of list of strings.
Each string is slot-name and slot-value pair e.g. location(Seattle)
ground_truth_slots: list of list of strings
"""
all_recall = []
all_precision = []
all_joint_goal_accuracy = []
for i in range(len(generated_slots)):
# deduplicate and sort
ground_truth = sorted(list(set(ground_truth_slots[i])))
predicted = sorted(list(set(generated_slots[i])))
correct = [item for item in predicted if item in ground_truth]
recall = len(correct) / len(ground_truth) if len(ground_truth) > 0 else 0
precision = len(correct) / len(predicted) if len(predicted) > 0 else 0
joint_goal_accuracy = int(ground_truth == predicted)
all_recall.append(recall)
all_precision.append(precision)
all_joint_goal_accuracy.append(joint_goal_accuracy)
avg_joint_goal_accuracy = np.mean(all_joint_goal_accuracy) * 100
avg_precision = np.mean(all_precision) * 100
avg_recall = np.mean(all_recall) * 100
avg_f1 = 2 * (avg_recall * avg_precision) / (avg_recall + avg_precision + 1e-20)
return avg_precision, avg_recall, avg_f1, avg_joint_goal_accuracy
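if __name__ == "__main__":
    # Worked example, not part of the original file: token-overlap precision /
    # recall / F1 and label+slot splitting on toy strings.
    p, r, f1 = DialogueGenerationMetrics._get_one_f1(
        "book a table for two", "book a table for three"
    )
    print(p, r, f1)  # 4 of 5 tokens overlap on both sides -> 80.0 80.0 80.0
    labels, slots = DialogueClassificationMetrics.split_label_and_slots(
        ["reserve_restaurant\nslots: time_of_day(7pm), number_of_people(3)"], with_slots=True
    )
    print(labels, slots)  # ['reserve_restaurant'] [['time_of_day(7pm)', 'number_of_people(3)']]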
|
gkucsko/NeMo
|
scripts/dataset_processing/nlp/financial_phrase_bank/prompt_learning_financial_phrase_bank_preprocessing.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import random
from tqdm import tqdm
"""
Dataset preprocessing script for the Financial Phrase Bank Sentiment dataset:
https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip
Converts the dataset into a jsonl format that can be used for p-tuning/prompt tuning in NeMo.
Inputs:
data-dir: (str) The unzipped directory where the Financial PhraseBank dataset was downloaded; output files will be saved here
file-name: (str) Name of the input file you want to process
save-name-base: (str) The base name for each of the train, val, and test files. If save-name-base were 'financial_phrase_bank' for
example, the files would be saved as financial_phrase_bank_train.jsonl, financial_phrase_bank_val.jsonl, and
financial_phrase_bank_test.jsonl
make-ground-truth: (bool) If true, test files will include labels, if false, test files will not include labels
random-seed: (int) Random seed for repeatable shuffling of train/val/test splits.
train-percent: (float) Percentage of data that should be used for the train split. The val and test splits will be made
by splitting the remaining data evenly.
Saves train, val, and test files for the Financial PhraseBank dataset.
An example of the processed output written to file:
{"taskname": "sentiment", "sentence": "In the Baltic countries , sales fell by 42.6 % .", "label": " negative"}
{"taskname": "sentiment", "sentence": "Danske Bank is Denmark 's largest bank with 3.5 million customers .", "label": " neutral"}
{"taskname": "sentiment", "sentence": "The total value of the deliveries is some EUR65m .", "label": " neutral"}
{"taskname": "sentiment", "sentence": "Operating profit margin increased from 11.2 % to 11.7 % .", "label": " positive"}
{"taskname": "sentiment", "sentence": "It will also strengthen Ruukki 's offshore business .", "label": " positive"}
{"taskname": "sentiment", "sentence": "Sanoma News ' advertising sales decreased by 22 % during the year .", "label": " negative"}
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default="data/FinancialPhraseBank-v1.0")
parser.add_argument("--file-name", type=str, default="Sentences_AllAgree.txt")
parser.add_argument("--save-name-base", type=str, default="financial_phrase_bank")
parser.add_argument("--make-ground-truth", action='store_true')
parser.add_argument("--random-seed", type=int, default=1234)
parser.add_argument("--train-percent", type=float, default=0.8)
args = parser.parse_args()
data = open(f"{args.data_dir}/{args.file_name}", "r", encoding="ISO-8859-1").readlines()
save_name_base = f"{args.data_dir}/{args.save_name_base}"
process_data(data, save_name_base, args.train_percent, args.random_seed, args.make_ground_truth)
def process_data(data, save_name_base, train_percent, random_seed, make_ground_truth=False):
random.seed(random_seed)
random.shuffle(data)
data_total = len(data)
train_total = int(data_total * train_percent)
val_total = (data_total - train_total) // 2
train_set = data[0:train_total]
val_set = data[train_total : train_total + val_total]
test_set = data[train_total + val_total :]
gen_file(train_set, save_name_base, 'train')
gen_file(val_set, save_name_base, 'val')
gen_file(test_set, save_name_base, 'test', make_ground_truth)
def gen_file(data, save_name_base, split_type, make_ground_truth=False):
save_path = f"{save_name_base}_{split_type}.jsonl"
print(f"Saving {split_type} split to {save_path}")
with open(save_path, 'w') as save_file:
for line in tqdm(data):
example_json = {"taskname": "sentiment"}
sent, label = line.split('@')
sent = sent.strip()
label = label.strip()
example_json["sentence"] = sent
# Don't want labels in the test set
if split_type != "test" or make_ground_truth:
example_json["label"] = " " + label
save_file.write(json.dumps(example_json) + '\n')
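# Hypothetical helper, not part of the original script: illustrates how gen_file
# turns one raw "sentence@label" line (the '@' separates the sentence from its
# sentiment label in the Financial PhraseBank files) into a prompt-learning jsonl
# record. The default line below is made up.
def _example_line_to_record(line="Operating profit rose to EUR 13.1 mn .@positive"):
    sent, label = line.split('@')
    return json.dumps({"taskname": "sentiment", "sentence": sent.strip(), "label": " " + label.strip()})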
if __name__ == "__main__":
main()
|
gkucsko/NeMo
|
scripts/nemo_legacy_import/nlp_checkpoint_port.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (<NAME>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf, open_dict
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.core import ModelPT
from nemo.core.config import TrainerConfig
def get_args(argv):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=f"Update NLP models trained on previous versions to current version ",
)
parser.add_argument("source", help="Source .nemo file")
parser.add_argument("out", help="Location to write result to")
parser.add_argument("--megatron-legacy", help="If the source model is megatron-bert trained on NeMo < 1.5")
parser.add_argument(
"--megatron-checkpoint",
type=str,
help="Path of the MegatronBert nemo checkpoint converted from MegatronLM using megatron_lm_ckpt_to_nemo.py file (Not NLP model checkpoint)",
)
parser.add_argument("--verbose", default=None, help="Verbose level for logging, numeric")
args = parser.parse_args(argv)
return args
def nemo_convert(argv):
args = get_args(argv)
loglevel = logging.INFO
# assuming loglevel is bound to the string value obtained from the
# command line argument. Convert to upper case to allow the user to
# specify --log=DEBUG or --log=debug
if args.verbose is not None:
numeric_level = getattr(logging, args.verbose.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.verbose)
loglevel = numeric_level
logger = logging.getLogger(__name__)
if logger.handlers:
for handler in logger.handlers:
logger.removeHandler(handler)
logging.basicConfig(level=loglevel, format='%(asctime)s [%(levelname)s] %(message)s')
logging.info("Logging level set to {}".format(loglevel))
"""Convert a .nemo saved model trained on previous versions of nemo into a nemo fie with current version."""
nemo_in = args.source
out = args.out
# Create a PL trainer object which is required for restoring Megatron models
cfg_trainer = TrainerConfig(
gpus=1,
accelerator="ddp",
num_nodes=1,
# Need to set the following two to False as ExpManager will take care of them differently.
logger=False,
checkpoint_callback=False,
)
trainer = pl.Trainer(cfg_trainer)
logging.info("Restoring NeMo model from '{}'".format(nemo_in))
try:
# If the megatron based NLP model was trained on NeMo < 1.5, then we need to update the lm_checkpoint on the model config
if args.megatron_legacy:
if args.megatron_checkpoint:
connector = NLPSaveRestoreConnector()
model_cfg = ModelPT.restore_from(
restore_path=nemo_in, save_restore_connector=connector, trainer=trainer, return_config=True
)
OmegaConf.set_struct(model_cfg, True)
with open_dict(model_cfg):
model_cfg.language_model.lm_checkpoint = args.megatron_checkpoint
model_cfg['megatron_legacy'] = True
model_cfg['masked_softmax_fusion'] = False
model_cfg['bias_gelu_fusion'] = False
model = ModelPT.restore_from(
restore_path=nemo_in,
save_restore_connector=connector,
trainer=trainer,
override_config_path=model_cfg,
)
else:
logging.error("Megatron Checkpoint must be provided if Megatron legacy is chosen")
else:
model = ModelPT.restore_from(restore_path=nemo_in, trainer=trainer)
logging.info("Model {} restored from '{}'".format(model.cfg.target, nemo_in))
# Save the model
model.save_to(out)
logging.info("Successfully converted to {}".format(out))
del model
except Exception as e:
logging.error(
"Failed to restore model from NeMo file : {}. Please make sure you have the latest NeMo package installed with [all] dependencies.".format(
nemo_in
)
)
raise e
if __name__ == '__main__':
nemo_convert(sys.argv[1:])
|
gkucsko/NeMo
|
nemo/collections/asr/parts/utils/speaker_utils.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
from copy import deepcopy
from functools import reduce
from typing import List
import numpy as np
import omegaconf
import soundfile as sf
import torch
from pyannote.core import Annotation, Segment, Timeline
from pyannote.metrics.diarization import DiarizationErrorRate
from tqdm import tqdm
from nemo.collections.asr.parts.utils.nmesc_clustering import COSclustering
from nemo.utils import logging
"""
This file contains all the utility functions required for the speaker-embedding part of the diarization scripts
"""
def get_uniqname_from_filepath(filepath):
"""
Return base name from provided filepath
"""
if type(filepath) is str:
uniq_id = os.path.splitext(os.path.basename(filepath))[0]
return uniq_id
else:
raise TypeError("input must be filepath string")
def get_uniq_id_with_dur(meta, deci=3):
"""
Return basename with offset and end time labels
"""
bare_uniq_id = get_uniqname_from_filepath(meta['audio_filepath'])
if meta['offset'] is None and meta['duration'] is None:
return bare_uniq_id
if meta['offset']:
offset = str(int(round(meta['offset'], deci) * pow(10, deci)))
else:
offset = 0
if meta['duration']:
endtime = str(int(round(meta['offset'] + meta['duration'], deci) * pow(10, deci)))
else:
endtime = 'NULL'
uniq_id = f"{bare_uniq_id}_{offset}_{endtime}"
return uniq_id
def audio_rttm_map(manifest):
"""
This function creates AUDIO_RTTM_MAP which is used by all diarization components to extract embeddings,
cluster and unify time stamps
Args: manifest (str): path to a manifest file that contains the keys audio_filepath and, if available, rttm_filepath, text, num_speakers and uem_filepath
Returns:
AUDIO_RTTM_MAP (dict): a dictionary keyed by unique id, used to map each audio file to its corresponding rttm file
"""
AUDIO_RTTM_MAP = {}
with open(manifest, 'r') as inp_file:
lines = inp_file.readlines()
logging.info("Number of files to diarize: {}".format(len(lines)))
for line in lines:
line = line.strip()
dic = json.loads(line)
meta = {
'audio_filepath': dic['audio_filepath'],
'rttm_filepath': dic.get('rttm_filepath', None),
'offset': dic.get('offset', None),
'duration': dic.get('duration', None),
'text': dic.get('text', None),
'num_speakers': dic.get('num_speakers', None),
'uem_filepath': dic.get('uem_filepath', None),
'ctm_filepath': dic.get('ctm_filepath', None),
}
uniqname = get_uniqname_from_filepath(filepath=meta['audio_filepath'])
if uniqname not in AUDIO_RTTM_MAP:
AUDIO_RTTM_MAP[uniqname] = meta
else:
raise KeyError(
"file {} is already part AUDIO_RTTM_Map, it might be duplicated".format(meta['audio_filepath'])
)
return AUDIO_RTTM_MAP
def parse_scale_configs(window_lengths_in_sec, shift_lengths_in_sec, multiscale_weights):
"""
Check whether multiscale parameters are provided correctly. window_lengths_in_sec, shift_lengths_in_sec and
multiscale_weights should all be provided as omegaconf.listconfig.ListConfig. In addition, the scales
should be provided in descending order, from the longest scale to the base scale (the shortest).
Example:
Single-scale setting:
parameters.window_length_in_sec=1.5
parameters.shift_length_in_sec=0.75
parameters.multiscale_weights=null
Multiscale setting (base scale - window_length 0.5 s and shift_length 0.25):
parameters.window_length_in_sec=[1.5,1.0,0.5]
parameters.shift_length_in_sec=[0.75,0.5,0.25]
parameters.multiscale_weights=[0.33,0.33,0.33]
In addition, you can also specify session-by-session multiscale weight. In this case, each dictionary key
points to different weights.
"""
checkFloatConfig = [type(var) == float for var in (window_lengths_in_sec, shift_lengths_in_sec)]
checkListConfig = [
type(var) == type(omegaconf.listconfig.ListConfig([]))
for var in (window_lengths_in_sec, shift_lengths_in_sec, multiscale_weights)
]
if all(checkListConfig) or all(checkFloatConfig):
# If bare floating numbers are provided, convert them to list format.
if all(checkFloatConfig):
window_lengths, shift_lengths, multiscale_weights = (
[window_lengths_in_sec],
[shift_lengths_in_sec],
[1.0],
)
else:
window_lengths, shift_lengths, multiscale_weights = (
window_lengths_in_sec,
shift_lengths_in_sec,
multiscale_weights,
)
length_check = (
len(set([len(window_lengths), len(shift_lengths), len(multiscale_weights)])) == 1
and len(multiscale_weights) > 0
)
scale_order_check = (
window_lengths == sorted(window_lengths)[::-1] and shift_lengths == sorted(shift_lengths)[::-1]
)
# Check whether window lengths are longer than shift lengths
if len(window_lengths) > 1:
shift_length_check = all([w > s for w, s in zip(window_lengths, shift_lengths)]) == True
else:
shift_length_check = window_lengths[0] > shift_lengths[0]
multiscale_args_dict = {'use_single_scale_clustering': False}
if all([length_check, scale_order_check, shift_length_check]) == True:
if len(window_lengths) > 1:
multiscale_args_dict['scale_dict'] = {
k: (w, s) for k, (w, s) in enumerate(zip(window_lengths, shift_lengths))
}
else:
multiscale_args_dict['scale_dict'] = {0: (window_lengths[0], shift_lengths[0])}
multiscale_args_dict['multiscale_weights'] = multiscale_weights
return multiscale_args_dict
else:
raise ValueError('Multiscale parameters are not properly setup.')
elif any(checkListConfig):
raise ValueError(
'You must provide a list config for all three parameters: window, shift and multiscale weights.'
)
else:
return None
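if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: the single-scale and
    # multiscale inputs described in the parse_scale_configs docstring.
    single = parse_scale_configs(1.5, 0.75, None)
    print(single)  # {'use_single_scale_clustering': False, 'scale_dict': {0: (1.5, 0.75)}, 'multiscale_weights': [1.0]}
    multi = parse_scale_configs(
        omegaconf.ListConfig([1.5, 1.0, 0.5]),
        omegaconf.ListConfig([0.75, 0.5, 0.25]),
        omegaconf.ListConfig([0.33, 0.33, 0.33]),
    )
    print(multi['scale_dict'])  # {0: (1.5, 0.75), 1: (1.0, 0.5), 2: (0.5, 0.25)}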
def get_embs_and_timestamps(multiscale_embeddings_and_timestamps, multiscale_args_dict):
"""
The embeddings and timestamps in multiscale_embeddings_and_timestamps dictionary are
indexed by scale index. This function rearranges the extracted speaker embedding and
timestamps by unique ID to make the further processing more convenient.
Args:
multiscale_embeddings_and_timestamps (dict):
Dictionary of embeddings and timestamps for each scale.
multiscale_args_dict (dict):
Dictionary of scale information: window, shift and multiscale weights.
Returns:
embs_and_timestamps (dict)
A dictionary containing embeddings and timestamps of each scale, indexed by unique ID.
"""
embs_and_timestamps = {
uniq_id: {'multiscale_weights': [], 'scale_dict': {}}
for uniq_id in multiscale_embeddings_and_timestamps[0][0].keys()
}
if multiscale_args_dict['use_single_scale_clustering']:
_multiscale_args_dict = deepcopy(multiscale_args_dict)
_multiscale_args_dict['scale_dict'] = {0: multiscale_args_dict['scale_dict'][0]}
_multiscale_args_dict['multiscale_weights'] = multiscale_args_dict['multiscale_weights'][:1]
else:
_multiscale_args_dict = multiscale_args_dict
for scale_idx in sorted(_multiscale_args_dict['scale_dict'].keys()):
embeddings, time_stamps = multiscale_embeddings_and_timestamps[scale_idx]
for uniq_id in embeddings.keys():
embs_and_timestamps[uniq_id]['multiscale_weights'] = (
torch.tensor(_multiscale_args_dict['multiscale_weights']).unsqueeze(0).half()
)
assert len(embeddings[uniq_id]) == len(time_stamps[uniq_id])
embs_and_timestamps[uniq_id]['scale_dict'][scale_idx] = {
'embeddings': embeddings[uniq_id],
'time_stamps': time_stamps[uniq_id],
}
return embs_and_timestamps
def get_contiguous_stamps(stamps):
"""
Return contiguous time stamps
"""
lines = deepcopy(stamps)
contiguous_stamps = []
for i in range(len(lines) - 1):
start, end, speaker = lines[i].split()
next_start, next_end, next_speaker = lines[i + 1].split()
if float(end) > float(next_start):
avg = str((float(next_start) + float(end)) / 2.0)
lines[i + 1] = ' '.join([avg, next_end, next_speaker])
contiguous_stamps.append(start + " " + avg + " " + speaker)
else:
contiguous_stamps.append(start + " " + end + " " + speaker)
start, end, speaker = lines[-1].split()
contiguous_stamps.append(start + " " + end + " " + speaker)
return contiguous_stamps
def merge_stamps(lines):
"""
Merge time stamps of the same speaker.
"""
stamps = deepcopy(lines)
overlap_stamps = []
for i in range(len(stamps) - 1):
start, end, speaker = stamps[i].split()
next_start, next_end, next_speaker = stamps[i + 1].split()
if float(end) == float(next_start) and speaker == next_speaker:
stamps[i + 1] = ' '.join([start, next_end, next_speaker])
else:
overlap_stamps.append(start + " " + end + " " + speaker)
start, end, speaker = stamps[-1].split()
overlap_stamps.append(start + " " + end + " " + speaker)
return overlap_stamps
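if __name__ == "__main__":
    # Worked example, not part of the original file: resolve an overlap between
    # two segments of the same speaker and then merge the contiguous segments.
    # Each stamp is a "start end speaker" string, as produced by the clustering step.
    stamps = ["0.0 2.0 speaker_0", "1.5 3.0 speaker_0", "3.0 4.0 speaker_1"]
    contiguous = get_contiguous_stamps(stamps)
    print(contiguous)                # ['0.0 1.75 speaker_0', '1.75 3.0 speaker_0', '3.0 4.0 speaker_1']
    print(merge_stamps(contiguous))  # ['0.0 3.0 speaker_0', '3.0 4.0 speaker_1']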
def labels_to_pyannote_object(labels, uniq_name=''):
"""
Convert the given labels to pyannote object to calculate DER and for visualization
"""
annotation = Annotation(uri=uniq_name)
for label in labels:
start, end, speaker = label.strip().split()
start, end = float(start), float(end)
annotation[Segment(start, end)] = speaker
return annotation
def uem_timeline_from_file(uem_file, uniq_name=''):
"""
Generate pyannote timeline segments for uem file
<UEM> file format
UNIQ_SPEAKER_ID CHANNEL START_TIME END_TIME
"""
timeline = Timeline(uri=uniq_name)
with open(uem_file, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
speaker_id, channel, start_time, end_time = line.split()
timeline.add(Segment(float(start_time), float(end_time)))
return timeline
def labels_to_rttmfile(labels, uniq_id, out_rttm_dir):
"""
Write rttm file with uniq_id name in out_rttm_dir with time_stamps in labels
"""
filename = os.path.join(out_rttm_dir, uniq_id + '.rttm')
with open(filename, 'w') as f:
for line in labels:
line = line.strip()
start, end, speaker = line.split()
duration = float(end) - float(start)
start = float(start)
log = 'SPEAKER {} 1 {:.3f} {:.3f} <NA> <NA> {} <NA> <NA>\n'.format(uniq_id, start, duration, speaker)
f.write(log)
return filename
def rttm_to_labels(rttm_filename):
"""
Prepare time stamps label list from rttm file
"""
labels = []
with open(rttm_filename, 'r') as f:
for line in f.readlines():
rttm = line.strip().split()
start, end, speaker = float(rttm[3]), float(rttm[4]) + float(rttm[3]), rttm[7]
labels.append('{} {} {}'.format(start, end, speaker))
return labels
def write_cluster_labels(base_scale_idx, lines_cluster_labels, out_rttm_dir):
"""
Write cluster labels that are generated from clustering into a file.
Args:
base_scale_idx (int): The base scale index which is the highest scale index.
lines_cluster_labels (list): The start and end time-stamps of each segment with the predicted cluster label.
out_rttm_dir (str): The path where output rttm files are saved.
"""
out_label_name = os.path.join(
out_rttm_dir, '../speaker_outputs', f'subsegments_scale{base_scale_idx}_cluster.label'
)
with open(out_label_name, 'w') as f:
for clus_label_line in lines_cluster_labels:
f.write(clus_label_line)
def perform_clustering(embs_and_timestamps, AUDIO_RTTM_MAP, out_rttm_dir, clustering_params):
"""
Performs spectral clustering on embeddings with time stamps generated from VAD output
Args:
embs_and_timestamps (dict): This dictionary contains the following items indexed by unique IDs.
'embeddings' : Embeddings with key as unique_id
'time_stamps' : Time stamps list for each audio recording
AUDIO_RTTM_MAP (dict): AUDIO_RTTM_MAP for mapping unique id with audio file path and rttm path
out_rttm_dir (str): Path to write predicted rttms
clustering_params (dict): clustering parameters provided through config that contains max_num_speakers (int),
oracle_num_speakers (bool), max_rp_threshold(float), sparse_search_volume(int) and enhance_count_threshold (int)
Returns:
all_reference (list[uniq_name,Annotation]): reference annotations for score calculation
all_hypothesis (list[uniq_name,Annotation]): hypothesis annotations for score calculation
"""
all_hypothesis = []
all_reference = []
no_references = False
max_num_speakers = clustering_params['max_num_speakers']
lines_cluster_labels = []
cuda = True
if not torch.cuda.is_available():
logging.warning("cuda=False, using CPU for Eigen decomposition. This might slow down the clustering process.")
cuda = False
for uniq_id, value in tqdm(AUDIO_RTTM_MAP.items()):
if clustering_params.oracle_num_speakers:
num_speakers = value.get('num_speakers', None)
if num_speakers is None:
raise ValueError("Provided option as oracle num of speakers but num_speakers in manifest is null")
else:
num_speakers = None
cluster_labels = COSclustering(
uniq_embs_and_timestamps=embs_and_timestamps[uniq_id],
oracle_num_speakers=num_speakers,
max_num_speaker=max_num_speakers,
enhanced_count_thres=clustering_params.enhanced_count_thres,
max_rp_threshold=clustering_params.max_rp_threshold,
sparse_search_volume=clustering_params.sparse_search_volume,
cuda=cuda,
)
base_scale_idx = max(embs_and_timestamps[uniq_id]['scale_dict'].keys())
lines = embs_and_timestamps[uniq_id]['scale_dict'][base_scale_idx]['time_stamps']
assert len(cluster_labels) == len(lines)
for idx, label in enumerate(cluster_labels):
tag = 'speaker_' + str(label)
lines[idx] += tag
a = get_contiguous_stamps(lines)
labels = merge_stamps(a)
if out_rttm_dir:
labels_to_rttmfile(labels, uniq_id, out_rttm_dir)
lines_cluster_labels.extend([f'{uniq_id} {seg_line}\n' for seg_line in lines])
hypothesis = labels_to_pyannote_object(labels, uniq_name=uniq_id)
all_hypothesis.append([uniq_id, hypothesis])
rttm_file = value.get('rttm_filepath', None)
if rttm_file is not None and os.path.exists(rttm_file) and not no_references:
ref_labels = rttm_to_labels(rttm_file)
reference = labels_to_pyannote_object(ref_labels, uniq_name=uniq_id)
all_reference.append([uniq_id, reference])
else:
no_references = True
all_reference = []
if out_rttm_dir:
write_cluster_labels(base_scale_idx, lines_cluster_labels, out_rttm_dir)
return all_reference, all_hypothesis
def score_labels(AUDIO_RTTM_MAP, all_reference, all_hypothesis, collar=0.25, ignore_overlap=True):
"""
Calculates DER, CER, FA and MISS
Args:
AUDIO_RTTM_MAP : Dictionary containing information provided from the manifest file
all_reference (list[uniq_name,Annotation]): reference annotations for score calculation
all_hypothesis (list[uniq_name,Annotation]): hypothesis annotations for score calculation
Returns:
metric (pyannote.DiarizationErrorRate): Pyannote Diarization Error Rate metric object. This object contains detailed scores of each audiofile.
mapping (dict): Mapping dict containing the mapping speaker label for each audio input
< Caveat >
Unlike md-eval.pl, "no score" collar in pyannote.metrics is the maximum length of
"no score" collar from left to right. Therefore, if 0.25s is applied for "no score"
collar in md-eval.pl, 0.5s should be applied for pyannote.metrics.
"""
metric = None
if len(all_reference) == len(all_hypothesis):
metric = DiarizationErrorRate(collar=2 * collar, skip_overlap=ignore_overlap)
mapping_dict = {}
for (reference, hypothesis) in zip(all_reference, all_hypothesis):
ref_key, ref_labels = reference
_, hyp_labels = hypothesis
uem = AUDIO_RTTM_MAP[ref_key].get('uem_filepath', None)
if uem is not None:
uem = uem_timeline_from_file(uem_file=uem, uniq_name=ref_key)
metric(ref_labels, hyp_labels, uem=uem, detailed=True)
mapping_dict[ref_key] = metric.optimal_mapping(ref_labels, hyp_labels)
DER = abs(metric)
CER = metric['confusion'] / metric['total']
FA = metric['false alarm'] / metric['total']
MISS = metric['missed detection'] / metric['total']
logging.info(
"Cumulative Results for collar {} sec and ignore_overlap {}: \n FA: {:.4f}\t MISS {:.4f}\t \
Diarization ER: {:.4f}\t, Confusion ER:{:.4f}".format(
collar, ignore_overlap, FA, MISS, DER, CER
)
)
return metric, mapping_dict
else:
logging.warning(
"check if each ground truth RTTMs were present in provided manifest file. Skipping calculation of Diariazation Error Rate"
)
return None
def get_vad_out_from_rttm_line(rttm_line):
"""
Extract VAD timestamp from the given RTTM lines.
"""
vad_out = rttm_line.strip().split()
if len(vad_out) > 3:
start, dur, _ = float(vad_out[3]), float(vad_out[4]), vad_out[7]
else:
start, dur, _ = float(vad_out[0]), float(vad_out[1]), vad_out[2]
start, dur = float("{:}".format(start)), float("{:}".format(dur))
return start, dur
def get_offset_and_duration(AUDIO_RTTM_MAP, uniq_id, deci=5):
"""
Extract offset and duration information from AUDIO_RTTM_MAP dictionary.
If duration information is not specified, a duration value is extracted from the audio file directly.
Args:
AUDIO_RTTM_MAP (dict):
Dictionary containing RTTM file information, which is indexed by unique file id.
uniq_id (str):
Unique file id
Returns:
offset (float):
The offset value that determines the beginning of the audio stream.
duration (float):
The length of audio stream that is expected to be used.
"""
audio_path = AUDIO_RTTM_MAP[uniq_id]['audio_filepath']
if AUDIO_RTTM_MAP[uniq_id].get('duration', None):
duration = round(AUDIO_RTTM_MAP[uniq_id]['duration'], deci)
offset = round(AUDIO_RTTM_MAP[uniq_id]['offset'], deci)
else:
sound = sf.SoundFile(audio_path)
duration = sound.frames / sound.samplerate
offset = 0.0
return offset, duration
def write_overlap_segments(outfile, AUDIO_RTTM_MAP, uniq_id, overlap_range_list, include_uniq_id, deci=5):
"""
Write the json dictionary into the specified manifest file.
Args:
outfile:
File pointer that indicates output file path.
AUDIO_RTTM_MAP (dict):
Dictionary containing the input manifest information
uniq_id (str):
Unique file id
overlap_range_list (list):
List containing overlapping ranges between target and source.
"""
audio_path = AUDIO_RTTM_MAP[uniq_id]['audio_filepath']
for (stt, end) in overlap_range_list:
meta = {
"audio_filepath": audio_path,
"offset": round(stt, deci),
"duration": round(end - stt, deci),
"label": 'UNK',
"uniq_id": uniq_id,
}
json.dump(meta, outfile)
outfile.write("\n")
def read_rttm_lines(rttm_file_path):
"""
Read rttm files and return the rttm information lines.
Args:
rttm_file_path (str):
Returns:
lines (list):
List containing the strings from the RTTM file.
"""
if rttm_file_path and os.path.exists(rttm_file_path):
f = open(rttm_file_path, 'r')
else:
raise FileNotFoundError(
"Requested to construct manifest from rttm with oracle VAD option or from NeMo VAD but received filename as {}".format(
rttm_file_path
)
)
lines = f.readlines()
return lines
def isOverlap(rangeA, rangeB):
"""
Check whether two ranges have overlap.
Args:
rangeA (list, tuple):
List or tuple containing start and end value in float.
rangeB (list, tuple):
List or tuple containing start and end value in float.
Returns:
(bool):
Boolean that indicates whether the input ranges have overlap.
"""
start1, end1 = rangeA
start2, end2 = rangeB
return end1 > start2 and end2 > start1
def getOverlapRange(rangeA, rangeB):
"""
Calculate the overlapping range between rangeA and rangeB.
Args:
rangeA (list, tuple):
List or tuple containing start and end value in float.
rangeB (list, tuple):
List or tuple containing start and end value in float.
Returns:
(list):
List containing the overlapping range between rangeA and rangeB.
"""
assert isOverlap(rangeA, rangeB), f"There is no overlap between rangeA:{rangeA} and rangeB:{rangeB}"
return [max(rangeA[0], rangeB[0]), min(rangeA[1], rangeB[1])]
def combine_float_overlaps(ranges, deci=5, margin=2):
"""
    Combine overlapping ranges expressed as floating point numbers. Since neighboring integers are treated as a
    continuous range, a margin is added to each start value before merging and then subtracted from the merged result.
Args:
ranges (list):
List containing ranges.
Example: [(10.2, 10.83), (10.42, 10.91), (10.45, 12.09)]
deci (int):
Number of rounding decimals
margin (int):
margin for determining overlap of the two ranges when ranges are converted to integer ranges.
Default is margin=2 which follows the python index convention.
Examples:
If margin is 0:
[(1, 10), (10, 20)] -> [(1, 20)]
[(1, 10), (11, 20)] -> [(1, 20)]
If margin is 1:
[(1, 10), (10, 20)] -> [(1, 20)]
[(1, 10), (11, 20)] -> [(1, 10), (11, 20)]
If margin is 2:
[(1, 10), (10, 20)] -> [(1, 10), (10, 20)]
[(1, 10), (11, 20)] -> [(1, 10), (11, 20)]
Returns:
merged_list (list):
List containing the combined ranges.
Example: [(10.2, 12.09)]
"""
ranges_int = []
for x in ranges:
stt, end = fl2int(x[0], deci) + margin, fl2int(x[1], deci)
if stt == end:
logging.warning(f"The range {stt}:{end} is too short to be combined thus skipped.")
else:
ranges_int.append([stt, end])
merged_ranges = combine_int_overlaps(ranges_int)
merged_ranges = [[int2fl(x[0] - margin, deci), int2fl(x[1], deci)] for x in merged_ranges]
return merged_ranges
def combine_int_overlaps(ranges):
"""
    Merge range pairs when they overlap.
    The ranges are sorted by start value before merging.
    Note that neighboring integers also lead to a merged range.
Example:
[(1, 10), (11, 20)] -> [(1, 20)]
Refer to the original code at https://stackoverflow.com/a/59378428
Args:
ranges(list):
List containing ranges.
Example: [(102, 103), (104, 109), (107, 120)]
Returns:
merged_list (list):
List containing the combined ranges.
Example: [(102, 120)]
"""
ranges = sorted(ranges, key=lambda x: x[0])
merged_list = reduce(
lambda x, element: x[:-1:] + [(min(*x[-1], *element), max(*x[-1], *element))]
if x[-1][1] >= element[0] - 1
else x + [element],
ranges[1::],
ranges[0:1],
)
return merged_list
def fl2int(x, deci=3):
"""
Convert floating point number to integer.
"""
return int(round(x * pow(10, deci)))
def int2fl(x, deci=3):
"""
Convert integer to floating point number.
"""
return round(float(x / pow(10, deci)), int(deci))
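# Editorial note (not part of the original file): fl2int and int2fl form the round trip used by
# the merging utilities above, e.g. fl2int(0.75, deci=2) == 75 and int2fl(75, deci=2) == 0.75.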
def getMergedRanges(label_list_A: List, label_list_B: List, deci: int = 3) -> List:
"""
Calculate the merged ranges between label_list_A and label_list_B.
Args:
label_list_A (list):
List containing ranges (start and end values)
label_list_B (list):
List containing ranges (start and end values)
Returns:
(list):
List containing the merged ranges
"""
if label_list_A == [] and label_list_B != []:
return label_list_B
elif label_list_A != [] and label_list_B == []:
return label_list_A
else:
label_list_A = [[fl2int(x[0] + 1, deci), fl2int(x[1], deci)] for x in label_list_A]
label_list_B = [[fl2int(x[0] + 1, deci), fl2int(x[1], deci)] for x in label_list_B]
combined = combine_int_overlaps(label_list_A + label_list_B)
return [[int2fl(x[0] - 1, deci), int2fl(x[1], deci)] for x in combined]
def getMinMaxOfRangeList(ranges):
"""
Get the min and max of a given range list.
"""
_max = max([x[1] for x in ranges])
_min = min([x[0] for x in ranges])
return _min, _max
def getSubRangeList(target_range, source_range_list) -> List:
"""
    Get the ranges from source_range_list that overlap with the target range.
Example:
source range:
|===--======---=====---====--|
target range:
|--------================----|
out_range:
|--------===---=====---==----|
Args:
target_range (list):
A range (a start and end value pair) that defines the target range we want to select.
            target_range = [start, end]
        source_range_list (list):
            List containing the subranges that need to be selected.
            source_range_list = [(start0, end0), (start1, end1), ...]
Returns:
out_range (list):
List containing the overlap between target_range and
source_range_list.
"""
if target_range == []:
return []
else:
out_range = []
for s_range in source_range_list:
if isOverlap(s_range, target_range):
ovl_range = getOverlapRange(s_range, target_range)
out_range.append(ovl_range)
return out_range
def write_rttm2manifest(AUDIO_RTTM_MAP: dict, manifest_file: str, include_uniq_id: bool = False, deci: int = 5) -> str:
"""
    Write a manifest file based on RTTM files (or VAD table output files). This manifest file is used by the
    speaker diarizer to compute embeddings and cluster them. This function takes care of overlapping VAD timestamps
    and trims them with the given offset and duration values.
    Args:
        AUDIO_RTTM_MAP (dict):
            Dictionary keyed by unique file names; each entry contains the audio filepath and rttm_filepath
            used to extract oracle VAD timestamps.
        manifest_file (str):
            The path to the output manifest file.
        include_uniq_id (bool):
            Flag passed through to write_overlap_segments.
        deci (int):
            Number of rounding decimals.
    Returns:
        manifest_file (str):
            The path to the output manifest file.
"""
with open(manifest_file, 'w') as outfile:
for uniq_id in AUDIO_RTTM_MAP:
rttm_file_path = AUDIO_RTTM_MAP[uniq_id]['rttm_filepath']
rttm_lines = read_rttm_lines(rttm_file_path)
offset, duration = get_offset_and_duration(AUDIO_RTTM_MAP, uniq_id, deci)
vad_start_end_list_raw = []
for line in rttm_lines:
start, dur = get_vad_out_from_rttm_line(line)
vad_start_end_list_raw.append([start, start + dur])
vad_start_end_list = combine_float_overlaps(vad_start_end_list_raw, deci)
if len(vad_start_end_list) == 0:
logging.warning(f"File ID: {uniq_id}: The VAD label is not containing any speech segments.")
elif duration == 0:
logging.warning(f"File ID: {uniq_id}: The audio file has zero duration.")
else:
min_vad, max_vad = getMinMaxOfRangeList(vad_start_end_list)
if max_vad > round(offset + duration, deci) or min_vad < offset:
logging.warning("RTTM label has been truncated since start is greater than duration of audio file")
overlap_range_list = getSubRangeList(
source_range_list=vad_start_end_list, target_range=[offset, offset + duration]
)
write_overlap_segments(outfile, AUDIO_RTTM_MAP, uniq_id, overlap_range_list, include_uniq_id, deci)
return manifest_file
def segments_manifest_to_subsegments_manifest(
segments_manifest_file: str,
subsegments_manifest_file: str = None,
window: float = 1.5,
shift: float = 0.75,
min_subsegment_duration: float = 0.05,
include_uniq_id: bool = False,
):
"""
Generate subsegments manifest from segments manifest file
Args:
        segments_manifest_file (str): path to the segments manifest file, typically from VAD output
        subsegments_manifest_file (str): path to the output subsegments manifest file (default (None): writes to the current working directory)
        window (float): window length used to split segments into subsegments
        shift (float): hop length used to shift consecutive subsegments
        min_subsegment_duration (float): exclude subsegments shorter than this duration value
        include_uniq_id (bool): whether to propagate the 'uniq_id' field from the segments manifest
    Returns:
        subsegments_manifest_file (str): path to the output subsegments manifest file
"""
if subsegments_manifest_file is None:
pwd = os.getcwd()
subsegments_manifest_file = os.path.join(pwd, 'subsegments.json')
with open(segments_manifest_file, 'r') as segments_manifest, open(
subsegments_manifest_file, 'w'
) as subsegments_manifest:
segments = segments_manifest.readlines()
for segment in segments:
segment = segment.strip()
dic = json.loads(segment)
audio, offset, duration, label = dic['audio_filepath'], dic['offset'], dic['duration'], dic['label']
subsegments = get_subsegments(offset=offset, window=window, shift=shift, duration=duration)
if include_uniq_id and 'uniq_id' in dic:
uniq_id = dic['uniq_id']
else:
uniq_id = None
for subsegment in subsegments:
start, dur = subsegment
if dur > min_subsegment_duration:
meta = {
"audio_filepath": audio,
"offset": start,
"duration": dur,
"label": label,
"uniq_id": uniq_id,
}
json.dump(meta, subsegments_manifest)
subsegments_manifest.write("\n")
return subsegments_manifest_file
def get_subsegments(offset: float, window: float, shift: float, duration: float):
"""
    Return subsegments for a segment of an audio file.
Args:
offset (float): start time of audio segment
window (float): window length for segments to subsegments length
shift (float): hop length for subsegments shift
duration (float): duration of segment
Returns:
        subsegments (List[tuple[float, float]]): list of (start, duration) pairs generated for the segment
"""
subsegments = []
start = offset
slice_end = start + duration
base = math.ceil((duration - window) / shift)
slices = 1 if base < 0 else base + 1
for slice_id in range(slices):
end = start + window
if end > slice_end:
end = slice_end
subsegments.append((start, end - start))
start = offset + (slice_id + 1) * shift
return subsegments
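# Editorial example (not part of the original file): with offset=0.0, window=1.5, shift=0.75 and
# duration=3.0, get_subsegments returns [(0.0, 1.5), (0.75, 1.5), (1.5, 1.5)], where the last
# subsegment ends exactly at the segment boundary.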
def embedding_normalize(embs, use_std=False, eps=1e-10):
"""
Mean and l2 length normalize the input speaker embeddings
Args:
        embs: embeddings of shape (Batch, emb_size)
        use_std (bool): if True, also divide by the per-dimension standard deviation
        eps (float): small constant that prevents division by zero
Returns:
embs: normalized embeddings of shape (Batch,emb_size)
"""
embs = embs - embs.mean(axis=0)
if use_std:
embs = embs / (embs.std(axis=0) + eps)
embs_l2_norm = np.expand_dims(np.linalg.norm(embs, ord=2, axis=-1), axis=1)
embs = embs / embs_l2_norm
return embs
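# Editorial usage note (not part of the original file): for speaker embeddings of shape
# (num_segments, emb_size), embedding_normalize mean-centers every dimension and scales each row
# to unit L2 norm, so cosine similarity between rows reduces to a plain dot product.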
|
gkucsko/NeMo
|
nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import torch
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.common.data import ConcatDataset
from nemo.collections.common.metrics import MetricStringToTorchMetric
from nemo.collections.common.metrics.classification_accuracy import ExactStringPerCategoryMatchMetric
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher
from nemo.utils import AppState, logging
try:
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator, get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ['MegatronT5FinetuneModel']
class MegatronT5FinetuneModel(MegatronT5Model):
"""Finetune Model that Inherits from MegatronT5Model instead."""
def __init__(self, cfg: DictConfig, trainer: Trainer):
super().__init__(cfg, trainer=trainer)
self.val_metric, self.val_metric_name = self.setup_metric(self.cfg.data.validation_ds)
self.val_metric = torch.nn.ModuleList(self.val_metric)
if hasattr(self.cfg.data, "test_ds"):
self.test_metric, self.test_metric_name = self.setup_metric(self.cfg.data.test_ds)
self.test_metric = torch.nn.ModuleList(self.test_metric)
def setup_metric(self, data_cfg):
# XNLI is a special case.
metric_name = "exact_string_match"
if hasattr(self.cfg, "eval_languages"):
metric = [ExactStringPerCategoryMatchMetric(self.cfg.eval_languages)]
else:
if not hasattr(data_cfg, "metric"):
metric = MetricStringToTorchMetric["exact_string_match"]
else:
if not hasattr(data_cfg.metric, "name"):
raise ValueError("Metric name is not provided in the metric config.")
if data_cfg.metric.name not in MetricStringToTorchMetric:
raise KeyError(
f"{data_cfg.metric.name} is not supported. List of supported metrics: {MetricStringToTorchMetric.keys()}"
)
metric_name = data_cfg.metric.name
metric = MetricStringToTorchMetric[metric_name]
# GLUE will not have a "src_file_name" attribute and will always have only a single metric.
if hasattr(data_cfg, "src_file_name"):
if isinstance(data_cfg.src_file_name, ListConfig):
# We pass average and num_classes to the metric constructor via kwargs even if they don't exist for each metric.
metric = [
metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)
for _ in range(len(self.cfg.data.test_ds.src_file_name))
]
else:
metric = [metric(average=data_cfg.metric.average, num_classes=data_cfg.metric.num_classes)]
else:
metric = [metric()] # GLUE does need to specify average or num_classes.
return metric, metric_name
def setup(self, stage=None):
# This is just to keep the parent class happy since we override its setup() method.
self.init_consumed_samples = 0
self.init_global_step = 0
if stage == 'predict':
return
# NOTE: PTL uses the same stage string "test" for both testing and validation.
self.build_train_valid_test_datasets(stage=stage)
if hasattr(self, '_validation_ds'):
self.setup_validation_data()
if hasattr(self, '_test_ds'):
self.setup_test_data()
if hasattr(self, '_train_ds'):
self.setup_training_data()
def _process_global_batch(self, global_batch):
"""Process a list of microbatches into a global batch."""
# If there is no language information in the global batch (ex: English MNLI), we can use the parent global batch processor as is.
if 'lang' not in global_batch[0]:
return self._process_global_batch_without_megatron_batch_sampler(global_batch)
        # For validation data (XNLI), we need to process the global batch and then deal with language info separately.
else:
assert all(['lang' in micro_batch for micro_batch in global_batch])
langs_list = []
processed_global_batch = self._process_global_batch_without_megatron_batch_sampler(
[{k: v for k, v in micro_batch.items() if k != 'lang'} for micro_batch in global_batch]
)
for micro_batch in global_batch:
langs_list.extend(micro_batch['lang'])
processed_global_batch['lang'] = langs_list
return processed_global_batch
def on_validation_epoch_start(self):
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.validation_ds.global_batch_size,
micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return super().on_validation_epoch_start()
def on_validation_epoch_end(self):
app_state = AppState()
if hasattr(self, "_train_ds"):
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.train_ds.global_batch_size,
micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# When running `trainer.validate()`, the training dataset is not available.
else:
logging.warning('No training data found, reconfiguring microbatches based on validation batch sizes.')
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self.cfg.data.validation_ds.global_batch_size,
micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return super().on_validation_epoch_end()
def training_step(self, batch, batch_idx):
micro_batch_size = batch[0]['text_enc'].size(0)
# This should happen only on the last batch of the dataset.
if micro_batch_size != self.cfg.data.train_ds.micro_batch_size:
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=micro_batch_size
* parallel_state.get_data_parallel_world_size()
* get_num_microbatches(),
micro_batch_size=micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
        # At this point batch is a list of dictionaries where each dict is a microbatch.
        # After the _process_global_batch call, batch will be a single dictionary containing the global batch.
        # This is required since the parent class expects a single global batch dictionary.
batch = self._process_global_batch(batch)
return super().training_step(batch, batch_idx)
def cast_for_metric(self, pred, label, metric_name):
if metric_name == 'exact_string_match':
return pred, label
pred = pred.replace(' ', '')
label = label.replace(' ', '')
# Correlation metrics require casting to float.
if metric_name in ['pearson_corr_coef', 'spearman_corr_coef']:
# Text-to-text model predictions may not always be valid floating point numbers.
try:
pred = float(pred)
except ValueError:
pred = 0.0
try:
label = float(label)
except ValueError:
raise ValueError(f'Could not convert {label} to float.')
pred = torch.FloatTensor([pred]).to(self.device)
label = torch.FloatTensor([label]).to(self.device)
# Other metrics require casting to integers.
elif metric_name in ['accuracy', 'auc', 'auroc', 'average_precision', 'f1']:
# Text-to-text model predictions may not always be valid integers.
try:
pred = int(pred)
except ValueError:
pred = 0
try:
label = int(label)
except ValueError:
raise ValueError(f'Could not convert {label} to int.')
pred = torch.LongTensor([pred]).to(self.device)
label = torch.LongTensor([label]).to(self.device)
else:
raise ValueError(f'Metric {metric_name} not supported.')
return pred, label
def inference_step(self, batch, batch_idx, mode, dataloader_idx=0):
batch_has_lang_information = len(batch[0]) == 7
micro_batch_size = batch[0]['text_enc'].size(0)
# This should happen only on the last batch of the dataset.
if micro_batch_size != self.cfg.data.validation_ds.micro_batch_size:
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=micro_batch_size
* parallel_state.get_data_parallel_world_size()
* get_num_microbatches(),
micro_batch_size=micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
        # At this point batch is a list of dictionaries where each dict is a microbatch.
        # After the _process_global_batch call, processed_batch will be a single dictionary containing the global batch.
        # This is required since the parent class expects a single global batch dictionary.
processed_batch = self._process_global_batch(batch)
# Call parent validation step to get the loss.
# NOTE: There could be extra keys in the processed_batch dictionary such as "langs" for XNLI, this will be ignored in the parent class.
loss = super().validation_step(processed_batch, batch_idx)
predicted_token_ids, _ = self.decode(
tokens_enc=processed_batch['text_enc'], enc_mask=processed_batch['enc_mask'], num_tokens_to_generate=30
)
# Special ids to text function to handle stripping <eos> and special tokens with sentencepiece tokenizers.
preds_text = self.ids_to_text(predicted_token_ids)
labels_text = self.ids_to_text(processed_batch['labels'])
input_text = self.ids_to_text(processed_batch['text_enc'])
if not batch_has_lang_information:
categories = [None] * len(preds_text)
else:
categories = processed_batch['lang']
metric = self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
assert len(categories) == len(preds_text) == len(labels_text)
for _, (pred, label, category) in enumerate(zip(preds_text, labels_text, categories)):
# To compute metrics like pearson or spearman correlation, we need to cast the predicted string and labels to floats.
pred, label = self.cast_for_metric(
pred, label, self.val_metric_name if mode == 'validation' else self.test_metric_name
)
if batch_has_lang_information:
_ = metric(pred, label, category)
else:
_ = metric(pred, label)
return {
'loss': loss,
'preds': preds_text,
'labels': labels_text,
'categories': categories,
'inputs': input_text,
}
def ids_to_text(self, batch_ids):
batch_ids = batch_ids.cpu().numpy().tolist()
texts = []
for ids in batch_ids:
if self.tokenizer.eos_id in ids:
idx = ids.index(self.tokenizer.eos_id)
ids = ids[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(self.tokenizer, 'special_token_to_id'):
ids = [id for id in ids if id not in self.tokenizer.special_token_to_id.values()]
text = self.tokenizer.ids_to_text(ids)
texts.append(text)
return texts
def _determine_log_key(self, data_config, dataloader_idx, metric_name, mode):
        # Function that determines the log key based on the user-provided name of the dataset or the dataloader index.
base_key = f"{mode}_{metric_name}_" if metric_name is not None else f"{mode}_"
# If the user provided names for each validation/test dataset, use those.
if hasattr(data_config, "names") and data_config.names is not None:
# With only a single validation/test dataset, the name is not a list.
if not isinstance(data_config.names, ListConfig):
name = data_config.names
else:
name = data_config.names[dataloader_idx]
return base_key + name
else:
return base_key + f"dataloader{dataloader_idx}"
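    # Editorial example (hypothetical config values): with mode='validation',
    # metric_name='exact_string_match' and data_config.names=['mnli', 'boolq'],
    # dataloader_idx=1 maps to the log key 'validation_exact_string_match_boolq';
    # without names it falls back to 'validation_exact_string_match_dataloader1'.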
def inference_epoch_end(self, outputs, mode, data_cfg):
# Parent class will handle logging of the loss.
if not outputs:
return
if isinstance(outputs[0], dict):
outputs = [outputs]
averaged_loss = []
averaged_metric = []
metric_name = self.val_metric_name if mode == 'validation' else self.test_metric_name
# Log metrics for each provided validation/test dataset.
for dataloader_idx, output in enumerate(outputs):
loss = super().validation_epoch_end([x['loss'] for x in output])
# Determine the key used to log the loss based on the user provided name of the dataset or the dataloader index.
loss_log_key = self._determine_log_key(data_cfg, dataloader_idx, "loss", mode)
# Determine the key used to log the eval metric based on the user provided name of the dataset or the dataloader index.
metric_log_key = self._determine_log_key(data_cfg, dataloader_idx, metric_name, mode)
self.log(loss_log_key, loss)
metric_object = (
self.val_metric[dataloader_idx] if mode == 'validation' else self.test_metric[dataloader_idx]
)
metric = metric_object.compute()
# Handle logging of GLUE/XNLI separately here. XNLI has a separate metric per language.
if isinstance(metric, dict):
# GLUE case:
if len(metric) == 1 and 'acc' in metric:
metric = metric['acc']
self.log(metric_log_key, metric)
logging.info(f"{mode} {metric_name}: {metric}")
# XNLI case where the metric dictionary contains the language and the computed metric as values.
else:
for k, v in metric.items():
if k != 'acc' and 'total' not in k:
self.log(metric_log_key + f'_{k}', v)
logging.info(f"{mode} {metric_name} lang {k} : {v}")
metric = metric['acc']
else:
self.log(metric_log_key, metric)
logging.info(f"{mode} {metric_name}: {metric}")
metric_object.reset()
averaged_loss.append(loss)
averaged_metric.append(metric)
# Write predictions, labels, and inputs to a file for each validation/test dataset.
if data_cfg.get("write_predictions_to_file", False):
# Check if the user provided a prefix path to the file(s) they want to write.
if not hasattr(data_cfg, "output_file_path_prefix") or data_cfg.output_file_path_prefix is None:
raise ValueError(
f"Cannot write predictions to file when output_file_path_prefix is not set or present in the yaml config file."
)
# Gather the outputs object from all data parallel ranks since we are using the DistributedSampler which splits data across DDP ranks.
gathered_outputs = [None for _ in range(self.world_size)]
torch.distributed.all_gather_object(
gathered_outputs,
[
{
'preds': x['preds'],
'labels': x['labels'],
'categories': x['categories'],
'inputs': x['inputs'],
}
for x in output
],
)
# Figure out what the suffix of the file should be.
filename_log_key = self._determine_log_key(data_cfg, dataloader_idx, None, mode)
# Keep a set of ground truths and inputs to write deduplicated predictions. Distributed Sampler may duplicate examples.
gt_inp_set = set()
deduplicated_outputs = {
'preds': [],
'labels': [],
'categories': [],
'inputs': [],
}
# PTL models have a self.global_rank attribute and we want to write to disk only on global rank 0.
if self.global_rank == 0:
for rank in range(0, self.world_size):
for batch in gathered_outputs[rank]:
for pred, label, input, category in zip(
batch['preds'], batch['labels'], batch['inputs'], batch['categories']
):
if input + label not in gt_inp_set:
gt_inp_set.add(input + label)
deduplicated_outputs['preds'].append(pred)
deduplicated_outputs['labels'].append(label)
deduplicated_outputs['categories'].append(category)
deduplicated_outputs['inputs'].append(input)
self.write_predictions_to_file(
deduplicated_outputs, f"{data_cfg.output_file_path_prefix}_{filename_log_key}"
)
torch.distributed.barrier()
# Logging of the averaged metrics:
averaged_loss = sum(averaged_loss) / len(averaged_loss)
averaged_metric = sum(averaged_metric) / len(averaged_metric)
# Handle case where metrics can be nan or inf. This can break checkpoint save/load.
if torch.isinf(averaged_metric) or torch.isnan(averaged_metric):
app_state = AppState()
monitor_mode = app_state.checkpoint_callback_params.mode
assert monitor_mode in ['min', 'max']
averaged_metric = 0.0 if monitor_mode == 'max' else 1e5
if mode == 'validation':
self.log("validation_loss", averaged_loss)
self.log(f"validation_{self.val_metric_name}", averaged_metric)
elif mode == 'test':
self.log("test_loss", averaged_loss)
self.log(f"test_{self.test_metric_name}", averaged_metric)
return averaged_loss, averaged_metric
def write_predictions_to_file(self, outputs, output_file_path_prefix):
with open(output_file_path_prefix + "_inputs_preds_labels.json", "w") as f_json:
json_output = {
"inputs": outputs["inputs"],
"preds": outputs["preds"],
"labels": outputs["labels"],
}
json.dump(json_output, f_json)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self.inference_step(batch, batch_idx, 'validation', dataloader_idx)
def validation_epoch_end(self, outputs):
_ = self.inference_epoch_end(outputs, 'validation', self.cfg.data.validation_ds)
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self.inference_step(batch, batch_idx, 'test', dataloader_idx)
def test_epoch_end(self, outputs):
_ = self.inference_epoch_end(outputs, 'test', self.cfg.data.test_ds)
def build_data_loader(
self,
dataset,
micro_batch_size,
global_batch_size,
shuffle,
num_workers,
pin_memory,
drop_last,
check_validation_interval,
):
"""Buld dataloader given an input dataset."""
if dataset is None:
return None
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=world_size, rank=rank, shuffle=shuffle
)
# This check makes sure the val_check_interval is less than the number of global batches.
# Normally, PTL would do this check and properly account for gradient accumulation.
# But now, it is implicit in the apex fwd/bwd functions and so we need to check for this somewhere.
# The consequence of not doing this is that training loop will never run validation.
# NOTE: Prog bar is also broken as a result of this.
global_batch_size_per_gpu = micro_batch_size * get_num_microbatches()
if (
self.trainer.val_check_interval > (sampler.num_samples // global_batch_size_per_gpu)
and check_validation_interval
):
            raise ValueError(
                f"trainer.val_check_interval {self.trainer.val_check_interval} is > number of global batches {sampler.num_samples // global_batch_size_per_gpu}"
            )
# Data loader. Note that batch size is the per GPU batch size.
return torch.utils.data.DataLoader(
dataset,
collate_fn=dataset.collate_fn,
sampler=sampler,
batch_size=micro_batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=drop_last,
)
def setup_training_data(self):
self._train_dl = self.build_data_loader(
self._train_ds,
micro_batch_size=self.cfg.data.train_ds.micro_batch_size,
global_batch_size=self.cfg.data.train_ds.global_batch_size,
shuffle=self.cfg.data.train_ds.shuffle,
num_workers=self.cfg.data.train_ds.num_workers,
pin_memory=self.cfg.data.train_ds.pin_memory,
drop_last=self.cfg.data.train_ds.drop_last,
check_validation_interval=True,
)
def setup_eval_data(self, datasets, data_cfg):
dataloaders = []
for dataset in datasets:
eval_dl = self.build_data_loader(
dataset,
micro_batch_size=data_cfg.micro_batch_size,
global_batch_size=data_cfg.global_batch_size,
shuffle=data_cfg.shuffle,
num_workers=data_cfg.num_workers,
pin_memory=data_cfg.pin_memory,
drop_last=data_cfg.drop_last,
check_validation_interval=False,
)
dataloaders.append(eval_dl)
return dataloaders
def setup_validation_data(self):
self._validation_dl = self.setup_eval_data(self._validation_ds, self.cfg.data.validation_ds)
def setup_test_data(self):
self._test_dl = self.setup_eval_data(self._test_ds, self.cfg.data.test_ds)
def _build_train_dataset(self, data_cfg):
"""Build the training dataset."""
if (
data_cfg.drop_last is False
and data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size()
):
raise ValueError(
f"Cannot use drop_last=False in your training data with gradient accumulation found grad acc of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} with global_batch_size {data_cfg.global_batch_size}, micro_batch_size {data_cfg.micro_batch_size}, data parallel size {parallel_state.get_data_parallel_world_size()}"
)
datasets = []
# Determine if we are using a single dataset or a list of datasets.
is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
if is_src_list_config:
if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
else:
data_cfg.src_file_name = [data_cfg.src_file_name]
data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
dataset = SequenceToSequenceDataset(
src_file_name=src,
tgt_file_name=tgt,
src_tokenizer=self.tokenizer,
tgt_tokenizer=self.tokenizer,
max_src_seq_length=data_cfg.max_src_seq_length,
max_tgt_seq_length=data_cfg.max_tgt_seq_length,
)
datasets.append(dataset)
if len(datasets) > 1:
dataset = ConcatDataset(
datasets=datasets,
sampling_technique=data_cfg.get('concat_sampling_technique', 'temperature'),
sampling_temperature=data_cfg.get('concat_sampling_temperature', 5),
sampling_probabilities=data_cfg.get(
'concat_sampling_probabilities', [1 / len(datasets)] * len(datasets)
),
global_rank=parallel_state.get_data_parallel_rank(),
world_size=parallel_state.get_data_parallel_world_size(),
)
return dataset
else:
return datasets[0]
def _build_eval_dataset(self, data_cfg):
"""Build the evaluation dataset."""
if data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size():
raise ValueError(
f'You are trying to use "implicit gradient accumulation" of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} in your validation/test datasets. This is not supported. Please set global_batch_size equal to micro_batch_size * data_parallel_world_size.'
)
datasets = []
# Determine if we are using a single dataset or a list of datasets.
is_src_list_config = isinstance(data_cfg.src_file_name, ListConfig)
is_tgt_list_config = isinstance(data_cfg.tgt_file_name, ListConfig)
is_names_list_config = False
if hasattr(data_cfg, "names"):
if isinstance(data_cfg.names, ListConfig):
is_names_list_config = True
if (is_src_list_config and not is_tgt_list_config) or (is_tgt_list_config and not is_src_list_config):
raise ValueError("src_list and tgt_list must both be either a ListConfig or a string. ")
if is_src_list_config:
if len(data_cfg.src_file_name) != len(data_cfg.tgt_file_name):
raise ValueError("src_file_name and tgt_file_name must have the same number of elements. ")
if is_names_list_config and len(data_cfg.names) != len(data_cfg.src_file_name):
raise ValueError(
"If you are providing names for each src/tgt file, they must have the same number of elements."
)
else:
data_cfg.src_file_name = [data_cfg.src_file_name]
data_cfg.tgt_file_name = [data_cfg.tgt_file_name]
for src, tgt in zip(data_cfg.src_file_name, data_cfg.tgt_file_name):
dataset = SequenceToSequenceDataset(
src_file_name=src,
tgt_file_name=tgt,
src_tokenizer=self.tokenizer,
tgt_tokenizer=self.tokenizer,
max_src_seq_length=data_cfg.max_src_seq_length,
max_tgt_seq_length=data_cfg.max_tgt_seq_length,
)
datasets.append(dataset)
return datasets
def build_train_valid_test_datasets(self, stage):
logging.info('Building datasets ...')
if stage != 'test':
self._validation_ds = self._build_eval_dataset(self.cfg.data.validation_ds)
if stage != 'validation':
if hasattr(self.cfg.data, 'test_ds'):
self._test_ds = self._build_eval_dataset(self.cfg.data.test_ds)
if stage == 'validation' or stage == 'test':
return
self._train_ds = self._build_train_dataset(self.cfg.data.train_ds)
logging.info(f'Finished building datasets ...')
def on_train_start(self) -> None:
"""PTL hook used to override DataFetcher with GlobalBatchDataFetcher """
self.trainer.fit_loop._data_fetcher = GlobalBatchDataFetcher()
def on_validation_start(self) -> None:
"""PTL hook used to override DataFetcher with GlobalBatchDataFetcher """
self.trainer.fit_loop.epoch_loop.val_loop._data_fetcher = GlobalBatchDataFetcher()
self.trainer.validate_loop._data_fetcher = GlobalBatchDataFetcher()
def on_test_start(self) -> None:
self.trainer.test_loop._data_fetcher = GlobalBatchDataFetcher()
|
spyysalo/prodigy-wiki-linking
|
builddb.py
|
#!/usr/bin/env python3
import sys
from os.path import join
from collections import defaultdict
from argparse import ArgumentParser
from tqdm import tqdm
from sqlitedict import SqliteDict
def load_wd_aliases(fn):
"""
Load mapping from Wikidata QIDs to Wikidata aliases.
"""
total = 0
aliases_by_qid = defaultdict(list)
with open(fn) as f:
next(f) # skip header
for ln, l in enumerate(f, start=2):
l = l.rstrip('\n')
qid, alias = l.split('|', 1)
aliases_by_qid[qid].append(alias)
total += 1
print(f'loaded {total} aliases for {len(aliases_by_qid)} IDs from {fn}',
file=sys.stderr)
return aliases_by_qid
def load_title_by_qid(fn):
"""
    Load mapping from Wikidata QIDs to Wikipedia titles.
"""
title_by_qid = {}
with open(fn) as f:
next(f) # skip header
for ln, l in enumerate(f, start=2):
l = l.rstrip('\n')
title, qid = l.split('|')
assert qid not in title_by_qid, f'dup in {fn}: {qid}'
title_by_qid[qid] = title
print(f'loaded {len(title_by_qid)} titles from {fn}',
file=sys.stderr)
return title_by_qid
def load_descriptions(fn):
desc_by_qid = {}
with open(fn) as f:
next(f) # skip header
for ln, l in enumerate(f, start=2):
l = l.rstrip('\n')
qid, description = l.split('|', 1)
assert qid not in desc_by_qid, f'dup in {fn}: {qid}'
desc_by_qid[qid] = description
print(f'loaded {len(desc_by_qid)} descriptions from {fn}',
file=sys.stderr)
return desc_by_qid
def filter_title(title):
excluded_prefixes = [
'Käyttäjä:',
'Toiminnot:',
'Metasivu:',
'Luokka:',
':Luokka:'
]
if any(title.startswith(p) for p in excluded_prefixes):
return True
return False
def load_counts(fn):
filtered, total = 0, 0
counts = defaultdict(lambda: defaultdict(int))
with open(fn) as f:
next(f) # skip header
for ln, l in enumerate(f, start=2):
l = l.rstrip('\n')
alias, count, title = l.split('|')
count = int(count)
if filter_title(title):
filtered += 1
else:
counts[alias][title] += count
total += 1
print(f'filtered {filtered}/{total} titles from {fn}', file=sys.stderr)
print(f'loaded counts for {len(counts)} strings from {fn}', file=sys.stderr)
return counts
def argparser():
ap = ArgumentParser()
ap.add_argument('indir', help='directory with wiki csv data')
ap.add_argument('dbname', help='database name')
return ap
def main(argv):
args = argparser().parse_args(argv[1:])
aliases = load_wd_aliases(join(args.indir, 'entity_alias.csv'))
descs = load_descriptions(join(args.indir, 'entity_descriptions.csv'))
title_by_qid = load_title_by_qid(join(args.indir, 'entity_defs.csv'))
counts = load_counts(join(args.indir, 'prior_prob.csv'))
qid_by_title = { v: k for k, v in title_by_qid.items() }
# make sure each WD alias is included
    for qid, alias_list in aliases.items():
        if qid not in title_by_qid:
            continue # unmappable
        for alias in alias_list:
            counts[alias][title_by_qid[qid]] += 0
with SqliteDict(args.dbname) as db:
for string, title_count in tqdm(counts.items()):
data = {}
for title, count in title_count.items():
qid = qid_by_title.get(title)
data[title] = {
'count': count,
'qid': qid,
'description': descs.get(qid),
}
db[string] = data
print('committing ...')
db.commit()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
spyysalo/prodigy-wiki-linking
|
el_kb.py
|
<reponame>spyysalo/prodigy-wiki-linking
import sys
import os
import re
from collections import defaultdict
from logging import warning
from sqlitedict import SqliteDict
from standoff import ann_stream
def load_lemma_data(fn):
lemma_data = defaultdict(list)
with open(fn) as f:
for ln, l in enumerate(f, start=1):
l = l.rstrip('\n')
if l.startswith('#'):
continue # skip comments
count, form, lemma, pos = l.split('\t')
# assume '#' is used to join compound words and remove it
# when it appears between two alphabetic characters
lemma = re.sub(r'(?<=[^\W\d])#(?=[^\W\d])', r'', lemma)
lemma_data[form].append((lemma, pos, count))
return lemma_data
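# Editorial note (format assumption inferred from the parsing above): each non-comment line of the
# lemma file is expected to hold "count<TAB>form<TAB>lemma<TAB>pos", and compound markers such as
# "linja#auto" are collapsed to "linjaauto" before the entry is stored under its surface form.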
def unique(sequence):
"""Return unique items in sequence, preserving order."""
# https://www.peterbe.com/plog/fastest-way-to-uniquify-a-list-in-python-3.6
return list(dict.fromkeys(sequence))
class KnowledgeBase:
def __init__(self, lemmafn):
self.lemma_data = load_lemma_data(lemmafn)
def lemmatize_last(self, words):
for lemma, pos, count in self.lemma_data.get(words[-1], []):
yield words[:-1] + [lemma]
def variants(self, string):
words = string.split(' ') # assume space-separated
for lemmatized in self.lemmatize_last(words):
yield ' '.join(lemmatized)
yield string
def candidates(self, string):
matches = []
for s in unique(self.variants(string)):
matches.extend(self.exact_match_candidates(s))
matches.sort(key=lambda c: c[0], reverse=True) # descending by count
seen, uniq = set(), []
for count, qid, title, desc in matches:
if title not in seen:
uniq.append((count, qid, title, desc))
seen.add(title)
return uniq
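    # Editorial note: candidates() yields (count, qid, title, description) tuples sorted by
    # descending prior count, keeping only the first hit per Wikipedia title across the
    # lemmatized variants of the query string.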
def exact_match_candidates(self, string):
raise NotImplementedError
class SqliteKnowledgeBase(KnowledgeBase):
def __init__(self, dbfn, lemmafn):
super().__init__(lemmafn)
self.db = SqliteDict(dbfn, flag='r')
def exact_match_candidates(self, string):
if string not in self.db:
return []
result = []
for title, data in self.db[string].items():
qid, desc = data['qid'], data['description']
result.append((data['count'], qid, title, desc))
result.sort(key=lambda c: c[0], reverse=True) # highest count first
return result
def main(argv):
kb = SqliteKnowledgeBase('fiwiki.sqlite', 'data/fi-lemmas.tsv')
stream = ann_stream('data/ann')
    for docid, sent, span in stream:
candidates = kb.candidates(span.text)
for c in candidates:
print(span.text, c)
if not candidates:
print('MISSED:', span.text)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
spyysalo/prodigy-wiki-linking
|
el_recipe.py
|
<gh_stars>0
import datetime
import hashlib
import prodigy
from pathlib import Path
from logging import warning
from prodigy.components.filters import filter_duplicates
from standoff import ann_stream
from el_kb import SqliteKnowledgeBase
def iso8601_now():
"""Return current time in ISO 8601 format w/o microseconds."""
return datetime.datetime.now().replace(microsecond=0).isoformat(' ')
def _hash(string):
return int(hashlib.sha1(string.encode('utf-8')).hexdigest(), 16) % 2**31
def make_prodigy_example(docid, text, span):
# Construct a dictionary with the structure that prodigy expects
example = {
'text': text,
'meta': { 'score': 1.0 } # TODO add ID
}
example['_input_hash'] = _hash(docid)
span_dict = {
'start': span.start,
'end': span.end,
'text': span.text,
'label': span.type,
'source': 'manual',
'rank': 0,
'score': 1.0,
'input_hash': example['_input_hash'],
}
example['spans'] = [span_dict]
    # add the _task_hash value required by prodigy (_input_hash was set above)
example['_task_hash'] = _hash(f'{docid} {span.id} {span.type} {span.text}')
return example
def format_option(count, qid, title, description):
wp_prefix = 'https://fi.wikipedia.org/wiki/'
wd_prefix = 'https://www.wikidata.org/wiki/'
return ''.join([
f'{title} ',
f'<a href="{wp_prefix}{title}" target="_blank">[WP]</a> ',
f'<a href="{wd_prefix}{qid}" target="_blank">[WD]</a> ' if qid else '',
f'({count})',
f': {description}' if description else ''
])
def add_options(stream, kb):
for task in stream:
for span in task['spans']:
options = []
for candidate in kb.candidates(span['text']):
count, qid, title, desc = candidate
options.append({
'id': title,
'html': format_option(*candidate),
})
if not options:
warning(f'no options for {span["text"]}, skipping...')
continue
            options.append({'id': 'NIL_other', 'text': 'Not in options'})
            options.append({'id': 'NIL_ambiguous', 'text': 'Need more context'})
task['options'] = options
yield task
@prodigy.recipe(
'entity_linker.manual',
dataset=('The dataset to use', 'positional', None, str),
annotator=('Annotator name', 'positional', None, str),
directory=('The source data directory', 'positional', None, Path),
kbpath=('Path to the KB', 'positional', None, Path),
)
def entity_linker_manual(dataset, annotator, directory, kbpath):
kb = SqliteKnowledgeBase(kbpath, 'data/fi-lemmas.tsv')
stream = ann_stream(directory)
stream = (make_prodigy_example(*e) for e in stream)
stream = add_options(stream, kb)
stream = filter_duplicates(stream, by_input=False, by_task=True)
def before_db(examples):
for e in examples:
if 'created' not in e:
e['created'] = iso8601_now()
if 'annotator' not in e:
e['annotator'] = annotator
return examples
return {
'dataset': dataset,
'stream': stream,
'view_id': 'choice',
'before_db': before_db,
}
if __name__ == '__main__':
el = entity_linker_manual('dummy-dataset', 'dummy-user', 'normtest', 'fiwiki-kb-filtered')
for e in el['stream']:
print(e)
|
spyysalo/prodigy-wiki-linking
|
standoff.py
|
import os
from glob import glob
from collections import namedtuple
from logging import warning
# Entity types to exclude by default from candidates
EXCLUDE_BY_DEFAULT = set([
'DATE',
'TIME',
'CARDINAL',
'ORDINAL',
'QUANTITY',
'PERCENT',
])
Textbound = namedtuple('Textbound', 'id type start end text')
def load_textbounds(txt_fn, ann_fn):
with open(txt_fn) as txt_f:
doc_text = txt_f.read()
textbounds = []
with open(ann_fn) as ann_f:
for ln, l in enumerate(ann_f, start=1):
l = l.rstrip('\n')
if not l.startswith('T'):
continue # skip all but textbound annotations
id_, type_span, text = l.split('\t')
type_, start, end = type_span.split(' ')
start, end = int(start), int(end)
assert doc_text[start:end] == text
textbounds.append(Textbound(id_, type_, start, end, text))
return doc_text, textbounds
def get_span_sentence(text, span):
# Return the sentence in which the given span occurs in the text.
# Assumes that sentences are separated by newlines.
offset = 0
for index, sentence in enumerate(text.split('\n'), start=1):
if offset+len(sentence) > span.start:
assert offset+len(sentence) >= span.end
# Create span with adjusted text
s, e = span.start - offset, span.end - offset
sent_span = Textbound(span.id, span.type, s, e, span.text)
assert sentence[sent_span.start:sent_span.end] == sent_span.text
return index, sentence, sent_span
offset += len(sentence) + 1
def ann_stream(directory, exclude=EXCLUDE_BY_DEFAULT):
if exclude is None:
exclude = set()
# List .txt and .ann files
txt_fns = glob(os.path.join(directory, '*.txt'))
ann_fns = glob(os.path.join(directory, '*.ann'))
# Grab unique for each without extension
txt_fns = set(os.path.splitext(n)[0] for n in txt_fns)
ann_fns = set(os.path.splitext(n)[0] for n in ann_fns)
if txt_fns - ann_fns:
warning(f'.txt files without .ann: {txt_fns-ann_fns}')
if ann_fns - txt_fns:
warning(f'.ann files without .txt: {ann_fns-txt_fns}')
for fn in sorted(txt_fns & ann_fns):
text, spans = load_textbounds(f'{fn}.txt', f'{fn}.ann')
spans = [s for s in spans if s.type not in exclude]
for span in spans:
sentidx, sentence, sent_span = get_span_sentence(text, span)
yield f'{fn}.{sentidx}', sentence, sent_span
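# Editorial usage sketch (not part of the original file; the directory path is illustrative):
#   for docid, sentence, span in ann_stream('data/ann'):
#       print(docid, span.type, span.text)
# Each yielded item pairs a "<basename>.<sentence index>" id with the sentence text and a
# Textbound whose offsets are relative to that sentence.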
|
spyysalo/prodigy-wiki-linking
|
filter_kb.py
|
<filename>filter_kb.py
#!/usr/bin/env python3
import sys
import os
from collections import defaultdict
from argparse import ArgumentParser
from el_kb import load_wd_aliases, load_descriptions, load_title_qid_map
from el_kb import load_counts
def save_aliases(fn, aliases_by_qid):
with open(fn, 'w') as f:
print('WD_id|alias', file=f) # header
for qid, aliases in aliases_by_qid.items():
for alias in aliases:
print(f'{qid}|{alias}', file=f)
def save_descriptions(fn, desc_by_qid):
with open(fn, 'w') as f:
print('WD_id|description', file=f) # header
for qid, desc in desc_by_qid.items():
print(f'{qid}|{desc}', file=f)
def save_titles(fn, title_by_qid):
with open(fn, 'w') as f:
print('WP_title|WD_id', file=f) # header
for qid, title in title_by_qid.items():
print(f'{title}|{qid}', file=f)
def save_counts(fn, counts_by_alias, title_by_qid):
with open(fn, 'w') as f:
print('alias|count|entity', file=f) # header
for alias, counts in counts_by_alias.items():
for count, qid in counts:
title = title_by_qid[qid]
print(f'{alias}|{count}|{title}', file=f)
def argparser():
ap = ArgumentParser()
ap.add_argument('indir', help='KB directory')
ap.add_argument('outdir', help='output directory')
ap.add_argument('-m', '--min-count', type=int, default=2)
return ap
def main(argv):
args = argparser().parse_args(argv[1:])
aliases = load_wd_aliases(os.path.join(args.indir, 'entity_alias.csv'))
descriptions = load_descriptions(
os.path.join(args.indir, 'entity_descriptions.csv'))
titles = load_title_qid_map(os.path.join(args.indir, 'entity_defs.csv'))
counts = load_counts(os.path.join(args.indir, 'prior_prob.csv'), titles)
filtered_qids = set()
filtered_counts = defaultdict(list)
filtered, total = 0, 0
for alias, count_list in counts.items():
for count, qid in count_list:
if count >= args.min_count:
filtered_counts[alias].append((count, qid))
filtered_qids.add(qid)
else:
filtered += 1
total += 1
print(f'filtered {filtered}/{total} ({filtered/total:.1%}) counts',
file=sys.stderr)
filtered_titles = {}
filtered, total = 0, 0
for qid, title in titles.items():
if qid in filtered_qids:
filtered_titles[qid] = title
else:
filtered += 1
total += 1
print(f'filtered {filtered}/{total} ({filtered/total:.1%}) titles',
file=sys.stderr)
filtered_descs = {}
filtered, total = 0, 0
for qid, desc in descriptions.items():
if qid in filtered_qids:
filtered_descs[qid] = desc
else:
filtered += 1
total += 1
print(f'filtered {filtered}/{total} ({filtered/total:.1%}) descriptions',
file=sys.stderr)
filtered_aliases = defaultdict(list)
filtered, total = 0, 0
for qid, alias_list in aliases.items():
for alias in alias_list:
if qid in filtered_qids:
filtered_aliases[qid].append(alias)
else:
filtered += 1
total += 1
print(f'filtered {filtered}/{total} ({filtered/total:.1%}) aliases',
file=sys.stderr)
save_aliases(os.path.join(args.outdir, 'entity_alias.csv'),
filtered_aliases)
save_descriptions(os.path.join(args.outdir, 'entity_descriptions.csv'),
filtered_descs)
save_titles(os.path.join(args.outdir, 'entity_defs.csv'),
filtered_titles)
save_counts(os.path.join(args.outdir, 'prior_prob.csv'),
filtered_counts, titles)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
jhult/compose_format
|
setup.py
|
<filename>setup.py<gh_stars>10-100
from setuptools import setup
def readme():
with open('README.rst') as file:
return file.read()
setup(
name='compose_format',
version='1.1.0',
description='format docker-compose files',
long_description=readme(),
url='http://github.com/funkwerk/compose_format',
author='<NAME>',
license='MIT',
packages=['compose_format'],
install_requires=['ruamel.yaml'],
zip_safe=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Environment :: Console',
'Operating System :: OS Independent',
],
keywords='docker-compose format docker yml',
include_package_data=True,
scripts=['bin/compose_format'])
|
jhult/compose_format
|
compose_format/__init__.py
|
<filename>compose_format/__init__.py
from ruamel.yaml import RoundTripDumper, RoundTripLoader, dump, load
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.scalarstring import SingleQuotedScalarString
class ComposeFormat:
TOPLEVEL_ORDER = ['version', 'services', 'volumes', 'networks', 'secrets']
SERVICE_ORDER = [
'image', 'command', 'entrypoint', 'container_name',
'links', 'volumes_from', 'volumes', 'volume_driver', 'tmpfs',
'build',
'expose', 'ports',
'net', 'network_mode', 'networks',
'deploy',
'labels',
'devices',
'read_only',
'healthcheck',
'env_file', 'environment',
'secrets',
'cpu_shares', 'cpu_quota', 'cpuset', 'domainname', 'hostname', 'ipc',
'mac_address', 'mem_limit', 'memswap_limit', 'privileged', 'shm_size',
'depends_on', 'extends', 'external_links',
'stdin_open', 'user', 'working_dir',
'extra_hosts', 'restart', 'ulimits', 'tty', 'dns', 'dns_search', 'pid',
'security_opt', 'cap_add', 'cap_drop', 'cgroup_parent', 'logging', 'log_driver', 'log_opt',
'stopsignal', 'stop_signal', 'stop_grace_period',
'sysctls', 'userns_mode',
'autodestroy', 'autoredeploy',
'deployment_strategy', 'sequential_deployment', 'tags', 'target_num_containers',
'roles',
]
DEPLOY_ORDER = [
'placement', 'replicas', 'mode',
'update_config',
'resources',
'restart_policy',
'labels',
]
HEALTHCHECK_ORDER = [
'test',
'interval', 'timeout', 'retries',
'disable',
]
BUILD_ORDER = ['context', 'dockerfile', 'args', 'cache_from', 'labels', 'shm_size', 'target']
ORDERS = {
'version': TOPLEVEL_ORDER,
'services': TOPLEVEL_ORDER,
'image': SERVICE_ORDER,
'dockerfile': BUILD_ORDER,
'placement': DEPLOY_ORDER,
'replicas': DEPLOY_ORDER,
'test': HEALTHCHECK_ORDER,
}
NON_SORTABLE_ARRAYS = [
'entrypoint',
'command',
'test',
]
def __init__(self):
pass
def format(self, path, replace=False, strict=True):
with open(path, 'r') as file:
data = file.read()
original = data
formatted = self.format_string(data, replace=replace, strict=strict)
if replace:
with open(path, 'w') as file:
file.write(formatted)
else:
print(formatted)
return original == formatted
def format_string(self, data, replace=False, strict=True):
data = self.reorder(load(data, RoundTripLoader), strict=strict)
formatted = dump(data, Dumper=RoundTripDumper,
indent=2, block_seq_indent=2, width=120)
return formatted.strip() + '\n'
@staticmethod
def reorder(data, strict=True):
if type(data) is CommentedMap:
order = ComposeFormat.order_map(list(data.keys()))
keys = list(data.keys())
while ComposeFormat.sorted_by_order(keys, order, strict) != keys:
for a, b in zip(ComposeFormat.sorted_by_order(keys, order, strict), keys):
if a == b:
continue
data.move_to_end(b)
break
keys = list(data.keys())
for key, item in data.items():
if key in ComposeFormat.NON_SORTABLE_ARRAYS:
continue
ComposeFormat.reorder(item, strict)
return data
if type(data) is CommentedSeq:
for i, value in enumerate(data):
if type(value) is not CommentedMap:
data[i] = ComposeFormat.fix_sexadecimal_numbers(value)
data.sort()
return data
return data
@staticmethod
def fix_sexadecimal_numbers(value):
        import re
        # Quote MM:SS-like values so YAML does not interpret them as base-60 (sexagesimal) integers.
        SEXADECIMAL_NUMBER = r'(?P<left>\d+):(?P<right>\d+)'
match = re.match(SEXADECIMAL_NUMBER, str(value))
if not match or int(match.group('left')) > 60 or int(match.group('right')) > 60:
return value
return SingleQuotedScalarString('{0}:{1}'.format(match.group('left'), match.group('right')))
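    # Editorial example (assumes standard YAML 1.1 sexagesimal parsing): a port mapping such as
    # 22:22 would otherwise be read as the base-60 integer 1342, so fix_sexadecimal_numbers('22:22')
    # returns the quoted string '22:22', while '8080:80' is returned unchanged because 8080 > 60.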
@staticmethod
def order_map(keys):
for key in ComposeFormat.ORDERS.keys():
if key in keys:
return ComposeFormat.ORDERS[key]
return None
@staticmethod
def sorted_by_order(keys, order, strict):
if order is None:
return sorted(keys)
def order_function(key):
if strict:
assert key in order, 'key: {0} not known'.format(key)
if key in order:
return order.index(key)
return len(order) + ComposeFormat.name_to_order(key)
return sorted(keys, key=order_function)
@staticmethod
def name_to_order(value):
from functools import reduce
return reduce(lambda left, right: (left * 256 + right), (ord(char) for char in value))
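# Editorial usage sketch (not part of the original file; the path is hypothetical):
#   formatter = ComposeFormat()
#   already_canonical = formatter.format('docker-compose.yml', replace=True, strict=False)
# format() rewrites the file in place when replace=True and returns True if the input was already
# in canonical form.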
|
JinJackson/Simcse-unsup
|
train_unsup.py
|
# -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
import argparse
import logging
import os
from pathlib import Path
from datasets import load_dataset
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer
from SimCSE import SimCSE
from CSECollator import CSECollator
# python train_unsup.py --train_file ./data/news_title.txt --pretrained ./model/bert-wwm/
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--train_file", type=str, help="train text file")
parser.add_argument("--pretrained", type=str, default="hfl/chinese-bert-wwm-ext", help="huggingface pretrained model")
parser.add_argument("--model_out", type=str, default="./model", help="model output path")
parser.add_argument("--num_proc", type=int, default=5, help="dataset process thread num")
parser.add_argument("--max_length", type=int, default=100, help="sentence max length")
parser.add_argument("--batch_size", type=int, default=64, help="batch size")
parser.add_argument("--epochs", type=int, default=2, help="epochs")
parser.add_argument("--lr", type=float, default=1e-5, help="learning rate")
parser.add_argument("--tao", type=float, default=0.05, help="temperature")
parser.add_argument("--device", type=str, default="cuda", help="device")
parser.add_argument("--display_interval", type=int, default=50, help="display interval")
parser.add_argument("--save_interval", type=int, default=100, help="save interval")
parser.add_argument("--pool_type", type=str, default="cls", help="pool_type")
parser.add_argument("--dropout_rate", type=float, default=0.3, help="dropout_rate")
args = parser.parse_args()
return args
def load_data(args, tokenizer):
data_files = {"train": args.train_file}
ds = load_dataset("text", data_files=data_files)
ds_tokenized = ds.map(lambda example: tokenizer(example["text"]), num_proc=args.num_proc)
collator = CSECollator(tokenizer, max_len=args.max_length)
dl = DataLoader(ds_tokenized["train"],
batch_size=args.batch_size,
collate_fn=collator.collate)
return dl
def compute_loss(y_pred, tao=0.05, device="cuda"):
idxs = torch.arange(0, y_pred.shape[0], device=device)
y_true = idxs + 1 - idxs % 2 * 2
similarities = F.cosine_similarity(y_pred.unsqueeze(1), y_pred.unsqueeze(0), dim=2)
similarities = similarities - torch.eye(y_pred.shape[0], device=device) * 1e12
similarities = similarities / tao
loss = F.cross_entropy(similarities, y_true)
return torch.mean(loss)
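# Editorial sketch of the pairing that compute_loss assumes (not part of the original file):
# rows of y_pred hold two dropout-augmented views of each sentence in adjacent positions,
# so the positive target for every row is its neighbour. For a batch of 4 rows:
#   idxs = torch.arange(0, 4)
#   (idxs + 1 - idxs % 2 * 2).tolist()  # -> [1, 0, 3, 2]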
def train(args):
tokenizer = BertTokenizer.from_pretrained(args.pretrained, mirror="tuna")
dl = load_data(args, tokenizer)
model = SimCSE(args.pretrained, args.pool_type, args.dropout_rate).to(args.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
model_out = Path(args.model_out)
if not model_out.exists():
os.mkdir(model_out)
model.train()
batch_idx = 0
for epoch_idx in range(args.epochs):
for data in tqdm(dl):
batch_idx += 1
pred = model(input_ids=data["input_ids"].to(args.device),
attention_mask=data["attention_mask"].to(args.device),
token_type_ids=data["token_type_ids"].to(args.device))
loss = compute_loss(pred, args.tao, args.device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = loss.item()
if batch_idx % args.display_interval == 0:
logging.info(f"batch_idx: {batch_idx}, loss: {loss:>10f}")
if batch_idx % args.save_interval == 0:
torch.save(model.state_dict(), model_out / "epoch_{0}-batch_{1}-loss_{2:.6f}".format(epoch_idx, batch_idx, loss))
def main():
args = parse_args()
train(args)
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
|
JinJackson/Simcse-unsup
|
SimCSERetrieval.py
|
# -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
import faiss
import numpy as np
import torch
from tqdm import tqdm
from transformers import BertTokenizer
from SimCSE import SimCSE
class SimCSERetrieval(object):
def __init__(self,
fname,
pretrained_path,
simcse_model_path,
batch_size=32,
max_length=100,
device="cuda"):
self.fname = fname
self.batch_size = batch_size
self.max_length = max_length
self.device = device
self.tokenizer = BertTokenizer.from_pretrained(pretrained_path)
model = SimCSE(pretrained=pretrained_path).to(device)
model.load_state_dict(torch.load(simcse_model_path))
self.model = model
self.model.eval()
self.id2text = None
self.vecs = None
self.ids = None
self.index = None
def encode_batch(self, texts):
text_encs = self.tokenizer(texts,
padding=True,
max_length=self.max_length,
truncation=True,
return_tensors="pt")
input_ids = text_encs["input_ids"].to(self.device)
attention_mask = text_encs["attention_mask"].to(self.device)
token_type_ids = text_encs["token_type_ids"].to(self.device)
with torch.no_grad():
output = self.model.forward(input_ids, attention_mask, token_type_ids)
return output
def encode_file(self):
all_texts = []
all_ids = []
all_vecs = []
with open(self.fname, "r", encoding="utf8") as h:
texts = []
idxs = []
for idx, line in tqdm(enumerate(h)):
if not line.strip():
continue
texts.append(line.strip())
idxs.append(idx)
if len(texts) >= self.batch_size:
vecs = self.encode_batch(texts)
vecs = vecs / vecs.norm(dim=1, keepdim=True)
all_texts.extend(texts)
all_ids.extend(idxs)
all_vecs.append(vecs.cpu())
texts = []
idxs = []
all_vecs = torch.cat(all_vecs, 0).numpy()
id2text = {idx: text for idx, text in zip(all_ids, all_texts)}
self.id2text = id2text
self.vecs = all_vecs
self.ids = np.array(all_ids, dtype="int64")
def build_index(self, nlist=256):
dim = self.vecs.shape[1]
quant = faiss.IndexFlatIP(dim)
index = faiss.IndexIVFFlat(quant, dim, min(nlist, self.vecs.shape[0]))
index.train(self.vecs)
index.add_with_ids(self.vecs, self.ids)
self.index = index
def sim_query(self, sentence, topK=20):
vec = self.encode_batch([sentence])
vec = vec / vec.norm(dim=1, keepdim=True)
vec = vec.cpu().numpy()
_, sim_idx = self.index.search(vec, topK)
sim_sentences = []
for i in range(sim_idx.shape[1]):
idx = sim_idx[0, i]
sim_sentences.append(self.id2text[idx])
return sim_sentences
|
JinJackson/Simcse-unsup
|
test_unsup.py
|
# -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
import logging
from SimCSERetrieval import SimCSERetrieval
def main():
fname = "./data/news_title.txt"
pretrained = "hfl/chinese-bert-wwm-ext" # huggingface modelhub 下载的预训练模型
simcse_model = "./model/batch-1400"
batch_size = 64
max_length = 100
device = "cuda"
logging.info("Load model")
simcse = SimCSERetrieval(fname, pretrained, simcse_model, batch_size, max_length, device)
logging.info("Sentences to vectors")
simcse.encode_file()
logging.info("Build faiss index")
simcse.build_index(nlist=1024)
simcse.index.nprobe = 20
query_sentence = "基金亏损路未尽 后市看法仍偏谨慎"
print("\nquery title:{0}".format(query_sentence))
print("\nsimilar titles:")
print(u"\n".join(simcse.sim_query(query_sentence, topK=10)))
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
|
JinJackson/Simcse-unsup
|
CSECollator.py
|
# -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
class CSECollator(object):
def __init__(self,
tokenizer,
features=("input_ids", "attention_mask", "token_type_ids"),
max_len=100):
self.tokenizer = tokenizer
self.features = features
self.max_len = max_len
def collate(self, batch):
new_batch = []
for example in batch:
for i in range(2):
# repeat each sentence twice
new_batch.append({fea: example[fea] for fea in self.features})
new_batch = self.tokenizer.pad(
new_batch,
padding=True,
max_length=self.max_len,
return_tensors="pt"
)
return new_batch
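# Usage sketch (assumed setup, not from the original repo): the collator takes
# pre-tokenized examples and emits every one twice, so a single forward pass with
# dropout enabled yields two different embeddings per sentence.
# tokenizer = BertTokenizer.from_pretrained("hfl/chinese-bert-wwm-ext")
# collator = CSECollator(tokenizer, max_len=100)
# batch = collator.collate([tokenizer(t) for t in ["some sentence", "another sentence"]])
# batch["input_ids"].shape[0]  # -> 4: two rows per input sentence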
|
JinJackson/Simcse-unsup
|
SimCSE.py
|
<filename>SimCSE.py
# -*- coding: utf-8 -*-
# @Time : 2021/6/10
# @Author : kaka
import torch.nn as nn
from transformers import BertConfig, BertModel
class SimCSE(nn.Module):
def __init__(self, pretrained="hfl/chinese-bert-wwm-ext", pool_type="cls", dropout_prob=0.3):
super().__init__()
conf = BertConfig.from_pretrained(pretrained)
conf.attention_probs_dropout_prob = dropout_prob
conf.hidden_dropout_prob = dropout_prob
self.encoder = BertModel.from_pretrained(pretrained, config=conf)
assert pool_type in ["cls", "pooler"], "invalid pool_type: %s" % pool_type
self.pool_type = pool_type
def forward(self, input_ids, attention_mask, token_type_ids):
output = self.encoder(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)
if self.pool_type == "cls":
output = output.last_hidden_state[:, 0]
elif self.pool_type == "pooler":
output = output.pooler_output
return output
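# Minimal usage sketch (illustrative, assuming the pretrained weights can be loaded and
# that `batch` comes from CSECollator): keeping the model in train() mode leaves dropout
# active, which is what makes the two copies of each sentence embed differently.
# model = SimCSE(pool_type="cls", dropout_prob=0.3).train()
# emb = model(batch["input_ids"], batch["attention_mask"], batch["token_type_ids"])
# emb.shape  # -> (batch_rows, hidden_size); consecutive rows are positive pairs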
|
JinJackson/Simcse-unsup
|
eval_unsup.py
|
<filename>eval_unsup.py
# -*- coding: utf-8 -*-
# @Time : 2021/6/18
# @Author : kaka
import logging
import torch
import torch.nn.functional as F
import numpy as np
import scipy.stats
from transformers import BertTokenizer
from SimCSE import SimCSE
def load_test_data(fname, tokenizer, max_length):
lines = open(fname, "r", encoding="utf8").read().splitlines()
sent_a = []
sent_b = []
score = []
for line in lines:
_, sa, sb, s = line.strip().split(u"||")
sent_a.append(sa)
sent_b.append(sb)
score.append(float(s))
sent_a_encs = tokenizer(sent_a, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt")
sent_b_encs = tokenizer(sent_b, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt")
return {"sent_a_encs": sent_a_encs, "sent_b_encs": sent_b_encs, "score": np.array(score)}
def eval(data, model, device):
model.eval()
model.to(device)
with torch.no_grad():
a_embed = model(
input_ids=data["sent_a_encs"]["input_ids"].to(device),
attention_mask=data["sent_a_encs"]["attention_mask"].to(device),
token_type_ids=data["sent_a_encs"]["token_type_ids"].to(device),
)
b_embed = model(
input_ids=data["sent_b_encs"]["input_ids"].to(device),
attention_mask=data["sent_b_encs"]["attention_mask"].to(device),
token_type_ids=data["sent_b_encs"]["token_type_ids"].to(device),
)
sim_score = F.cosine_similarity(a_embed, b_embed).cpu().numpy()
corr = scipy.stats.spearmanr(sim_score, data["score"]).correlation
return corr
def main():
pretrained_model_path = "hfl/chinese-bert-wwm-ext" # huggingface 提供的预训练模型,也可指定本地模型文件
simcse_model_path = "" # simcse训练得到的模型文件
f_test = "./data/STS-B/cnsd-sts-test.txt"
f_dev = "./data/STS-B/cnsd-sts-dev.txt"
logging.info("Load tokenizer")
tokenizer = BertTokenizer.from_pretrained(pretrained_model_path)
max_length = 100
device = torch.device("cuda")
test_data = load_test_data(f_test, tokenizer, max_length)
logging.info("test data:{0}".format(len(test_data["sent_a_encs"]["input_ids"])))
dev_data = load_test_data(f_dev, tokenizer, max_length)
logging.info("dev data:{0}".format(len(dev_data["sent_a_encs"]["input_ids"])))
logging.info("eval bert model")
model = SimCSE(pretrained_model_path, "cls")
bert_test_score = eval(test_data, model, device)
bert_dev_score = eval(dev_data, model, device)
logging.info("eval simcse model\n")
model.load_state_dict(torch.load(simcse_model_path))
simcse_test_score = eval(test_data, model, device)
simcse_dev_score = eval(dev_data, model, device)
logging.info(u"bert model test score:{:.4f}".format(bert_test_score))
logging.info(u"bert model dev score:{:.4f}".format(bert_dev_score))
logging.info(u"simcse model test score:{:.4f}".format(simcse_test_score))
logging.info(u"simcse model dev score:{:.4f}".format(simcse_dev_score))
if __name__ == "__main__":
log_fmt = "%(asctime)s|%(name)s|%(levelname)s|%(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
|
Mamdasn/Perceptron-a-Showcase
|
Perceptron-a-Showcase.py
|
import pygame
import sys
window = pygame.display.set_mode((620,450))
pygame.display.set_caption("Time Series Analysis")
WHITE = [255, 255, 255]
RED = [200,0,0]
GREEN = [0,200,0]
BLACK = [20,20,20]
GRAY = [128,128,128]
window.fill(WHITE)
pygame.display.flip()
pygame.font.init() # you have to call this at the start,
# if you want to use this module.
myfont = pygame.font.SysFont("Times New Roman", 16, bold=False, italic=False)
myfont_small = pygame.font.SysFont("Times New Roman", 11, bold=False, italic=False)
radius = 10
# Initialize the GUI
def initialize_screen(window):
window.fill(WHITE)
for i in range(10):
pygame.draw.lines(window, BLACK, False, [(27+28*(i+1),35), (177,150)], 1)
for i in range(21):
pygame.draw.circle(window,GRAY,(28*(i+1),20),radius) # DRAW 21 INPUT CIRCLES
pygame.draw.circle(window,GRAY,(177, 165),radius) # DRAW THE PREDICTION CIRCLE
pygame.draw.circle(window,GREEN,(310, 100+8),radius) # HELPER
window.blit(myfont.render('1=neuron active', True, BLACK),(330,100)) # HELPER
pygame.draw.circle(window,RED,(310, 130+8),radius) # HELPER
window.blit(myfont.render('0=neuron quiescent', True, BLACK),(330,130)) # HELPER
window.blit(myfont_small.render('Prediction for the INPUT', True, BLACK),(117,185)) # HELPER
pygame.draw.lines(window, BLACK, False, [(167,180), (187,180)], 1) # HELPER
window.blit(myfont_small.render('INPUT', True, BLACK),(12,40)) # HELPER
pygame.draw.lines(window, BLACK, False, [(18,35), (38,35)], 1) # HELPER
pygame.draw.lines(window, BLACK, False, [(25,339), (320,339)], 2) # HELPER FOR PLOTTING WEIGHTS
window.blit(myfont.render("weight distribution", True, BLACK),(330,330)) # HELPER FOR PLOTTING WEIGHTS
window.blit(myfont.render("Commands: 1, 0, [n]ew statistics, [e]nd", True, BLACK),(330,400)) # HELPER
initialize_screen(window)
pygame.display.update()
import random
# Number of input neurons
N = 10
# Initialize variables
runs = 0
corrects = 0
neuron = []
neuron_last_21 = []
weights = [0 for _ in range(N)]
lastNpredictionStatus = []
y = 0
while True:
# User input controls
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
# n for new statistics
if event.key == pygame.K_n:
neuron = []
neuron_last_21 = []
weights = [0 for _ in range(N)]
initialize_screen(window)
pygame.display.update()
continue
# e for exit
elif event.key == pygame.K_e:
sys.exit()
# 1 for 1
elif event.key == pygame.K_1:
y = 1
break
# 0 for -1
elif event.key == pygame.K_0:
y = -1
break
else:
continue
# Increment number of runs
runs += 1
# Predict the y
h = 0
for each_neuron, each_weight in zip(neuron, weights):
h += each_weight * each_neuron
if h * y > 0:
# The prediction is correct.
corrects += 1
# If a wrong prediction happened, update the weights
if h * y <= 0:
for i in range(len(neuron)):
weights[i] += y * neuron[i] / len(neuron)
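# Worked example of the update rule above (illustrative numbers, not from the original
# code): with neuron = [1, -1, 1] and weights = [0.2, -0.1, 0.0], the net input is
# h = 0.2*1 + (-0.1)*(-1) + 0.0*1 = 0.3; if the user then enters 0 (y = -1), h*y <= 0,
# so every weight moves by y*neuron[i]/3 and weights become roughly [-0.133, 0.233, -0.333].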
# This is for the sake of calculating the accuracy of the predictions
lastNpredictionStatus.append(h * y > 0)
if len(lastNpredictionStatus) > N:
lastNpredictionStatus = lastNpredictionStatus[-N:]
# Append the user input to the input neurons
neuron.append(y)
if len(neuron) > N:
neuron = neuron[-N:]
# Save last 21 user input for further calculation
neuron_last_21.append(y)
if len(neuron_last_21) > 21:
neuron_last_21 = neuron_last_21[-21:]
# Calculate hit frequency
hit_frequency = round(corrects/runs * 100, 2)
# Calculate hit frequency for the last N user inputs
hit_frequency_N = round(sum(lastNpredictionStatus)/len(lastNpredictionStatus) * 100, 2)
prediction = 1 if h > 0 else -1
# Draw colored circles for the last 21 user inputs
for i in range(len(neuron_last_21)):
length_of_neuron_last_21 = len(neuron_last_21)
color = RED if neuron_last_21[length_of_neuron_last_21-1-i] == -1 else GREEN
pygame.draw.circle(window,color,(28*(i+1),20),radius) # DRAW CIRCLE
# Draw the prediction circle
color = RED if prediction == -1 else GREEN
pygame.draw.circle(window,color,(177, 165),radius) # DRAW CIRCLE
window.blit(myfont.render('The prediction is correct' if h * y > 0 else \
'The prediction is incorrect', True, BLACK),(300,200))
window.blit(myfont.render(f"hit percentage: {hit_frequency}%"\
, True, BLACK),(300,220))
window.blit(myfont.render(f"hit percentage for the last {N} steps: {hit_frequency_N}%"\
, True, BLACK),(300,240))
# Draw weights
for i in range(N):
bar = -int(weights[i]*100//1)
top = 340
if bar > 0: # left, top, width, height
pygame.draw.rect(window, BLACK, pygame.Rect(30*(i+1), top, 15, bar), 1)
else:
pygame.draw.rect(window, BLACK, pygame.Rect(30*(i+1), top+bar, 15, abs(bar)), 1)
pygame.display.update()
initialize_screen(window)
|
cohenjer/mscode
|
mscode/xp/fista_reglevel.py
|
<reponame>cohenjer/mscode
# Recov vs best lamb/lamb_max (generate randomly, find best, report scores, show histogram of realizations in background with second origin)
# Show av. Recovery vs deviations from best (0 dev means average from previous curve)
# Do for all Fistamix and Fista
# Check redundancy rate
import numpy as np
from matplotlib import pyplot as plt
#from itertools import combinations, product
#import shelve
from mscode.utils.utils import count_support
from mscode.methods.algorithms import ista_mix, ista
from mscode.utils.generator import gen_mix, initialize
import pandas as pd
import plotly.express as px
# Problem parameters
k = 5
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
cond = 2*1e2
tol = 1e-6
distr = 'Gaussian'
# Grid on lambda/lambda_max (11 values)
#grid = np.linspace(0, 0.1, 11, endpoint=True)
grid = [0, 0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1]
print(grid)
# Storage
Ntest = 200
labels=['-90%', '-50%', '-20%', 'optimal', '+100%', '+400%', '+900%']
# Storing with Pandas DataFrame
store_pd = pd.DataFrame(columns=["xp", "value", "algorithm", "alpha", "alpha_mod"])
# Loop on tensors, test 100
for i in range(Ntest):
print(i)
# Data generation
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
X0 = initialize([d,r], distr = 'Zeros')
# initial recovery rates and best lambdas
recov_istam = 0
recov_ista = 0
lamb_best_istam = 0
lamb_best_ista = 0
cnt = 1
cnt_m = 1
for lamb in grid:
# Running FIstas
_, _, _, S_istam = ista_mix(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
_, _, _, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
# Computing recovery rates, store if larger
temp_recov_istam = count_support(S, S_istam)
if temp_recov_istam > recov_istam:
lamb_best_istam = lamb
recov_istam = temp_recov_istam
cnt_m = 1  # reset the averaging counter when a new best recovery is found
elif temp_recov_istam == recov_istam:
# store for averaging
cnt_m += 1
lamb_best_istam += lamb
temp_recov_ista = count_support(S, S_ista)
if temp_recov_ista > recov_ista:
lamb_best_ista = lamb
recov_ista = temp_recov_ista
cnt = 1  # reset the averaging counter when a new best recovery is found
elif temp_recov_ista == recov_ista:
# store for averaging
cnt += 1
lamb_best_ista += lamb
lamb_best_ista = lamb_best_ista/cnt
lamb_best_istam = lamb_best_istam/cnt_m
dic = {
"xp":2*['xp1'],
"alpha": [lamb_best_ista, lamb_best_istam],
"alpha_mod": [lamb_best_ista, lamb_best_istam],
"value": [recov_ista, recov_istam],
"algorithm": ["Block-FISTA","Mixed-FISTA"]
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
# Robustness to lambda trials
# log'ish' scale
grid2 = np.array([-9*lamb_best_ista/10, -5*lamb_best_ista/10, -2*lamb_best_ista/10, 0, lamb_best_ista, 5*lamb_best_ista, 10*lamb_best_ista]) + lamb_best_ista
for (cnt, lamb) in enumerate(grid2):
# Running Fista on a local optimal grid
_, _, _, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
recov_ista = count_support(S, S_ista)
# TODO
dic = {
"xp":1*['xp2'],
"alpha": [lamb_best_ista],
"alpha_mod": [labels[cnt]],
"value": [recov_ista],
"algorithm": ["Block-FISTA"]
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
for (cnt, lamb) in enumerate(grid2):
# Running Fista mix on a local optimal grid
_, _, _, S_istam = ista_mix(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
recov_istam = count_support(S, S_istam)
#storage_onedatam.append(recov_istam)
dic = {
"xp":1*['xp2'],
"alpha": [lamb_best_istam],
"alpha_mod": [labels[cnt]],
"value": [recov_istam],
"algorithm": ["Mixed-FISTA"]
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
#storage_robustm.append(storage_onedatam)
# end loop on data matrices
# Starting plots
# lambda effect, maybe todo improve?
#storage = np.array(storage)
#storage_m = np.array(storage_m)
#plt.figure()
#plt.scatter(np.log10(storage[:,0]),storage[:,1], color='r')
#plt.scatter(np.log10(storage_m[:,0]),storage_m[:,1], color='b')
#plt.legend(['Block Fista','Mixed Fista'])
df1 = store_pd[store_pd.xp == 'xp1']
df2 = store_pd[store_pd.xp == 'xp2']
fig = px.scatter(df1, x='alpha', y='value', color='algorithm', title="", labels={'value': 'Support recovery', 'alpha': 'regularization strength alpha'},log_x=True)
#fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
#fig.update_xaxes(type='category')
fig.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
#paper_bgcolor="white",#'rgb(233,233,233)',
#plot_bgcolor="white",#'rgb(233,233,233)',
)
fig.show()
fig2 = px.box(df2, x='alpha_mod', y='value', color='algorithm', facet_col='algorithm', title="", labels={'value': 'Support recovery', 'alpha_mod': 'variation wrt optimal alpha'})#, log_x=True)
#fig2.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig2.update_xaxes(type='category')
fig2.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
)
fig2.show()
# note: because the grid used to find the optimal lambda ratio is coarser than the second refinement grid (and the two grids differ), recovery may still improve around the optimal value. The optimum is only with respect to the first grid search.
# Uncomment for storing outputs
# path designed for running from mscode root (where setup is located)
#year = 2021
#month = 10
#day = 25
#path = '../..'
#stor_name = '{}-{}-{}'.format(year,month,day)
#store_pd.to_pickle('{}/data/XP5/{}_results'.format(path,stor_name))
#fig.write_image('{}/data/XP5/{}_plot.pdf'.format(path,stor_name))
#fig2.write_image('{}/data/XP5/{}_plot2.pdf'.format(path,stor_name))
|
cohenjer/mscode
|
mscode/xp/general_comparison.py
|
<reponame>cohenjer/mscode<gh_stars>0
# recovery (and error) vs noise for all algorithms
# recovery (and error) vs condB for all algorithms
# recovery vs (k,d) for all algorithms (heatmap)
# todo: also condD?
# Questions:
# - test two distributions for X: Gaussian, and decreasing
# - to choose lambda(s), we set them to the average best value found on the fly over a few experiments with the same settings. The grid is very coarse. In practice, use cross-validation.
# - We initialize with 1 zero init, cf init tests for more details
# Reasonable dimensions for reasonable runtime
import numpy as np
from matplotlib import pyplot as plt
from itertools import combinations, product
import shelve
import pandas as pd
from mscode.utils.utils import count_support, redundance_count, find_lambda
from mscode.methods.algorithms import iht_mix, homp, omp, ls_kn_supp, pseudo_trick, brute_trick, ista_mix, ista, admm_mix
from mscode.utils.generator import gen_mix, initialize
import plotly.express as px
# Random seeding
np.random.seed(seed=0)
# Problem parameters
k = 5 #2
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
cond = 2*1e2
distr = 'Uniform'
tol = 1e-6
# We run the tests several times since performances are very problem-dependent
Nbdata = 50
# Recovery and error versus noise
grid_SNR = [1000, 100, 50, 40, 30, 20, 15, 10, 5, 2, 0] #[40, 20]
grid_lambda = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
# Store results in Pandas DataFrame
store_pd = pd.DataFrame(columns=["xp", "value", "algorithm", "error type", "SNR", "lambda", "k", "r", "d", "m", "n", "cond"])
for SNR in grid_SNR:
print('SNR', SNR, 'dB')
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr = distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# Storing results
dic={
'xp':10*['XP1'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[condB],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
## Recovery and error versus conditionning
SNR = 20
grid_cond = [1, 10, 50, 100, 5*1e2, 1e3, 5*1e3, 1e4, 5*1e4, 1e5]
for cond in grid_cond:
print('cond', cond)
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
dic={
'xp':10*['XP2'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[np.round(condB,3)],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
## Recovery and error versus (k,d)
cond = 5*1e2
grid_k = [1, 2, 5, 10, 20]
grid_d = [20, 50, 100, 200, 400]
for d in grid_d:
for k in grid_k:
print('(d,k) is', d, k)
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# Storing results
dic={
'xp':10*['XP3'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[condB],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
df1 = store_pd[store_pd.xp=='XP1']
df2 = store_pd[store_pd.xp=='XP2']
df3 = store_pd[store_pd.xp=='XP3']
fig = px.box(df1[df1['error type']=='support recovery'], x='SNR', y='value', facet_col='algorithm', color='algorithm', title="Support recovery versus SNR", labels={'value':'Support recovery'})
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(type='category')
fig.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
yaxis=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis2=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis3=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis4=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis5=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
showlegend=False,
)
fig.show()
fig2 = px.box(df2[df2['error type']=='support recovery'], x='cond', y='value', color='algorithm', facet_col='algorithm', title="Support recovery versus conditioning of B", labels={'value':'Support recovery'})
fig2.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig2.update_xaxes(type='category')
fig2.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
yaxis=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis2=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis3=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis4=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis5=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
showlegend=False,
)
fig2.show()
# Normalizing the support recovery scores
fig3=px.density_heatmap(df3[df3['error type']=='support recovery'], x='d', y='k', z='value', facet_col='algorithm', color_continuous_scale='Viridis', histfunc="avg", labels={'value':'Support recovery'}, title='Recovery for varying sparsity and dictionary size')
fig3.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig3.update_xaxes(type='category')
fig3.update_yaxes(type='category')
fig3.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=310,
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
)
fig3.show()
year = 2021
month = 10
day = 20
path = '../..'
stor_name = '{}-{}-{}'.format(year,month,day)
#store_pd.to_pickle('{}/data/XP1/{}_results'.format(path,stor_name))
#fig.write_image('{}/data/XP1/{}_plot1.pdf'.format(path,stor_name))
#fig2.write_image('{}/data/XP1/{}_plot2.pdf'.format(path,stor_name))
#fig3.write_image('{}/data/XP1/{}_plot3.pdf'.format(path,stor_name))
# for Frontiers export
#fig.write_image('{}/data/XP1/{}_plot1.jpg'.format(path,stor_name))
#fig2.write_image('{}/data/XP1/{}_plot2.jpg'.format(path,stor_name))
#fig3.write_image('{}/data/XP1/{}_plot3.jpg'.format(path,stor_name))
# to load data
#store_pd = pd.read_pickle('{}/data/XP1/{}_results'.format(path,stor_name))
|
cohenjer/mscode
|
mscode/xp/homp_bahvior.py
|
<gh_stars>0
# Check convergence for k>1
# Same for iht ?
import numpy as np
from matplotlib import pyplot as plt
#from itertools import combinations, product
#import shelve
from mscode.utils.utils import count_support
from mscode.methods.algorithms import iht_mix, homp
from mscode.utils.generator import gen_mix, initialize
# Problem parameters
# todo: implement a loader to choose these globally
k = 5 #2
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
cond = 2*1e2
tol = 1e-6
distr = 'Gaussian'
Nbruns = 4
Nbinit = 10 # not a working parameter at the moment
store_err = []
store_rec = []
for i in range(Nbruns):
print(i)
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond)
for j in range(Nbinit):
X0 = initialize([d,r], distr = 'Gaussian')
_, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
store_rec.append(count_support(S_homp, S))
store_err.append(err_homp)
X0 = initialize([d,r], distr= 'Zeros')
_, err_homp, S_homp = homp(Y, D, B, k, X0)
store_rec.append(count_support(S_homp, S))
store_err.append(err_homp)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
plt.figure()
Nbinit = Nbinit+1
for i in range(Nbruns):
for j in range(Nbinit):
plt.semilogy(store_err[i*Nbinit+j], color=colors[i])
#plt.legend(['run1','run2', 'run3', 'run4', 'run5'])
plt.show()
# Note: zero init seems good for small and large values of k, but it's nice to also try random init.
# Uncomment for storing outputs
# path designed for running from mscode root (where setup is located)
#year = 2021
#month = 3
#day = 16
#path = '.'
#stor_name = '{}-{}-{}'.format(year,month,day)
#np.save('{}/data/XP6/{}_{}_support_err'.format(path,stor_name,distr), store_err)
#np.save('{}/data/XP6/{}_{}_rec_err'.format(path,stor_name,distr), store_rec)
|
cohenjer/mscode
|
mscode/xp/noise_comparison_nn.py
|
<reponame>cohenjer/mscode
# recovery (and error) vs noise for block-FISTA and its nonnegative version
# Questions:
# - test two distributions for X: Gaussian, and decreasing
# - to choose lambda(s), we set them to the average best value found on the fly over a few experiments with the same settings. The grid is very coarse. In practice, use cross-validation.
# - We initialize with 1 zero init, cf init tests for more details
# Reasonable dimensions for reasonable runtime
import numpy as np
from matplotlib import pyplot as plt
from itertools import combinations, product
import shelve
from mscode.utils.utils import count_support, redundance_count, find_lambda
from mscode.methods.algorithms import ista, ista_nn
from mscode.utils.generator import gen_mix, initialize
import plotly.express as px
import pandas as pd
# Problem parameters
k = 5 #2
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
cond = 2*1e2
distr = 'Uniform'
tol = 1e-6
# We run the tests several times since performances are very problem-dependent
Nbdata = 50
# Recovery and error versus noise
grid_SNR = [1000, 100, 50, 40, 30, 20, 15, 10, 5, 2, 0] #[40, 20]
grid_lambda = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
# Storage variables
l_SNR = len(grid_SNR)
# Store results in Pandas DataFrame
store_pd = pd.DataFrame(columns=["value", "algorithm",'SNR'])
#stock_ista_nn = np.zeros([l_SNR,Nbdata,2])
#stock_ista = np.zeros([l_SNR,Nbdata,2])
for (i,SNR) in enumerate(grid_SNR):
print('SNR', SNR, 'dB')
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_nn = []
for iter in range(3):
#store_lamb_nn.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_nn'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
store_lamb_nn = store_lamb
lamb = np.median(store_lamb)
lamb_nn = np.median(store_lamb_nn)
print('lambda ratio is', lamb, 'and for nonneg', lamb_nn)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr = distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Fista and Fista_mix
# Running ISTA_mix (init 0)
X_istann, _, err_ista_nn, S_ista_nn = ista_nn(Y, D, B, lamb_nn, k=k, X0=X0, verbose=False, tol=tol)
#stock_ista_nn[i, j, 0]=count_support(S, S_ista_nn)
#stock_ista_nn[i, j, 1]=np.linalg.norm(X - X_istann, 'fro')
# Test Ista
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
#stock_ista[i, j, 0]=count_support(S, S_ista)
#stock_ista[i, j, 1]=np.linalg.norm(X - X_ista, 'fro')
dic = {
'value':[count_support(S, S_ista), count_support(S, S_ista_nn)],
'algorithm': ['Block-FISTA', 'nonnegative Block-FISTA'],
'SNR': 2*[SNR]
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
# Plots
fig = px.box(store_pd, x='SNR', y='value', facet_col='algorithm', color='algorithm', title="", labels={'value':'Support recovery'} )
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(type='category')
fig.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
showlegend=False)
fig.show()
# Uncomment for storing outputs
# path designed for running from mscode root (where setup is located)
#year = 2021
#month = 10
#day = 25
#path = '../..'
#stor_name = '{}-{}-{}'.format(year,month,day)
#store_pd.to_pickle('{}/data/XPnn/{}_results'.format(path,stor_name))
#fig.write_image('{}/data/XPnn/{}_plot.pdf'.format(path,stor_name))
|
cohenjer/mscode
|
mscode/xp/basic_tests.py
|
import numpy as np
from matplotlib import pyplot as plt
from itertools import combinations, product
import shelve
from mscode.utils.utils import count_support, redundance_count
from mscode.methods.algorithms import iht_mix, homp, omp, ls_kn_supp, pseudo_trick, brute_trick, ista_mix, ista, admm_mix, ista_nn
from mscode.utils.generator import gen_mix, initialize
# Generation
k = 5 #2
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
cond = 2*1e2
distr = 'Uniform'
tol = 1e-6
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, cond_rand=False, distr=distr)
X0 = initialize([d,r], distr = 'Uniform')
# Running HOMP
print('Using HOMP\n')
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
# Running ISTA_mix (init 0)
print('Using Ista(s)\n')
Xinit = X0
lamb_rel = 0.001
X_ista, cost_ista, err_ista, S_ista = ista_mix(Y, D, B, lamb_rel, k=k, X0=Xinit, verbose=False, tol=tol)
# checkup sparsity level
#nz_level = np.sum(np.abs(X_ista) > 1e-16, axis=0)
#print('There are ', nz_level, 'nonzeros in columns of X_ista')
## Same with larger lambda
#lamb_rel = 0.2
#X_ista2, cost_ista_m, err_ista2, S_ista2 = ista_mix(Y, D, B, lamb_rel, k=k, X0=Xinit, tol=tol, verbose=False)
## checkup sparsity level
##nz_level2 = np.sum(np.abs(X_ista2) > 1e-16, axis=0)
##print('There are ', nz_level2, 'nonzeros in columns of X_ista')
# Test Ista (nn)
lamb_rel = 0.001
X_ista0, cost_ista0, err_ista0, S_ista0 = ista_nn(Y, D, B, lamb_rel, k=k, X0=Xinit, tol=tol, verbose=False)
#X_ista0, cost_ista0, err_ista0, S_ista0 = ista(Y, D, B, lamb_rel, k=k, X0=Xinit, tol=tol, verbose=False)
# checkup sparsity level
#nz_level0 = np.sum(np.abs(X_ista0) > 1e-16, axis=0)
#print('There are ', nz_level0, 'nonzeros in columns of X_ista')
# Test IHTmix
print('Using IHTmix\n')
eta = 1/np.linalg.norm(D.T@D)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, eta=eta, tol=1e-8, itermax=1000)
# Test ADMM
print('using ADMM')
X_adm, err_adm, S_adm, Z_adm, err_Z = admm_mix(Y, D, B, k, X0)
# Comparison with trick
print('Using PseudoTrick\n')
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# HOMP with Trick init test
print('Using HOMP Trick init')
X_homp2, err_homp2, S_homp2 = homp(Y, D, B, k, X_trick)
# Compare with best solution of true support (S)
X_S, err_S = ls_kn_supp(Y, D, B, S, k)
# Plotting error curves of iterative algorithms
plt.figure()
plt.semilogy(cost_ista)
plt.semilogy(cost_ista0)
plt.semilogy(err_homp)
plt.semilogy(err_iht)
plt.semilogy(err_adm)
plt.legend(['Mixed Fista','Block Fista', 'HOMP', 'IHT', 'ADMM'])
# checking support
#for i in range(r):
# print('True', np.sort(S[i]))
# print('IHT', np.sort(S_iht[:,i]))
# print('ADMM', np.sort(S_adm[:,i]))
# print('HOMP', np.sort(S_homp[:,i]))
# print('ISTAm', np.sort(S_ista[:,i]))
# print('ISTAm large', np.sort(S_ista2[:,i]))
# print('ISTA', np.sort(S_ista0[:,i]))
# print('OMPcol', np.sort(S_trick[:,i]))
# print('HOMPtrickinit', np.sort(S_homp2[:,i]), '\n')
#
# Error prints
print('Reconstruction errors \n')
print('TrueS error', err_S)
print('IHT error', err_iht[-1])
print('ADMM error', err_adm[-1])
print('FISTA_mix (small lambda) error', err_ista[-1])
#print('FISTA_mix (large lambda) error', err_ista2[-1])
print('FISTA error', err_ista0[-1])
print('HOMP error', err_homp[-1])
print('Trick error', np.linalg.norm(Y-D@X_trick@B.T, 'fro'))
print('HOMPi error', err_homp2[-1])
# Support evaluations
print('support scores')
print('Fista_m ', count_support(S, S_ista))
#print('Fista_m large', count_support(S, S_ista2))
print('Fista', count_support(S, S_ista0))
print('HOMP ', count_support(S, S_homp))
print('HOMP init trick ', count_support(S, S_homp2))
print('OMP trick ', count_support(S, S_trick))
print('FIHT ', count_support(S, S_iht))
print('ADMM ', count_support(S, S_adm))
# Checking redudancy levels
print('True redundance', redundance_count(S))
print('Fista_m redundance', redundance_count(S_ista))
#print('Fista_m large redundance', redundance_count(S_ista2))
print('Fista redundance', redundance_count(S_ista0))
print('HOMP redundance', redundance_count(S_homp))
print('HOMPinit redundance', redundance_count(S_homp2))
print('Trick redundance', redundance_count(S_trick))
print('FIHT redundance', redundance_count(S_iht))
print('ADMM redundance', redundance_count(S_adm))
|
cohenjer/mscode
|
mscode/xp/runtimes.py
|
# Average 5 runs (problems) for various n,m dimensions. k,d fixed
# Same wrt k,d and n,m fixed large
# For all algorithms
# Check av. number of iterations and time per iteration as well when applicable
import numpy as np
from matplotlib import pyplot as plt
from itertools import combinations, product
import shelve
from mscode.utils.utils import count_support, redundance_count
from mscode.methods.algorithms import iht_mix, homp, omp, ls_kn_supp, pseudo_trick, brute_trick, ista_mix, ista, admm_mix
from mscode.utils.generator import gen_mix, initialize
import time
import pandas as pd
# Seeding
np.random.seed(0)
## 1: fixed k,d, runtime wrt n,m
# Generation
k = 5
r = 6
d = 100
SNR = 20 # dB
cond = 2*1e2
tol = 1e-6
distr = 'Gaussian'
# vary n,m
grid_n = [10, 50, 1000]
grid_m = [10, 50, 1000]
Nbruns = 10
# Storing in DataFrame
store_pd = pd.DataFrame(columns=["xp", "n", "m", "d", "k", "algorithm", "iteration", "time"])
for i in range(3):
for j in range(3):
for l in range(Nbruns):
n = grid_n[i]
m = grid_m[j]
print('d,k,n,m,r',d,k,n,m,r)
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
X0 = initialize([d,r], distr = 'Gaussian')
# Running HOMP
tic = time.time()
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
time_homp = time.time()-tic
# Running ISTA_mix (init 0)
lamb = 0.0005
tic = time.time()
_, cost_istam, err_istam, S_istam = ista_mix(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
time_istam = time.time()-tic
# Test Ista
lamb = 0.001
tic = time.time()
_, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
time_ista = time.time()-tic
# Test IHTmix
tic = time.time()
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
time_iht = time.time()-tic
# Comparison with trick
tic = time.time()
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
time_trick = time.time()-tic
# Compare with best solution of true support (S)
tic = time.time()
X_S, err_S = ls_kn_supp(Y, D, B, S, k)
time_supportest = time.time()-tic
# Storing
dic = {
"xp":"XP1",
'n':6*[n], 'm':6*[m], 'd':6*[d], 'k':6*[k],
'algorithm': ['HOMP', 'Mixed-FISTA', 'Block-FISTA', 'IHT', 'TrickOMP', 'Fixed Support'],
'iteration': [len(err_homp), len(err_istam), len(err_ista), len(err_iht), 1, 1],
'time': [time_homp, time_istam, time_ista, time_iht, time_trick, time_supportest],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
# Second test: fix n, m vary d,k
n = 50
m = 50
grid_d = [50, 100, 1000]
grid_k = [5, 10, 30]
Nbruns = 10
for i in range(3):
for j in range(3):
for l in range(Nbruns):
d = grid_d[i]
k = grid_k[j]
print('d,k,n,m,r',d,k,n,m,r)
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond)
X0 = initialize([d,r], distr = 'Gaussian')
### Running HOMP
tic = time.time()
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
time_homp = time.time()-tic
# Running ISTA_mix (init 0)
lamb = 0.0005
tic = time.time()
_, cost_istam, err_istam, S_istam = ista_mix(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
time_istam = time.time()-tic
# Test Ista
lamb = 0.001
tic = time.time()
_, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
time_ista = time.time()-tic
# Test IHTmix
tic = time.time()
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
time_iht = time.time()-tic
# Comparison with trick
tic = time.time()
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
time_trick = time.time()-tic
# Compare with best solution of true support (S)
tic = time.time()
X_S, err_S = ls_kn_supp(Y, D, B, S, k)
time_supportest = time.time()-tic
# Storing
dic = {
"xp":"XP2",
'n':6*[n], 'm':6*[m], 'd':6*[d], 'k':6*[k],
'algorithm': ['HOMP', 'Mixed-FISTA', 'Block-FISTA', 'IHT', 'TrickOMP', 'Fixed Support'],
'iteration': [len(err_homp), len(err_istam), len(err_ista), len(err_iht), 1, 1],
'time': [time_homp, time_istam, time_ista, time_iht, time_trick, time_supportest],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
#todo automate processing of values
# Uncomment for storing outputs
# path designed for running from mscode root (where setup is located)
#year = 2021
#month = 10
#day = 21
#path = '../..'
#stor_name = '{}-{}-{}'.format(year,month,day)
#store_pd.to_pickle('{}/data/XP7/{}_results'.format(path,stor_name))
|
cohenjer/mscode
|
mscode/utils/utils.py
|
<filename>mscode/utils/utils.py
import numpy as np
from mscode.methods.algorithms import ista_mix, ista, ista_nn
from mscode.utils.generator import gen_mix, initialize
def count_support(Strue, Sest):
'''
Counts the number of correctly estimated atoms
Parameters
----------
Strue : list of list (or numpy nd array)
True support
Sest : list of list (or numpy nd array)
Estimated support
Returns
-------
score : float
Percentage of correctly estimated atoms
'''
if isinstance(Strue, np.ndarray):
# Conversion to list of list from ndarray numpy
Strue = list(Strue.T)
Strue = [list(i) for i in Strue]
if isinstance(Sest, np.ndarray):
# Conversion to list of list from ndarray numpy
Sest = list(Sest.T)
Sest = [list(i) for i in Sest]
r = len(Strue)
maxscore = r*len(Strue[0]) # all same size
cnt = 0
for i in range(r):
for el in Strue[i]:
if el in Sest[i]:
cnt += 1
return cnt/maxscore*100
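# Illustrative example (not from the original tests):
# count_support([[0, 3], [1, 2]], [[3, 5], [2, 4]]) returns 50.0, since 2 of the 4
# true atoms appear in the estimated supports of their respective columns.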
def redundance_count(S):
'''
Checks how many times the same columns are chosen across the various supports.
Gives the total number of repeated columns, counted with multiplicity.
Parameters
----------
S : list of list or numpy array
Support
Returns
-------
out : int
Number of repeated columns
'''
if isinstance(S, np.ndarray):
# Conversion to list of list from ndarray numpy
S = list(S.T)
S = [list(i) for i in S]
cnt=0
r = len(S)
# unfolded uniqued list
Sunfold = []
for i in S:
for j in i:
Sunfold.append(j)
Sunfold = list(set(Sunfold))
for el in Sunfold:
for i in S:
for j in i:
if el == j:
cnt += 1
return cnt - len(Sunfold)
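# Illustrative example (not from the original tests):
# redundance_count([[0, 1], [1, 2]]) returns 1, because column 1 is selected in two
# different supports.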
def find_lambda(dims, grid, ista_type):
'''
Finds a good lambda value by solving a randomly generated (Mixed) Lasso problem of fixed size for a range of regularization levels.
Parameters
----------
dims : list
(m,n,d,k,r,SNR,cond)
grid : list
tested lambda values
ista_type : string
choose between 'Fista', 'Fista_m' and 'Fista_nn'
Returns
-------
lamb : float
estimated good regularization ratio
'''
m,n,d,k,r,SNR,cond = dims
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond)
X0 = initialize([d,r], distr = 'Zeros')
# initial recovery rates and best lambdas
recov_ista = 0
lamb_best_ista = 0
cnt = 1
for lamb in grid:
# Running FIstas
if ista_type == 'Fista_m':
_, _, _, S_ista = ista_mix(Y, D, B, lamb, k=k, X0=X0, tol=1e-5, verbose=False)
elif ista_type == 'Fista':
_, _, _, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, tol=1e-5, verbose=False)
elif ista_type == 'Fista_nn':
_, _, _, S_ista = ista_nn(Y, D, B, lamb, k=k, X0=X0, tol=1e-5, verbose=False)
# Computing recovery rates, store if larger
temp_recov_ista = count_support(S, S_ista)
if temp_recov_ista > recov_ista:
lamb_best_ista = lamb
recov_ista = temp_recov_ista
cnt = 1  # reset the averaging counter when a new best recovery is found
elif temp_recov_ista == recov_ista:
# store for averaging
cnt += 1
lamb_best_ista += lamb
lamb_best_ista = lamb_best_ista/cnt
return lamb_best_ista
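# Usage sketch (illustrative dimensions, mirroring the xp scripts):
# lamb = find_lambda((50, 50, 100, 5, 6, 20, 2e2), [1e-4, 1e-3, 1e-2], 'Fista')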
|
cohenjer/mscode
|
mscode/methods/prox_ind_l1_norm.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script illustrates how to call the compiled C code from Python
using ctypes.
@author: <NAME>
"""
# ============================================================================
# import modules
# ============================================================================
import numpy as np
import ctypes
from ctypes import POINTER, byref
# ============================================================================
# classes and functions
# ============================================================================
class Matrix(ctypes.Structure):
_fields_ = [("data",POINTER(ctypes.c_double)),
("nrows",ctypes.c_uint),
("ncols",ctypes.c_uint)]
# ----------------------------------------------------------------------------
# load external libraries
# ============================================================================
# load library for computing the proximal operator
#lib = ctypes.cdll.LoadLibrary('/home/jecohen/Travail/Ecriture-Projets/2021/DCPD_part1_MixtureSparseCoding/Mixed_Sparse_coding/mscode/mscode/methods/libprox.so')
# Todo: set as parameter
# Todo: does not work on windows, disabling this function for now
# proximal operator
#prox_l1 = lib.prox_l1_norm_column_sort
# wrapper for the proximal operator
def prox_l1inf(V,X,LAMBDA):
## get data dimensions
#n,m = V.shape
## create matrix object
#Mx = Matrix(X.ctypes.data_as(POINTER(ctypes.c_double)),n,m)
#Mv = Matrix(V.ctypes.data_as(POINTER(ctypes.c_double)),n,m)
## call proximal operator (result in X)
#prox_l1(byref(Mx),byref(Mv),ctypes.c_double(LAMBDA))
return  # no-op while the compiled prox library above is disabled
# ============================================================================
# MAIN - module executed as a script
# ============================================================================
if __name__ == "__main__":
# ------------------------------------------------------------------------
# parameters
# ========================================================================
# random seed initialization
np.random.seed(0)
# matrix dimensions
n, m = 5, 5
# regularization
LAMBDA = 1
# ------------------------------------------------------------------------
# variables and data loading
# ========================================================================
# create input and output matrices
V = np.asfortranarray(np.random.randn(n,m))
# optimization variable (PROX VARIABLE MUST BE F_CONTIGUOUS)
X = np.asfortranarray(np.zeros((n,m)))
# ------------------------------------------------------------------------
# compute proximal operator
# ========================================================================
# l1,oo proximal operator
prox_l1inf(V,X,LAMBDA)
# display original and thresholded matrices
print('\nOriginal matrix:')
print(V)
print('\nThresholded matrix:')
print(X)
# display l1 norm of the columns
print('\nOriginal matrix -- l1 norm of columns:')
print(np.sum(np.abs(V),axis=0))
print('\nThresholded matrix -- l1 norm of columns:')
print(np.sum(np.abs(X),axis=0))
# check that the norm of the projection equals LAMBDA
print('\nLAMBDA = %.2f'%(LAMBDA))
print('loo,1 norm of projection = %.2f'%(np.sum(np.max(np.abs(V-X),axis=0))))
|
cohenjer/mscode
|
setup.py
|
#!/usr/bin/env python
import setuptools
setuptools.setup(
name='mscode',
version='0.0',
description='Code for Mixed Sparse Coding',
author='<NAME>',
author_email='<EMAIL>',
packages=setuptools.find_packages(),
license='MIT'
)
|
cohenjer/mscode
|
mscode/methods/algorithms.py
|
<filename>mscode/methods/algorithms.py
import numpy as np
from itertools import combinations
from mscode.methods.proxs import prox_ml1,prox_ml1_fast
from mscode.methods.proxs import SoftT
from mscode.methods.proxs import ml1
from mscode.methods.proxs import HardT
from mscode.methods.proxs import ls_kn_supp, ls_cg
from scipy.linalg import svdvals
import time
def admm_mix(Y, D, B, k, X0=None, itermax=1000, tol=1e-6, verbose=True, rho=None):
'''
Solves (approximately, without guarantees) the mixed sparse coding problem using ADMM, with hard thresholding as the proximal operator of the l0 sparsity constraint. The problem is formulated as
:math:`\\min_X\; \\frac{1}{2}\\|Y - DXB^T\\|_F^2 \; s.t.\; \\|X_i\\|_0 \\leq k`
where k is the maximal number of nonzeros per column of X.
Parameters
----------
Y : numpy array
input data, required
D : numpy array
input dictionary (fat), required
B : numpy array
input mixing matrix, required
k : integer
sparsity level per column, thresholded at the end, required
rho : float
augmented Lagrangian penalty coefficient; if None, it is set to (||D||_F ||B||_F)^2 / (n r), a heuristic based on the average squared singular value of the mixing operator
Returns
----------
X : numpy array
estimated X
e : list
fittings along iterations
support : numpy array
the support of each column of X
Z : numpy array
the ADMM splitting variable (k-sparse copy of X)
err_Z : list
relative gap ||X - Z||_F / ||X||_F along iterations
'''
# Copy input
X = np.copy(X0)
# Input characteristics
n, d = D.shape
m, r = B.shape
# Store DtD, BtB and DtYB if possible
DtD = D.T@D
Bt = B.T
BtB = Bt@B
DtYB = D.T@Y@B
# Initialisation of coefficients x
# this does not matter too much since the problem is convex
if X0 is None:
X = np.zeros([d, r])
# Choice of rho, use Lipschitz constant
singvalD = np.linalg.svd(DtD)[1][0]
singvalB = np.linalg.svd(BtB)[1][0] #why 2?
eta = 1/singvalD/singvalB
if rho is None:
rho = (np.linalg.norm(D, 'fro')*np.linalg.norm(B, 'fro'))**2/(n*r)
if verbose:
print('The automatic value of rho is ', rho) # heuristic based on optimal quadratic rho, here the average of squared singular values of the mixing matrix
# Initial error
e0 = (np.linalg.norm(Y - D@X@Bt, 'fro') ** 2)/2
e = [np.Inf, e0]
err_Z = [np.Inf]
# Initial iteration count
iter = 0
# Main print
if verbose:
print('ADMM mix running\n')
# Main loop
# pairing and dual variables for ADMM
Z = np.copy(X)
nu = np.zeros([d,r])
while iter < itermax:
if np.abs(e[-1] - e[-2])/e[-1] < tol and np.abs(err_Z[-1] - err_Z[-2]) < 1e-2:
break
# printing
if iter % 10 == 1:
if verbose:
print('ADMM iteration ', iter, ' cost ', e[-1])
#else:
# print('.', end='')
iter += 1
# Prox wrt X
rhs = DtYB + rho * (Z - nu)
X, _ = ls_cg(rhs, DtD, BtB, X, rho, itercg = 50) # solves linear system wrt X
# Prox wrt Z
Z = HardT(X + nu, k)
# Gradient ascent step
nu = nu + X - Z
# error computation
rec = np.linalg.norm(Y - D@X@Bt, 'fro')
e.append(rec**2/2)
err_Z.append(np.linalg.norm(X - Z, 'fro')/np.linalg.norm(X, 'fro'))
e = e[1:]
err_Z = err_Z[1:]
# Get k largest entries per column
# Estimating support (np arrays with supports)
support = np.argsort(np.abs(X), 0)
# truncating the support
support = support[-k:, :]
# Post-processing option for k-sparse columnwise
# Running least squares
X, _ = ls_kn_supp(Y, D, B, support, k)
# error computation
rec = np.linalg.norm(Y - D@X@Bt, 'fro')
e.append(rec**2/2)
if verbose:
print('\n')
return X, e, support, Z, err_Z
def ista_mix(Y, D, B, lamb_rel, k=None, X0=None, itermax=1000, tol=1e-6, verbose=True):
'''
Solves the tightest convex relaxation of the mixed sparse coding problem using Fast Iterative Soft Thresholding (Fista).
The cost function is
:math:`\\frac{1}{2} \\|Y - DXB^T \\|_F^2 + \\lambda \\|X\\|_{1,1}`
where :math:`\\lambda = \\lambda_{rel}\\lambda_{\\max}`
Parameters
----------
Y : numpy array
input data, required
D : numpy array
input dictionary (fat), required
B : numpy array
input mixing matrix, required
lamb_rel : float
ratio of lambda_max used as a regularization, required
k : integer
sparsity level per column, thresholded at the end
X0 : numpy array
initial estimation of X
itermax : integer (default 1000)
maximum number of proximal iterations
tol : float (default 1e-6)
relative error threshold for stopping the algorithm
verbose : boolean (default True)
Set to False to remove prints
Returns
----------
X : numpy array
estimated X
e : list
fittings along iterations
rec : list
reconstruction errors along iterations
support : numpy array
the support of each column of X
'''
# Copy input
X = np.copy(X0)
# Input caracteristics
n, d = D.shape
m, r = B.shape
# Store DtD, BtB and DtYB if possible
DtD = D.T@D
Bt = B.T
BtB = Bt@B
DtYB = D.T@Y@B
# Computing lambda_max
DtYBabs = np.abs(DtYB)
lambda_max = np.sum(np.max(DtYBabs, axis=0))
lamb = lamb_rel*lambda_max
if verbose:
print('lambda max is', lambda_max, ' \n')
# Initialisation of coefficients x
# this does not matter too much since the problem is convex
if X0 is None:
X = np.zeros([d, r])
# Choice of stepsize, use Lipschitz constant
singvalD = np.linalg.svd(DtD)[1][0]
singvalB = np.linalg.svd(BtB)[1][0] #why 2?
eta = 1/singvalD/singvalB
# Initial error
rec0 = np.linalg.norm(Y - D@X@Bt, 'fro')
e0 = rec0**2/2 + lamb*ml1(X)
# e_old = 0
rec = [rec0]
e = [np.Inf, e0]
# Initial iteration count
iter = 0
# Main print
if verbose:
print('ISTA l11 running\n')
# Main loop with proximal gradient
# pairing variable for Fista
Z = np.copy(X)
beta = 1
while np.abs(e[-1] - e[-2])/e[-1] > tol and iter < itermax:
# printing
if iter % 10 == 1:
if verbose:
print('ISTA iteration ', iter, ' cost ', e[-1], '\n')
#else:
# print('.', end='')
iter += 1
# compute the gradient
X_old = np.copy(X)
#X, _, _ = prox_ml1(Z - eta * (DtD@Z@BtB - DtYB), lamb*eta, tol=1e-6)
X = prox_ml1_fast(Z - eta * (DtD@Z@BtB - DtYB), lamb*eta)
# Extrapolation
beta_old = beta
beta = (1+np.sqrt(1+4*beta**2))/2
Z = X + ((beta_old-1)/beta) * (X-X_old)
# error computation
rec_new = np.linalg.norm(Y - D@X@Bt, 'fro')
rec.append(rec_new)
e.append(rec_new**2/2 + lamb*ml1(X))
e = e[1:]
# Get k largest entries per column
# Estimating support (np arrays with supports)
support = np.argsort(np.abs(X), 0)
# truncating the support
support = support[-k:, :]
# Post-processing option for k-sparse columnwise
if k is not None:
# Running least squares
X, _ = ls_kn_supp(Y, D, B, support, k)
# error computation
rec_new = np.linalg.norm(Y - D@X@Bt, 'fro')
rec.append(rec_new)
e.append(rec_new**2/2 + lamb*ml1(X))
if verbose:
print('\n')
return X, e, rec, support
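# Usage sketch for ista_mix (illustrative shapes, not from the original tests):
# with Y of shape (n, m) = (50, 50), a fat dictionary D of shape (50, 100) and a mixing
# matrix B of shape (50, 6), a call such as
# X, e, rec, support = ista_mix(Y, D, B, lamb_rel=1e-3, k=5, X0=np.zeros((100, 6)))
# returns a column-5-sparse X after the final least-squares refit on the selected support.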
def ista(Y, D, B, lamb_rel, k=None, X0=None, itermax=1000, tol=1e-6, verbose=True, samereg=False, warning=False, DtD=None, DtY=None, DtYB=None, BtB=None, return_old=False, eta=None):
'''
Solves a simple convex relaxation of the mixed sparse coding problem using Fast Iterative Soft Thresholding (Block-Fista). Each column has its own regularization parameter.
The cost function is
:math:`\\frac{1}{2} \\|Y - DXB^T \\|_F^2 + \\sum_i \\lambda_i \\|X_i\\|_{1}`
where :math:`\\lambda_i = \\lambda_{rel,i}\\lambda_{\\max,i}`
Parameters
----------
Y : numpy array
input data, required
D : numpy array
input dictionary (fat), required
B : numpy array
input mixing matrix, required
lamb_rel : float or list of floats
ratio of lambda_max used as a regularization, required.
If a float is provided, the same regularization ratio is used in all columns.
If a list is provided, it must have length X.shape[1] and give a regularization level for each column.
k : integer
sparsity level per column, thresholded at the end. Use None to ignore.
samereg : boolean
if True, all lamb values are equal to lambda_max*lamb_rel, which yields the usual Lasso problem.
X0 : numpy array
initial estimation of X
itermax : integer (default 1000)
maximum number of proximal iterations
tol : float (default 1e-6)
relative error threshold for stopping the algorithm
verbose : boolean (default True)
Set to False to remove prints
warning : boolean (default False)
If True, prints a warning when the final estimate before debiasing is sparser than the desired sparsity level.
return_old : boolean (default False)
If True, adds a fifth output which is the estimated X before debiasing
eta : float (default None)
the stepsize for ISTA. If None, the Lipschitz constant is computed and used.
DtD, BtB (...) : numpy arrays
pre-computed cross products of the inputs if available.
Returns
----------
X : numpy array
estimated X
e : list
fittings along iterations
rec : list
reconstruction errors along iterations
support : numpy array
the support of each column of X
'''
# Copy input
X = np.copy(X0)
# Input characteristics
n, d = D.shape
m, r = B.shape
# Store DtD, BtB and DtYB if possible
if DtD is None:
DtD = D.T@D
if DtY is None:
DtY = D.T@Y
Bt = B.T
if BtB is None:
BtB = Bt@B
if DtYB is None:
DtYB = DtY@B
# Computing lambda_max
DtYBabs = np.abs(DtYB)
lambda_max = np.max(DtYBabs, axis=0)
if samereg:
# We get the usual Lasso lambda_max
lambda_max = np.max(lambda_max)
lamb = lamb_rel*lambda_max
if verbose:
print('lambda max is', lambda_max, ' \n')
# Initialisation of coefficients x
# this does not matter too much since the problem is convex
if X0 is None:
X = np.zeros([d, r])
# Choice of stepsize, use Lipschitz constant (improvable by power iteration or randomization)
if eta is None:
singvalD = np.linalg.svd(DtD)[1][0]
singvalB = np.linalg.svd(BtB)[1][0]
eta = 1/singvalD/singvalB
# Initial error
rec0 = np.linalg.norm(Y - D@X@Bt, 'fro')
e0 = rec0**2/2 + np.sum(lamb*np.sum(np.abs(X), axis=0))
# e_old = 0
rec = [rec0]
e = [np.Inf, e0]
# Initial iteration count
iter = 0
# Main print
if verbose:
print('ISTA l11 running\n')
# Main loop with proximal gradient
# pairing variable for Fista
Z = np.copy(X)
beta = 1
while np.abs(e[-1] - e[-2])/e[-1] > tol and iter < itermax:
# printing
if iter % 10 == 1:
if verbose:
print('ISTA iteration ', iter, ' cost ', e[-1], '\n')
#else:
# print('.', end='')
iter += 1
# compute the gradient
X_old = np.copy(X)
X = SoftT(Z - eta * (DtD@Z@BtB - DtYB), lamb*eta)
# Extrapolation
beta_old = beta
beta = (1+np.sqrt(1+4*beta**2))/2
Z = X + ((beta_old-1)/beta) * (X-X_old)
# error computation
rec_new = np.linalg.norm(Y - D@X@Bt, 'fro')
rec.append(rec_new)
e.append(rec_new**2/2 + np.sum(lamb*np.sum(np.abs(X), axis=0)))
e = e[1:]
# Get k largest entries per columns
# Estimating support (np arrays with supports)
support = np.argsort(np.abs(X), 0)
if k is not None:
# truncating the support
support = support[-k:, :]
if warning:
# check if there are too many zeroes already
for i in range(r):
Xabs = np.abs(X)
if np.min(Xabs[support[:,i],i])==0:
print('Warning: regularization may be too strong')
if verbose:
print('\n')
if return_old:
X_old=np.copy(X)
# Post-processing option for k-sparse columnwise
if k is not None:
# Running least squares
X, _ = ls_kn_supp(Y, D, B, support, k)
# error computation
rec_new = np.linalg.norm(Y - D@X@Bt, 'fro')
rec.append(rec_new)
e.append(rec_new**2/2 + np.sum(lamb*np.sum(np.abs(X), axis=0)))
if return_old:
return X, e, rec, support, X_old
else:
return X, e, rec, support
def ista_nn(Y, D, B, lamb_rel, k=None, X0=None, itermax=1000, tol=1e-6, verbose=True, samereg=False, return_old=False, DtD=None, DtY=None, DtYB=None, BtB=None,eta=None, warning=False):
'''
    Solves a simple convex relaxation of the mixed sparse coding problem using Fast Iterative Soft Thresholding (FISTA) under nonnegativity constraints. Each column has its own regularization parameter.
The cost function is
:math:`\\frac{1}{2} \\|Y - DXB^T \\|_F^2 + \\sum_i \\lambda_i \\|X_i\\|_{1}\; s.t.\; X\\geq 0`
where :math:`\\lambda_i = \\lambda_{rel,i}\\lambda_{\\max,i}`
Parameters
----------
Y : numpy array
input data, required
D : numpy array
input dictionary (fat), required
B : numpy array
input mixing matrix, required
lamb_rel : float or list of floats
ratio of lambda_max used as a regularization, required.
If float is provided, the same regularization ratio is used in all columns.
        If a list is provided, it must be of length X.shape[1] and give the regularization level for each column.
k : integer
sparsity level per column, thresholded at the end. Use None to ignore.
samereg : boolean
if True, all lamb values are equal to lambda_max*lamb_rel, which yields the usual Lasso problem.
X0 : numpy array
initial estimation of X
itermax : integer (default 1000)
maximum number of proximal iterations
tol : float (default 1e-6)
relative error threshold for stopping the algorithm
verbose : boolean (default True)
Set to False to remove prints
warning : boolean (default False)
        If True, prints a warning when the final estimate before debiasing is sparser than the desired sparsity level.
return_old : boolean (default False)
        If True, adds a fifth output which is the estimate of X before debiasing
eta : float (default None)
the stepsize for ISTA. If None, the Lipschitz constant is computed and used.
DtD, BtB (...) : numpy arrays
pre-computed cross products of the inputs if available.
Returns
----------
X : numpy array
estimated X
e : list
fittings along iterations
rec : list
reconstruction errors along iterations
support : numpy array
the support of each column of X
'''
# Copy input
X = np.copy(X0)
    # Input characteristics
n, d = D.shape
m, r = B.shape
# Store DtD, BtB and DtYB if possible
if DtD is None:
DtD = D.T@D
if DtY is None:
DtY = D.T@Y
Bt = B.T
if BtB is None:
BtB = Bt@B
if DtYB is None:
DtYB = DtY@B
# Computing lambda_max
DtYBabs = np.abs(DtYB)
lambda_max = np.max(DtYBabs, axis=0)
if samereg:
# We get the usual Lasso lambda_max
lambda_max = np.max(lambda_max)
lamb = lamb_rel*lambda_max
if verbose:
print('lambda max is', lambda_max, ' \n')
# Initialisation of coefficients x
# this does not matter too much since the problem is convex
if X0 is None:
X = np.zeros([d, r])
# Choice of stepsize, use Lipschitz constant (improvable by power iteration or randomization)
if eta is None:
singvalD = np.linalg.svd(DtD)[1][0]
singvalB = np.linalg.svd(BtB)[1][0]
eta = 1/singvalD/singvalB
# Initial error
rec0 = np.linalg.norm(Y - D@X@Bt, 'fro')
e0 = rec0**2/2 + np.sum(lamb*np.sum(np.abs(X), axis=0))
# e_old = 0
rec = [rec0]
e = [np.Inf, e0]
# Initial iteration count
iter = 0
# Main print
if verbose:
print('ISTA l11 running\n')
# Main loop with proximal gradient
# pairing variable for Fista
Z = np.copy(X)
beta = 1
while np.abs(e[-1] - e[-2])/e[-1] > tol and iter < itermax:
# printing
if iter % 10 == 1:
if verbose:
print('ISTA iteration ', iter, ' cost ', e[-1], '\n')
#else:
# print('.', end='')
iter += 1
# compute the gradient
X_old = np.copy(X)
X = np.maximum(Z - eta * (DtD@Z@BtB - DtYB + lamb*np.ones(Z.shape)) ,0)
# Extrapolation
beta_old = beta
beta = (1+np.sqrt(1+4*beta**2))/2
Z = X + ((beta_old-1)/beta) * (X-X_old)
# error computation
rec_new = np.linalg.norm(Y - D@X@Bt, 'fro')
rec.append(rec_new)
e.append(rec_new**2/2 + np.sum(lamb*np.sum(np.abs(X), axis=0)))
e = e[1:]
# Get k largest entries per columns
# Estimating support (np arrays with supports)
support = np.argsort(np.abs(X), 0)
if k is not None:
# truncating the support
support = support[-k:, :]
if warning:
# check if there are too many zeroes already
for i in range(r):
Xabs = np.abs(X)
if np.min(Xabs[support[:,i],i])==0:
print('Warning: regularization may be too strong')
if verbose:
print('\n')
if return_old:
X_old=np.copy(X)
# Post-processing option for k-sparse columnwise
if k is not None:
# Running nonnegative least squares
X, _ = ls_kn_supp(Y, D, B, support, k, nonnegative=True)
# error computation
rec_new = np.linalg.norm(Y - D@X@Bt, 'fro')
rec.append(rec_new)
e.append(rec_new**2/2 + np.sum(lamb*np.sum(np.abs(X), axis=0)))
if return_old:
return X, e, rec, support, X_old
else:
return X, e, rec, support
def iht_mix(Y, D, B, k , X_in, tol=1e-6, itermax=1000, verbose=False, DtD=None, DtY=None, eta=None):
'''
An adaptation of the (Extrapolated) Iterated Hard Thresholding Algorithm for the mixed sparse coding problem. At each iteration, a Nesterov Fast Gradient step is performed with projection on the set of columnwise k-sparse matrices.
Parameters
----------
Y : numpy array
input data, required
D : numpy array
input dictionary (fat), required
B : numpy array
input mixing matrix, required
k : integer
Sparsity level, must be the number of terms in each subset of S (not checked)
    X_in : numpy array
Initial guess for solution X
itermax : integer (default 1000)
maximum number of proximal iterations
tol : float (default 1e-6)
relative error threshold for stopping the algorithm
verbose : boolean (default True)
Set to False to remove prints
eta : float (default None)
        the stepsize for IHT. If None, the Lipschitz constant is computed and used.
DtD, DtY : numpy arrays
pre-computed cross products of the inputs if available.
Returns
----------
X : numpy array
Solution of the mixed sparse coding problem with fixed column sparsity
err : float
Reconstruction error / residuals.
support : numpy array
the support of each column of X
'''
X = np.copy(X_in)
if DtD is None:
DtD = D.T@D
if DtY is None:
DtY = D.T@Y
Bt = B.T
BtB = Bt@B
DtY = D.T@Y
DtYB = DtY@B
if eta is None:
step = 1/np.linalg.svd(DtD)[1][0]/np.linalg.svd(BtB)[1][0] #why 2?
else:
step = eta
if verbose:
print('IHT stepsize is fixed to ', step)
err = np.linalg.norm(Y - D@X@Bt, 'fro')
err = [np.Inf, err]
#Fast version
Z = np.copy(X)
beta = 1
iter = 0
while np.abs(err[-1] - err[-2])/err[-1] > tol and iter < itermax:
# printing
if iter % 10 == 1:
if verbose:
print('IHT iteration ', iter, ' cost ', err[-1], '\n')
#else:
# print('.', end='')
iter += 1
X_old = np.copy(X)
X = HardT(Z - step * (DtD@Z@BtB - DtYB), k)
beta_old = beta
beta = (1+np.sqrt(1+4*beta**2))/2
Z = X + ((beta_old-1)/beta) * (X-X_old)
err.append(np.linalg.norm(Y - D@X@Bt, 'fro')) # suboptimal
#for iter in range(itermax):
#X = ht_op(X - step * (DtD@X@BtB - DtYB), k)
#err.append(np.linalg.norm(Y - D@X@B.T, 'fro')) # suboptimal
err = err[1:]
# Get k largest entries per columns
# Estimating support (np arrays with supports)
support = np.argsort(np.abs(X), 0)
# truncating the support
support = support[-k:, :]
# Post-processing option for k-sparse columnwise
if k is not None:
# Running least squares
X, _ = ls_kn_supp(Y, D, B, support, k)
# error computation
err.append(np.linalg.norm(Y - D@X@Bt, 'fro'))
if verbose:
print('\n')
return X, err, support
def homp(Y, D, B, k, Xin=None, tol=1e-6, itermax=1000):
'''
Hierarchical Orthogonal Matching Pursuit for the mixed sparse coding
problem. Computes k-sparse approximations of column sub-problems until
convergence using OMP (modified) as a routine.
Parameters
----------
Y : numpy array
Input data, required
D : numpy array
Input dictionary, required
B : numpy array
Input mixing matrix, required
k : int
Sparsity level, required
Xin : numpy array (default: random)
Initial X
itermax : integer (default 1000)
maximum number of proximal iterations
tol : float (default 1e-6)
relative error threshold for stopping the algorithm
Returns
----------
X : numpy array
Final estimated X
err : list
Error after each pass.
S : numpy array
Support of the solution
'''
n, m = Y.shape
_, r = B.shape
_, d = D.shape
# todo: checks on sizes
# todo: init X random
X = np.copy(Xin)
# Supports in numpy 1/0 format
S = np.zeros(X.shape)
# Supports in list of list format (col per col)
Slist = [[0 for i in range(k)] for j in range(r)]
err = np.linalg.norm(Y-D@X@B.T, 'fro')
err = [np.Inf, err]
#itermax = 50
it = 0
while it < itermax:
if np.abs(err[-1] - err[-2])/err[-1] < tol:
break
it += 1
for i in range(r):
reject = 0
V = Y - D@X@B.T + D@np.outer(X[:,i],B[:,i])
normb = np.linalg.norm(B[:,i],2) ** 2
Vb = V@B[:,i]/normb
x, s_idx = omp(Vb, D, k)
if np.linalg.norm(Vb - D@x) > np.linalg.norm(Vb - D@X[:,i]):
#print('bad, bad omp !! Rejecting step')
x = np.zeros(d)
z = np.linalg.lstsq(D[:,Slist[i]], Vb, rcond=None)
x[Slist[i]] = z[0]
reject += 1
else:
Slist[i] = s_idx
X[:,i] = x
err.append(np.linalg.norm(Y - D@X@B.T, 'fro'))
if reject == r:
            print('HOMP rejected all column updates; stopping the algorithm to avoid an infinite loop.')
break
err = err[1:]
# At the end, do a least squares with the final support
X, err_temp = ls_kn_supp(Y, D, B, Slist, k)
err.append(err_temp)
return X, err, np.transpose(np.array(Slist))
def omp(V, D, k):
'''
    Modified Orthogonal Matching Pursuit, in a naive implementation.
Parameters
----------
V : numpy column vector
input data, required
D : numpy array
input dictionary, required
k : integer
sparsity level, required
Returns
----------
x : numpy column vector
estimated sparse coefficients
    s : list of integers
        indices forming the support of x
TODO: write for matrix input
'''
_, d = D.shape
x = np.zeros(d)
list_idx = []
res = V
for p in range(k):
temp = D.T@res
j = np.argmax(np.abs(temp))
list_idx.append(j)
z = np.linalg.lstsq(D[:,list_idx], V, rcond=None)
x[list_idx] = z[0]
res = V - D@x
return x, list_idx
def pseudo_trick(Y,D,B,k):
'''
Tries to solve the mixed sparse coding problem by looking at the distorted problem
:math:`min_{\\|X_i\\|_0\\leq k} \\|Y(B^T)^\\dagger - DX \\|_F^2`,
which is solved in parallel, column by column, using the omp algorithm.
As a post-processing step, a least squares fitting is done with the identified support.
Parameters
----------
Y : numpy array
Input data, required
D : numpy array
Input dictionary, required
B : numpy array
Input mixing matrix, required
k : int
Sparsity level, required
Returns
----------
X : numpy array
Final estimated X
err : list
Error after each pass.
S : numpy array
Support of the solution
'''
_, d = D.shape
_, r = B.shape
C = np.linalg.pinv(B.T)
V = Y@C
X_trick = np.zeros([d, r])
Slist = [[0 for i in range(k)] for j in range(r)]
for i in range(r):
z, s_idx = omp(V[:, i], D, k)
Slist[i] = s_idx
X_trick[:, i] = z
    # Debiasing
X_trick, err = ls_kn_supp(Y, D, B, Slist, k)
return X_trick, err, np.transpose(np.array(Slist))
def brute_trick(Y,D,B,k):
'''
A brute force version of the pseudo_trick, for checking counter examples. Returns the error in the B domain.
'''
_, d = D.shape
_, r = B.shape
C = np.linalg.pinv(B.T);
V = Y@C
X_trick = np.zeros([d,r])
Slist = [[0 for i in range(k)] for j in range(r)]
for i in range(r):
print(i)
err = 10 ** 16
count=0
listcombi = combinations([i for i in range(d)],k)
for s in listcombi:
count = count+1
if count%100==0:
print(count)
Ds = D[:,s]
z = np.linalg.lstsq(Ds,V[:,i], rcond=None)[0]
errnew = np.linalg.norm(V[:,i]-Ds@z)
if errnew < err:
err = errnew
store_s = s
store_z = z
Slist[i] = list(store_s)
X_trick[store_s,i] = store_z
    # Debiasing
    #X_trick, err = ls_kn_supp(Y,D,B,Slist,k)
    err = np.linalg.norm(Y - D@X_trick@B.T, 'fro')
return X_trick, err, Slist
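# ----------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module): build a tiny random mixed sparse coding instance Y = D X B^T and
# run two of the solvers defined above. All dimensions, the sparsity level
# and the lamb_rel value below are arbitrary illustrative choices.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_demo, m_demo, d_demo, r_demo, k_demo = 20, 15, 40, 3, 2
    D_demo = rng.standard_normal((n_demo, d_demo))
    D_demo /= np.linalg.norm(D_demo, axis=0)      # l2-normalized atoms
    B_demo = rng.standard_normal((m_demo, r_demo))
    X_demo = np.zeros((d_demo, r_demo))
    for col in range(r_demo):
        X_demo[rng.permutation(d_demo)[:k_demo], col] = rng.standard_normal(k_demo)
    Y_demo = D_demo @ X_demo @ B_demo.T
    # Convex relaxation (Block-FISTA) followed by debiasing on the detected support
    X_hat, cost, res, supp = ista(Y_demo, D_demo, B_demo, 0.1, k=k_demo, verbose=False)
    print("Block-FISTA final residual:", res[-1])
    # Greedy baseline working column by column on Y (B^T)^+
    X_pt, err_pt, supp_pt = pseudo_trick(Y_demo, D_demo, B_demo, k_demo)
    print("pseudo_trick residual:", err_pt)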
|
cohenjer/mscode
|
mscode/methods/proxs.py
|
import numpy as np
from scipy.optimize import nnls
from numpy.matlib import repmat as repmat
from mscode.methods.prox_ind_l1_norm import prox_l1inf
def ml1(X):
'''
Computes the induced matrix l1 norm of X
'''
return np.max(np.sum(np.abs(X), axis=0))
def SoftT(x, lamb):
'''
Computes the Soft-Thresholding of input vector x, with coefficient lamb
'''
return np.maximum(np.abs(x) - lamb, 0)*np.sign(x)
def ls_kn_supp(Y, D, B, S, k, nonnegative=False):
'''
    Solves the mixed sparse coding problem once the support has been fixed. This is a least-squares problem with a lot of structure, so we are careful not to waste computation time. If the support is large, avoid this function, since it forms a (k*r) x (k*r) system.
We solve a linear system :math:`M^T y = M^T M z` where
:math:`M = [D_{S_1} \\odot b_1, \\ldots, D_{S_i} \\odot b_i, \\ldots]`
    which yields the unique solution to the overcomplete least squares problem
:math:`\\min_z \\| y - Mz \\|_2^2`. We use the structure of M to compute :math:`M^T y` and :math:`M^T M`.
Can also handle nonnegative least squares with scipy nnls active set solver.
Parameters
----------
Y : numpy array
input data, required
D : numpy array
input dictionary (fat), required
B : numpy array
input mixing matrix, required
S : list of list of integers (or numpy array)
Known support for each column of the solution, required
example: [[0,3],[1,3]] means columns one has support S_1 = {0,3} and column 2 has support S_2={1,3}.
k : integer
Sparsity level, must be the number of terms in each subset of S (not checked)
nonnegative : boolean
Default to False. Set to True to impose nonnegativity constraints on the solution, the NNLS solver is active set from scipy.
Returns
-------
X : numpy array
Solution of the mixed sparse coding problem with fixed column sparsity
err : float
Reconstruction error / residuals.
'''
#TODO: check correctness, seen some weird things with error not being minimal.
#TODO: if support is too large, solve with conjugate gradient instead.
m, n = Y.shape
_, d = D.shape
_, r = B.shape
klist = np.array([i for i in range(k)])
if isinstance(S, np.ndarray):
# Conversion to list of list from ndarray numpy
S = list(S.T)
S = [list(i) for i in S]
# Computing Mty
# YBt = [N_1, ..., N_i, ..., N_r]
YB = Y@B # m*r
Mty = np.zeros(k*r)
for i in range(r):
D_S = D[:, S[i]] #size should be k
Mty[i*k + klist] = D_S.T@YB[:,i]
# Computing MtM
MtM = np.zeros([k*r, k*r])
for i in range(r):
for j in range(r):
D_Si = D[:,S[i]] #size should be k
D_Sj = D[:,S[j]] #size should be k
temp = (D_Si.T@D_Sj)*(B[:,i].T@B[:,j]) # size should be k^2
for p in range(k):
MtM[klist + i*k, j*k+p] = temp[:,p]
# Let's solve the small square system
if nonnegative==False:
try:
z = np.linalg.solve(MtM, Mty)
except np.linalg.LinAlgError:
MtM = MtM + 1e-6*np.eye(MtM.shape[0],MtM.shape[1])
z = np.linalg.solve(MtM, Mty)
else:
MtM = MtM + 1e-6*np.eye(MtM.shape[0],MtM.shape[1])
z,_ = nnls(MtM, Mty, 1000)
# Putting back the values in X
X = np.zeros([d,r])
for i in range(r):
X[S[i],i] = z[i*k + klist]
# error computed in the original domain, check if efficient version
err = np.linalg.norm(Y - D@X@B.T, 'fro')
return X, err
def HardT(X, k):
'''
    Keeps the k largest entries (in magnitude) of X columnwise, breaking ties arbitrarily if necessary. This is the proximal operator of the indicator of columnwise k-sparsity (hard thresholding).
'''
if X.ndim == 1:
m=X.shape[0]
Z = np.copy(X)
idx = np.argsort(np.abs(X), axis = 0)
Z[idx[0:m-k]] = 0
else:
m, n = X.shape
Z = np.copy(X)
idx = np.argsort(np.abs(X), axis = 0)
for p in range(n): # may be vectorized
Z[idx[0:m-k,p],p] = 0
return Z
def ls_cg(Y, DtD, BtB, Xinit, rho, itercg = 50):
'''
Solves linear systems of the form
:math:`(D^TD X B^TB + \\rho X) = Y`
    using conjugate gradient. The important point is that the large matrix (DtD otimes BtB + rho I) is never formed explicitly. However, the DtD matrix is formed, as in the other methods.
Parameters
----------
Y : numpy array
Input data. It corresponds to DtYB + rho (Z - mu) in ADMM.
    DtD : numpy array
        Gram matrix of the dictionary (D^T D)
    BtB : numpy array
        Gram matrix of the mixing matrix (B^T B)
Xinit : numpy array
current estimate for the coefficients X
rho : float
parameter of the ADMM
itercg : int (default 50)
maximum number of conjugate gradient iterations
Returns
-------
X : numpy array
estimated least squares solution
    stock : list
        squared norms of the CG residuals along iterations
'''
X = np.copy(Xinit)
d, r = np.shape(Y)
# getting this next part right is the whole point of the CG here
AX = (DtD@X)@BtB + rho*X
R = Y - AX
tol = 1e-16*np.linalg.norm(Y,'fro')
P = np.copy(R)
stock = []
for i in range(itercg):
if np.dot(R.flatten(), R.flatten()) < tol:
break
# Classical CG (vectorized for understanding but we could do all in matrix format), with clever Ap product.
AP = DtD@P@BtB + rho*P
alpha = np.dot(R.flatten(), R.flatten())/np.dot(P.flatten(), AP.flatten())
X = X + alpha*P
R_old = np.copy(R)
R = R - alpha*AP
beta = np.dot(R.flatten(),R.flatten())/np.dot(R_old.flatten(), R_old.flatten())
P = R + beta * P
stock.append(np.dot(R.flatten(), R.flatten()))
return X, stock
def prox_ml1_fast(X,lamb):
'''
A faster proximal operator algorithm for l1infty norm, exact after a few steps.
Reference:
The fastest l1oo prox in the west, <NAME>, <NAME> and <NAME>, 2019
Note: This is simply a wrapper for their code.
Be careful of the following bug: if X is an integer array, the output will always be 0.
'''
# output variable
X = np.asfortranarray(X)
V = np.asfortranarray(np.zeros(X.shape))
# Catch exceptions or bugs
if lamb==0:
V = X
else:
prox_l1inf(X,V,lamb)
return V
def prox_ml1(X, lamb, tol=1e-10):
'''
Computes the proximal operator of the matrix induced l1 norm.
Parameters
----------
X : numpy array
input of the proximal operator
lamb : float
regularization parameter
tol : float
small tolerance on the value of the maximal columnwise 1 norm
Returns
-------
U : numpy array
the proximal operator applied to X
t : float
maximum l1 norm of the columns of U
nu_t : list of floats
optimal dual parameters
Credits to <NAME>
Reference: "Computing the proximal operator of the l1 induced matrix norm, J.E.Cohen, arxiv:2005.06804v2".
'''
# Lambda cannot be Zero
if lamb == 0:
out = np.copy(X)
return out, np.max(np.sum(np.abs(X), axis=0)), 0
    # Handle the single-column (vector) case
    if X.ndim==1:
        out = SoftT(X, lamb)
        return out, np.sum(np.abs(out)), 1
#%% Precomputations
# Constants
n, m = np.shape(X)
ps = np.linspace(1,n,n)
Xsort = np.sort(np.abs(X), axis=0)
Xsort = np.flip(Xsort, axis=0)
Xcumsum=np.cumsum(Xsort, axis=0)
# Maximal value of t (prox is identity)
Xsum = np.sum(Xsort, axis=0) # Xsort for abs value
tmax = np.max(Xsum)
tmin = 0
t = tmax/2 # initial value in the middle of the admissible interval
# Find the order of visited columns in X
sorted_sums = np.flip(np.sort(Xsum))
order = np.flip(np.argsort(Xsum))
# Deal with the lamb>lamb_max case in advance to avoid bug
if lamb>=np.sum(Xsort[0,:]):
U = np.zeros([n,m])
t = 0
nu_t = Xsort[0,:]/lamb
return U, t, nu_t
#%% Starting bisection
while tmax-tmin > tol:
# Compute the current active set and its size
I = order[sorted_sums>t]
l = len(I)
        # Process cumulative sums
Xcumsum_t = (Xcumsum[:, I]-t)/lamb
# Compute the candidate nu values
        Ps = np.transpose(repmat(ps, l, 1)) # matrix whose l columns each contain the sequence 1..n
nu = Xcumsum_t/Ps
nu_t = np.zeros(m)
N = Xsort[:, I] - lamb*nu
        # N has columns sorted in decreasing order at this point
        N = np.flip(N, axis=0) # flip so that each column of N is sorted in increasing order
for j in range(l):
# Find the largest index for which the condition described in the paper is satisfied
# i.e. xsort(p) - \lambda\nu(p) >= 0
idx = np.searchsorted(N[:,j], 0, side='left')
idx = len(N[:,j]) - idx - 1 # counter effect flip
nu_t[I[j]] = nu[idx, j]
# end j loop
# Checking dual condition 1< or 1> to move t
if np.sum(nu_t) < 1:
# t must be decreased
tmax = t
t = (t + tmin)/2
else:
# t must be increased
tmin = t
t = (t + tmax)/2
    # Final step: soft-threshold the columns that need it
U = SoftT(X, lamb*nu_t)
return U, t, nu_t
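# ----------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module): the thresholding operators and the structured least-squares
# solver on tiny random inputs. All sizes and values are arbitrary.
if __name__ == "__main__":
    rng = np.random.default_rng(1)
    A = rng.standard_normal((4, 3))
    print("SoftT:\n", SoftT(A, 0.5))      # entrywise soft-thresholding
    print("HardT:\n", HardT(A, 2))        # keep the 2 largest entries per column
    U, t, nu = prox_ml1(A, 0.5)
    print("prox_ml1: t =", t, "| max column l1 norm of U =", np.max(np.sum(np.abs(U), axis=0)))
    # Least squares with known column supports: Y = D X B^T with X supported on S
    n_d, m_d, d_d, r_d, k_d = 10, 8, 15, 2, 2
    D_d = rng.standard_normal((n_d, d_d))
    B_d = rng.standard_normal((m_d, r_d))
    S_d = [[0, 3], [1, 4]]                # supports of column 1 and column 2
    X_true = np.zeros((d_d, r_d))
    X_true[S_d[0], 0] = [1.0, -2.0]
    X_true[S_d[1], 1] = [0.5, 1.5]
    Y_d = D_d @ X_true @ B_d.T
    X_hat, err = ls_kn_supp(Y_d, D_d, B_d, S_d, k_d)
    print("ls_kn_supp residual (should be ~0):", err)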
|
cohenjer/mscode
|
mscode/utils/generator.py
|
<reponame>cohenjer/mscode<filename>mscode/utils/generator.py
import numpy as np
import tensorly as tl
def gen_mix(dims, k, snr=20, distr='Gaussian', cond=1, cond_rand=False, decrease_coeff = 0.7):
'''
Generates simulated dataset for experiments according to the mixed sparse coding model.
Parameters
----------
dims : list of length 4
[m, n, d, r]
k : integer
sparsity constraint
snr : integer
signal to noise ratio, controls noise level
distr : string
Default is 'Gaussian', but 'Uniform' also works. 'Decreasing' is Gaussian D,B and Uniform X with artificially decreasing weights for X.
cond : float
        Controls the conditioning of the B matrix
    cond_rand : boolean
        If True, the singular values of a random Gaussian matrix are affinely rescaled so that the condition number is exactly 100. If False, the singular values are linearly spaced between 1 and 1/cond, so the condition number is exactly cond.
decrease_coeff : float (default 0.7)
In the 'Decreasing setup', multiplicative factor for decrease
Returns
-------
Y : nd numpy array
        noisy data
Ytrue : nd numpy array
noiseless data
D : nd numpy array
        dictionary with columns normalized to unit l2 norm
B : nd numpy array
mixing matrix
X : nd numpy array
true unknown sparse coefficients
S : nd numpy array
support of X
sig : float
        noise scaling factor used in practice
condB : float
        the true condition number of B
'''
m, n, d, r = dims
    if distr in ('Gaussian', 'Decreasing'):
D = np.random.randn(n, d)
B = np.random.randn(m, r)
elif distr == 'Uniform':
D = np.random.rand(n, d)
B = np.random.rand(m, r)
else:
print('Distribution not supported')
for i in range(d):
D[:, i] = D[:, i]/np.linalg.norm(D[:, i])
    # add poor B conditioning
if cond:
u, s, v = np.linalg.svd(B)
if cond_rand:
a = (s[0] - s[-1]/100)/(s[0]-s[-1])
b = s[0] - a*s[0]
            s = a*s + b # condition number is exactly 100 after this affine rescaling
else:
s = np.linspace(1, 1/cond, r)
B = u[:, :r]@np.diag(s)@v.T
else:
s = [1,1]
# X k-sparse columnwise generation
X = np.zeros([d, r])
S = []
for i in range(r):
pos = np.random.permutation(d)[0:k]
if distr == 'Uniform':
X[pos, i] = np.random.rand(k)
elif distr == 'Gaussian':
X[pos, i] = np.random.randn(k)
elif distr == 'Decreasing':
for l, npos in enumerate(pos):
X[npos,i] = np.random.choice((-1,1))*np.random.rand(1)*(decrease_coeff ** l)
else:
print('Distribution not supported')
S.append(pos)
# Formatting to np array
S = np.transpose(np.array(S))
# Noise and SNR
Ytrue = D@X@B.T
noise = np.random.rand(n, m)
spower = np.linalg.norm(Ytrue, 'fro')**2
npower = np.linalg.norm(noise, 'fro')**2
old_snr = np.log10(spower/npower)
sig = 10**((old_snr - snr/10)/2)
noise = sig*noise # scaling to get SNR right
Y = Ytrue + noise
return Y, Ytrue, D, B, X, S, sig, s[0]/s[-1]
def gen_mix_tensor(dims, dims_D, k, snr=20, distr='Gaussian', cond=1, cond_rand=False, decrease_coeff = 0.7):
'''
Generates simulated dataset for experiments according to the mixed sparse coding model.
Parameters
----------
dims : list
[m, n, l]
        #todo: implement for arbitrary order
dims_D : list with the number of atoms and rank
[d, r]
k : integer
sparsity constraint
snr : integer
signal to noise ratio, controls noise level
distr : string
Default is 'Gaussian', but 'Uniform' also works. 'Decreasing' is Gaussian D,B and Uniform X with artificially decreasing weights for X.
cond : float
        Controls the conditioning of the B matrix
    cond_rand : boolean
        If True, the singular values of a random Gaussian matrix are affinely rescaled so that the condition number is exactly 100. If False, the singular values are linearly spaced between 1 and 1/cond, so the condition number is exactly cond.
decrease_coeff : float (default 0.7)
In the 'Decreasing setup', multiplicative factor for decrease
Returns
-------
Y : nd numpy array
        noisy data
Ytrue : nd numpy array
noiseless data
D : nd numpy array
        dictionary with columns normalized to unit l2 norm
B : nd numpy array
mixing matrix
X : nd numpy array
true unknown sparse coefficients
S : nd numpy array
support of X
sig : float
        noise scaling factor used in practice
condB : float
        the true condition number of B
'''
n,m,l = dims
d, r = dims_D
    if distr in ('Gaussian', 'Decreasing'):
D = np.random.randn(n, d)
B = np.random.randn(m, r)
C = np.random.randn(l, r)
elif distr == 'Uniform':
D = np.random.rand(n, d)
B = np.random.rand(m, r)
C = np.random.rand(l, r)
else:
print('Distribution not supported')
for i in range(d):
D[:, i] = D[:, i]/np.linalg.norm(D[:, i])
    # add poor B conditioning
u, s, v = np.linalg.svd(B)
if cond_rand:
a = (s[0] - s[-1]/100)/(s[0]-s[-1])
b = s[0] - a*s[0]
        s = a*s + b # condition number is exactly 100 after this affine rescaling
else:
s = np.linspace(1, 1/cond, r)
B = u[:, :r]@np.diag(s)@v.T
# X k-sparse columnwise generation
X = np.zeros([d, r])
S = []
for i in range(r):
pos = np.random.permutation(d)[0:k]
if distr == 'Uniform':
X[pos, i] = np.random.rand(k)
elif distr == 'Gaussian':
X[pos, i] = np.random.randn(k)
elif distr == 'Decreasing':
            for idx_pos, npos in enumerate(pos):  # do not reuse l, which is the third tensor dimension
                X[npos,i] = np.random.choice((-1,1))*np.random.rand(1)*(decrease_coeff ** idx_pos)
else:
print('Distribution not supported')
S.append(pos)
# Formatting to np array
S = np.transpose(np.array(S))
# Noise and SNR
#Ytrue = D@X@B.T
Ytrue = tl.cp_tensor.cp_to_tensor((None,[D@X,B,C]))
noise = np.random.rand(n, m, l)
spower = tl.norm(Ytrue, 2)**2
npower = tl.norm(noise, 2)**2
old_snr = np.log10(spower/npower)
sig = 10**((old_snr - snr/10)/2)
noise = sig*noise # scaling to get SNR right
Y = Ytrue + noise
return Y, Ytrue, D, B, C, X, S, sig, s[0]/s[-1]
def initialize(dims, distr='Gaussian'):
'''
    Provides an initial guess for X in the mixed sparse coding problem, given the dimensions and an elementwise standard distribution.
Parameters
----------
dims : list
[d,r], where d is the dictionary size, and r the mixing size
distr : string
"Gaussian", "Uniform", "Zeros"
Returns
-------
X : numpy array
Initial guess for Mixed Sparse Coding
'''
if distr == 'Gaussian':
X = np.random.randn(dims[0], dims[1])
elif distr == 'Uniform':
X = np.random.rand(dims[0], dims[1])
elif distr == 'Zeros':
X = np.zeros(dims)
return X
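# ----------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module): generate one synthetic mixed sparse coding instance and an
# initial guess. Dimensions, sparsity, SNR and cond are arbitrary choices.
if __name__ == "__main__":
    dims = [20, 30, 50, 4]   # [m, n, d, r]
    Y, Ytrue, D, B, X, S, sig, condB = gen_mix(dims, k=3, snr=20, distr='Gaussian', cond=10)
    print("Y shape:", Y.shape, "| condition number of B:", condB)
    X0 = initialize([dims[2], dims[3]], distr='Zeros')
    print("initial guess shape:", X0.shape)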
|
cohenjer/mscode
|
mscode/xp/initialization_sensitivity.py
|
# For all functions, boxplot with normal and uniform inits, only error. Keep problem constant, Fista should not change results.
import numpy as np
from matplotlib import pyplot as plt
#from itertools import combinations, product
#import shelve
from mscode.utils.utils import count_support
from mscode.methods.algorithms import iht_mix, homp, omp, ls_kn_supp, pseudo_trick, brute_trick, ista_mix, ista, admm_mix
from mscode.utils.generator import gen_mix, initialize
import pandas as pd
import plotly.express as px
# Problem parameters
k = 5 #2
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
cond = 2*1e2
tol = 1e-6
lamb_m = 0.001
lamb= 0.005
distr='Uniform' #always uniform here
init_distr='Gaussian'
# Store results in Pandas DataFrame
store_pd = pd.DataFrame(columns=["value", "algorithm", "init type", "data number"])
# Generate a few data matrices, and for each run 10 init
Nbdata = 10
Nbinit = 10
for i in range(Nbdata):
print(i)
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
# Zero initialization test
X0z = initialize([d,r], distr= 'Zeros')
# IHT
_, err_iht, S_iht = iht_mix(Y, D, B, k, X0z, tol=tol)
# Comparison with trick (independent of init)
_, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# Running HOMP
_, err_homp, S_homp = homp(Y, D, B, k, X0z, tol=tol)
# Running ISTA_mix (init 0)
_, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0z, verbose=False, tol=tol)
# Test Ista
_, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0z, verbose=False, tol=tol)
dic = {
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)],
'algorithm': ['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"init type": 5*['zero init'],
"data number": 5*[i]
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
# Initialise Loop
for j in range(Nbinit):
if init_distr=='Gaussian':
X0 = initialize([d,r], distr = 'Gaussian')
else:
X0 = initialize([d,r], distr = 'Uniform')
# Running all algorithms
# IHT
_, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
# Running HOMP
_, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
# Running ISTA_mix (init 0)
_, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
# Test Ista
_, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
dic = {
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht)],
'algorithm': ['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT'],
"init type": 4*['random init'],
"data number": 4*[i]
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
fig = px.box(store_pd, x='data number', y='value', facet_col='algorithm', color='init type', title="Robustness to random initializations for 10 problem instances", labels={'value':'Support recovery', 'data number': 'problem index'}, notched=True)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(type='category')
fig.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1500,
height=500,
#paper_bgcolor="white",#'rgb(233,233,233)',
#plot_bgcolor="white",#'rgb(233,233,233)',
)
fig.show()
# Note: the ISTA variants solve convex problems and should not depend much on the initialization (they still do a little; maybe run longer?).
# Uncomment for storing outputs
# path designed for running from mscode root (where setup is located)
#year = 2021
#month = 10
#day = 22
#path = '../..'
#stor_name = '{}-{}-{}'.format(year,month,day)
#store_pd.to_pickle('{}/data/XP4/{}_results'.format(path,stor_name))
#fig.write_image('{}/data/XP4/{}_plot.pdf'.format(path,stor_name))
|
4n6ir/remedy-lambda-public-ip-visibility
|
bundle/get-public-ip/extension.py
|
#!/usr/bin/env python3
import json
import os
import requests
import signal
import sys
from pathlib import Path
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
LAMBDA_EXTENSION_NAME = Path(__file__).parent.name
def execute_custom_processing(event):
retry_strategy = Retry(
total = 3,
status_forcelist = [429, 500, 502, 503, 504],
backoff_factor = 1
)
adapter = HTTPAdapter(
max_retries = retry_strategy
)
http = requests.Session()
http.mount("https://", adapter)
r = http.get('https://checkip.amazonaws.com')
print(f"[{LAMBDA_EXTENSION_NAME}] {r.text}", flush=True)
def handle_signal(signal, frame):
sys.exit(0)
def register_extension():
headers = {
'Lambda-Extension-Name': LAMBDA_EXTENSION_NAME,
}
payload = {
'events': [
'INVOKE',
'SHUTDOWN'
],
}
response = requests.post(
url=f"http://{os.environ['AWS_LAMBDA_RUNTIME_API']}/2020-01-01/extension/register",
json=payload,
headers=headers
)
ext_id = response.headers['Lambda-Extension-Identifier']
return ext_id
def process_events(ext_id):
headers = {
'Lambda-Extension-Identifier': ext_id
}
while True:
response = requests.get(
url=f"http://{os.environ['AWS_LAMBDA_RUNTIME_API']}/2020-01-01/extension/event/next",
headers=headers,
timeout=None
)
event = json.loads(response.text)
if event['eventType'] == 'SHUTDOWN':
sys.exit(0)
else:
execute_custom_processing(event)
def main():
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
extension_id = register_extension()
process_events(extension_id)
if __name__ == "__main__":
main()
|
4n6ir/remedy-lambda-public-ip-visibility
|
remedy_lambda_public_ip_visibility/remedy_lambda_public_ip_visibility_stack.py
|
from aws_cdk import (
RemovalPolicy,
Stack,
aws_lambda as _lambda,
)
from constructs import Construct
class RemedyLambdaPublicIpVisibilityStack(Stack):
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
layer = _lambda.LayerVersion(
self, 'layer',
code = _lambda.Code.from_asset('bundle/extension.zip'),
compatible_architectures = [
_lambda.Architecture.ARM_64,
_lambda.Architecture.X86_64
],
compatible_runtimes = [
_lambda.Runtime.PYTHON_2_7,
_lambda.Runtime.PYTHON_3_6,
_lambda.Runtime.PYTHON_3_7,
_lambda.Runtime.PYTHON_3_8,
_lambda.Runtime.PYTHON_3_9
],
description = 'AWS Lambda Extension captures the Public IP into Cloud Watch logs at execution using Requests v2.27.1 Python library.',
layer_version_name = 'get-public-ip',
license = 'Apache-2.0 License',
removal_policy = RemovalPolicy.DESTROY
)
|
tanukihee/Pearson3CurveFitting
|
HessianProbabilityGrid.py
|
# Pearson-III curve plotting and fitting,
# used for hydrological analysis and hydraulic calculations.
# v6.1
# Copyright (c) 2020 -- 2021 ListLee
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import probscale
import scipy.stats as stats
from scipy.optimize import curve_fit
from scipy.stats import pearson3
USE_TEX = True
if USE_TEX:
matplotlib.use("pgf")
plt.rcParams["figure.constrained_layout.use"] = True
plt.rcParams["pgf.rcfonts"] = False
plt.rcParams["pgf.preamble"] += r"""
\usepackage{xeCJK}
\usepackage{amsmath}
\usepackage{siunitx}
\sisetup{detect-all}
\usepackage{unicode-math}
\setsansfont{FiraGO}
\setmathfont{Fira Math}
\setCJKsansfont{Source Han Sans SC}
"""
else:
plt.rcParams["font.sans-serif"] = "SimHei"
class Data:
"""
    # Hydrological data class
    ## Constructor parameters
    + `arr`: observed hydrological data series
"""
def __init__(self, arr):
        self.arr = np.sort(arr)[::-1]
        # sort the input array in descending order
        self.n = len(arr)
        # length of the observed period
        self.extreme_num = 0
        # number of extraordinary floods
def history(self, arr, length, num=0):
"""
        # Historical flood data
        ## Input parameters
        + `arr`: historical flood series; all entries are extraordinary floods
        + `length`: length of the investigated (historical) period
        + `num`: number of extraordinary floods, including both historical and observed ones; defaults to the number of historical extraordinary floods
"""
        self.historia = np.sort(arr)[::-1]
        # historical flood series
        self.length = length
        # length of the investigated period
        self.extreme_num = max(len(self.historia), num)
        # number of extraordinary floods
        self.extreme_num_in_measure = self.extreme_num - len(arr)
        # number of extraordinary floods within the observed period
        # extraordinary flood series and ordinary flood series
self.extreme = self.historia
self.ordinary = self.arr
if self.extreme_num_in_measure > 0:
for i in range(self.extreme_num_in_measure):
self.extreme = np.append(self.extreme, self.arr[i])
self.ordinary = np.delete(self.arr,
range(self.extreme_num_in_measure))
self.arr = np.sort(np.append(self.extreme, self.ordinary))[::-1]
def figure(self, grid=True, logVert=False):
"""
        # Draw the figure
        ## Input parameters
        + `grid`: whether to show the background grid, defaults to `True`
        + `logVert`: whether to use a logarithmic y axis, defaults to `False`
"""
self.fig, self.ax = plt.subplots(figsize=(7, 5))
        # create the figure and the axes
self.ax.set_xscale("prob")
        # switch the x axis to a probability scale
self.ax.set_xlabel(r"频率 $P$(%)")
self.ax.set_ylabel(r"流量 $Q$(\si{m\cubed /s})")
self.ax.grid(grid)
        # background grid
if logVert:
self.ax.set_yscale("log")
def empi_scatter(self, empi_prob=None):
"""
        # Plot the empirical probability points
"""
        # empirical probabilities from the expectation formula
if empi_prob is None:
if self.extreme_num == 0:
self.empi_prob = (np.arange(self.n) + 1) / (self.n + 1) * 100
else:
self.extreme_prob = (np.arange(self.extreme_num) +
1) / (self.length + 1) * 100
self.ordinary_prob = self.extreme_prob[-1] + (
100 - self.extreme_prob[-1]) * (
np.arange(self.n - self.extreme_num_in_measure) +
1) / (self.n - self.extreme_num_in_measure + 1)
self.empi_prob = np.append(self.extreme_prob,
self.ordinary_prob)
else:
self.empi_prob = empi_prob
        # axis settings for the plot
prob_lim = lambda prob: 1 if prob > 1 else 10**(np.ceil(
np.log10(prob) - 1))
self.prob_lim_left = prob_lim(self.empi_prob[0])
self.prob_lim_right = 100 - prob_lim(100 - self.empi_prob[-1])
self.ax.set_xlim(self.prob_lim_left, self.prob_lim_right)
        # scatter the empirical probability points
if self.extreme_num:
self.ax.scatter(self.ordinary_prob,
self.ordinary,
marker="o",
c="none",
edgecolors="k",
label="一般洪水经验概率点")
self.ax.scatter(self.extreme_prob,
self.extreme,
marker="x",
c="k",
label="特大洪水经验概率点")
else:
self.ax.scatter(self.empi_prob,
self.arr,
marker="o",
c="none",
edgecolors="k",
label="经验概率点")
def stat_params(self, output=True):
"""
        # Compute (and optionally print) the statistical parameters of the data
        ## Input parameters
        + `output`: whether to print the parameters to the console, defaults to True
"""
if self.extreme_num == 0:
self.expectation = np.mean(self.arr)
            # expectation
self.modulus_ratio = self.arr / self.expectation
            # modulus ratios
self.coeff_of_var = np.sqrt(
np.sum((self.modulus_ratio - 1)**2) / (self.n - 1))
            # coefficient of variation
else:
self.expectation = (np.sum(self.extreme) +
(self.length - self.extreme_num) /
(self.n - self.extreme_num_in_measure) *
np.sum(self.ordinary)) / self.length
self.coeff_of_var = (np.sqrt(
(np.sum((self.extreme - self.expectation)**2) +
(self.length - self.extreme_num) /
(self.n - self.extreme_num_in_measure) * np.sum(
(self.ordinary - self.expectation)**2)) /
(self.length - 1))) / self.expectation
self.coeff_of_skew = stats.skew(self.arr, bias=False)
        # coefficient of skewness
if output:
print("期望 EX 为 %.2f" % self.expectation)
print("变差系数 Cv 为 %.4f" % self.coeff_of_var)
print("偏态系数 Cs 为 %.4f" % self.coeff_of_skew)
def moment_plot(self):
"""
        # Plot the theoretical probability curve with moment-estimated parameters
"""
x = np.linspace(self.prob_lim_left, self.prob_lim_right, 1000)
theo_y = (pearson3.ppf(1 - x / 100, self.coeff_of_skew) *
self.coeff_of_var + 1) * self.expectation
self.ax.plot(x, theo_y, "--", lw=1, label="矩法估计参数概率曲线")
        # plot the theoretical curve
def plot_fitting(self, sv_ratio=0, ex_fitting=True, output=True):
"""
        # Optimized curve fitting
        ## Input parameters
        + `sv_ratio`: ratio coefficient, i.e. the ratio of the skewness coefficient `Cs` to the coefficient of variation `Cv`.
        Defaults to 0, which disables the ratio constraint.
            - When `sv_ratio` ≠ 0, Cs does not take part in the fitting and `Cs` = `sv_ratio` × `Cv`;
            - When `sv_ratio` = 0, Cs takes part in the fitting as usual.
        + `ex_fitting`: whether EX is adjusted during fitting, defaults to True
        + `output`: whether to print the parameters to the console, defaults to True
"""
if sv_ratio == 0:
if ex_fitting:
p3 = lambda prob, ex, cv, cs: (pearson3.ppf(
1 - prob / 100, cs) * cv + 1) * ex
[self.fit_EX, self.fit_CV, self.fit_CS], pcov = curve_fit(
p3, self.empi_prob, self.arr,
[self.expectation, self.coeff_of_var, self.coeff_of_skew])
else:
p3 = lambda prob, cv, cs: (pearson3.ppf(1 - prob / 100, cs) *
cv + 1) * self.expectation
[self.fit_CV, self.fit_CS
], pcov = curve_fit(p3, self.empi_prob, self.arr,
[self.coeff_of_var, self.coeff_of_skew])
self.fit_EX = self.expectation
else:
if ex_fitting:
p3 = lambda prob, ex, cv: (pearson3.ppf(
1 - prob / 100, cv * sv_ratio) * cv + 1) * ex
[self.fit_EX, self.fit_CV
], pcov = curve_fit(p3, self.empi_prob, self.arr,
[self.expectation, self.coeff_of_var])
else:
p3 = lambda prob, cv: (pearson3.ppf(
1 - prob / 100, cv * sv_ratio) * cv + 1) * self.expectation
[self.fit_CV], pcov = curve_fit(p3, self.empi_prob, self.arr,
[self.coeff_of_var])
self.fit_EX = self.expectation
self.fit_CS = self.fit_CV * sv_ratio
if output:
print("适线后")
print("期望 EX 为 %.2f" % self.fit_EX)
print("变差系数 Cv 为 %.4f" % self.fit_CV)
print("偏态系数 Cs 为 %.4f" % self.fit_CS)
def fitted_plot(self):
"""
        # Plot the fitted probability curve
"""
x = np.linspace(self.prob_lim_left, self.prob_lim_right, 1000)
theoY = (pearson3.ppf(1 - x / 100, self.fit_CS) * self.fit_CV +
1) * self.fit_EX
self.ax.plot(x, theoY, lw=2, label="适线后概率曲线")
        # plot the fitted theoretical curve
def prob_to_value(self, prob):
"""
        # Convert a design frequency to a design value
        ## Input parameters
        + `prob`: design frequency, in percent
        ## Output
        + `value`: design value
"""
value = (pearson3.ppf(1 - prob / 100, self.fit_CS) * self.fit_CV +
1) * self.fit_EX
print("%.4f%% 的设计频率对应的设计值为 %.2f" % (prob, value))
return value
def value_to_prob(self, value):
"""
        # Convert a design value to a design frequency
        ## Input parameters
        + `value`: design value
        ## Output
        + `prob`: design frequency, in percent
"""
prob = 100 - pearson3.cdf(
(value / self.fit_EX - 1) / self.fit_CV, self.fit_CS) * 100
print("%.2f 的设计值对应的设计频率为 %.4f%%" % (value, prob))
return prob
def successive():
data = Data(
np.array([
680.6, 468.4, 489.2, 450.6, 436.8, 586.2, 567.9, 473.9, 357.8,
650.9, 391, 201.2, 452.4, 750.9, 585.2, 304.5, 370.5, 351, 294.8,
360.9, 276, 549.1, 534, 349, 350, 372, 292, 485, 427, 620.8, 539,
474, 292, 228, 357, 425, 365, 241, 267, 305, 306, 238.9, 277.3,
170.8, 217.9, 208.5, 187.9
]))
    # This example is taken from Engineering Hydrology《工程水文学》(4th ed., 2010, eds. 詹道江, 徐向阳, 陈元芳), pp. 150-151, Table 6-3
data.figure()
data.empi_scatter()
data.stat_params()
data.moment_plot()
data.plot_fitting()
data.fitted_plot()
data.ax.legend()
data.fig.savefig("successive.pdf", transparent=True)
def nonsuccessive():
data = Data(
np.array([
1800, 530, 590, 1460, 2440, 490, 1060, 1790, 1480, 2770, 1420, 410,
7100, 2200, 3400, 1300, 3080, 946, 430, 857, 421, 4500, 2800, 846,
1400, 1100, 740, 3600, 1470, 690
]))
data.history(np.array([9200]), 100, 2)
    # This example is taken from Engineering Hydrology《工程水文学》(2nd ed., 1992, ed. 王燕生), p. 203, Example 10-2
data.figure()
data.empi_scatter()
data.stat_params()
data.moment_plot()
data.plot_fitting()
data.fitted_plot()
data.ax.legend()
data.fig.savefig("nonsuccessive.pdf", transparent=True)
if __name__ == "__main__":
successive()
nonsuccessive()
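    # Minimal extra sketch (added for illustration, not part of the original
    # examples): once fitted parameters are available, design values and design
    # frequencies can be queried directly. The parameter values below are
    # hypothetical and only serve to exercise prob_to_value / value_to_prob.
    demo = Data(np.array([300.0, 420.0, 510.0, 260.0, 380.0]))
    demo.fit_EX, demo.fit_CV, demo.fit_CS = 400.0, 0.3, 0.6  # hypothetical fitted parameters
    demo.prob_to_value(1)        # design discharge at P = 1 %
    demo.value_to_prob(600.0)    # exceedance frequency of Q = 600 m^3/s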
|
Litou-lyh/pytorch_resnet_cifar10
|
scheduler.py
|
import torch
import torch.optim.lr_scheduler as lr_scheduler
class MultiStageScheduler():
def __init__(self,optimizer,args=None) -> None:
self.optimizer = optimizer
self.policy = args.policy if args else None
self.schedulers = []
self.steps = 0
self.stage = 0
self.milestones = args.milestones if args and args.policy[0] != 'step' else None
self.decay = 0.1
for i, p in enumerate(self.policy):
if p == 'cyclic':
self.schedulers.append(lr_scheduler.CyclicLR(self.optimizer, base_lr=args.lr * pow(self.decay,i), max_lr=args.max_lr * pow(self.decay,i),step_size_up=5000 * pow(self.decay * 5,i)))
elif p == 'step':
self.schedulers.append(lr_scheduler.MultiStepLR(self.optimizer, milestones=args.milestones))
elif p == 'rop':
self.schedulers.append(lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='max'))
elif p == 'sgdr':
self.schedulers.append(lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer,T_0=10, T_mult=2))
elif p == 'constant':
self.schedulers.append(None)
elif p == 'cos':
self.schedulers.append(lr_scheduler.CosineAnnealingLR(self.optimizer, args.epochs))
self.current_scheduler = self.schedulers[0] if self.schedulers else None
def check_switch(self):
if self.stage >= len(self.milestones):
return
if self.steps > self.milestones[self.stage]:
self.stage += 1
if self.stage >= len(self.schedulers):
                raise Exception('scheduler stage exceeds the number of configured schedulers')
self.current_scheduler = self.schedulers[self.stage]
def step(self, arg=None):
if self.milestones:
self.check_switch()
if not self.current_scheduler: return
if arg:
self.current_scheduler.step(arg)
else:
self.current_scheduler.step()
self.steps += 1
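# ----------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original
# module): a dummy model and a SimpleNamespace standing in for the argparse
# args expected by MultiStageScheduler. Policy, milestones and learning
# rates below are arbitrary illustrative choices.
if __name__ == "__main__":
    from types import SimpleNamespace
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    args = SimpleNamespace(policy=['cyclic', 'step'], milestones=[10],
                           lr=0.1, max_lr=1.0, epochs=20)
    scheduler = MultiStageScheduler(optimizer, args)
    for it in range(15):
        optimizer.step()   # dummy optimizer step, no gradients involved here
        scheduler.step()   # switches from CyclicLR to MultiStepLR after 10 steps
    print("final lr:", optimizer.param_groups[0]['lr'])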
|
Litou-lyh/pytorch_resnet_cifar10
|
trainer.py
|
<filename>trainer.py
import argparse
import os
from scheduler import MultiStageScheduler
import shutil
import time
from typing_extensions import get_args
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import resnet
model_names = sorted(name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet")
and callable(resnet.__dict__[name]))
print(model_names)
parser = argparse.ArgumentParser(description='Proper ResNets for CIFAR10 in pytorch')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet56',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: resnet56)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--base_lr', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
                    metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=50, type=int,
metavar='N', help='print frequency (default: 50)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='save_temp', type=str)
parser.add_argument('--save-every', dest='save_every',
help='Saves checkpoints at every specified number of epochs',
type=int, default=10)
parser.add_argument('--gpu', '--device', dest='gpu',
help='gpu_id',
type=str, default='cuda:0')
parser.add_argument('--policy','--p', nargs='*', dest='policy', help=
'lr scheduler', type=str, default=['step'])
parser.add_argument('--milestones', nargs='*', help=
'milestones', type=int, default=None)
parser.add_argument('--max_lr', default=3.0, type=float,
help='max learning rate')
parser.add_argument('--exp', dest='exp',type=str, help='experiments: super / lr_range', default='super')
parser.add_argument('--seed', dest='seed',type=int,default=None)
parser.add_argument('--max_iters', dest='max_iters',type=int,default=float('inf'))
best_prec1 = 0
iters = 0
def main():
global args, best_prec1,iters
args = parser.parse_args()
print('policy:',args.policy)
if args.seed:
torch.backends.cudnn.deterministic = True
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# np.random.seed(args.seed)
# Check the save_dir exists or not
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
model = torch.nn.DataParallel(resnet.__dict__[args.arch](),device_ids=[args.gpu])
model.cuda(args.gpu)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.evaluate, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
if args.half:
model.half()
criterion.half()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,nesterov=False)
max_iter = args.max_iters#10000
scheduler = None
second_scheduler = None
switch_iter = float('inf')
# if args.p1 == 'cyclic':
# max_iter = 80000
# if args.p2:
# max_iter = 15000
# switch_iter = 10000
# sche_list = [scheduler, second_scheduler]
# policy_list = [args.p1, args.p2]
# for idx, p in enumerate(policy_list):
# if p == 'cyclic':
# sche_list[idx] = torch.optim.lr_scheduler.CyclicLR(optimizer, 0.1, 0.3, step_size_up=2000, step_size_down=None,
# mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=True,
# base_momentum=0.8, max_momentum=0.9, last_epoch=-1)
# elif p == 'rop':
# sche_list[idx] = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=10, threshold=0.0001,
# threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08, verbose=False)
# elif p == 'step': # small problem, default p2 != step
# sche_list[idx] = torch.optim.lr_scheduler.MultiStepLR(optimizer,
# milestones=[100,150], last_epoch=args.start_epoch - 1)
# else:
# sche_list[idx] = None
scheduler = MultiStageScheduler(optimizer, args)
# if args.arch in ['resnet1202', 'resnet110']:
# for resnet1202 original paper uses lr=0.01 for first 400 minibatches for warm-up
# then switch back. In this setup it will correspond for first epoch.
# for param_group in optimizer.param_groups:
# param_group['lr'] = args.lr*0.1
# current_sche = sche_list[0]
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
if iters >= max_iter:
break
# if iters == switch_iter:
# current_sche = sche_list[1]
print(f"lr: {optimizer.state_dict()['param_groups'][0]['lr']}")
# train for one epoch
if scheduler and type(scheduler.current_scheduler) == torch.optim.lr_scheduler.CyclicLR:
train(train_loader, model, criterion, optimizer, epoch, scheduler=scheduler)
else:
train(train_loader, model, criterion, optimizer, epoch, scheduler=None)
# evaluate on validation set
prec1 = 0
if args.exp == 'super' or epoch == 99: # only for lr range test
prec1 = validate(val_loader, model, criterion)
else:
            pass  # skip validation on this epoch (lr-range test only validates at the end)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
'''Change LR policy here'''
if scheduler:
if type(scheduler.current_scheduler) == torch.optim.lr_scheduler.ReduceLROnPlateau:
scheduler.step(prec1)
elif type(scheduler.current_scheduler) == torch.optim.lr_scheduler.MultiStepLR or type(scheduler.current_scheduler) == torch.optim.lr_scheduler.CosineAnnealingWarmRestarts or type(scheduler.current_scheduler) == torch.optim.lr_scheduler.CosineAnnealingLR:
scheduler.step()
if epoch > 0 and epoch % args.save_every == 0:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'checkpoint.th'))
save_checkpoint({
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'model.th'))
#print(f"lr: {optimizer.state_dict()['param_groups'][0]['lr']}")
def train(train_loader, model, criterion, optimizer, epoch, scheduler=None, switch_iter=float('inf')):
global iters
"""
Run one train epoch
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(args.gpu)
input_var = input.cuda(args.gpu)
target_var = target
if args.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# for CLR policy
if scheduler:
scheduler.step()
iters += 1
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
def validate(val_loader, model, criterion):
global acc_list
"""
Run evaluation
"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
target = target.cuda(args.gpu)
input_var = input.cuda(args.gpu)
target_var = target.cuda(args.gpu)
if args.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
# acc_list.append(prec1.item())
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'
.format(top1=top1))
acc_list.append(top1.avg)
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""
Save the training model
"""
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
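# Worked example for accuracy() (illustrative comment, not executed): with
# output = torch.tensor([[2.0, 1.0, 0.1], [0.2, 0.3, 2.5]]) and
# target = torch.tensor([0, 1]), the top-1 predictions are [0, 2] (only the
# first one is correct) and the second-best predictions are [1, 1] (covering
# the remaining target), so accuracy(output, target, topk=(1, 2)) returns
# precisions of 50.0 and 100.0.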
if __name__ == '__main__':
print(str(None))
acc_list = []
main()
plt.plot([i for i in range(len(acc_list))], acc_list, '.-')
plt.xlabel('epoch')
plt.ylabel('test acc')
plt.show()
with open('test_acc_plot.txt','a') as f:
f.write(f'policy: {args.policy}, lr: {args.lr}, bs: {args.batch_size}, e: {args.epochs}, seed: {args.seed} ')
f.write('[')
for j in range(len(acc_list)):
f.write(str(acc_list[j]) + ', ')
f.write(']\n')
# plt.plot([i for i in range(len(acc_list))], acc_list, '.-')
# plt.xlabel('epoch')
# plt.ylabel('test acc')
# plt.show()
|
taldatech/tf-bgd
|
bgd_model.py
|
<filename>bgd_model.py
'''
Bayesian Gradient Descent
Implementation of the BGD algorithm:
The basic assumption is that in each step, the previous posterior distribution is used as the new prior distribution and that the parametric distribution is approximately a Diagonal Gaussian,
that is, all the parameters of the weight vector `theta` are independent.
We define the following:
* `epsilon_i` - a Random Variable (RV) sampled from N(0,1)
* `theta` - the weights which we wish to find their posterior distribution
* `phi` = (mu,sigma) - the parameters which serve as a condition for the distribution of `theta`
* `mu` - the mean of the weights' distribution, initially sampled from `N(0, 2/(n_input + n_output))`
* `sigma` - the STD (Variance's root) of the weights' distribution, initially set to a small constant.
* `K` - the number of sub-networks
* `eta` - hyper-parameter to compensate for the accumulated error (tunable).
* `L(theta)` - Loss function
* See Jupyter Notebook for more details and derivations
'''
import tensorflow as tf
import numpy as np
from datetime import datetime
class BgdModel():
def __init__(self, config, mode):
'''
mode: train or predict
config: dictionary consisting of network's parameters
config uses tf's flags
'''
assert mode.lower() in ['train', 'predict']
self.config = config
self.mode = mode.lower()
self.num_sub_networks = config['num_sub_networks'] # K
self.num_layers = config['num_layers']
self.n_inputs = config['n_inputs']
self.n_outputs = config['n_outputs']
self.hidden_units = config['hidden_units']
self.sigma_0 = config['sigma_0']
self.eta = config['eta']
self.batch_size = config['batch_size']
# Learning Rate Scheduling:
self.decay_steps = config['decay_steps']
self.decay_rate = config['decay_rate']
self.dtype = tf.float16 if config['use_fp16'] else tf.float32 # for faster learning
self.build_model()
def build_model(self):
'''
Builds the BNN model.
'''
print("building model..")
self.graph = tf.Graph()
with self.graph.as_default():
self.init_placeholders()
self.build_variables()
self.build_dnn()
self.build_losses()
self.build_grads()
self.build_eval()
self.build_predictions()
# Merge all the training summaries
self.summary_op = tf.summary.merge_all()
def init_placeholders(self):
'''
Initialize the placeholders to interact with the outside world.
'''
print("initializing placeholders...")
# inputs: [batch_size, data]
self.inputs = tf.placeholder(tf.float32, shape=(None,self.n_inputs), name="inputs")
# outputs: [batch_size, data]
self.targets = tf.placeholder(tf.float32, shape=(None,self.n_outputs), name="outputs")
def build_variables(self):
'''
Builds the variables used in the network, trainable and random-variables.
'''
print("building variables...")
with tf.name_scope("variables"):
self.global_step = tf.Variable(0, trainable=False, name="global_step", dtype=tf.float32)
self.global_step_op = \
tf.assign(self.global_step, self.global_step + 1)
self.global_epoch_step = tf.Variable(0, trainable=False, name='global_epoch_step')
self.global_epoch_step_op = \
tf.assign(self.global_epoch_step, self.global_epoch_step + 1)
# learning rate:
self.eta_rate = tf.train.exponential_decay(np.float32(self.eta), self.global_step,
self.decay_steps, self.decay_rate)
self.mu_s = self.build_mu_s()
self.sigma_s = self.build_sigma_s()
self.epsilons_s = self.build_epsilons_s()
self.theta_s = self.build_theta_s()
self.num_weights = (self.n_inputs + 1) * self.hidden_units + \
(self.hidden_units + 1) * (self.hidden_units) * (self.num_layers - 1) + \
(self.hidden_units + 1) * self.n_outputs
def build_mu_layer(self, n_inputs, n_outputs, n_outputs_connections, name=None):
'''
This function creates the mean variables for a layer (declared with trainable=False; they are updated manually via assign ops).
'''
if name is not None:
name_ker = "mu_ker_" + name
name_bias = "mu_bias_" + name
else:
name_ker = "mu_ker"
name_bias = "mu_bias"
# Reminder: we add 1 because of the bias
mu_ker = tf.Variable(tf.random_normal(shape=(n_inputs, n_outputs), mean=0.0,
stddev=(tf.sqrt(2 / (n_inputs + 1 + n_outputs_connections)))
),name=name_ker,trainable=False)
mu_bias = tf.Variable(tf.random_normal(shape=(n_outputs,), mean=0.0,
stddev=(tf.sqrt(2 / (n_inputs + 1 + n_outputs_connections)))
), name=name_bias, trainable=False)
return mu_ker, mu_bias
def build_mu_s(self):
'''
This function builds the mean variables for the whole network.
Returns a list of mean variables.
'''
mu_s = []
for i in range(self.num_layers + 1):
if not i:
# Note: this might be wrong; for the layer feeding the output there is a single output,
# so instead of n_hidden the initializer should use the next layer's input size.
if ( i + 1 == self.num_layers):
mu_ker, mu_bias = self.build_mu_layer(self.n_inputs, self.hidden_units, self.n_outputs, name="hid_0")
else:
mu_ker, mu_bias = self.build_mu_layer(self.n_inputs, self.hidden_units, self.hidden_units, name="hid_0")
elif (i == self.num_layers):
mu_ker, mu_bias = self.build_mu_layer(self.hidden_units, self.n_outputs, self.n_outputs, name="out")
else:
if ( i + 1 == self.num_layers):
mu_ker, mu_bias = self.build_mu_layer(self.hidden_units, self.hidden_units, self.n_outputs, name="hid_" + str(i))
else:
mu_ker, mu_bias = self.build_mu_layer(self.hidden_units, self.hidden_units, self.hidden_units, name="hid_" + str(i))
mu_s += [mu_ker, mu_bias]
return mu_s
def build_sigma_layer(self, n_inputs, n_outputs, sigma_0=0.001 ,name=None):
'''
This function creates the sigma (standard-deviation) variables for a layer (trainable=False; updated manually).
'''
if name is not None:
name_ker = "sigma_ker_" + name
name_bias = "sigma_bias_" + name
else:
name_ker = "sigma_ker"
name_bias = "sigma_bias"
sigma_ker = tf.Variable(tf.fill((n_inputs, n_outputs), sigma_0), name=name_ker, trainable=False)
sigma_bias = tf.Variable(tf.fill((n_outputs,), sigma_0), name=name_bias, trainable=False)
return sigma_ker, sigma_bias
def build_sigma_s(self):
'''
This function builds the sigma (standard-deviation) variables for the whole network.
Returns a list of sigma variables.
'''
sigma_s = []
for i in range(self.num_layers + 1):
if not i:
sigma_ker, sigma_bias = self.build_sigma_layer(self.n_inputs, self.hidden_units, sigma_0=self.sigma_0 ,name="hid_0")
elif (i == self.num_layers):
sigma_ker, sigma_bias = self.build_sigma_layer(self.hidden_units, self.n_outputs, sigma_0=self.sigma_0, name="out")
else:
sigma_ker, sigma_bias = self.build_sigma_layer(self.hidden_units, self.hidden_units, sigma_0=self.sigma_0, name="hid_" + str(i))
sigma_s += [sigma_ker, sigma_bias]
return sigma_s
def build_epsilons_layer(self, n_inputs, n_outputs, K, name=None):
'''
This function creates the epsilons random variables for a layer in each sub-network k
'''
if name is not None:
name_ker = "epsilons_ker_" + name
name_bias = "epsilons_bias_" + name
else:
name_ker = "epsilons_ker"
name_bias = "epsilons_bias"
epsilons_ker = [tf.random_normal(shape=(n_inputs, n_outputs), mean=0.0, stddev=1,
name=name_ker + "_" + str(i)) for i in range(K)]
epsilons_bias = [tf.random_normal(shape=(n_outputs,), mean=0.0, stddev=1,
name=name_bias + "_" + str(i)) for i in range(K)]
return epsilons_ker, epsilons_bias
def build_epsilons_s(self):
'''
This function builds the epsilons random variables for the whole network.
Returns a list of lists of epsilons variables.
'''
epsilons_s = []
for i in range(self.num_layers + 1):
if not i:
epsilons_ker, epsilons_bias = self.build_epsilons_layer(self.n_inputs, self.hidden_units, self.num_sub_networks ,name="hid_0")
elif (i == self.num_layers):
epsilons_ker, epsilons_bias = self.build_epsilons_layer(self.hidden_units, self.n_outputs, self.num_sub_networks, name="out")
else:
epsilons_ker, epsilons_bias = self.build_epsilons_layer(self.hidden_units, self.hidden_units, self.num_sub_networks, name="hid_" + str(i))
epsilons_s += [epsilons_ker, epsilons_bias]
return epsilons_s
def build_theta_layer(self, mu, sigma, epsilons, K, name=None):
'''
This function creates the theta variables for a layer in each sub-network k.
Indices for mu, sigma, epsilons:
0 - kernel
1 - bias
'''
if name is not None:
name_ker = "theta_ker_" + name
name_bias = "theta_bias_" + name
else:
name_ker = "theta_ker"
name_bias = "theta_bias"
theta_ker = [tf.identity(mu[0] + tf.multiply(epsilons[0][j], sigma[0]),
name=name_ker + "_" + str(j)) for j in range(K)]
theta_bias = [tf.identity(mu[1] + tf.multiply(epsilons[1][j], sigma[1]),
name=name_bias + "_" + str(j)) for j in range(K)]
return theta_ker, theta_bias
def build_theta_s(self):
'''
This function builds the theta variables for the whole network.
Returns a list of lists of theta variables.
'''
theta_s = []
for i in range(0, 2 * (self.num_layers + 1) ,2):
if (i == 2 * self.num_layers):
theta_ker, theta_bias = self.build_theta_layer(self.mu_s[i:i + 2],
self.sigma_s[i:i + 2],
self.epsilons_s[i:i + 2],
self.num_sub_networks,
name="out")
else:
theta_ker, theta_bias = self.build_theta_layer(self.mu_s[i:i + 2],
self.sigma_s[i:i + 2],
self.epsilons_s[i:i + 2],
self.num_sub_networks,
name="hid_" + str(i))
theta_s += [theta_ker, theta_bias]
return theta_s
def build_theta_layer_boundries(self, mu, sigma, K, name=None):
'''
This function creates the max and min theta variables for a layer in each sub-network k.
Indices for mu, sigma, epsilons:
0 - kernel
1 - bias
'''
if name is not None:
name_ker = "theta_ker_" + name
name_bias = "theta_bias_" + name
else:
name_ker = "theta_ker"
name_bias = "theta_bias"
theta_ker_max = [tf.identity(mu[0] + sigma[0],
name=name_ker + "_max_" + str(j)) for j in range(K)]
theta_bias_max = [tf.identity(mu[1] + sigma[1],
name=name_bias + "_max_" + str(j)) for j in range(K)]
theta_ker_min = [tf.identity(mu[0] - sigma[0],
name=name_ker + "_min_" + str(j)) for j in range(K)]
theta_bias_min = [tf.identity(mu[1] - sigma[1],
name=name_bias + "_min_" + str(j)) for j in range(K)]
return theta_ker_min, theta_bias_min, theta_ker_max, theta_bias_max
def build_theta_s_boundries(self):
'''
This function builds the max and min theta variables for the whole network.
Returns a list of lists of theta variables.
'''
theta_s_min = []
theta_s_max = []
for i in range(0, 2 * (self.num_layers + 1) ,2):
if (i == 2 * self.num_layers):
theta_ker_min, theta_bias_min, theta_ker_max, theta_bias_max = self.build_theta_layer_boundries(self.mu_s[i:i + 2],
self.sigma_s[i:i + 2],
self.num_sub_networks,
name="out")
else:
theta_ker_min, theta_bias_min, theta_ker_max, theta_bias_max = self.build_theta_layer_boundries(self.mu_s[i:i + 2],
self.sigma_s[i:i + 2] ,
self.num_sub_networks,
name="hid_" + str(i))
theta_s_min += [theta_ker_min, theta_bias_min]
theta_s_max += [theta_ker_max, theta_bias_max]
return theta_s_min, theta_s_max
def build_hidden_layers(self, inputs, n_layers, n_hidden, K, activation=tf.nn.relu):
'''
This function builds and applies the dense hidden layers of the network.
Returns the layers and their corresponding outputs.
'''
hiddens_func = []
hiddens_out = []
for i in range(n_layers):
if not i:
hid_funcs = [tf.layers.Dense(n_hidden, name="hidden_0_" + str(k), activation=activation) for k in range(K)]
hid_out = [hid_funcs[k](inputs) for k in range(K)]
hiddens_func.append(hid_funcs)
hiddens_out.append(hid_out)
else:
hid_funcs = [tf.layers.Dense(n_hidden, name="hidden_" + str(i) + "_" + str(k),
activation=activation) for k in range(K)]
hid_out = [hid_funcs[k](hiddens_out[i - 1][k]) for k in range(K)]
hiddens_func.append(hid_funcs)
hiddens_out.append(hid_out)
return hiddens_func, hiddens_out
def build_dnn(self):
'''
This function builds the deep network's layout in terms of layers.
'''
print("building layers...")
with tf.name_scope("dnns"):
self.hiddens_funcs, self.hiddens_out = self.build_hidden_layers(self.inputs,
self.num_layers,
self.hidden_units,
self.num_sub_networks)
self.out_funcs = [tf.layers.Dense(self.n_outputs, name="outputs_" + str(i), activation=None) \
for i in range(self.num_sub_networks)]
self.outputs = [self.out_funcs[k](self.hiddens_out[-1][k]) for k in range(self.num_sub_networks)]
total_hidden_params = sum([self.hiddens_funcs[i][0].count_params() for i in range(self.num_layers)])
graph_params_count = total_hidden_params + self.out_funcs[0].count_params()
if (graph_params_count != self.num_weights):
print("Number of actual parameters ({}) different from the calculated number ({})".format(
graph_params_count, self.num_weights))
def build_losses(self):
'''
This function builds the errors and losses of the network.
'''
print("configuring loss...")
with tf.name_scope("loss"):
errors = [(self.outputs[i] - self.targets) for i in range(self.num_sub_networks)]
self.losses = [0.5 * tf.reduce_sum(tf.square(errors[i]), name="loss_" + str(i)) \
for i in range(self.num_sub_networks)]
def grad_mu_sigma(self, gradients_tensor, mu, sigma, epsilons, eta):
# Calculate number of sub-networks = samples:
K = len(epsilons[0])
'''
We need to sum over K, that is, for each weight in num_weights, we calculate
the average/weighted average over K.
gradients_tensor[k] is the gradients of sub-network k out of K.
Note: in order to apply the gradients later, we should keep the variables in gradient_tensor apart.
'''
# Number of separated variables in each network (in order to update each one without changing the shape)
num_vars = sum(1 for gv in gradients_tensor[0] if gv[0] is not None)
mu_n = []
sigma_n = []
# filter out irrelevant variables (those without gradients)
for k in range(len(gradients_tensor)):
gradients_tensor[k] = [gradients_tensor[k][i] for i in range(len(gradients_tensor[k]))
if gradients_tensor[k][i][0] is not None]
for var_layer in range(num_vars):
var_list = [tf.reshape(gradients_tensor[k][var_layer][0], [-1]) for k in range(K)]
E_L_theta = tf.reduce_mean(var_list, axis=0)
var_list = [tf.reshape(gradients_tensor[k][(var_layer)][0] * epsilons[var_layer][k], [-1]) for k in range(K)]
E_L_theta_epsilon = tf.reduce_mean(var_list, axis=0)
# reshape it back to its original shape
new_mu = mu[var_layer] - eta * tf.square(sigma[var_layer]) * tf.reshape(E_L_theta, mu[var_layer].shape)
mu_n.append(new_mu)
E_L_theta_epsilon = tf.reshape(E_L_theta_epsilon, sigma[var_layer].shape)
new_sigma = sigma[var_layer] * tf.sqrt(1 + tf.square(0.5 * sigma[var_layer] * E_L_theta_epsilon)) - 0.5 * tf.square(sigma[var_layer]) * E_L_theta_epsilon
sigma_n.append(new_sigma)
return mu_n, sigma_n
def build_grads(self):
'''
This function builds the gradient-update nodes of the network.
'''
print("configuring optimization and gradients...")
with tf.name_scope("grads"):
optimizer = tf.train.GradientDescentOptimizer(self.eta)
gradients = [optimizer.compute_gradients(loss=self.losses[i]) for i in range(self.num_sub_networks)]
mu_n, sigma_n = self.grad_mu_sigma(gradients, self.mu_s, self.sigma_s, self.epsilons_s, self.eta_rate)
self.grad_op = [self.mu_s[i].assign(mu_n[i]) for i in range(len(self.mu_s))] + \
[self.sigma_s[i].assign(sigma_n[i]) for i in range(len(self.sigma_s))]
def build_eval(self):
'''
This function builds the model's evaluation nodes.
'''
print("preparing evaluation...")
with tf.name_scope("eval"):
self.accuracy = tf.reduce_mean([tf.reduce_mean(self.losses[i]) for i in range(self.num_sub_networks)])
def build_predictions(self):
'''
This function builds the model's prediction nodes.
'''
print("preparing predictions")
with tf.name_scope("prediction"):
self.predictions = tf.reduce_mean(self.outputs, axis=0)
self.mean, self.variance = tf.nn.moments(tf.convert_to_tensor(self.outputs), axes=[0])
self.std = tf.sqrt(self.variance)
self.max_output = tf.reduce_max(self.outputs, axis=0)
self.min_output = tf.reduce_min(self.outputs, axis=0)
def weights_init(self, sess):
'''
Initialize BNN weights.
'''
for k in range(self.num_sub_networks):
weights_init = [self.theta_s[i][k].eval() for i in range(len(self.theta_s))]
for i in range(self.num_layers):
self.hiddens_funcs[i][k].set_weights([weights_init[2 * i], weights_init[2 * i + 1]])
self.out_funcs[k].set_weights([weights_init[-2], weights_init[-1]])
def train(self, sess, inputs, outputs):
'''
Execute a single training step.
Returns train step accuracy.
'''
sess.run(self.grad_op, feed_dict={self.inputs: inputs, self.targets: outputs})
sess.run(self.global_step_op)
for k in range(self.num_sub_networks):
weights_calc = [self.theta_s[i][k].eval() for i in range(len(self.theta_s))]
for i in range(self.num_layers):
self.hiddens_funcs[i][k].set_weights([weights_calc[2 * i], weights_calc[2 * i + 1]])
self.out_funcs[k].set_weights([weights_calc[-2], weights_calc[-1]])
acc_train = self.accuracy.eval(feed_dict={self.inputs: inputs, self.targets: outputs})
return acc_train
def calc_accuracy(self, sess, inputs, outputs):
'''
Returns the accuracy over the inputs using the BNN's current weights.
'''
return self.accuracy.eval(feed_dict={self.inputs: inputs, self.targets: outputs})
def predict(self, sess, inputs):
'''
Returns predictions for the inputs using the BNN's current weights.
'''
return self.predictions.eval(feed_dict={self.inputs: inputs})
def calc_confidence(self, sess, inputs):
'''
Returns the upper and lower confidence for the inputs using the BNN's current weights.
'''
stan_dv = self.std.eval(feed_dict={self.inputs: inputs})
upper_conf = stan_dv
lower_conf = -1 * stan_dv
return upper_conf, lower_conf
def save(self, sess, path, var_list=None, global_step=None):
# var_list = None returns the list of all saveable variables
saver = tf.train.Saver(var_list)
save_path = saver.save(sess, save_path=path, global_step=global_step)
print('model saved at %s' % save_path)
def restore(self, sess, path, var_list=None):
# var_list = None returns the list of all saveable variables
saver = tf.train.Saver(var_list)
saver.restore(sess, save_path=path)
print('model restored from %s' % path)
|
taldatech/tf-bgd
|
bgd_regression_example.py
|
# imports
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from bgd_model import BgdModel
from matplotlib import pyplot as plt
from datetime import datetime
import time
import os
import json
import shutil
from collections import OrderedDict
from random import shuffle
import argparse
# Globals:
# write_log = False
FLAGS = tf.app.flags.FLAGS
def set_train_flags(num_sub_networks=10, hidden_units=100, num_layers=1, eta=1.0, sigma_0=0.0001,
batch_size=5, epochs=40, n_inputs=1, n_outputs=1, decay_steps=10000, decay_rate=1/10,
display_step=100, save_freq=200):
tf.app.flags.FLAGS.__flags.clear()
# Network parameters
tf.app.flags.DEFINE_integer('num_sub_networks', num_sub_networks, 'Number of sub-networks (K)')
tf.app.flags.DEFINE_integer('hidden_units', hidden_units, 'Number of hidden units in each layer')
tf.app.flags.DEFINE_integer('num_layers', num_layers , 'Number of layers')
# Training parameters
tf.app.flags.DEFINE_float('eta', eta, 'eta parameter (step size)')
tf.app.flags.DEFINE_float('sigma_0', sigma_0, 'Initialization for sigma parameter')
tf.app.flags.DEFINE_integer('batch_size', batch_size, 'Batch size')
tf.app.flags.DEFINE_integer('max_epochs', epochs, 'Maximum # of training epochs')
tf.app.flags.DEFINE_integer('n_inputs', n_inputs, 'Inputs dimension')
tf.app.flags.DEFINE_integer('n_outputs', n_outputs, 'Outputs dimension')
tf.app.flags.DEFINE_integer('decay_steps', decay_steps, 'Decay steps for learning rate scheduling')
tf.app.flags.DEFINE_float('decay_rate', decay_rate, 'Decay rate for learning rate scheduling')
tf.app.flags.DEFINE_integer('display_freq', display_step, 'Display training status every this iteration')
tf.app.flags.DEFINE_integer('save_freq', save_freq, 'Save model checkpoint every this iteration')
tf.app.flags.DEFINE_string('model_dir', './model/', 'Path to save model checkpoints')
tf.app.flags.DEFINE_string('summary_dir', './model/summary', 'Path to save model summary')
tf.app.flags.DEFINE_string('model_name', 'linear_reg_bgd.ckpt', 'File name used for model checkpoints')
# Ignore command-line shortcut flags (dummy definitions so tf.app.flags does not reject them)
tf.app.flags.DEFINE_string('w', '', '')
tf.app.flags.DEFINE_string('s', '', '')
tf.app.flags.DEFINE_string('e', '', '')
tf.app.flags.DEFINE_string('b', '', '')
tf.app.flags.DEFINE_string('n', '', '')
tf.app.flags.DEFINE_string('l', '', '')
tf.app.flags.DEFINE_string('t', '', '')
tf.app.flags.DEFINE_string('g', '', '')
tf.app.flags.DEFINE_string('f', '', '')
tf.app.flags.DEFINE_string('r', '', '')
tf.app.flags.DEFINE_string('k', '', '')
tf.app.flags.DEFINE_string('y', '', '')
tf.app.flags.DEFINE_string('u', '', '')
tf.app.flags.DEFINE_boolean('use_fp16', False, 'Use half precision float16 instead of float32 as dtype')
# Runtime parameters
tf.app.flags.DEFINE_boolean('allow_soft_placement', True, 'Allow device soft placement')
tf.app.flags.DEFINE_boolean('log_device_placement', False, 'Log placement of ops on devices')
def set_predict_flags(checkpoint=-1):
tf.app.flags.FLAGS.__flags.clear()
latest_ckpt = tf.train.latest_checkpoint('./model/')
if (checkpoint == -1):
ckpt = latest_ckpt
else:
ckpt = './model/linear_reg_bgd.ckpt-' + str(checkpoint)
tf.app.flags.DEFINE_string('model_path',ckpt, 'Path to a specific model checkpoint.')
# Runtime parameters
tf.app.flags.DEFINE_boolean('allow_soft_placement', True, 'Allow device soft placement')
tf.app.flags.DEFINE_boolean('log_device_placement', False, 'Log placement of ops on devices')
# Ignore command-line shortcut flags (dummy definitions so tf.app.flags does not reject them)
tf.app.flags.DEFINE_string('w', '', '')
tf.app.flags.DEFINE_string('s', '', '')
tf.app.flags.DEFINE_string('e', '', '')
tf.app.flags.DEFINE_string('b', '', '')
tf.app.flags.DEFINE_string('n', '', '')
tf.app.flags.DEFINE_string('l', '', '')
tf.app.flags.DEFINE_string('t', '', '')
tf.app.flags.DEFINE_string('g', '', '')
tf.app.flags.DEFINE_string('f', '', '')
tf.app.flags.DEFINE_string('r', '', '')
tf.app.flags.DEFINE_string('k', '', '')
tf.app.flags.DEFINE_string('y', '', '')
tf.app.flags.DEFINE_string('u', '', '')
def create_model(FLAGS):
config = OrderedDict(sorted((dict([(key,val.value) for key,val in FLAGS.__flags.items()])).items()))
model = BgdModel(config, 'train')
return model
def restore_model(session, model, FLAGS):
ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
if (ckpt):
print("Found a checkpoint state...")
print(ckpt.model_checkpoint_path)
if (ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path)):
print('Reloading model parameters..')
model.restore(session, ckpt.model_checkpoint_path)
else:
if not os.path.exists(FLAGS.model_dir):
os.makedirs(FLAGS.model_dir)
print('Created new model parameters..')
session.run(tf.global_variables_initializer())
def batch_gen(x, y, batch_size):
if (len(x) != len(y)):
print("Error generating batches, source and target lists do not match")
return
total_samples = len(x)
curr_batch_size = 0
x_batch = []
y_batch = []
for i in range(len(x)):
if (curr_batch_size < batch_size):
x_batch.append(x[i])
y_batch.append(y[i])
curr_batch_size += 1
else:
yield(x_batch, y_batch)
x_batch = [x[i]]
y_batch = [y[i]]
curr_batch_size = 1
yield(x_batch, y_batch)
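# Illustrative example (not used below, assuming the generator above):
#   list(batch_gen([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], batch_size=2))
#   -> [([1, 2], [1, 4]), ([3, 4], [9, 16]), ([5], [25])]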
def batch_gen_random(x, y, batch_size):
if (len(x) != len(y)):
print("Error generating batches, source and target lists do not match")
return
total_samples = len(x)
curr_batch_size = 0
xy = list(zip(x,y))
shuffle(xy)
x_batch = []
y_batch = []
for i in range(len(xy)):
if (curr_batch_size < batch_size):
x_batch.append(xy[i][0])
y_batch.append(xy[i][1])
curr_batch_size += 1
else:
yield(x_batch, y_batch)
x_batch = [xy[i][0]]
y_batch = [xy[i][1]]
curr_batch_size = 1
yield(x_batch, y_batch)
def train(X_train, y_train, X_test, y_test, write_log=False):
avg_error_train = []
avg_error_valid = []
batch_size = FLAGS.batch_size
# Create a new model or reload existing checkpoint
model = create_model(FLAGS)
# Initiate TF session
with tf.Session(graph=model.graph,config=tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement,
gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
restore_model(sess, model, FLAGS)
input_size = X_train.shape[0] + X_test.shape[0]
test_size = X_test.shape[0]
total_batches = input_size // batch_size
print("# Samples: {}".format(input_size))
print("Total batches: {}".format(total_batches))
# Split data to training and validation sets
num_validation = test_size
total_valid_batches = num_validation // batch_size
total_train_batches = total_batches - total_valid_batches
print("Total validation batches: {}".format(total_valid_batches))
print("Total training batches: {}".format(total_train_batches))
if (write_log):
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "tf_logs"
logdir = "{}/run-{}/".format(root_logdir, now)
# TensorBoard-compatible binary log string called a summary
error_summary = tf.summary.scalar('Step-Loss', model.accuracy)
# Write summaries to logfiles in the log directory
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
step_time = 0.0
start_time = time.time()
global_start_time = start_time
# Training loop
print('Training..')
for epoch in range(FLAGS.max_epochs):
if (model.global_epoch_step.eval() >= FLAGS.max_epochs):
print('Training is already complete.', \
'current epoch:{}, max epoch:{}'.format(model.global_epoch_step.eval(), FLAGS.max_epochs))
break
batches_gen = batch_gen_random(X_train, y_train, batch_size)
batch_acc_train = []
batch_acc_test = []
for batch_i, batch in enumerate(batches_gen):
X_batch = batch[0]
Y_batch = batch[1]
# Execute a single training step
batch_acc_train = model.train(sess, X_batch, Y_batch)
batch_acc_test = model.calc_accuracy(sess, X_test, y_test)
if (write_log):
summary_str = error_summary.eval(feed_dict={model.inputs: X_batch, model.targets: Y_batch})
file_writer.add_summary(summary_str, model.global_step.eval())
if (model.global_step.eval() % FLAGS.display_freq == 0):
time_elapsed = time.time() - start_time
step_time = time_elapsed / FLAGS.display_freq
print("Epoch: ", model.global_epoch_step.eval(),
"Batch: {}/{}".format(batch_i, total_train_batches),
"Train Mean Error:", batch_acc_train,
"Valid Mean Error:", batch_acc_test)
# Save the model checkpoint
if (model.global_step.eval() % FLAGS.save_freq == 0):
print('Saving the model..')
checkpoint_path = os.path.join(FLAGS.model_dir, FLAGS.model_name)
model.save(sess, checkpoint_path, global_step=model.global_step)
json.dump(model.config,
open('%s-%d.json' % (checkpoint_path, model.global_step.eval()), 'w'),
indent=2)
# Increase the epoch index of the model
model.global_epoch_step_op.eval()
print('Epoch {0:} DONE'.format(model.global_epoch_step.eval()))
avg_error_train.append(np.mean(batch_acc_train))
avg_error_valid.append(np.mean(batch_acc_test))
if (write_log):
file_writer.close()
print('Saving the last model..')
checkpoint_path = os.path.join(FLAGS.model_dir, FLAGS.model_name)
model.save(sess, checkpoint_path, global_step=model.global_step)
json.dump(model.config,
open('%s-%d.json' % (checkpoint_path, model.global_step.eval()), 'w'),
indent=2)
total_time = time.time() - global_start_time
print('Training Terminated, Total time: {} seconds'.format(total_time))
return avg_error_train, avg_error_valid
def load_config(FLAGS):
config = json.load(open('%s.json' % FLAGS.model_path, 'r'))
for key, value in FLAGS.__flags.items():
config[key] = value.value
return config
def load_model(config):
model = BgdModel(config, 'predict')
return model
def restore_model_predict(session, model):
if tf.train.checkpoint_exists(FLAGS.model_path):
print('Reloading model parameters..')
model.restore(session, FLAGS.model_path)
else:
raise ValueError('No such file:[{}]'.format(FLAGS.model_path))
def predict(inputs):
# Load model config
config = load_config(FLAGS)
# Load configured model
model = load_model(config)
with tf.Session(graph=model.graph,config=tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement,
gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
# Reload existing checkpoint
restore_model_predict(sess, model)
print("Predicting results for inputs...")
# Prepare results dict
results = {}
# Predict
results['predictions'] = model.predict(sess, inputs)
# Statistics
results['max_out'] = model.max_output.eval(feed_dict={model.inputs: inputs})
results['min_out'] = model.min_output.eval(feed_dict={model.inputs: inputs})
upper_confidence, lower_confidence = model.calc_confidence(sess, inputs)
results['upper_confidence'] = upper_confidence
results['lower_confidence'] = lower_confidence
results['avg_sigma'] = np.mean([s.eval() for s in model.sigma_s])
print("Finished predicting.")
return results
def main():
parser = argparse.ArgumentParser(
description="train and test BGD regression of y=x^3")
parser.add_argument("-w", "--write_log", help="save log for tensorboard",
action="store_true")
parser.add_argument("-u", "--reset", help="reset, start training from scratch",
action="store_true")
parser.add_argument("-s", "--step", type=int,
help="display step to show training progress, default: 10")
parser.add_argument("-k", "--num_sub_nets", type=int,
help="number of sub networks (K parameter), default: 10")
parser.add_argument("-e", "--epochs", type=int,
help="number of epochs to run, default: 40")
parser.add_argument("-b", "--batch_size", type=int,
help="batch size, default: 1")
parser.add_argument("-n", "--neurons", type=int,
help="number of hidden units, default: 100")
parser.add_argument("-l", "--layers", type=int,
help="number of layers in each rnn, default: 1")
parser.add_argument("-t", "--eta", type=float,
help="eta parameter ('learning rate'), deafult: 50.0")
parser.add_argument("-g", "--sigma", type=float,
help="sigma_0 parameter, default: 0.002")
parser.add_argument("-f", "--save_freq", type=int,
help="frequency to save checkpoints of the model, default: 200")
parser.add_argument("-r", "--decay_rate", type=float,
help="decay rate of eta (exponential scheduling), default: 1/10")
parser.add_argument("-y", "--decay_steps", type=int,
help="decay steps fof eta (exponential scheduling), default: 10000")
args = parser.parse_args()
# Prepare the dataset
input_size = 25
train_size = int(np.ceil(0.8 * input_size))
test_size = input_size - train_size
# Generate dataset
X = np.random.uniform(low=-4, high=4, size=input_size)
y = np.power(X,3) + np.random.normal(0, 3, size=input_size)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
y_original = np.power(X, 3)
X_sorted = X[X.argsort()]
y_orig_sorted = y_original[X.argsort()]
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
X_train = X_train.reshape(-1,1)
X_test = X_test.reshape(-1,1)
X_real_test = np.linspace(-6, 6, 2000)
X_real_test = X_real_test.reshape(-1,1)
if (args.write_log):
write_log = True
else:
write_log = False
if (args.step):
display_step = args.step
else:
display_step = 10
if (args.num_sub_nets):
K = args.num_sub_nets
else:
K = 10
if (args.epochs):
epochs = args.epochs
else:
epochs = 40
if (args.batch_size):
batch_size = args.batch_size
else:
batch_size = 1
if (args.neurons):
num_units = args.neurons
else:
num_units = 100
if (args.layers):
num_layers = args.layers
else:
num_layers = 1
if (args.eta):
eta = args.eta
else:
eta = 50.0
if (args.sigma):
sigma = args.sigma
else:
sigma = 0.002
if (args.save_freq):
save_freq = args.save_freq
else:
save_freq = 200
if (args.decay_rate):
decay_rate = args.decay_rate
else:
decay_rate = 1/10
if (args.decay_steps):
decay_steps = args.decay_steps
else:
decay_steps = 10000
if (args.reset):
try:
shutil.rmtree('./model/')
except FileNotFoundError:
pass
set_train_flags(num_sub_networks=K, hidden_units=num_units, num_layers=num_layers, eta=eta, sigma_0=sigma,
batch_size=batch_size, epochs=epochs, n_inputs=1, n_outputs=1, decay_steps=decay_steps, decay_rate=decay_rate,
display_step=display_step, save_freq=save_freq)
avg_error_train, avg_error_valid = train(X_train, y_train, X_test, y_test, write_log=write_log)
set_predict_flags()
y_real_test_res = predict(X_real_test)
print("Maximum uncertainty: ",abs(max(y_real_test_res['upper_confidence'])))
# Visualize Error:
# plt.rcParams['figure.figsize'] = (15,20)
# SSE
plt.subplot(2,1,1)
plt.plot(range(len(avg_error_train)), avg_error_train, label="Train")
plt.plot(range(len(avg_error_valid)), avg_error_valid, label="Valid")
plt.xlabel('Epoch')
plt.ylabel('Mean Error')
plt.title('Train and Valid Mean Error vs Epoch')
plt.legend()
plt.subplot(2,1,2)
# Predictions of train and test vs original
X_train_sorted = X_train[X_train.T.argsort()]
y_noisy_sorted = y[X.argsort()]
y_real = np.power(X_real_test, 3)
plt.scatter(X_sorted, y_noisy_sorted, label='Noisy data', c='k')
plt.plot(X_sorted, y_orig_sorted, linestyle='-', marker='o', label='True data')
plt.plot(X_real_test, y_real_test_res['predictions'], linestyle='-', label= 'Test prediction')
plt.plot(X_real_test, y_real, linestyle='-', label= 'y = x^3')
low_conf = y_real_test_res['predictions'][:,0] + 100 * y_real_test_res['lower_confidence'][:,0]
up_conf = y_real_test_res['predictions'][:,0] + 100 * y_real_test_res['upper_confidence'][:,0]
plt.fill_between(X_real_test[:,0], low_conf, up_conf, interpolate=True, color='pink', alpha=0.5)
plt.legend()
plt.xlabel('X')
plt.ylabel('y')
plt.title(('$y=x^3$ for original input and BP predictions for noisy input'))
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
|
rzhao271/css-none-scraper
|
scraper.py
|
<reponame>rzhao271/css-none-scraper
import ast
import re
import requests
from lxml import etree
valid_property_re = re.compile('^[^(@:]+$')
# scrape_properties: void -> [(link, property-name)]
def scrape_properties():
main_page_url = 'https://developer.mozilla.org/en-US/docs/Web/CSS/Reference'
content = requests.get(main_page_url).text
tree = etree.HTML(content)
nodes = tree.xpath('//div[@class=\'index\']/ul/li')
processed_nodes = []
for node in nodes:
link = node.xpath('a')[0].get('href')
text = node.xpath('a/code')[0].text
if valid_property_re.match(text):
processed_nodes.append((link, text))
return processed_nodes
# scrape_prop: page_url: str -> initial_value: str|None
def scrape_prop(page_url):
content = requests.get(page_url).text
tree = etree.HTML(content)
try:
node = tree.xpath('//table[@class=\'properties\']/tbody/tr')[0]
heading = node.xpath('th/a')[0].text
value = etree.tostring(node.xpath('td')[0]).decode('UTF-8')
if heading.lower() == 'initial value':
return value
else:
return None
except:
return None
def main():
scrape = 1
# Scrape properties along with their initial values
properties_iv_dict = dict()
if scrape:
properties = scrape_properties()
for link, name in properties:
initial_value = scrape_prop(f'https://developer.mozilla.org/{link}')
properties_iv_dict[name] = initial_value
with open('prop-iv-dict.txt', 'w') as f:
f.write(str(properties_iv_dict))
else:
with open('prop-iv-dict.txt', 'r') as f:
properties_iv_dict = ast.literal_eval(f.read())
properties_with_none_value = [key for key, val in properties_iv_dict.items()
if val is not None and '<code>none</code>' in val]
print('\n'.join(properties_with_none_value))
if __name__ == '__main__':
main()
|
jD91mZM2/MCWASM
|
src/instructions.py
|
<reponame>jD91mZM2/MCWASM
from cmds import CmdGenerator
from value_types import Type, Types, Value
import wasm
CONDITION_COUNTER = 0
def InstructionHandler(func):
def first_invocation(*args):
def second_invocation(cmd, ins):
return func(*args, cmd, ins)
return second_invocation
return first_invocation
class InstructionTable:
def __init__(self, ctx, wasm_function, namespace):
self.wasm_function = wasm_function
self.conditions = ["if score returned wasm = zero wasm"]
self.ctx = ctx
self.namespace = namespace
self.output = [wasm_function.name]
self.snippets = 0
self.types = Types()
# Set up the initial types. `local_frame_push` and the other frame helpers
# won't touch the type list; rather, they rely on the called function
# setting up the same type list from the WASM metadata.
self.types.locals = Type.from_wasm(wasm_function.type.param_types)
self.types.locals += Type.from_wasm_locals(wasm_function.body.locals)
# See the spec:
# https://webassembly.github.io/spec/core/binary/instructions.html
self.handlers = {
wasm.OP_GET_LOCAL: self.local_get(),
wasm.OP_SET_LOCAL: self.local_set(),
wasm.OP_TEE_LOCAL: self.local_tee(),
wasm.OP_CALL: self.call(),
# Conditions & Loops
wasm.OP_IF: self.if_(),
wasm.OP_ELSE: self.else_(),
wasm.OP_RETURN: self.return_(),
wasm.OP_END: self.end(),
# Consts
wasm.OP_I32_CONST: self.const(Type.I32),
wasm.OP_I64_CONST: self.const(Type.I64),
wasm.OP_F32_CONST: self.const(Type.F32),
wasm.OP_F64_CONST: self.const(Type.F64),
# i32 data types
wasm.OP_I32_EQZ: self.eqz(),
wasm.OP_I32_EQ: self.cmp("="),
wasm.OP_I32_LT_S: self.cmp("<"),
wasm.OP_I32_GT_S: self.cmp(">"),
wasm.OP_I32_ADD: self.operation("+="),
wasm.OP_I32_SUB: self.operation("-="),
wasm.OP_I32_MUL: self.operation("*="),
wasm.OP_I32_DIV_U: self.operation("/="),
wasm.OP_I32_REM_U: self.operation("%="),
# i64 data types
wasm.OP_I64_EQZ: self.eqz(),
wasm.OP_I64_EQ: self.cmp("="),
wasm.OP_I64_LT_S: self.cmp("<"),
wasm.OP_I64_GT_S: self.cmp(">"),
wasm.OP_I64_ADD: self.operation("+="),
wasm.OP_I64_SUB: self.operation("-="),
wasm.OP_I64_MUL: self.operation("*="),
wasm.OP_I64_DIV_U: self.operation("/="),
wasm.OP_I64_REM_U: self.operation("%="),
}
def prologue(self):
return None
def epilogue(self):
cmd = CmdGenerator([])
cmd.set_scoreboard(f"returned", 0)
return cmd.output
def handle(self, instruction):
default_out = self.output[-1]
cmd = CmdGenerator(self.conditions[:], types=self.types)
if instruction.op.id in self.handlers:
print(instruction.op.mnemonic, self.types.stack)
cmd.execute(
'tellraw @a "[WASM] Executing: '
+ wasm.format_instruction(instruction)
+ '"'
)
specified_out = self.handlers[instruction.op.id](cmd, instruction)
if specified_out is not None:
return cmd.output, specified_out
else:
cmd.execute(
'tellraw @a {"text":"[WASM] TODO: '
+ wasm.format_instruction(instruction)
+ '","color":"red"}'
)
return cmd.output, default_out
@InstructionHandler
def const(self, type, cmd, ins):
cmd.stack().push(Value(type, ins.imm.value))
@InstructionHandler
def operation(self, op, cmd, _ins):
cmd.stack().operation(op)
@InstructionHandler
def eqz(self, cmd, _ins):
cmd.stack().load_to_scoreboard("lhs")
cmd.stack().set(Value.i32(0))
with cmd.execute_param("if score lhs wasm = zero wasm"):
cmd.stack().set(Value.i32(1))
@InstructionHandler
def cmp(self, op, cmd, _ins):
cmd.stack().load_to_scoreboard("rhs")
cmd.stack().drop()
cmd.stack().load_to_scoreboard("lhs")
cmd.stack().set(Value.i32(0))
with cmd.execute_param(f"if score lhs wasm {op} rhs wasm"):
cmd.stack().set(Value.i32(1))
@InstructionHandler
def if_(self, cmd, _ins):
with cmd.no_execute_params():
cmd.conditions().push(Value.i32(0))
cmd.conditions().set_from(cmd.stack())
cmd.conditions().load_to_scoreboard("condition")
cmd.stack().drop()
snippet = f"{self.wasm_function.name}_snippet_{self.snippets}"
self.snippets += 1
cmd.comment("Conditional is split into separate function")
with cmd.execute_param(
f"unless score condition wasm = zero wasm"
):
cmd.function(self.namespace, snippet)
self.output.append(snippet)
@InstructionHandler
def else_(self, cmd, _ins):
cmd.conditions().load_to_scoreboard("condition")
snippet = f"{self.wasm_function.name}_snippet_{self.snippets}"
self.snippets += 1
cmd.comment("Else branch is also split into separate function")
with cmd.execute_param(
f"if score condition wasm = zero wasm"
):
cmd.function(self.namespace, snippet)
self.output[-1] = snippet
return self.output[-2]
@InstructionHandler
def end(self, cmd, _ins):
# Beware: This may be the whole function's end
if len(self.output) > 1:
self.output.pop()
with cmd.no_execute_params():
cmd.conditions().drop()
return self.output[-1]
@InstructionHandler
def return_(self, cmd, _ins):
self.types.stack.pop()
cmd.set_scoreboard(f"returned", 1)
@InstructionHandler
def call(self, cmd, ins):
func = self.ctx.function(ins.imm.function_index)
# Map stack to local variables
cmd.local_frame_push(Type.from_wasm(func.type.param_types))
for i in range(func.type.param_count):
cmd.local_set(i)
cmd.stack().drop()
# Actually execute function
cmd.function(self.namespace, func.name)
# Drop the stack frame
cmd.local_frame_drop()
# Register return value type
self.types.stack.append(Type.from_wasm(func.type.return_type))
@InstructionHandler
def local_set(self, cmd, ins):
cmd.local_set(ins.imm.local_index)
cmd.stack().drop()
@InstructionHandler
def local_get(self, cmd, ins):
cmd.local_get(ins.imm.local_index)
@InstructionHandler
def local_tee(self, cmd, ins):
cmd.local_set(ins.imm.local_index)
|
jD91mZM2/MCWASM
|
src/transpiler.py
|
from collections import defaultdict, namedtuple
from enum import Enum
import wasm
from cmds import CmdGenerator
from instructions import InstructionTable
from value_types import Type
class ExportKind(Enum):
FUNCTION = 0x0
TABLE = 0x1
MEMORY = 0x2
GLOBAL = 0x3
Function = namedtuple("Function", ["name", "type", "body"])
Export = namedtuple("Export", ["name", "kind", "value"])
condition_counter = 0
class Context:
def __init__(self, bytecode):
sections = iter(wasm.decode_module(bytecode))
# First section is the header
header, header_data = next(sections)
print(header.to_string(header_data)) # TODO remove
self.header = header
self.header_data = header_data
# Following sections are specified at
# https://webassembly.github.io/spec/core/binary/modules.html#sections
self.types = []
self.func_types = []
self.functions = []
self.exports = []
for section, section_data in sections:
# Debug print. TODO remove or use proper logging interface
print(section.to_string(section_data))
if type(section) == wasm.Section:
if section_data.id == wasm.SEC_CODE:
self.functions += section_data.payload.bodies
elif section_data.id == wasm.SEC_TYPE:
self.types += section_data.payload.entries
elif section_data.id == wasm.SEC_FUNCTION:
self.func_types += section_data.payload.types
elif section_data.id == wasm.SEC_EXPORT:
self.exports += section_data.payload.entries
def export(self, i):
export = self.exports[i]
# TODO support more types?
if export.kind == ExportKind.FUNCTION.value:
return Export(
name=bytearray(export.field_str).decode("UTF-8"),
kind=ExportKind.FUNCTION,
value=self.function(export.index),
)
def iter_exports(self):
return map(self.export, range(len(self.exports)))
def function(self, i):
return Function(
name="func_" + str(i),
type=self.types[self.func_types[i]],
body=self.functions[i],
)
def iter_functions(self):
return map(self.function, range(len(self.functions)))
def transpile(self, func, namespace):
outputs = defaultdict(lambda: [])
instruction_table = InstructionTable(self, func, namespace)
if func.body.local_count > 0:
cmd = CmdGenerator([])
cmd.comment("Reserve space for local variables (other than args)")
cmd.local_frame_reserve(Type.from_wasm_locals(func.body.locals))
outputs[func.name].append(cmd.output)
prologue = instruction_table.prologue()
if prologue is not None:
outputs[func.name].append(prologue)
for instruction in wasm.decode_bytecode(func.body.code):
commands, out = instruction_table.handle(instruction)
outputs[out].append(commands)
epilogue = instruction_table.epilogue()
if epilogue is not None:
outputs[func.name].append(epilogue)
for out in outputs:
outputs[out] = "\n".join(outputs[out])
return outputs
|
jD91mZM2/MCWASM
|
src/cmds.py
|
from value_types import Type, Value
class WithGuard:
def __init__(self, restore):
self.restore = restore
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
self.restore()
class CmdGenerator:
def __init__(self, execute_params, types=None):
self.output = ""
self.types = types
self.execute_params = execute_params
# Add a comment to the output, just to keep the output somewhat navigable
def comment(self, text):
self.output += "# " + text + "\n"
# Execute a raw Minecraft command. For portability reasons with upcoming
# releases, this should be executed from as few places as possible.
def execute(self, line):
params = list(filter(lambda x: x is not None, self.execute_params))
if params:
self.output += (
"execute " + " ".join(params) + " run "
+ line + "\n"
)
else:
self.output += line + "\n"
# Adds a parameter to the `execute` command that may surround the
# function. Supposed to be used with a `with` statement which drops the
# parameter when the with construct is exited.
def execute_param(self, param):
self.execute_params.append(param)
return WithGuard(lambda: self.execute_params.pop())
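    # Illustrative usage (a sketch, not called in this module):
    #   with cmd.execute_param("if score lhs wasm = zero wasm"):
    #       cmd.set_scoreboard("flag", 1)   # prefixed with `execute if ... run`
    #   cmd.set_scoreboard("flag", 0)       # emitted without the condition again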
# Removes all `execute` parameters temporarily, so this will run regardless
# of whether the function has already returned or anything else!
def no_execute_params(self):
old = self.execute_params
self.execute_params = []
def restore():
self.execute_params = old
return WithGuard(restore)
# Set a static value to the scoreboard
def set_scoreboard(self, score, value):
self.execute(f"scoreboard players set {score} wasm {value}")
# Return the main stack
def stack(self):
return Stack(self, "Stack", self.types.stack)
# Return the stack of conditionals
def conditions(self):
return Stack(self, "Conditions", self.types.conditions)
# Add a new local variable list, setting each value to zero initially
#
# NOTE: Unlike the stack, this does not update the type list. So make sure
# you don't rely on frames for much more than calling a function.
def local_frame_push(self, types):
nbt = map(
lambda t: (
f"{t[0].name}: [{', '.join([str(Value(t[0], 0))] * t[1])}]"
),
Type.count(types).items(),
)
self.execute(
f"data modify storage wasm Locals append value "
f"{{{', '.join(nbt)}}}"
)
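    # Illustrative example (assuming the Type/Value helpers in value_types.py):
    #   local_frame_push([Type.I32, Type.I32, Type.I64]) emits
    #   `data modify storage wasm Locals append value {i32: [0, 0], i64: [0l]}`.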
# Drop the top local variable list.
#
# NOTE: Unlike the stack, this does not update the type list. So make sure
# you don't rely on frames for much more than calling a function.
def local_frame_drop(self):
self.execute("data remove storage wasm Locals[-1]")
# Reserve extra local variable space
#
# NOTE: Unlike the stack, this does not update the type list. So make sure
# you don't rely on frames for much more than calling a function.
def local_frame_reserve(self, types):
for ty in types:
self.execute(
f"data modify storage wasm Locals[-1].{ty.name} append "
f"value {Value(ty, 0)}"
)
# Get local
def local_get(self, local_index):
ty = self.types.locals[local_index]
self.types.stack.append(ty)
self.execute(
f"data modify storage wasm Stack.{ty.name} append "
f"from storage wasm Locals[-1].{ty.name}[{local_index}]"
)
# Set local
def local_set(self, local_index):
ty = self.types.stack[-1]
assert ty == self.types.locals[local_index]
self.execute(
f"data modify storage wasm Locals[-1].{ty.name}[{local_index}] "
f"set from storage wasm Stack.{ty.name}[-1]"
)
# Run a function
def function(self, namespace, func):
self.execute(f"function {namespace}:{func}")
class Stack:
def __init__(self, cmd, name, types):
self.cmd = cmd
self.name = name
self.types = types
# Push a static value to the stack
def push(self, value):
self.types.append(value.type)
self.cmd.execute(
f"data modify storage wasm {self.name}.{value.tyname} "
f"append value {value}"
)
# Discard the top of the stack
def drop(self):
ty = self.types.pop()
self.cmd.execute(f"data remove storage wasm {self.name}.{ty.name}[-1]")
# Copy the top level value from another stack
def push_from(self, other):
ty = other.types[-1]
self.types.append(ty)
self.cmd.execute(
f"data modify storage wasm {self.name}.{ty.name} append from "
f"storage wasm {other.name}.{ty.name}[-1]"
)
# Set the top value of the stack
def set(self, value):
value = value.cast(self.types[-1])
self.cmd.execute(
f"data modify storage wasm {self.name}.{value.tyname}[-1] "
f"set value {value}"
)
# Set the top value of this stack from the top value of another stack, in place
def set_from(self, other):
if self.types[-1] == other.types[-1]:
self.drop()
self.push_from(other)
else:
ty = self.types[-1]
self.cmd.execute(
f"data modify storage wasm {self.name}.{ty.name}[-1] set from "
f"storage wasm {other.name}.{ty.name}[-1]"
)
# Load the value at the top of the stack into the given scoreboard score.
def load_to_scoreboard(self, score):
ty = self.types[-1]
with self.cmd.execute_param(f"store result score {score} wasm"):
self.cmd.execute(
f"data get storage wasm {self.name}.{ty.name}[-1]"
)
# Store the given scoreboard score into the value at the top of the stack.
def load_from_scoreboard(self, score):
ty = self.types[-1]
with self.cmd.execute_param(
f"store result storage wasm {self.name}.{ty.name}[-1] "
f"{ty.mc_name} 1"
):
self.cmd.execute(f"scoreboard players get {score} wasm")
# Run a mathematical operation on the top two values on the stack
def operation(self, op):
# Note: Since it's a stack we want to grab the second operand first!
self.load_to_scoreboard("rhs")
self.drop()
self.load_to_scoreboard("lhs")
ty = self.types[-1]
with self.cmd.execute_param(
f"store result storage wasm {self.name}.{ty.name}[-1] "
f"{ty.mc_name} 1"
):
self.cmd.execute(
f"scoreboard players operation lhs wasm {op} rhs wasm"
)
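    # Illustrative example (a sketch of the generated behaviour): with the i32
    # stack holding [2, 3], operation("+=") loads 3 into "rhs", drops it, loads
    # 2 into "lhs", and stores the result of
    # `scoreboard players operation lhs wasm += rhs wasm` back into the new
    # stack top, leaving [5].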
|
jD91mZM2/MCWASM
|
src/value_types.py
|
<reponame>jD91mZM2/MCWASM<gh_stars>1-10
from collections import defaultdict, namedtuple
import wasm
class Type(namedtuple(
"Type",
["name", "python_type", "format_spec", "mc_name"]
)):
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
def from_wasm(type_ids):
if not isinstance(type_ids, list):
return {
wasm.LANG_TYPE_I32: Type.I32,
wasm.LANG_TYPE_I64: Type.I64,
wasm.LANG_TYPE_F32: Type.F32,
wasm.LANG_TYPE_F64: Type.F64,
}[type_ids]
else:
return list(map(Type.from_wasm, type_ids))
def from_wasm_locals(local_entries):
types = []
for entry in local_entries:
types += [Type.from_wasm(entry.type)] * entry.count
return types
def count(types):
counted = defaultdict(lambda: 0)
for ty in types:
counted[ty] += 1
return counted
Type.I32 = Type("i32", int, "{}", "int")
Type.I64 = Type("i64", int, "{}l", "long")
Type.F32 = Type("f32", float, "{}f", "float")
Type.F64 = Type("f64", float, "{}d", "double")
class Types:
def __init__(self):
self.stack = []
self.conditions = []
self.locals = []
class Value:
def __init__(self, type, value):
self.type = type
self.value = value
def i32(value):
return Value(Type.I32, value)
def i64(value):
return Value(Type.I64, value)
def f32(value):
return Value(Type.F32, value)
def f64(value):
return Value(Type.F64, value)
@property
def tyname(self):
return self.type.name
def cast(self, type):
return Value(type, type.python_type(self.value))
def __str__(self):
return self.type.format_spec.format(self.value)
def __repr__(self):
return self.__str__()
|
jD91mZM2/MCWASM
|
src/main.py
|
<reponame>jD91mZM2/MCWASM<gh_stars>1-10
from argparse import ArgumentParser
from pathlib import Path
from string import Template
import json
import shutil
import sys
import transpiler
parser = ArgumentParser(description="Convert WASM to .mcfunction")
parser.add_argument("input", help="The .wasm file to input")
parser.add_argument(
"out_dir",
type=Path,
help="The functions directory to pop all functions",
)
parser.add_argument(
"--namespace",
help="The datapack's namespace",
)
parser.add_argument(
"--force",
action="store_true",
help="Don't prompt before deleting any previous directory",
)
args = parser.parse_args()
with open(args.input, "rb") as f:
bytecode = f.read()
ctx = transpiler.Context(bytecode)
# Start generation: Copy over template
if args.out_dir.exists():
if not args.force:
print(
"Directory already exists! Are you sure you wish "
"to delete its contents?"
)
print("Pass --force to be noninteractive")
verification = input("Enter 'yes': ")
if verification != "yes":
print("Did not verify")
sys.exit(1)
shutil.rmtree(args.out_dir)
shutil.copytree(
Path(__file__).parent.with_name("template"),
args.out_dir,
)
if args.namespace is None:
args.namespace = args.out_dir.name
data_dir = args.out_dir.joinpath("data")
namespace_dir = data_dir.joinpath(args.namespace)
shutil.move(data_dir.joinpath("$namespace"), namespace_dir)
# Substitute tag
load_file = data_dir.joinpath("minecraft", "tags", "functions", "load.json")
with load_file.open("r+") as f:
original = f.read()
processed = Template(original).substitute(namespace=args.namespace)
f.seek(0)
f.truncate(0)
f.write(processed)
# Generate functions
functions_dir = namespace_dir.joinpath("functions")
for func in ctx.iter_functions():
outputs = ctx.transpile(func, args.namespace)
for out, text in outputs.items():
path = functions_dir.joinpath(out + ".mcfunction")
with path.open("w") as f:
f.write(text)
# Create tags for all exports
tag_dir = namespace_dir.joinpath("tags", "functions")
tag_dir.mkdir(parents=True)
for export in filter(lambda e: e is not None, ctx.iter_exports()):
path = tag_dir.joinpath(export.name + ".json")
with path.open("w") as f:
f.write(json.dumps({
"values": [
f"{args.namespace}:{export.value.name}",
f"{args.namespace}:post_function"
]
}))
|
whtsky/font-rename
|
font_rename.py
|
<reponame>whtsky/font-rename<filename>font_rename.py
#!/usr/bin/env python3
import argparse
from pathlib import Path
from typing import List
import cchardet as chardet
from fontTools.ttLib import TTCollection
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._n_a_m_e import NameRecord
from fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e as NameTable
PREFERRED_IDS = (
(3, 1, 0x0C04),
(3, 1, 0x0804),
(3, 1, 0x0404),
(3, 1, 0x0411),
(1, 0, 0),
)
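# Each PREFERRED_IDS entry is a (platformID, platEncID, langID) triple passed to
# NameTable.getName below; the Windows/Unicode CJK records (zh-HK, zh-CN, zh-TW,
# ja-JP) are tried before the Macintosh Roman English fallback.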
FAMILY_RELATED_IDS = dict(
LEGACY_FAMILY=1,
TRUETYPE_UNIQUE_ID=3,
FULL_NAME=4,
POSTSCRIPT_NAME=6,
PREFERRED_FAMILY=16,
WWS_FAMILY=21,
)
PREFERRED_NAME_IDS = (
FAMILY_RELATED_IDS["FULL_NAME"],
FAMILY_RELATED_IDS["POSTSCRIPT_NAME"],
FAMILY_RELATED_IDS["PREFERRED_FAMILY"],
FAMILY_RELATED_IDS["LEGACY_FAMILY"],
)
def decode_name(name: NameRecord) -> str:
try:
return name.toUnicode().strip()
except:
raw = name.toBytes()
guess = chardet.detect(raw)
return raw.decode(guess["encoding"]).strip()
def get_current_family_name(table: NameTable) -> str:
for plat_id, enc_id, lang_id in PREFERRED_IDS:
for name_id in PREFERRED_NAME_IDS:
family_name_rec = table.getName(
nameID=name_id, platformID=plat_id, platEncID=enc_id, langID=lang_id
)
if family_name_rec:
return decode_name(family_name_rec)
for name_id in PREFERRED_NAME_IDS:
results: List[str] = []
for name_record in table.names:
if name_record.nameID == name_id:
results.append(decode_name(name_record))
if results:
return sorted(results, key=len)[-1]
raise ValueError("family name not found; can't add suffix")
def get_font_name(font: TTFont):
return get_current_family_name(font["name"])
def rename_font(filepath: Path, remove_unparsable: bool) -> None:
try:
font = TTFont(str(filepath.resolve()))
except:
if remove_unparsable:
print(f"Failed to parse {filepath}, removing")
filepath.unlink()
return
else:
print(f"Failed to parse {filepath}, ignore")
return
new_path = filepath.parent / f"{get_font_name(font)}{filepath.suffix.lower()}"
if filepath != new_path:
if new_path.exists():
print(f"{new_path} exist, remove: {filepath}")
filepath.unlink()
else:
print(f"{filepath} -> {new_path}")
filepath.rename(new_path)
def unpack_ttc(filepath: Path) -> None:
try:
collection = TTCollection(str(filepath.resolve()))
except:
print(f"Failed to parse {filepath}, ignore")
return
for font in collection.fonts:
ttf_path = filepath.parent / f"{get_font_name(font)}.ttf"
font.save(ttf_path)
print(f"{filepath} -> {ttf_path}")
filepath.unlink()
def unpack_otc(filepath: Path) -> None:
try:
collection = TTCollection(str(filepath.resolve()))
except:
print(f"Failed to parse {filepath}, ignore")
return
for font in collection.fonts:
ttf_path = filepath.parent / f"{get_font_name(font)}.otf"
font.save(ttf_path)
print(f"{filepath} -> {ttf_path}")
filepath.unlink()
def handle_file(filepath: Path, remove_unparsable: bool) -> None:
suffix = filepath.suffix.lower()
if suffix == ".ttc":
unpack_ttc(filepath)
elif suffix == ".otc":
unpack_otc(filepath)
else:
rename_font(filepath, remove_unparsable=remove_unparsable)
def handle_path(path: Path, remove_unparsable: bool) -> None:
if path.stem.startswith("."):
return
if path.is_dir():
for f in path.iterdir():
handle_path(f, remove_unparsable=remove_unparsable)
else:
handle_file(path, remove_unparsable=remove_unparsable)
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"-ru",
"--remove-unparsable",
dest="remove_unparsable",
action="store_true",
help="Remove unparsable fonts instead of ignore",
)
parser.add_argument("files", nargs="+")
args = parser.parse_args()
for path in args.files:
handle_path(Path(path), remove_unparsable=args.remove_unparsable)
if __name__ == "__main__":
main()
|
Marcelo-Augusto-Silva/Programa-que-encontrar-emails-e-telefones-em-textos
|
principal.py
|
import re
def email(txt):
    email = re.compile(r'([a-zA-Z0-9._%+-]+)'  # user name
                       r'(@[a-zA-Z0-9]+)'      # domain
                       r'(\.[a-zA-Z]{2,4})')   # top-level domain after the dot
    procurar = email.findall(txt)
    return procurar
def numero(num):
    numero_celular = re.compile(r'(\(\d{2}\)|\d{2})?'  # optional area code (DDD)
                                r'( ?\d{5})'           # first five digits
                                r'(\s|-|\.)?'          # optional separator: space, dash or dot
                                r'(\d{4})')            # last four digits
    procurar = numero_celular.findall(num)
    return procurar
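# Illustrative example (a sketch, given the regexes above):
#   email('contact me at user.name@example.com')
#   -> [('user.name', '@example', '.com')]
#   numero('call (11) 91234-5678')
#   -> [('(11)', ' 91234', '-', '5678')]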
texto = str(input('Paste the text to find emails and phone numbers: '))
numeros = numero(texto)
if numeros:
    print('Found the following phone numbers:')
    for n in numeros:
        print(''.join(n))
else:
    print('No phone numbers found')
emails = email(texto)
if emails:
    print('Found the following emails:')
    for e in emails:
        print(''.join(e))
else:
    print('No emails found')
|
rberrelleza/cuelake
|
api/workflows/serializers.py
|
import json
from rest_framework import serializers
# from django_celery_beat.models import CrontabSchedule
from workflows.models import Workflow, WorkflowRun, NotebookJob
class WorkflowSerializer(serializers.ModelSerializer):
"""
Serializer for the model Workflow
"""
triggerWorkflow = serializers.SerializerMethodField()
lastRun = serializers.SerializerMethodField()
schedule = serializers.SerializerMethodField()
notebooks = serializers.SerializerMethodField()
def get_triggerWorkflow(self, obj):
"""
Gets the id and name of the workflow this workflow depends on (triggerWorkflow)
"""
if not obj.triggerWorkflow:
return None
return {'id': obj.triggerWorkflow.id, 'name': obj.triggerWorkflow.name}
def get_lastRun(self, obj):
"""
Gets the status and start time of the workflow's most recent run
"""
workflowRuns = obj.workflowrun_set.order_by("-startTimestamp")
if workflowRuns.count():
return {"status": workflowRuns[0].status, "startTimestamp": workflowRuns[0].startTimestamp}
else:
return None
def get_schedule(self, obj):
""" Get schedule"""
if not obj.crontab:
return None
return {'id': obj.crontab.id, 'name': str(obj.crontab)}
def get_notebooks(self, obj):
"""Gets notebooks in workflow"""
return list(NotebookJob.objects.filter(workflow=obj).values_list("notebookId", flat=True))
class Meta:
model = Workflow
fields = ["id", "name", "triggerWorkflow", "triggerWorkflowStatus", "lastRun", "schedule", "notebooks"]
class WorkflowRunSerializer(serializers.ModelSerializer):
"""
Serializer for the model WorkflowRun
"""
class Meta:
model = WorkflowRun
fields = ["id", "status", "workflow", "startTimestamp", "endTimestamp"]
# class RunStatusSerializer(serializers.ModelSerializer):
# """
# Serializer for the model RunStatus
# """
# logsJSON = serializers.SerializerMethodField()
# def get_logsJSON(self, obj):
# """
# Gets logs in JSON form
# """
# return json.loads(obj.logs)
# class Meta:
# model = RunStatus
# fields = ["id", "notebookId", "startTimestamp", "status", "logsJSON", "runType"]
|
rberrelleza/cuelake
|
api/workflows/views.py
|
from django.http import HttpRequest
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view
from workflows.services import WorkflowServices, WorkflowActions
class Workflows(APIView):
"""
Class to get and post workflows
"""
def get(self, request, offset: int):
"""Gets all workflows"""
res = WorkflowServices.getWorkflows(offset)
return Response(res.json())
def post(self, request):
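        """Creates a new workflow, or updates an existing one when an id is present in the payload"""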
data = request.data
name = data.get("name", "")
scheduleId = data.get("scheduleId", "")
triggerWorkflowId = data.get("triggerWorkflowId", "")
triggerWorkflowStatus = data.get("triggerWorkflowStatus", "")
notebookIds = data.get("notebookIds", [])
if 'id' in data and data['id']:
res = WorkflowServices.updateWorkflow(data['id'], name, scheduleId, triggerWorkflowId, triggerWorkflowStatus, notebookIds)
else:
res = WorkflowServices.createWorkflow(name, scheduleId, triggerWorkflowId, triggerWorkflowStatus, notebookIds)
return Response(res.json())
class Workflow(APIView):
"""
    Class to delete a workflow
"""
def delete(self, request, workflowId: int):
res = WorkflowServices.deleteWorkflow(workflowId)
return Response(res.json())
class WorkflowRun(APIView):
"""
    Class to get WorkflowRuns of a workflow
"""
def get(self, request, workflowId: int, offset: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowServices.getWorkflowRuns(workflowId, offset)
return Response(res.json())
class RunWorkflow(APIView):
"""
Class to manually run workflows
"""
def get(self, request, workflowId: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowActions.runWorkflow(workflowId)
return Response(res.json())
class StopWorkflow(APIView):
"""
Class to manually stop workflows
"""
def get(self, request, workflowId: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowActions.stopWorkflow(workflowId)
return Response(res.json())
class WorkflowRunLog(APIView):
"""
    Class to get logs for a WorkflowRun
"""
def get(self, request, workflowId: int):
"""Gets all workflows runs associated with given workflow
:param workflowId: id of Workflows.Workflow
"""
res = WorkflowServices.getWorkflowRunLogs(workflowId)
return Response(res.json())
class UpdateTriggerWorkflow(APIView):
"""
Class to update trigger workflow associated with workflow
"""
def post(self, request, workflowId: int):
"""
        Updates the trigger workflow of the given workflow
"""
data = request.data
triggerWorkflowId = data.get("triggerWorkflowId", "")
triggerWorkflowStatus = data.get("triggerWorkflowStatus", "")
res = WorkflowServices.updateTriggerWorkflow(workflowId, triggerWorkflowId, triggerWorkflowStatus)
return Response(res.json())
class UpdateSchedule(APIView):
"""
Class to update schedule associated with workflow
"""
def post(self, request, workflowId: int):
"""
        Updates the schedule of the given workflow
"""
data = request.data
scheduleId = data.get("scheduleId", "")
res = WorkflowServices.updateSchedule(workflowId, scheduleId)
return Response(res.json())
|
rberrelleza/cuelake
|
api/genie/admin.py
|
<reponame>rberrelleza/cuelake<filename>api/genie/admin.py
from django.contrib import admin
from genie.models import NotebookJob, RunStatus, Connection, ConnectionType, ConnectionParam, ConnectionParamValue, NotebookTemplate
class RunStatusAdmin(admin.ModelAdmin):
    # Adding startTimestamp in a separate ModelAdmin class as it's a read-only field,
    # so that it stays visible in the admin panel
readonly_fields = ('startTimestamp',)
admin.site.register(NotebookJob)
admin.site.register(RunStatus, RunStatusAdmin)
admin.site.register(Connection)
admin.site.register(ConnectionType)
admin.site.register(ConnectionParam)
admin.site.register(ConnectionParamValue)
admin.site.register(NotebookTemplate)
|
rberrelleza/cuelake
|
api/genie/migrations/0005_auto_20210414_1316.py
|
<gh_stars>0
# Generated by Django 3.1.7 on 2021-04-14 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genie', '0004_auto_20210414_1012'),
]
operations = [
migrations.AlterField(
model_name='connectionparam',
name='properties',
field=models.JSONField(blank=True, null=True),
),
]
|
rberrelleza/cuelake
|
api/workflows/migrations/0003_auto_20210426_1919.py
|
<gh_stars>0
# Generated by Django 3.1.8 on 2021-04-26 19:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('workflows', '0002_auto_20210421_0926'),
]
operations = [
migrations.RenameField(
model_name='workflow',
old_name='dependsOnWorkflow',
new_name='triggerWorkflow',
),
migrations.RenameField(
model_name='workflow',
old_name='dependsOnWorkflowStatus',
new_name='triggerWorkflowStatus',
),
]
|
rberrelleza/cuelake
|
api/system/migrations/0003_auto_20210418_0738.py
|
# Generated by Django 3.1.7 on 2021-04-18 07:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('system', '0002_auto_20210418_0735'),
]
operations = [
migrations.AlterField(
model_name='accountsetting',
name='value',
field=models.TextField(blank=True),
),
]
|
rberrelleza/cuelake
|
api/genie/migrations/0002_auto_20210318_1404.py
|
<reponame>rberrelleza/cuelake<filename>api/genie/migrations/0002_auto_20210318_1404.py
# Generated by Django 3.1.7 on 2021-03-18 14:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('genie', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='notebookjob',
name='notebookId',
field=models.CharField(db_index=True, max_length=50, unique=True),
),
migrations.CreateModel(
name='RunStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('logs', models.TextField(default='{}')),
('status', models.CharField(max_length=7)),
('notebookJob', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='genie.notebookjob')),
],
),
]
|
rberrelleza/cuelake
|
api/genie/migrations/0009_merge_0008_runstatus_worflowrun_0008_schedule.py
|
<reponame>rberrelleza/cuelake
# Generated by Django 3.2 on 2021-05-04 02:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('genie', '0008_runstatus_worflowrun'),
('genie', '0008_schedule'),
]
operations = [
]
|
rberrelleza/cuelake
|
api/genie/tasks.py
|
<gh_stars>0
import json
import datetime as dt
import dateutil.parser as dp
import requests
import polling
from celery import shared_task
from django.conf import settings
from genie.models import NotebookJob, RunStatus, NOTEBOOK_STATUS_SUCCESS, NOTEBOOK_STATUS_ERROR, NOTEBOOK_STATUS_RUNNING, NOTEBOOK_STATUS_FINISHED, NOTEBOOK_STATUS_ABORT
from system.services import NotificationServices
from utils.zeppelinAPI import Zeppelin
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
@shared_task
def runNotebookJob(notebookId: str, runStatusId: int = None, runType: str = "Scheduled"):
"""
Celery task to run a zeppelin notebook
:param notebookId: ID of the zeppelin notebook which to run
:param runStatusId: ID of genie.runStatus model
"""
if not runStatusId:
runStatus = RunStatus.objects.create(notebookId=notebookId, status=NOTEBOOK_STATUS_RUNNING, runType=runType)
else:
runStatus = RunStatus.objects.get(id=runStatusId)
runStatus.startTimestamp = dt.datetime.now()
runStatus.save()
    notebookName = notebookId  # fallback so the failure notification below still works if the name lookup itself fails
    try:
# Check if notebook is already running
isRunning, notebookName = checkIfNotebookRunning(notebookId)
if(isRunning):
runStatus.status=NOTEBOOK_STATUS_ERROR
runStatus.message="Notebook already running"
runStatus.save()
else:
# Clear notebook results
Zeppelin.clearNotebookResults(notebookId)
response = Zeppelin.runNotebookJob(notebookId)
if response:
try:
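                    # Poll the notebook every 3 seconds (up to 1 hour), saving its logs on each check, until it stops running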
polling.poll(
lambda: checkIfNotebookRunningAndStoreLogs(notebookId, runStatus) != True, step=3, timeout=3600
)
except Exception as ex:
runStatus.status = NOTEBOOK_STATUS_ERROR
runStatus.message = str(ex)
runStatus.save()
NotificationServices.notify(notebookName=notebookName, isSuccess=False, message=str(ex))
else:
runStatus.status=NOTEBOOK_STATUS_ERROR
runStatus.message = "Failed running notebook"
runStatus.save()
except Exception as ex:
runStatus.status=NOTEBOOK_STATUS_ERROR
runStatus.message = str(ex)
runStatus.save()
NotificationServices.notify(notebookName=notebookName, isSuccess=False, message=str(ex))
def checkIfNotebookRunning(notebookId: str):
response = Zeppelin.getNotebookDetails(notebookId)
isNotebookRunning = response.get("info", {}).get("isRunning", False)
notebookName = response.get("name", "Undefined")
return isNotebookRunning, notebookName
def checkIfNotebookRunningAndStoreLogs(notebookId, runStatus):
response = Zeppelin.getNotebookDetails(notebookId)
runStatus.logs = json.dumps(response)
runStatus.save()
isNotebookRunning = response.get("info", {}).get("isRunning", False)
if not isNotebookRunning:
setNotebookStatus(response, runStatus)
return isNotebookRunning
def setNotebookStatus(response: dict, runStatus: RunStatus):
paragraphs = response.get("paragraphs", [])
notebookName = response.get("name", "Undefined")
for paragraph in paragraphs:
if paragraph.get("status") != "FINISHED":
runStatus.status=NOTEBOOK_STATUS_ERROR
runStatus.save()
NotificationServices.notify(notebookName=notebookName, isSuccess=False, message=paragraph.get("title") + " " + paragraph.get("id") + " failed")
return
runStatus.status=NOTEBOOK_STATUS_SUCCESS
runStatus.save()
NotificationServices.notify(notebookName=notebookName, isSuccess=True, message="Run successful")
|
rberrelleza/cuelake
|
api/genie/serializers.py
|
<reponame>rberrelleza/cuelake
import json
from rest_framework import serializers
from django_celery_beat.models import CrontabSchedule
from genie.models import NotebookJob, RunStatus, Connection, ConnectionType, NotebookTemplate, Schedule
class NotebookJobSerializer(serializers.ModelSerializer):
"""
Serializer for the model NotebookJob
"""
class Meta:
model = NotebookJob
fields = ["id", "notebookId"]
class RunStatusSerializer(serializers.ModelSerializer):
"""
Serializer for the model RunStatus
"""
logsJSON = serializers.SerializerMethodField()
def get_logsJSON(self, obj):
"""
Gets logs in JSON form
"""
return json.loads(obj.logs)
class Meta:
model = RunStatus
fields = ["id", "notebookId", "startTimestamp", "status", "logsJSON", "runType"]
class ScheduleSerializer(serializers.ModelSerializer):
"""
Serializer for the model CrontabSchedule
"""
schedule = serializers.SerializerMethodField()
crontab = serializers.SerializerMethodField()
timezone = serializers.SerializerMethodField()
notebookCount = serializers.SerializerMethodField()
workflowCount = serializers.SerializerMethodField()
def get_schedule(self, obj):
"""
Gets string form of the crontab
"""
return str(obj)
def get_timezone(self, obj):
""" Gets schedule timezone"""
return str(obj.timezone)
def get_crontab(self, obj):
"""Gets schedule crontab """
return str(obj.crontab)
def count(self, obj):
"""Count number of workflow and notebook assinged with schedule """
workflow= 0
notebook = 0
schedule = Schedule.objects.get(id= obj.id)
scheduleJob = list(schedule.periodictask_set.values())
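        # A periodic task with a non-empty celery task is counted as a notebook job, one with an empty task as a workflow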
for listItem in scheduleJob:
if "task" in listItem and listItem["task"]:
notebook+=1
if "task" in listItem and not listItem["task"]:
workflow +=1
return [notebook,workflow]
def get_notebookCount(self,obj):
"""Gets assigned notebook count """
scheduleCount= self.count(obj)
return scheduleCount[0]
def get_workflowCount(self, obj):
"""Gets assigned workflow count """
scheduleCount= self.count(obj)
return scheduleCount[1]
class Meta:
model = Schedule
fields = ["id", "schedule","name","timezone","crontab","notebookCount","workflowCount"]
# Connection Serializers
class ConnectionSerializer(serializers.ModelSerializer):
connectionTypeId = serializers.SerializerMethodField()
connectionType = serializers.SerializerMethodField()
def get_connectionTypeId(self, obj):
return obj.connectionType.id
def get_connectionType(self, obj):
return obj.connectionType.name
class Meta:
model = Connection
fields = [
"id",
"name",
"description",
"connectionTypeId",
"connectionType",
]
class ConnectionDetailSerializer(serializers.ModelSerializer):
params = serializers.SerializerMethodField()
connectionTypeId = serializers.SerializerMethodField()
connectionType = serializers.SerializerMethodField()
def get_params(self, obj):
params = {}
for val in obj.cpvc.all():
params[val.connectionParam.name] = val.value if not val.connectionParam.isEncrypted else "**********"
return params
def get_connectionTypeId(self, obj):
return obj.connectionType.id
def get_connectionType(self, obj):
return obj.connectionType.name
class Meta:
model = Connection
fields = [
"id",
"name",
"description",
"params",
"connectionTypeId",
"connectionType",
]
class ConnectionTypeSerializer(serializers.ModelSerializer):
params = serializers.SerializerMethodField()
def get_params(self, obj):
paramList = []
for param in obj.connectionTypeParam.all():
params = {}
params["id"] = param.id
params["name"] = param.name
params["label"] = param.label
params["isEncrypted"] = param.isEncrypted
params["properties"] = param.properties
paramList.append(params)
return paramList
class Meta:
model = ConnectionType
fields = ["id", "name", "params"]
class NotebookTemplateSerializer(serializers.ModelSerializer):
"""
Serializer for the model NotebookJob
"""
class Meta:
model = NotebookTemplate
fields = ["id", "name", "formJson"]
|
rberrelleza/cuelake
|
api/workflows/services.py
|
<gh_stars>0
from typing import List
import asyncio
import json
import pytz
import time
from django.db import transaction
import polling
from workflows.models import (
Workflow,
WorkflowRun,
NotebookJob,
STATUS_SUCCESS,
STATUS_FAILURE,
STATUS_ALWAYS,
STATUS_RUNNING,
STATUS_RECEIVED,
STATUS_ABORTED
)
from workflows.serializers import WorkflowSerializer, WorkflowRunSerializer
from utils.apiResponse import ApiResponse
from utils.zeppelinAPI import Zeppelin
from genie.tasks import runNotebookJob as runNotebookJobTask
from genie.services import NotebookJobServices
from genie.models import RunStatus, NOTEBOOK_STATUS_RUNNING, NOTEBOOK_STATUS_SUCCESS
from django_celery_beat.models import CrontabSchedule
# Name of the celery task which calls the zeppelin api
CELERY_TASK_NAME = "genie.tasks.runNotebookJob"
class WorkflowServices:
"""
Class containing services related to NotebookJob model
"""
@staticmethod
def getWorkflows(offset: int = 0):
"""
Service to fetch and serialize Workflows
:param offset: Offset for fetching NotebookJob objects
"""
LIMIT = 10
res = ApiResponse(message="Error retrieving workflows")
workflows = Workflow.objects.filter(enabled=True).order_by("-id")
total = workflows.count()
        data = WorkflowSerializer(workflows[offset: offset + LIMIT], many=True).data
res.update(
True,
"Workflows retrieved successfully",
{"total": total, "workflows": data},
)
return res
@staticmethod
@transaction.atomic
def createWorkflow(
name: str,
scheduleId: int,
triggerWorkflowId: int,
triggerWorkflowStatus: str,
notebookIds: List[int],
):
"""
Creates workflow
:param name: name of new workflow
:param scheduleId: crontab id
:param triggerWorkflowId: id of workflow which triggers this workflow
:param triggerWorkflowStatus: ["success", "failure", "always"] required
status of triggerWorkflow to trigger this workflow
:param notebookIds: notebookIds for workflow
"""
res = ApiResponse(message="Error in creating workflow")
if not scheduleId:
# to avoid error: 'One of clocked, interval, crontab, or solar must be set.'
crontab = CrontabSchedule.objects.create()
workflow = Workflow.objects.create(
name=name,
crontab_id=scheduleId if scheduleId else crontab.id,
triggerWorkflow_id=triggerWorkflowId,
triggerWorkflowStatus=triggerWorkflowStatus,
)
if not scheduleId:
# removing fake crontab id & deleting it
workflow.crontab_id = scheduleId
workflow.save()
crontab.delete()
notebookJobs = [
NotebookJob(workflow=workflow, notebookId=notebookId)
for notebookId in notebookIds
]
NotebookJob.objects.bulk_create(notebookJobs)
res.update(True, "Workflow created successfully", workflow.id)
return res
@staticmethod
@transaction.atomic
def updateWorkflow(
id: int,
name: str,
scheduleId: int,
triggerWorkflowId: int,
triggerWorkflowStatus: str,
notebookIds: List[int],
):
"""
Updates workflow
:param name: name of new workflow
:param scheduleId: crontab id
:param triggerWorkflowId: id of workflow which triggers this workflow
:param triggerWorkflowStatus: ["success", "failure", "always"] required
status of triggerWorkflow to trigger this workflow
:param notebookIds: notebookIds for workflow
"""
res = ApiResponse(message="Error in updating workflow")
workflow = Workflow.objects.filter(id=id).update(
name=name,
crontab_id=scheduleId,
triggerWorkflow_id=triggerWorkflowId,
triggerWorkflowStatus=triggerWorkflowStatus,
)
NotebookJob.objects.filter(workflow_id=id).delete()
notebookJobs = [
NotebookJob(workflow_id=id, notebookId=notebookId)
for notebookId in notebookIds
]
NotebookJob.objects.bulk_create(notebookJobs)
try:
if workflow:
res.update(True, "Workflow updated successfully", workflow)
except:
res.update(False, "Error in updating workflow")
return res
@staticmethod
def deleteWorkflow(workflowId: int):
"""
Delete workflow
:param workflowId: id of Workflows.Workflow
"""
res = ApiResponse(message="Error in deleting workflow logs")
count = Workflow.objects.filter(id=workflowId).update(enabled=False)
res.update(True, "Workflow delete successfully")
return res
@staticmethod
def getWorkflowRuns(workflowId: int, offset: int):
"""
Service to fetch and serialize workflows runs
:param workflowId: id of Workflows.Workflow
"""
LIMIT = 10
res = ApiResponse(message="Error in retrieving workflow logs")
workflowRuns = WorkflowRun.objects.filter(enabled=True, workflow=workflowId).order_by("-id")
total = workflowRuns.count()
        data = WorkflowRunSerializer(workflowRuns[offset: offset + LIMIT], many=True).data
res.update(
True,
"WorkflowRuns retrieved successfully",
{"total": total, "workflowRuns": data},
)
return res
@staticmethod
def getWorkflowRunLogs(workflowRunId: int):
"""
Service to fetch logs related to given workflowRun
        :param workflowRunId: id of model workflows.WorkflowRun
"""
res = ApiResponse(message="Error in retrieving workflow logs")
workflowRun = WorkflowRun.objects.get(id=workflowRunId)
total = []
res.update(
True,
"WorkflowRuns retrieved successfully",
{"total": total, "workflowRunLogs": []},
)
return res
@staticmethod
    def updateTriggerWorkflow(workflowId: int, triggerWorkflowId: int, triggerWorkflowStatus: str):
"""Update given workflow's trigger workflow"""
res = ApiResponse(message="Error in updating trigger workflow")
updateStatus = Workflow.objects.filter(id=workflowId).update(triggerWorkflow_id=triggerWorkflowId, triggerWorkflowStatus=triggerWorkflowStatus)
res.update(True, "Trigger workflow updated successfully", updateStatus)
return res
@staticmethod
def updateSchedule(workflowId: int, scheduleId: int):
"""Update given workflow's schedule"""
res = ApiResponse(message="Error in updating workflow schedule")
updateStatus = Workflow.objects.filter(id=workflowId).update(crontab_id=scheduleId)
res.update(True, "Workflow schedule updated successfully", True)
return res
@staticmethod
def runWorkflow(workflowId: int, workflowRunId: int = None):
"""
Runs workflow
"""
        # TODO: skip if this workflow is already running
notebookIds = list(
NotebookJob.objects.filter(workflow_id=workflowId).values_list(
"notebookId", flat=True
)
)
if workflowRunId:
workflowRun = WorkflowRun.objects.get(id=workflowRunId)
workflowRun.status = STATUS_RUNNING
workflowRun.save()
else:
workflowRun = WorkflowRun.objects.create(
workflow_id=workflowId, status=STATUS_RUNNING
)
notebookRunStatusIds = []
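        # Launch each notebook in the workflow as its own celery task and remember the RunStatus ids so completion can be polled below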
for notebookId in notebookIds:
runStatus = RunStatus.objects.create(
notebookId=notebookId, status=NOTEBOOK_STATUS_RUNNING, runType="Scheduled"
)
runNotebookJobTask.delay(notebookId=notebookId, runStatusId=runStatus.id)
notebookRunStatusIds.append(runStatus.id)
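        # Block until every notebook run has left the RUNNING state (checked every 3 seconds, up to 1 hour)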
workflowStatus = polling.poll(
lambda: WorkflowServices.__checkGivenRunStatuses(notebookRunStatusIds)
!= "stillRunning",
step=3,
timeout=3600,
)
        if WorkflowRun.objects.get(id=workflowRun.id).status == STATUS_ABORTED:  # re-fetch to pick up an abort requested while polling
return []
if workflowStatus:
workflowRun.status = STATUS_SUCCESS
workflowRun.save()
else:
workflowRun.status = STATUS_FAILURE
workflowRun.save()
dependentWorkflowIds = list(
Workflow.objects.filter(
triggerWorkflow_id=workflowId,
triggerWorkflowStatus__in=[STATUS_ALWAYS, workflowRun.status],
).values_list("id", flat=True)
)
return dependentWorkflowIds
@staticmethod
def __checkGivenRunStatuses(notebookRunStatusIds: List[int]):
"""Check if given runStatuses are status is SUCCESS"""
if (
len(notebookRunStatusIds)
== RunStatus.objects.filter(id__in=notebookRunStatusIds)
.exclude(status=NOTEBOOK_STATUS_RUNNING)
.count()
):
return (
len(notebookRunStatusIds)
== RunStatus.objects.filter(
id__in=notebookRunStatusIds, status=NOTEBOOK_STATUS_SUCCESS
).count()
)
return "stillRunning"
class WorkflowActions:
@staticmethod
def runWorkflow(workflowId: int):
"""
Runs given workflow
"""
from workflows.tasks import runWorkflowJob
res = ApiResponse(message="Error in running workflow")
existingWorkflows = WorkflowRun.objects.filter(workflow_id=workflowId).order_by(
"-startTimestamp"
)
if existingWorkflows.count() and existingWorkflows[0].status in [
STATUS_RUNNING,
STATUS_RECEIVED,
]:
res.update(False, "Can't run already running workflow")
return res
workflowRun = WorkflowRun.objects.create(
workflow_id=workflowId, status=STATUS_RECEIVED
)
runWorkflowJob.delay(workflowId=workflowId, workflowRunId=workflowRun.id)
res.update(True, "Ran workflow successfully")
return res
@staticmethod
def stopWorkflow(workflowId: int):
"""
Stops given workflow
"""
res = ApiResponse(message="Error in stopping workflow")
notebookIds = list(
NotebookJob.objects.filter(workflow_id=workflowId).values_list(
"notebookId", flat=True
)
)
workflowRuns = WorkflowRun.objects.filter(workflow_id=workflowId).order_by("-startTimestamp")
if workflowRuns.count():
workflowRun = workflowRuns[0]
workflowRun.status = STATUS_ABORTED
workflowRun.save()
notebookIds = Workflow.objects.get(id=workflowId).notebookjob_set.all().values_list("notebookId", flat=True)
responses = [ NotebookJobServices.stopNotebookJob(notebookId) for notebookId in notebookIds ]
res.update(True, "Stopped workflow successfully")
return res
# @staticmethod
# def addNotebook(payload):
# res = ApiResponse(message="Error adding notebook")
# notebookTemplate = NotebookTemplate.objects.get(id=payload.get("notebookTemplateId", 0))
# context = payload # Storing payload in context variable so that it can be used for rendering
# # Handling connection variables
# if payload.get("sourceConnection", False):
# connection = Connection.objects.get(id=payload["sourceConnection"])
# connectionParams = connection.cpvc.all()
# for cp in connectionParams:
# paramName = cp.connectionParam.name
# context["sourceConnection_" + paramName] = cp.value
# if payload.get("targetConnection", False):
# connection = Connection.objects.get(id=payload["sourceConnection"])
# connectionParams = connection.cpvc.all()
# for cp in connectionParams:
# paramName = cp.connectionParam.name
# context["sourceConnection_" + paramName] = cp.value
# # Handling S3 path - Splitting it to get the table name
# if payload.get("destinationTableS3Path", False):
# destinationTableName = payload["destinationTableS3Path"].rsplit('/', 1)[1]
# warehouseLocation = payload["destinationTableS3Path"].rsplit('/', 1)[0]
# context["destinationTableName"] = destinationTableName
# context["warehouseLocation"] = warehouseLocation
# # Adding a temp table name to the context
# context["tempTableName"] = "tempTable_" + str(round(time.time() * 1000))
# notebook = Template(notebookTemplate.template).render(Context(context))
# response = Zeppelin.addNotebook(notebook)
# if response:
# res.update(True, "Notebook added successfully")
# return res
|
rberrelleza/cuelake
|
api/genie/migrations/0003_auto_20210412_0719.py
|
<gh_stars>0
# Generated by Django 3.1.7 on 2021-04-12 07:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genie', '0002_auto_20210318_1404'),
]
operations = [
migrations.RenameField(
model_name='runstatus',
old_name='timestamp',
new_name='startTimestamp',
),
migrations.AddField(
model_name='runstatus',
name='endTimestamp',
field=models.DateTimeField(default=None, null=True),
),
migrations.AddField(
model_name='runstatus',
name='message',
field=models.CharField(default=None, max_length=5000, null=True),
),
]
|
rberrelleza/cuelake
|
api/utils/zeppelinAPI.py
|
<filename>api/utils/zeppelinAPI.py
import asyncio
import aiohttp
import requests
from django.conf import settings
from rest_framework import response
NOTEBOOKS_ENDPOINT = "api/notebook"
NOTEBOOK_STATUS_ENDPOINT = "api/notebook/job"
class ZeppelinAPI:
"""
Functionalities around zeppelin APIs
"""
def getAllNotebooks(self):
"""
Return all notebooks from zeppelin
"""
response = requests.get(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}")
return self.__parseResponse(response)
async def getNotebookStatus(self, notebookId: str):
"""
Async method to return status of all paragraphs from a notebook
"""
url = f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOK_STATUS_ENDPOINT}/{notebookId}"
defaultResponse = {"id": notebookId}
try:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
responseJSON = await response.json()
if responseJSON["status"] == "OK":
return responseJSON["body"]
else:
return defaultResponse
except Exception as ex:
return defaultResponse
def getNotebookDetails(self, notebookId: str):
"""
Return all paragraphs from a notebook
"""
response = requests.get(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}/{notebookId}")
return self.__parseResponse(response)
def runNotebookJob(self, notebookId: str):
"""
Run all paragraphs from a notebook
"""
response = requests.post(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}/job/{notebookId}")
return self.__parseResponse(response)
def stopNotebookJob(self, notebookId: str):
"""
Stop all paragraphs from a notebook
"""
response = requests.delete(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}/job/{notebookId}")
return self.__parseResponse(response)
def clearNotebookResults(self, notebookId: str):
"""
Clear all paragraph results from a notebook
"""
response = requests.put(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}/{notebookId}/clear")
return self.__parseResponse(response)
def addNotebook(self, notebook: dict):
"""
        Add a new notebook to zeppelin
"""
response = requests.post(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}", notebook)
return self.__parseResponse(response)
def cloneNotebook(self, notebookId: str, payload: dict):
"""
        Clone an existing notebook
"""
response = requests.post(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}/{notebookId}", payload)
print(response.json())
print(payload)
return self.__parseResponse(response)
def deleteNotebook(self, notebookId: str):
"""
        Delete a notebook
"""
response = requests.delete(f"{settings.ZEPPELIN_HOST}:{settings.ZEPPELIN_PORT}/{NOTEBOOKS_ENDPOINT}/{notebookId}")
return self.__parseResponse(response)
def __parseResponse(self, response):
"""
Parses the response returned by zeppelin APIs
"""
try:
responseJSON = response.json()
if responseJSON["status"] == "OK":
return responseJSON.get("body", True)
else:
return False
except Exception as ex:
return False
# Export initialized class
Zeppelin = ZeppelinAPI()
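# Usage sketch: other modules import this singleton as `from utils.zeppelinAPI import Zeppelin`
# and call e.g. `Zeppelin.runNotebookJob(notebookId)` with whatever zeppelin notebook id the caller has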
|
rberrelleza/cuelake
|
api/system/migrations/0002_auto_20210418_0735.py
|
<filename>api/system/migrations/0002_auto_20210418_0735.py<gh_stars>0
# Generated by Django 3.1.7 on 2021-04-18 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('system', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='accountsetting',
name='label',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='accountsetting',
name='type',
field=models.CharField(default='text', max_length=200),
),
migrations.AlterField(
model_name='accountsetting',
name='key',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
|
rberrelleza/cuelake
|
api/workflows/admin.py
|
from django.contrib import admin
from workflows.models import Workflow, WorkflowRun, NotebookJob
class WorkflowRunAdmin(admin.ModelAdmin):
    # Adding startTimestamp in a separate ModelAdmin class as it's a read-only field,
    # so that it stays visible in the admin panel
readonly_fields = ('startTimestamp',)
admin.site.register(Workflow)
admin.site.register(WorkflowRun, WorkflowRunAdmin)
admin.site.register(NotebookJob)
|
rberrelleza/cuelake
|
api/workflows/tasks.py
|
import json
import datetime as dt
import dateutil.parser as dp
import requests
import polling
from celery import shared_task
from django.conf import settings
from system.services import NotificationServices
from workflows.services import WorkflowServices
from workflows.models import WorkflowRun, STATUS_RECEIVED, STATUS_FAILURE
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
@shared_task
def runWorkflowJob(workflowId: int, workflowRunId: int = None):
"""
Celery task to run a Workflow
:param workflowId: ID of Workflows.workflow model
"""
try:
dependentWorkflowIds = WorkflowServices.runWorkflow(workflowId=workflowId, workflowRunId=workflowRunId)
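        # Chain dependent workflows: every workflow whose trigger condition matches this run's outcome gets its own run record and celery job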
for workflowId in dependentWorkflowIds:
workflowRun = WorkflowRun.objects.create(workflow_id=workflowId, status=STATUS_RECEIVED)
runWorkflowJob.delay(workflowId=workflowId, workflowRunId=workflowRun.id)
except Exception as ex:
WorkflowRun.objects.filter(id=workflowRunId).update(status=STATUS_FAILURE)
|
rberrelleza/cuelake
|
api/utils/druidSpecGenerator.py
|
<reponame>rberrelleza/cuelake
# Class to generate Druid ingestion spec
import json
import os
import logging
from typing import List
from urllib.parse import urlparse
import boto3
import pyarrow.parquet as pq
import s3fs
PARQUET_STRING = ["STRING", "ENUM", "UUID"]
PARQUET_COMPLEX_TYPE = ["LIST", "STRUCT"]
PARQUET_INTEGER = ["INT"]
PARQUET_TIMESTAMP = ["TIME", "TIMESTAMP", "DATE"]
PARQUET_DECIMAL = ["DECIMAL"]
logger = logging.getLogger(__name__)
class DruidIngestionSpecGenerator:
"""
Class to handle functionality around Druid ingestion spec generation
"""
@staticmethod
def getIngestionSpec(datasetLocation, datasourceName, datasetSchema):
"""
Method to generate Druid ingestion spec
Method doesn't support complex druid data types right now.
:param datasetLocation: S3 location of the dataset
:param datasourceName Name of the dataset
:param datasetSchema The schema of the dataset
:returns DruidSpec
"""
logger.info("Generating Druid spec for dataset at: %s", datasetLocation)
logger.info("Schema for dataset: %s", datasetSchema)
return json.dumps(
{
"type": "index",
"spec": {
"dataSchema": {
"dataSource": datasourceName,
"timestampSpec": {
"column": DruidIngestionSpecGenerator._getTimestampColumn(datasetSchema),
"format": "millis",
"missingValue": None,
},
"dimensionsSpec": {
"dimensions": DruidIngestionSpecGenerator._getDimensions(datasetSchema)
},
"metricsSpec": DruidIngestionSpecGenerator._getMetrics(datasetSchema),
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "MONTH",
"queryGranularity": "MINUTE",
"rollup": True,
"intervals": None,
},
"transformSpec": {"filter": None, "transforms": []},
},
"ioConfig": {
"type": "index",
"inputSource": {
"type": "s3",
"uris": None,
"prefixes": [datasetLocation],
"objects": None,
},
"inputFormat": {
"type": "parquet",
"flattenSpec": {"useFieldDiscovery": True, "fields": []},
"binaryAsString": False,
},
"appendToExisting": False,
},
"tuningConfig": {
"type": "index_parallel",
"maxRowsPerSegment": 5000000,
"maxRowsInMemory": 1000000,
"maxBytesInMemory": 0,
"maxTotalRows": None,
"numShards": None,
"splitHintSpec": None,
"partitionsSpec": {
"type": "dynamic",
"maxRowsPerSegment": 5000000,
"maxTotalRows": None,
},
"indexSpec": {
"bitmap": {"type": "concise"},
"dimensionCompression": "lz4",
"metricCompression": "lz4",
"longEncoding": "longs",
},
"indexSpecForIntermediatePersists": {
"bitmap": {"type": "concise"},
"dimensionCompression": "lz4",
"metricCompression": "lz4",
"longEncoding": "longs",
},
"maxPendingPersists": 0,
"forceGuaranteedRollup": False,
"reportParseExceptions": False,
"pushTimeout": 0,
"segmentWriteOutMediumFactory": None,
"maxNumConcurrentSubTasks": 1,
"maxRetry": 3,
"taskStatusCheckPeriodMs": 1000,
"chatHandlerTimeout": "PT10S",
"chatHandlerNumRetries": 5,
"maxNumSegmentsToMerge": 100,
"totalNumMergeTasks": 10,
"logParseExceptions": False,
"maxParseExceptions": 2147483647,
"maxSavedParseExceptions": 0,
"buildV9Directly": True,
"partitionDimensions": [],
},
},
"context": {"forceTimeChunkLock": True},
"dataSource": datasourceName,
}
)
@staticmethod
def _getTimestampColumn(datasetSchema) -> str:
"""
Method to extract the timestamp column from the spec.
        In case of multiple timestamp columns in the schema, the first
        timestamp column encountered is returned.
:param datasetSchema Schema of the dataset
:returns Name of the timestamp column
"""
logger.info("Fetching the timestamp column from the schema")
timestampColumn = None
for obj in datasetSchema:
if obj.logical_type.type.upper() in PARQUET_TIMESTAMP:
timestampColumn = obj.name
break
return timestampColumn
@staticmethod
def _getMetrics(datasetSchema):
"""
Method to fetch the metrics from the dremio schema
        :param datasetSchema Schema of the dataset
"""
logger.info("Fetching all the metrics from the schema")
metrics = [{"type": "count", "name": "count"}]
for obj in datasetSchema:
metric = None
if obj.logical_type.type.upper() in PARQUET_INTEGER:
typeMetric = "longSum"
metric = {
"type": typeMetric,
"name": obj.name,
"fieldName": obj.name,
"expression": None,
}
elif obj.logical_type.type.upper() in PARQUET_DECIMAL:
typeMetric = "doubleSum"
metric = {
"type": typeMetric,
"name": obj.name,
"fieldName": obj.name,
"expression": None,
}
if metric is not None:
metrics.append(metric)
return metrics
@staticmethod
def _getDimensions(datasetSchema):
"""
        Method to fetch the dimensions from the dremio schema
        :param datasetSchema Schema of the dataset
"""
logger.info("Fetching all the dimensions from the schema")
dimensions = []
timestampColumns = []
for obj in datasetSchema:
if obj.logical_type.type.upper() in PARQUET_STRING:
dimension = {
"type": "string",
"name": obj.name,
"multiValueHandling": "SORTED_ARRAY",
"createBitmapIndex": True,
}
dimensions.append(dimension)
elif obj.logical_type.type.upper() in PARQUET_TIMESTAMP:
dimension = {
"type": "string",
"name": obj.name,
"multiValueHandling": "SORTED_ARRAY",
"createBitmapIndex": True,
}
timestampColumns.append(dimension)
        # The first timestamp column becomes the primary timestamp; add the remaining timestamp columns to the dimension list as strings
dimensions.extend(timestampColumns[1:])
return dimensions
@staticmethod
def _getSchemaForDatasourceInS3(datasetLocation: str):
"""
Gets schema for a datasource in S3 bucket at staging location
"""
parsedInfo = urlparse(datasetLocation)
bucket = parsedInfo.netloc
key = parsedInfo.path
s3 = boto3.client("s3")
files = s3.list_objects(
Bucket=bucket, Prefix=key.lstrip('/')
)
if len(files["Contents"]) > 0:
try:
fileName = files["Contents"][-1]["Key"]
schema = pq.ParquetDataset(
bucket + "/" + fileName,
filesystem=s3fs.S3FileSystem(
anon=False
),
).schema
return schema
except Exception as ex:
logger.error(str(ex))
return []
else:
return []
|
rberrelleza/cuelake
|
api/genie/services.py
|
import asyncio
import json
import pytz
import time
from typing import List
from django.template import Template, Context
# from django_celery_beat.models import CrontabSchedule
from genie.models import NotebookJob, RunStatus, Connection, ConnectionType, ConnectionParam, ConnectionParamValue, NotebookTemplate, Schedule
from genie.serializers import NotebookJobSerializer, ScheduleSerializer, RunStatusSerializer, ConnectionSerializer, ConnectionDetailSerializer, ConnectionTypeSerializer, NotebookTemplateSerializer
from utils.apiResponse import ApiResponse
from utils.zeppelinAPI import Zeppelin
from utils.druidSpecGenerator import DruidIngestionSpecGenerator
from genie.tasks import runNotebookJob as runNotebookJobTask, checkIfNotebookRunning
# Name of the celery task which calls the zeppelin api
CELERY_TASK_NAME = "genie.tasks.runNotebookJob"
GET_NOTEBOOKJOBS_LIMIT = 10
RUN_STATUS_LIMIT = 10
class NotebookJobServices:
"""
Class containing services related to NotebookJob model
"""
@staticmethod
async def _fetchNotebookStatuses(notebooks: list):
"""
Async method to fetch notebook status details for multiple notebooks
Returns a dict with notebook ids as keys
:param notebooks: List of notebook describing dicts each containing the 'id' field
"""
notebookStatuses = {}
for future in asyncio.as_completed([Zeppelin.getNotebookStatus(notebook["id"]) for notebook in notebooks]):
status = await future
notebookStatuses[status["id"]] = status
return notebookStatuses
@staticmethod
def getNotebooks(offset: int = 0):
"""
Service to fetch and serialize NotebookJob objects
Number of NotebookJobs fetched is stored as the constant GET_NOTEBOOKJOBS_LIMIT
:param offset: Offset for fetching NotebookJob objects
"""
res = ApiResponse(message="Error retrieving notebooks")
notebooks = Zeppelin.getAllNotebooks()
if notebooks:
notebookCount = len(notebooks)
notebooks = notebooks[offset: offset + GET_NOTEBOOKJOBS_LIMIT]
notebookIds = [notebook["id"] for notebook in notebooks]
notebookJobs = NotebookJob.objects.filter(notebookId__in=notebookIds)
for notebook in notebooks:
notebook["name"] = notebook["path"]
notebookJob = next((notebookJob for notebookJob in notebookJobs if notebookJob.name == notebook["id"]), False)
if notebookJob:
notebook["isScheduled"] = True
notebook["schedule"] = str(notebookJob.crontab)
notebook["isActive"] = notebookJob.enabled
notebook["notebookJobId"] = notebookJob.id
else:
notebook["isScheduled"] = False
notebookRunStatus = RunStatus.objects.filter(notebookId=notebook["id"]).order_by("-startTimestamp").first()
if notebookRunStatus:
notebook["lastRun"] = RunStatusSerializer(notebookRunStatus).data
res.update(True, "NotebookJobs retrieved successfully", {"notebooks": notebooks, "count": notebookCount})
return res
@staticmethod
def getNotebooksLight():
""" Gets concise notebook data"""
res = ApiResponse(message="Error retrieving notebooks")
notebooks = Zeppelin.getAllNotebooks()
res.update(True, "Notebooks retrieved successfully", notebooks)
return res
@staticmethod
def addNotebook(payload):
res = ApiResponse(message="Error adding notebook")
notebookTemplate = NotebookTemplate.objects.get(id=payload.get("notebookTemplateId", 0))
context = payload # Storing payload in context variable so that it can be used for rendering
# Handling connection variables
if payload.get("sourceConnection", False):
connection = Connection.objects.get(id=payload["sourceConnection"])
context["sourceConnection_type"] = connection.connectionType.name
connectionParams = connection.cpvc.all()
for cp in connectionParams:
paramName = cp.connectionParam.name
context["sourceConnection_" + paramName] = cp.value
if payload.get("targetConnection", False):
connection = Connection.objects.get(id=payload["sourceConnection"])
context["targetConnection_type"] = connection.connectionType.name
connectionParams = connection.cpvc.all()
for cp in connectionParams:
paramName = cp.connectionParam.name
context["targetConnection" + paramName] = cp.value
# Handling S3 path - Splitting it to get the table name
if payload.get("destinationTableS3Path", False):
destinationTableName = payload["destinationTableS3Path"].rsplit('/', 1)[1]
warehouseLocation = payload["destinationTableS3Path"].rsplit('/', 1)[0]
context["destinationTableName"] = destinationTableName
context["warehouseLocation"] = warehouseLocation
# Adding a temp table name to the context
context["tempTableName"] = "tempTable_" + str(round(time.time() * 1000))
notebook = Template(notebookTemplate.template).render(Context(context))
response = Zeppelin.addNotebook(notebook)
if response:
res.update(True, "Notebook added successfully")
return res
@staticmethod
def getNotebookJobDetails(notebookId: int, runStatusOffset: int = 0):
"""
Service to fetch run details and logs of the selected NotebookJob
:param notebookId: ID of the NotebookJob
:param runStatusOffset: Offset for fetching NotebookJob run statuses
"""
res = ApiResponse()
notebookJobData = {}
runStatuses = RunStatus.objects.filter(notebookId=notebookId).order_by("-startTimestamp")[runStatusOffset: runStatusOffset + RUN_STATUS_LIMIT]
notebookRunCount = RunStatus.objects.filter(notebookId=notebookId).count()
notebookJobData["runStatuses"] = RunStatusSerializer(runStatuses, many=True).data
notebookJobData["count"] = notebookRunCount
res.update(True, "NotebookJobs retrieved successfully", notebookJobData)
return res
@staticmethod
def addNotebookJob(notebookId: str, scheduleId: int):
"""
Service to add a new NotebookJob
:param notebookId: ID of the notebook for which to create job
:param scheduleId: ID of schedule
"""
res = ApiResponse()
scheduleObj = Schedule.objects.get(crontabschedule_ptr_id=scheduleId)
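        # A NotebookJob is a django-celery-beat periodic task: the crontab drives the schedule and args carry the notebook id to run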
NotebookJob.objects.update_or_create(name=notebookId, notebookId=notebookId, defaults={"crontab":scheduleObj, "task":CELERY_TASK_NAME, "args":f'["{notebookId}"]'})
res.update(True, "NotebookJob added successfully", None)
return res
@staticmethod
def updateNotebookJob(notebookJobId: int, scheduleId: int):
"""
Service to update crontab of an existing NotebookJob
        :param notebookJobId: ID of the NotebookJob for which to update crontab
:param scheduleId: ID of schedule
"""
res = ApiResponse()
scheduleObj = Schedule.objects.get(crontabschedule_ptr_id=scheduleId)
NotebookJob.objects.filter(id=notebookJobId).update(crontab=scheduleObj)
res.update(True, "NotebookJob updated successfully", None)
return res
@staticmethod
def deleteNotebookJob(notebookId: int):
"""
        Service to delete an existing NotebookJob
        :param notebookId: ID of the notebook whose job should be deleted
"""
res = ApiResponse()
NotebookJob.objects.filter(name=notebookId).delete()
res.update(True, "NotebookJob deleted successfully", None)
return res
@staticmethod
def toggleNotebookJob(notebookId: int, enabled: bool):
"""
        Service to enable or disable an existing NotebookJob
        :param notebookId: ID of the notebook whose job to toggle
        :param enabled: True to enable the job, False to disable it
"""
res = ApiResponse()
NotebookJob.objects.filter(notebookId=notebookId).update(enabled=enabled)
res.update(True, "NotebookJob updated successfully", None)
return res
@staticmethod
def getSchedules():
"""
Service to get all schedule objects
"""
res = ApiResponse()
schedules = Schedule.objects.all()
data = ScheduleSerializer(schedules, many=True).data
res.update(True, "Schedules fetched successfully", data)
return res
@staticmethod
def addSchedule(cron: str, timezone: str = None, name: str = ""):
"""
Service to add Schedule
:param cron: Crontab in string format
:param timezone: Timezone string for which to configure Schedule
:param name: Name of schedule provided by user
"""
res = ApiResponse()
cronElements = cron.split()
if len(cronElements) != 5:
res.update(False, "Crontab must contain five elements")
return res
timezone = timezone if timezone else "UTC"
Schedule.objects.create(
minute=cronElements[0],
hour=cronElements[1],
day_of_month=cronElements[2],
month_of_year=cronElements[3],
day_of_week=cronElements[4],
timezone=timezone,
name=name,
)
res.update(True, "Schedule added successfully", None)
return res
@staticmethod
def getSingleSchedule(scheduleId: int):
"""
Service to get singleSchedule
:param scheduleId: int
"""
res = ApiResponse()
schedules = Schedule.objects.filter(crontabschedule_ptr_id=scheduleId)
data = ScheduleSerializer(schedules, many=True).data
res.update(True, "Schedules fetched successfully", data)
return res
@staticmethod
def updateSchedule(id, cron, timezone, name):
"""
Service to update Schedule
param id: int
param cron: Crontab in string format
param timezone: Timezone in string format
param name: String
"""
res = ApiResponse()
schedules = Schedule.objects.get(crontabschedule_ptr_id=id)
schedules.cron = cron
schedules.timezone = timezone
schedules.name = name
schedules.save()
res.update(True, "Schedules updated successfully", [])
return res
@staticmethod
def deleteSchedule(scheduleId: int):
""" Service to delete schedule of given scheduleId """
res = ApiResponse()
Schedule.objects.filter(crontabschedule_ptr_id=scheduleId).delete()
res.update(True, "Schedules deleted successfully", [])
return res
@staticmethod
def getTimezones():
"""
Service to fetch all pytz timezones
"""
res = ApiResponse()
timezones = pytz.all_timezones
res.update(True, "Timezones fetched successfully", timezones)
return res
@staticmethod
def runNotebookJob(notebookId: str):
"""
Service to run notebook job
"""
res = ApiResponse("Error in running notebook")
runNotebookJobTask.delay(notebookId=notebookId, runType="Manual")
res.update(True, "Notebook triggered successfully", None)
return res
@staticmethod
def stopNotebookJob(notebookId: str):
"""
        Service to stop a running notebook job
"""
res = ApiResponse(message="Error in stopping notebook")
# Updating runStatus that the task was aborted
response = Zeppelin.stopNotebookJob(notebookId)
if response:
res.update(True, "Notebook stopped successfully", None)
return res
@staticmethod
def clearNotebookResults(notebookId: str):
"""
        Service to clear notebook results
"""
res = ApiResponse(message="Error in clearing notebook")
response = Zeppelin.clearNotebookResults(notebookId)
if response:
res.update(True, "Notebook cleared successfully", None)
return res
@staticmethod
def cloneNotebook(notebookId: str, payload: dict):
"""
        Service to clone a notebook
"""
res = ApiResponse(message="Error in cloning notebook")
response = Zeppelin.cloneNotebook(notebookId, json.dumps(payload))
if response:
res.update(True, "Notebook cloned successfully", None)
return res
@staticmethod
def deleteNotebook(notebookId: str):
"""
        Service to delete a notebook
"""
res = ApiResponse(message="Error in cloning notebook")
response = Zeppelin.deleteNotebook(notebookId)
if response:
res.update(True, "Notebook deleted successfully", None)
return res
class Connections:
@staticmethod
def getConnections():
res = ApiResponse()
connections = Connection.objects.all()
serializer = ConnectionSerializer(connections, many=True)
res.update(True, "Connections retrieved successfully", serializer.data)
return res
@staticmethod
def getConnection(connection_id):
res = ApiResponse()
connections = Connection.objects.get(id=connection_id)
serializer = ConnectionDetailSerializer(connections)
res.update(True, "Connection retrieved successfully", serializer.data)
return res
@staticmethod
def addConnection(payload):
res = ApiResponse()
connectionType = ConnectionType.objects.get(id=payload["connectionType_id"])
connection = Connection.objects.create(
name=payload["name"], description=payload["description"], connectionType=connectionType
)
for param in payload["params"]:
cp = ConnectionParam.objects.get(name=param, connectionType=connectionType)
ConnectionParamValue.objects.create(
connectionParam=cp, value=payload["params"][param], connection=connection
)
res.update(True, "Connection added successfully")
return res
@staticmethod
def removeConnection(connection_id):
res = ApiResponse()
Connection.objects.get(id=connection_id).delete()
res.update(True, "Connection deleted successfully")
return res
@staticmethod
def updateConnection(connection_id, payload):
res = ApiResponse()
Connection.objects.filter(id=connection_id).update(
name=payload.get("name", ""),
description=payload.get("description", ""),
connectionType=ConnectionType.objects.get(id=payload["connectionType_id"]),
)
connection = Connection.objects.get(id=connection_id)
# ToDo: delete params related to this & then update
for param in payload["params"]:
cp = ConnectionParam.objects.get(id=param["paramId"])
# if cp.isEncrypted:
# encryptionObject= AESCipher()
# param['paramValue'] = encryptionObject.encrypt(param['paramValue'])
ConnectionParamValue.objects.create(
connectionParam=cp, value=param["paramValue"], connection=connection
)
res.update(True, "Connection updated successfully")
return res
@staticmethod
def getConnectionTypes():
res = ApiResponse()
connectionTypes = ConnectionType.objects.all()
serializer = ConnectionTypeSerializer(connectionTypes, many=True)
res.update(True, "Successfully retrieved connection types", serializer.data)
return res
class NotebookTemplateService:
@staticmethod
def getNotebookTemplates():
res = ApiResponse()
templates = NotebookTemplate.objects.all()
serializer = NotebookTemplateSerializer(templates, many=True)
res.update(True, "Connections retrieved successfully", serializer.data)
return res
@staticmethod
def getDatasetDetails(datasetLocation, datasourceName):
"""
Service to fetch S3 dataset details
:param datasetLocation: Location of the S3 bucket
"""
res = ApiResponse()
schema = DruidIngestionSpecGenerator._getSchemaForDatasourceInS3(datasetLocation)
ingestionSpec = DruidIngestionSpecGenerator.getIngestionSpec(
datasetLocation=datasetLocation, datasourceName=datasourceName, datasetSchema=schema
)
s3DatasetSchema = list(map(lambda x: {"columnName": x.name, "dataType": x.logical_type.type}, schema))
datasetDetails = {
"dremioSchema": s3DatasetSchema,
"druidIngestionSpec": ingestionSpec
}
res.update(True, "Dataset schema retrieved successfully", datasetDetails)
return res
|
rberrelleza/cuelake
|
api/genie/migrations/0004_auto_20210414_1012.py
|
<reponame>rberrelleza/cuelake
# Generated by Django 3.1.7 on 2021-04-14 10:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('genie', '0003_auto_20210412_0719'),
]
operations = [
migrations.CreateModel(
name='Connection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='ConnectionParam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=200, null=True)),
('label', models.CharField(blank=True, max_length=200, null=True)),
('isEncrypted', models.BooleanField(default=False)),
('properties', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='ConnectionType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='ConnectionParamValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.TextField()),
('connection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cpvc', to='genie.connection')),
('connectionParam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cpvcp', to='genie.connectionparam')),
],
),
migrations.AddField(
model_name='connectionparam',
name='connectionType',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='connectionTypeParam', to='genie.connectiontype'),
),
migrations.AddField(
model_name='connection',
name='connectionType',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='connectionTypeConnection', to='genie.connectiontype'),
),
]
|
rberrelleza/cuelake
|
api/workflows/migrations/0001_initial.py
|
<filename>api/workflows/migrations/0001_initial.py
# Generated by Django 3.1.8 on 2021-04-19 13:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('django_celery_beat', '0015_edit_solarschedule_events_choices'),
]
operations = [
migrations.CreateModel(
name='Workflow',
fields=[
('periodictask_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_celery_beat.periodictask')),
('dependsOnWorkflowStatus', models.CharField(default='success', max_length=50)),
('dependsOnWorkflow', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workflows.workflow')),
],
bases=('django_celery_beat.periodictask',),
),
migrations.CreateModel(
name='WorkflowRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(default='success', max_length=50)),
('startTimestamp', models.DateTimeField(auto_now_add=True)),
('endTimestamp', models.DateTimeField(default=None, null=True)),
('workflow', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workflows.workflow')),
],
),
migrations.CreateModel(
name='NotebookJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notebookId', models.CharField(default='000000000', max_length=20)),
('workflow', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workflows.workflow')),
],
),
]
|
rberrelleza/cuelake
|
api/genie/urls.py
|
from django.urls import path
from . import views
urlpatterns = [
path("notebooks/<int:offset>", views.NotebookView.as_view(), name="notebookView"),
path("notebooksLight", views.NotebooksLight.as_view(), name="notebookView"),
path("notebook/<str:notebookId>", views.NotebookOperationsView.as_view(), name="notebookView"),
path("notebook/actions/<str:notebookId>", views.NotebookActionsView.as_view(), name="notebookView"),
path("notebook", views.NotebookView.as_view(), name="notebookView"),
path("notebookjob/<str:notebookId>", views.NotebookJobView.as_view(), name="notebookJobView"),
path("notebookjob/", views.NotebookJobView.as_view(), name="notebookJobView"),
path("notebookTemplates/", views.NotebookTemplateView.as_view(), name="notebookTemplateView"),
path("schedules/", views.ScheduleView.as_view(), name="scheduleView"),
path("schedules/<int:scheduleId>", views.schedule, name="getSingleSchedule"),
path("timezones/", views.TimzoneView.as_view(), name="timezoneView"),
# =====================================================================================================
# Connections
# =====================================================================================================
path("connections", views.connections, name="connections"),
path("connection/<int:connection_id>", views.connection, name="connection"),
path("connectiontypes", views.connectionTypes, name="connectionTypes"),
# =====================================================================================================
    # Dataset details
# =====================================================================================================
path("datasetDetails", views.datasetDetails, name="datasetDetails"),
]
|
rberrelleza/cuelake
|
api/workflows/models.py
|
<reponame>rberrelleza/cuelake<gh_stars>0
from django.db import models
from django_celery_beat.models import PeriodicTask
STATUS_SUCCESS = "success"
STATUS_FAILURE = "failure"
STATUS_ALWAYS = "always"
STATUS_RUNNING = "running"
STATUS_RECEIVED = "received" # just before triggering job
STATUS_ABORTED = "aborted"
class Workflow(PeriodicTask):
"""
Subclass of django_celery_beat.models.PeriodicTask
"""
triggerWorkflow = models.ForeignKey("self", null=True, blank=True, on_delete=models.CASCADE, db_index=True)
triggerWorkflowStatus = models.CharField(max_length=50, default=STATUS_SUCCESS)
class WorkflowRun(models.Model):
workflow = models.ForeignKey(Workflow, on_delete=models.CASCADE, db_index=True)
status = models.CharField(max_length=50, default=STATUS_SUCCESS)
startTimestamp = models.DateTimeField(auto_now_add=True)
endTimestamp = models.DateTimeField(null=True, default=None)
class NotebookJob(models.Model):
workflow = models.ForeignKey(Workflow, on_delete=models.CASCADE, db_index=True)
notebookId = models.CharField(max_length=20, default="000000000")
# dependsOn notebookId = models.CharField(max_length=20, default="000000000")
# dependsOn notebookStatus =
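# Illustrative sketch (not part of the original module): chaining one Workflow after
# another through the self-referencing triggerWorkflow field. The PeriodicTask
# kwargs used below (name, task) are assumptions for the example only.
#
#   nightly = Workflow.objects.create(name="nightly-load", task="<celery task path>")
#   report = Workflow.objects.create(
#       name="daily-report",
#       task="<celery task path>",
#       triggerWorkflow=nightly,
#       triggerWorkflowStatus=STATUS_SUCCESS,
#   )
#   # report is intended to run only after nightly finishes with STATUS_SUCCESS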
|
rberrelleza/cuelake
|
api/genie/migrations/0001_initial.py
|
# Generated by Django 3.1.7 on 2021-03-12 14:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('django_celery_beat', '0015_edit_solarschedule_events_choices'),
]
operations = [
migrations.CreateModel(
name='NotebookJob',
fields=[
('periodictask_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_celery_beat.periodictask')),
('notebookId', models.CharField(db_index=True, max_length=50)),
],
bases=('django_celery_beat.periodictask',),
),
]
|
rberrelleza/cuelake
|
api/genie/migrations/0008_runstatus_worflowrun.py
|
<gh_stars>0
# Generated by Django 3.1.8 on 2021-04-19 13:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workflows', '0001_initial'),
('genie', '0007_auto_20210417_1807'),
]
operations = [
migrations.AddField(
model_name='runstatus',
name='worflowRun',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='workflows.workflowrun'),
),
]
|
rberrelleza/cuelake
|
api/genie/views.py
|
from django.http import HttpRequest
from rest_framework.views import APIView
from rest_framework.response import Response
from genie.services import NotebookJobServices, Connections, NotebookTemplateService
from rest_framework.decorators import api_view
class NotebookOperationsView(APIView):
"""
    Class to clone and delete notebooks on the zeppelin server
"""
def post(self, request, notebookId):
res = NotebookJobServices.cloneNotebook(notebookId, request.data)
return Response(res.json())
def delete(self, request, notebookId):
res = NotebookJobServices.deleteNotebook(notebookId)
return Response(res.json())
class NotebookActionsView(APIView):
"""
    Class to run, stop and clear results of notebooks on the zeppelin server
"""
def post(self, request, notebookId):
res = NotebookJobServices.runNotebookJob(notebookId)
return Response(res.json())
def delete(self, request, notebookId):
res = NotebookJobServices.stopNotebookJob(notebookId)
return Response(res.json())
def put(self, request, notebookId):
res = NotebookJobServices.clearNotebookResults(notebookId)
return Response(res.json())
class NotebooksLight(APIView):
"""
Get concise notebook data
"""
def get(self, request):
res = NotebookJobServices.getNotebooksLight()
return Response(res.json())
class NotebookView(APIView):
"""
    Class to get and add notebooks on the zeppelin server
"""
def get(self, request, offset: int):
res = NotebookJobServices.getNotebooks(offset)
return Response(res.json())
def post(self, request):
res = NotebookJobServices.addNotebook(request.data)
return Response(res.json())
class NotebookJobView(APIView):
"""
    Class to get, add and update NotebookJob details
    The put and post methods only require a request body and no path parameters
    The get and delete methods take the notebookId as an optional path parameter
"""
def get(self, request, notebookId=None):
offset = int(request.GET.get("offset", 0))
res = NotebookJobServices.getNotebookJobDetails(notebookId=notebookId, runStatusOffset=offset)
return Response(res.json())
def post(self, request):
notebookId = request.data["notebookId"]
scheduleId = request.data["scheduleId"]
res = NotebookJobServices.addNotebookJob(notebookId=notebookId, scheduleId=scheduleId)
return Response(res.json())
    def put(self, request):
        notebookId = request.data["notebookId"]
        if "scheduleId" in request.data:
            scheduleId = request.data["scheduleId"]
            res = NotebookJobServices.updateNotebookJob(notebookId=notebookId, scheduleId=scheduleId)
        elif "enabled" in request.data:
            enabled = request.data["enabled"]
            res = NotebookJobServices.toggleNotebookJob(notebookId=notebookId, enabled=enabled)
        else:
            # Guard so res is always defined; without this branch a body missing both
            # keys would raise UnboundLocalError below.
            return Response({"success": False, "message": "scheduleId or enabled is required"}, status=400)
        return Response(res.json())
def delete(self, request, notebookId=None):
res = NotebookJobServices.deleteNotebookJob(notebookId=notebookId)
return Response(res.json())
class ScheduleView(APIView):
"""
Class to get and add available crontab schedules
"""
def get(self, request):
res = NotebookJobServices.getSchedules()
return Response(res.json())
def post(self, request):
name = request.data["name"]
cron = request.data["crontab"]
timezone = request.data["timezone"]
res = NotebookJobServices.addSchedule(cron=cron, timezone=timezone, name=name)
return Response(res.json())
def put(self,request):
id = request.data["id"]
name = request.data["name"]
cron = request.data["crontab"]
timezone = request.data["timezone"]
res = NotebookJobServices.updateSchedule(id=id, cron=cron, timezone=timezone, name=name)
return Response(res.json())
@api_view(["GET", "PUT", "DELETE"])
def schedule(request: HttpRequest, scheduleId: int) -> Response:
"""
    Method for crud operations on a single schedule
    :param request: HttpRequest
    :param scheduleId: Schedule Id
"""
if request.method == "GET":
res = NotebookJobServices.getSingleSchedule(scheduleId)
return Response(res.json())
if request.method == "DELETE":
res = NotebookJobServices.deleteSchedule(scheduleId)
return Response(res.json())
class TimzoneView(APIView):
"""
Class to get standard pytz timezones
"""
def get(self, request):
res = NotebookJobServices.getTimezones()
return Response(res.json())
# TODO
# Change connection views to class
@api_view(["GET", "POST"])
def connections(request: HttpRequest) -> Response:
"""
Method to get or add connection
:param request: HttpRequest
"""
if request.method == "GET":
res = Connections.getConnections()
return Response(res.json())
elif request.method == "POST":
res = Connections.addConnection(request.data)
return Response(res.json())
@api_view(["GET", "PUT", "DELETE"])
def connection(request: HttpRequest, connection_id: int) -> Response:
"""
Method for crud operations on a single connection
:param request: HttpRequest
:param connection_id: Connection Id
"""
if request.method == "GET":
res = Connections.getConnection(connection_id)
return Response(res.json())
elif request.method == "DELETE":
res = Connections.removeConnection(connection_id)
return Response(res.json())
elif request.method == "PUT":
res = Connections.updateConnection(connection_id, request.data)
return Response(res.json())
@api_view(["GET", "POST"])
def connectionTypes(request: HttpRequest) -> Response:
"""
Method to get all connection types
:param request: HttpRequest
"""
if request.method == "GET":
res = Connections.getConnectionTypes()
return Response(res.json())
@api_view(["POST"])
def datasetDetails(request: HttpRequest) -> Response:
"""
Method to get dataset details from s3 location
:param request: HttpRequest
"""
datasetLocation = request.data.get("datasetLocation")
datasourceName = request.data.get("datasourceName")
res = NotebookTemplateService.getDatasetDetails(datasetLocation, datasourceName)
return Response(res.json())
class NotebookTemplateView(APIView):
def get(self, request):
res = NotebookTemplateService.getNotebookTemplates()
return Response(res.json())
|
Sen/ledger
|
test/dummy/db/migrate/20191028102323_create_ookkee_entries.rb
|
class CreateOokkeeEntries < ActiveRecord::Migration[6.0]
def change
create_table :ookkee_entries do |t|
t.integer :sheet_id
t.string :sheet_name # assets, liability, etc
t.integer :account_id # cash, bank payable
t.string :entry_type # credit/debit
t.decimal :amount, scale: 10, precision: 30
t.string :trackable_type
t.integer :trackable_id
t.timestamps
end
add_index :ookkee_entries, :sheet_id
add_index :ookkee_entries, :entry_type
add_index :ookkee_entries, :sheet_name
add_index :ookkee_entries, :account_id
add_index :ookkee_entries, :trackable_type
add_index :ookkee_entries, :trackable_id
end
end
|
Sen/ledger
|
lib/ookkee/repos/entry_repo.rb
|
require 'bigdecimal'
module Ookkee
class EntryRepo
def build_from_factory(sheet, factory)
entry = sheet.entries.build
entry.account = factory.attributes[:account]
entry.trackable = factory.attributes[:trackable]
entry.sheet_name = factory.attributes[:sheet_name]
entry.entry_type = factory.attributes[:entry_type]
entry.amount = BigDecimal(factory.attributes[:amount] || 0)
entry
end
def sheets_with_user(user)
Ookkee::Sheet.where(user_id: user.id, user_type: user.class.name).order('created_at DESC')
end
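    # Returns the user's entries for the given account, newest first, with an extra
    # balance_cache column: a running SUM window over entry id that counts debits as
    # positive and credits as negative amounts.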
def account_entries_with_user(account, user)
Ookkee::Entry.joins(:account, :sheet)
.where(sheet_name: account.sheet_name)
.where('ookkee_accounts.name = ?', account.name)
.where('ookkee_sheets.user_id = ? and ookkee_sheets.user_type = ?', user.id, user.class.name)
.order('ookkee_entries.created_at DESC')
.select("ookkee_entries.*,
SUM(
CASE WHEN ookkee_entries.entry_type = 'debit' then ookkee_entries.amount
WHEN ookkee_entries.entry_type = 'credit' then ookkee_entries.amount * -1
END
) OVER (ORDER BY ookkee_entries.id) AS balance_cache")
end
def calculate_account_balance(account, user)
entries = account_entries_with_user(account, user)
entries.where('entry_type = ?', 'debit').sum(:amount) - entries.where('entry_type = ?', 'credit').sum(:amount)
end
end
end
|
Sen/ledger
|
lib/ookkee/liability.rb
|
<filename>lib/ookkee/liability.rb
module Ookkee
class Liability < Sheet
end
end
|
Sen/ledger
|
lib/ookkee/revenue.rb
|
module Ookkee
class Revenue < Sheet
end
end
|
Sen/ledger
|
lib/tasks/ookkee_tasks.rake
|
# desc "Explaining what the task does"
namespace :ookkee do
task :install do
end
end
|
Sen/ledger
|
lib/ookkee/model/sheet.rb
|
<gh_stars>0
module Ookkee
class Sheet < ActiveRecord::Base
self.table_name = "ookkee_sheets"
has_many :entries, class_name: 'Ookkee::Entry', foreign_key: :sheet_id
belongs_to :user, polymorphic: true
validates :title, presence: true
validate :credit_and_debit_must_be_exists
validate :ledger_equalities
Entry::ENTRY_TYPES.each do |type|
define_method "#{type}_filter" do |*args|
args[0].select { |entry| entry.entry_type == type }
end
end
Entry::SHEET_NAMES.each do |name|
define_method "#{name}_filter" do |*args|
args[0].select { |entry| entry.sheet_name == name }
end
end
private
def credit_and_debit_must_be_exists
if credit_filter(entries).count == 0 || debit_filter(entries).count == 0
errors.add(:base, "credit and debut must be exists")
end
end
def ledger_equalities
assets = ledger_left(assets_filter(entries))
expenses = ledger_left(expenses_filter(entries))
liabilities = ledger_right(liabilities_filter(entries))
equity = ledger_right(equity_filter(entries))
revenue = ledger_right(revenue_filter(entries))
if assets + expenses != liabilities + equity + revenue
errors.add(:base, "credits amount must equal to debits amount")
end
end
def ledger_left(collection)
debits = debit_filter(collection).inject(0) { |sum, entry| sum + entry.amount }
credits = credit_filter(collection).inject(0) { |sum, entry| sum + entry.amount }
debits - credits
end
def ledger_right(collection)
-1 * ledger_left(collection)
end
end
end
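# Worked example of the ledger_equalities check above (illustrative figures):
# a sheet that debits 100 to an "assets" account and credits 100 to a "revenue"
# account gives ledger_left(assets) = 100 - 0 = 100 and
# ledger_right(revenue) = -(0 - 100) = 100, so assets + expenses (100) equals
# liabilities + equity + revenue (100) and the sheet is valid. If the credit were
# only 80, the sides would be 100 vs 80 and a validation error would be added.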
|
Sen/ledger
|
lib/ookkee.rb
|
<gh_stars>0
require "ookkee/railtie"
require 'ookkee/model/entry'
require 'ookkee/model/sheet'
require 'ookkee/model/account'
require 'ookkee/builder'
require 'ookkee/builder_proxy'
require 'ookkee/factory'
# require 'ookkee/stack'
require 'ookkee/repos/entry_repo'
require 'ookkee/usecase'
module Ookkee
# Your code goes here...
end
|
Sen/ledger
|
lib/ookkee/usecase.rb
|
module Ookkee
class Usecase
class << self
def account_balance_with_user(account, user)
repo.calculate_account_balance(account, user)
end
def account_entries(account, user)
repo.account_entries_with_user(account, user)
end
def user_sheets(user)
repo.sheets_with_user(user)
end
private
def repo
@repo ||= EntryRepo.new
end
end
end
end
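# Minimal usage sketch (assumes an existing Ookkee::Account named "cash" and a
# user record; the variable names are illustrative):
#
#   cash = Ookkee::Account.find_by(name: "cash", sheet_name: "assets")
#   Ookkee::Usecase.account_balance_with_user(cash, user)  # debits minus credits
#   Ookkee::Usecase.account_entries(cash, user)            # entries with running balance_cache
#   Ookkee::Usecase.user_sheets(user)                      # sheets, newest first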
|
Sen/ledger
|
test/dummy/db/migrate/20191028101521_create_ookkee_sheets.rb
|
class CreateOokkeeSheets < ActiveRecord::Migration[6.0]
def change
create_table :ookkee_sheets do |t|
t.string :title
t.string :transaction_number
t.string :user_type
t.integer :user_id
t.timestamps
end
add_index :ookkee_sheets, :transaction_number
add_index :ookkee_sheets, :user_type
add_index :ookkee_sheets, :user_id
end
end
|
Sen/ledger
|
lib/ookkee/builder_proxy.rb
|
module Ookkee
class BuilderProxy
def initialize(options)
@builder = options[:builder]
end
def credit(account)
factory = Factory.new
yield factory
factory.entry_type 'credit'
factory.account account
factory.sheet_name account.sheet_name
@builder.registry << factory
end
def debit(account)
factory = Factory.new
yield factory
factory.entry_type 'debit'
factory.account account
factory.sheet_name account.sheet_name
@builder.registry << factory
end
end
end
|
Sen/ledger
|
lib/ookkee/equity.rb
|
module Ookkee
  class Equity < Sheet
end
end
|
Sen/ledger
|
lib/ookkee/model/entry.rb
|
module Ookkee
class Entry < ActiveRecord::Base
self.table_name = "ookkee_entries"
ENTRY_TYPES = %w[credit debit]
SHEET_NAMES = %w[assets liabilities equity revenue expenses]
belongs_to :sheet, class_name: 'Ookkee::Sheet', foreign_key: :sheet_id
belongs_to :account, class_name: 'Ookkee::Account', foreign_key: :account_id
belongs_to :trackable, polymorphic: true
validates :entry_type, presence: true, \
inclusion: {
in: ENTRY_TYPES,
message: "%{value} is not a valid entry type"
}
validates :sheet_name, presence: true, \
inclusion: {
in: SHEET_NAMES,
message: "%{value} is not a valid account name"
}
end
end
|
Sen/ledger
|
test/ookkee/builder_test.rb
|
<gh_stars>0
require 'test_helper'
class Ookkee::BuilderTest < ActiveSupport::TestCase
setup do
@account = Ookkee::Account.new
@account.name = 'cash'
@account.sheet_name = 'assets'
@account.save!
@user = User.create!(name: 'tester')
@order = Order.create!(title: 'test order')
end
test "klass" do
assert_equal Ookkee::Builder.new.class.name, 'Ookkee::Builder'
end
test 'build' do
balance_account = Ookkee::Account.new
balance_account.name = 'balance'
balance_account.sheet_name = 'assets'
balance_account.save!
bank_payable_account = Ookkee::Account.new
bank_payable_account.name = 'bank payable'
bank_payable_account.sheet_name = 'liabilities'
bank_payable_account.save!
sheet = Ookkee::Builder.build do |b|
b.title 'abcd'
b.transaction_number 'dfdfdf'
b.user @user
b.credit bank_payable_account do |cr|
cr.amount 200
cr.trackable @order
end
b.debit balance_account do |dr|
dr.amount 200
dr.trackable @order
end
end
sheet.save!
assert_equal sheet.title, 'abcd'
assert_equal sheet.transaction_number, 'dfdfdf'
assert_equal sheet.user_id, @user.id
assert_equal sheet.user_type, 'User'
credit = sheet.entries.where(entry_type: 'credit').first
assert_equal credit.amount, 200
assert_equal credit.trackable_id, @order.id
assert_equal credit.trackable_type, 'Order'
assert_equal credit.account.name, 'bank payable'
assert_equal credit.account.sheet_name, 'liabilities'
debit = sheet.entries.where(entry_type: 'debit').first
assert_equal debit.amount, 200
assert_equal debit.trackable_id, @order.id
assert_equal debit.trackable_type, 'Order'
assert_equal debit.account.name, 'balance'
assert_equal debit.account.sheet_name, 'assets'
end
end
|
Sen/ledger
|
lib/ookkee/model/account.rb
|
<filename>lib/ookkee/model/account.rb
module Ookkee
class Account < ActiveRecord::Base
self.table_name = "ookkee_accounts"
validates :name, presence: true
validates :sheet_name, presence: true, \
inclusion: {
in: Entry::SHEET_NAMES,
message: "%{value} is not a valid account name"
}
end
end
|
Sen/ledger
|
test/ookkee/sheet_test.rb
|
<gh_stars>0
require 'test_helper'
class Ookkee::SheetTest < ActiveSupport::TestCase
test "truth" do
assert_equal Ookkee::Sheet.new.class.name, 'Ookkee::Sheet'
end
end
|
Sen/ledger
|
lib/ookkee/expense.rb
|
module Ookkee
class Expense < Sheet
end
end
|
Sen/ledger
|
test/ookkee/account_test.rb
|
<filename>test/ookkee/account_test.rb
require 'test_helper'
class Ookkee::AccountTest < ActiveSupport::TestCase
test "truth" do
assert_equal Ookkee::Account.new.class.name, 'Ookkee::Account'
end
test "create" do
count = Ookkee::Account.count
sheet = Ookkee::Account.new
sheet.name = 'cash'
sheet.sheet_name = 'assets'
sheet.save!
assert_equal Ookkee::Account.count, count + 1
end
end
|
Sen/ledger
|
lib/ookkee/builder.rb
|
module Ookkee
class Builder
attr_accessor :registry
def initialize
@registry = []
@attributes = {}
end
def self.build
instance = self.new
yield instance
instance.build_activerecord_objects
end
def title(value)
@attributes[:title] = value
end
def transaction_number(value)
@attributes[:transaction_number] = value
end
def user(value)
@attributes[:user] = value
end
def credit(account, &block)
builder_proxy = BuilderProxy.new({builder: self})
builder_proxy.credit(account, &block)
end
def debit(account, &block)
builder_proxy = BuilderProxy.new({builder: self})
builder_proxy.debit(account, &block)
end
def build_activerecord_objects
sheet = Sheet.new(
title: @attributes[:title],
transaction_number: @attributes[:transaction_number],
user: @attributes[:user]
)
registry.each do |element|
entry_repo.build_from_factory(sheet, element)
end
sheet
end
private
def entry_repo
@entry_repo ||= EntryRepo.new
end
end
end
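# Usage sketch mirroring test/ookkee/builder_test.rb (the account, user and order
# objects are assumed to already exist; the string values are illustrative):
#
#   sheet = Ookkee::Builder.build do |b|
#     b.title 'test order'
#     b.transaction_number 'TXN-1'
#     b.user user
#     b.debit cash_account do |dr|
#       dr.amount 100
#       dr.trackable order
#     end
#     b.credit revenue_account do |cr|
#       cr.amount 100
#       cr.trackable order
#     end
#   end
#   sheet.save!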
|
Sen/ledger
|
test/dummy/db/migrate/20191031040323_create_ookkee_accounts.rb
|
class CreateOokkeeAccounts < ActiveRecord::Migration[6.0]
def change
create_table :ookkee_accounts do |t|
t.string :name
t.string :sheet_name # assets, liability, etc
t.timestamps
end
add_index :ookkee_accounts, :sheet_name
add_index :ookkee_accounts, [:name, :sheet_name], unique: true
end
end
|
Sen/ledger
|
test/dummy/db/schema.rb
|
<reponame>Sen/ledger
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# This file is the source Rails uses to define your schema when running `rails
# db:schema:load`. When creating a new database, `rails db:schema:load` tends to
# be faster and is potentially less error prone than running all of your
# migrations from scratch. Old migrations may fail to apply correctly if those
# migrations use external dependencies or application code.
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 2019_11_11_041450) do
create_table "ookkee_accounts", force: :cascade do |t|
t.string "name"
t.string "sheet_name"
t.index ["name", "sheet_name"], name: "index_ookkee_accounts_on_name_and_sheet_name", unique: true
t.index ["sheet_name"], name: "index_ookkee_accounts_on_sheet_name"
end
create_table "ookkee_entries", force: :cascade do |t|
t.integer "sheet_id"
t.string "sheet_name"
t.integer "account_id"
t.string "entry_type"
t.decimal "amount", precision: 30, scale: 10
t.string "trackable_type"
t.integer "trackable_id"
t.index ["account_id"], name: "index_ookkee_entries_on_account_id"
t.index ["entry_type"], name: "index_ookkee_entries_on_entry_type"
t.index ["sheet_id"], name: "index_ookkee_entries_on_sheet_id"
t.index ["sheet_name"], name: "index_ookkee_entries_on_sheet_name"
t.index ["trackable_id"], name: "index_ookkee_entries_on_trackable_id"
t.index ["trackable_type"], name: "index_ookkee_entries_on_trackable_type"
end
create_table "ookkee_sheets", force: :cascade do |t|
t.string "title"
t.string "transaction_number"
t.string "user_type"
t.integer "user_id"
t.index ["transaction_number"], name: "index_ookkee_sheets_on_transaction_number"
t.index ["user_id"], name: "index_ookkee_sheets_on_user_id"
t.index ["user_type"], name: "index_ookkee_sheets_on_user_type"
end
create_table "orders", force: :cascade do |t|
t.string "title"
t.datetime "created_at", precision: 6, null: false
t.datetime "updated_at", precision: 6, null: false
end
create_table "users", force: :cascade do |t|
t.string "name"
t.datetime "created_at", precision: 6, null: false
t.datetime "updated_at", precision: 6, null: false
end
end
|
Sen/ledger
|
lib/ookkee/asset.rb
|
<filename>lib/ookkee/asset.rb
module Ookkee
  class Asset < Sheet
end
end
|
appcues/data-uploader
|
lib/appcues_data_uploader.rb
|
#!/usr/bin/env ruby
#
# Appcues Data Uploader
#
# Uploads CSV-formatted user profile data to the Appcues API.
# Run `appcues-data-uploader -h` for more information.
#
# Homepage: https://github.com/appcues/data-uploader
#
# Copyright 2018, Appcues, Inc.
#
# Released under the MIT License, whose text is available at:
# https://opensource.org/licenses/MIT
require 'net/http'
require 'csv'
require 'json'
require 'optparse'
require 'appcues_data_uploader/version'
class AppcuesDataUploader
UserActivity = Struct.new(:account_id, :user_id, :profile_update, :events)
UploadOpts = Struct.new(:account_id, :csv_filenames, :quiet, :dry_run)
attr_reader :opts
class << self
## Handles command-line invocation.
def main(argv)
options = UploadOpts.new
option_parser = OptionParser.new do |opts|
opts.banner = <<-EOT
Usage: appcues-data-uploader [options] -a account_id [filename ...]
Uploads profile data from one or more CSVs to the Appcues API.
If no filename or a filename of '-' is given, STDIN is used.
Each CSV should start with a row of header names, including one named something
like "user ID". Other headers will be used verbatim as attribute names.
Attribute values can be boolean ('true' or 'false'), 'null', numeric, or
string-typed.
For example, giving `appcues-data-uploader -a 999` the following CSV data:
user_id,first_name,has_posse,height_in_inches
123,Pete,false,68.5
456,André,true,88
Will result in two profile updates being sent to the API:
{"account_id": "999", "user_id": "123", "profile_update": {"first_name": "Pete", "has_posse": false, "height_in_inches": 68.5}}
{"account_id": "999", "user_id": "456", "profile_update": {"first_name": "André", "has_posse": true, "height_in_inches": 88}}
See https://github.com/appcues/data-uploader for more information.
EOT
opts.separator ""
opts.separator "Options:"
opts.on('-a', '--account-id ACCOUNT_ID', 'Set Appcues account ID') do |account_id|
options.account_id = account_id
end
opts.on('-d', '--dry-run', 'Write requests to STDOUT instead of sending') do
options.dry_run = true
end
opts.on('-q', '--quiet', "Don't write debugging info to STDERR") do
options.quiet = true
end
opts.on('-v', '--version', "Print version information and exit") do
puts "appcues-data-uploader version #{VERSION} (#{VERSION_DATE})"
puts "See https://github.com/appcues/data-uploader for more information."
exit
end
opts.on('-h', '--help', 'Print this message and exit') do
puts opts
exit
end
end
csv_filenames = option_parser.parse(argv)
csv_filenames = ["-"] if csv_filenames == []
options.csv_filenames = csv_filenames
if !options.account_id
STDERR.puts "You must specify an account ID with the -a option."
STDERR.puts "Run `appcues-data-uploader --help` for more information."
exit 1
end
begin
new(options).perform_uploads()
rescue Exception => e
STDERR.puts "#{e.class}: #{e.message}"
exit 255
end
end
end
def initialize(init_opts)
@opts = init_opts.is_a?(UploadOpts) ? init_opts : UploadOpts.new(
init_opts[:account_id] || init_opts["account_id"],
init_opts[:csv_filenames] || init_opts["csv_filenames"],
init_opts[:quiet] || init_opts["quiet"],
init_opts[:dry_run] || init_opts["dry_run"],
)
if !opts.account_id
raise ArgumentError, "account_id is required but missing"
end
if !opts.csv_filenames
raise ArgumentError, "csv_filenames must be a list of filenames"
end
end
def perform_uploads
opts.csv_filenames.each do |filename|
upload_profile_csv(filename)
end
end
private
## Uploads the profile data in the given CSV to the Appcues API.
##
## The CSV should begin with a row of headers, and one of these headers
## must be named something like `user_id` or `userId`.
## Other header names are treated as attribute names.
##
## Numeric, boolean, and null values in this CSV will be converted to their
## appropriate data type.
def upload_profile_csv(csv_filename)
display_filename = csv_filename == '-' ? "STDIN" : "'#{csv_filename}'"
input_fh = csv_filename == '-' ? STDIN : File.open(csv_filename, 'r')
debug "Uploading profiles from #{display_filename} for account #{opts.account_id}..."
user_id_column = nil
user_activities = []
CSV.new(input_fh, headers: true).each do |row|
row_hash = row.to_hash
if !user_id_column
user_id_column = get_user_id_column(row_hash)
end
user_id = row_hash.delete(user_id_column)
profile_update = cast_data_types(row_hash)
user_activities << UserActivity.new(opts.account_id, user_id, profile_update, [])
end
input_fh.close
if opts.dry_run
user_activities.each do |ua|
puts JSON.dump(ua.to_h)
end
else
make_activity_requests(user_activities)
end
debug "Done processing #{display_filename}."
end
## Applies the given UserActivity updates to the Appcues API.
## Retries failed requests, indefinitely.
def make_activity_requests(user_activities)
failed_uas = []
user_activities.each do |ua|
resp = make_activity_request(ua)
if resp.code.to_i / 100 == 2
debug "Request for user_id #{ua.user_id} was successful"
else
debug "Request for user_id #{ua.user_id} failed with code #{resp.code} -- retrying later"
failed_uas << ua
end
end
if failed_uas.count > 0
debug "Retrying #{failed_uas.count} requests."
make_activity_requests(failed_uas)
end
end
## Returns a new profile_update hash where boolean and numeric values
## are cast out of String format. Leaves other values alone.
def cast_data_types(profile_update)
output = {}
profile_update.each do |key, value|
output[key] =
case value
when 'null'
nil
when 'true'
true
when 'false'
false
when /^ -? \d* \. \d+ (?: [eE] [+-]? \d+)? $/x # float
value.to_f
when /^ -? \d+ $/x # integer
value.to_i
else
value
end
end
output
end
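  ## Illustrative example (not part of the original source): given the row hash
  ##   {"first_name" => "Pete", "has_posse" => "false", "height_in_inches" => "68.5", "logins" => "3", "nickname" => "null"}
  ## cast_data_types returns
  ##   {"first_name" => "Pete", "has_posse" => false, "height_in_inches" => 68.5, "logins" => 3, "nickname" => nil}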
## Detects and returns the name used in the CSV header to identify user ID.
## Raises an exception if we can't find it.
def get_user_id_column(row_hash)
row_hash.keys.each do |key|
canonical_key = key.gsub(/[^a-zA-Z]/, '').downcase
return key if canonical_key == 'userid'
end
raise "Couldn't detect user ID column from CSV input. Ensure that the CSV data starts with headers, and one is named like 'user_id'."
end
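  ## Illustrative note: headers such as "user_id", "userId", "User ID" or "user-id"
  ## all canonicalize to "userid" once non-letters are stripped and the case is
  ## lowered, so any of them is accepted as the user ID column.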
## Prints a message to STDERR unless we're in quiet mode.
def debug(msg)
STDERR.puts(msg) unless self.opts.quiet
end
## Returns the base URL for the Appcues API.
def appcues_api_url
ENV['APPCUES_API_URL'] || "https://api.appcues.com"
end
## Returns a URL for the given Appcues API UserActivity endpoint.
def activity_url(account_id, user_id)
"#{appcues_api_url}/v1/accounts/#{account_id}/users/#{user_id}/activity"
end
## Makes a POST request to the Appcues API UserActivity endpoint,
## returning the Net::HTTPResponse object.
def make_activity_request(user_activity)
url = activity_url(user_activity.account_id, user_activity.user_id)
post_request(url, {
"profile_update" => user_activity.profile_update,
"events" => user_activity.events
})
end
## Makes a POST request to the given URL,
## returning the Net::HTTPResponse object.
def post_request(url, data, headers = {})
uri = URI(url)
use_ssl = uri.scheme == 'https'
Net::HTTP.start(uri.host, uri.port, use_ssl: use_ssl) do |http|
req_headers = headers.merge({'Content-type' => 'application/json'})
req = Net::HTTP::Post.new(uri.request_uri, req_headers)
req.body = JSON.dump(data)
http.request(req)
end
end
end
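# Minimal programmatic usage sketch (illustrative only; the account id and CSV
# filename below are made up):
#
#   uploader = AppcuesDataUploader.new(account_id: "999", csv_filenames: ["profiles.csv"], dry_run: true)
#   uploader.perform_uploads
#
# The equivalent command-line invocation would be:
#   appcues-data-uploader -a 999 --dry-run profiles.csv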
|
appcues/data-uploader
|
appcues_data_uploader.gemspec
|
require "./lib/appcues_data_uploader/version"
Gem::Specification.new do |s|
s.name = 'appcues_data_uploader'
s.version = AppcuesDataUploader::VERSION
s.date = AppcuesDataUploader::VERSION_DATE
s.summary = 'Upload CSVs of user profile data to the Appcues API'
s.homepage = 'https://github.com/appcues/data-uploader'
s.authors = ['<NAME>']
s.email = '<EMAIL>'
s.license = 'MIT'
s.has_rdoc = false
s.files = Dir['lib/**/*'] + Dir['bin/**/*']
s.require_path = 'lib'
s.bindir = 'bin'
s.executables << 'appcues-data-uploader'
s.required_ruby_version = '>= 2.0'
end
|
appcues/data-uploader
|
lib/appcues_data_uploader/version.rb
|
class AppcuesDataUploader
VERSION = "0.2.0"
VERSION_DATE = "2018-11-08"
end
|