repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
timoschick/form-context-model | fcm/test/test_context_builder.py | import unittest
import numpy as np
import numpy.testing as npt
from context_builder import ContextBuilder, _add_distances
import my_log
logger = my_log.get_logger('root')
class TestContextBuilder(unittest.TestCase):
    """Unit tests for ContextBuilder and the _add_distances helper."""

    def test_context_builder_empty_context(self):
        """Contexts consisting only of unknown words must still batch without errors."""
        embeddings = {
            'this': np.array([1, 2, 3]),
            'is': np.array([4, 5, 6]),
            'word': np.array([7, 8, 9])
        }
        context_builder = ContextBuilder(embeddings, vector_size=3, max_distance=2)
        context_features = context_builder.get_context_features('word2', ['context for word2 with only unk words'])
        batched_context_features = context_builder.batchify([context_features])
        # only shapes are logged; the test passes as long as batchify does not raise
        logger.info(batched_context_features.words_per_context.shape)
        logger.info(batched_context_features.distances.shape)
        logger.info(batched_context_features.vectors.shape)
        logger.info(batched_context_features.number_of_contexts.shape)

    def test_context_builder(self):
        """Context extraction and batching produce the expected tokens, distances and padded arrays."""
        embeddings = {
            'this': np.array([1, 2, 3]),
            'is': np.array([4, 5, 6]),
            'word': np.array([7, 8, 9])
        }
        e_this = [1, 2, 3]
        e_is = [4, 5, 6]
        e_word = [7, 8, 9]
        e_null = [0, 0, 0]
        context_builder = ContextBuilder(embeddings, vector_size=3, max_distance=2)
        context_features = context_builder.get_context_features('word',
                                                                ['this is the word .',
                                                                 'the word is this is is is this'])
        # bug fix: assertEquals is a deprecated alias that was removed in Python 3.12
        self.assertEqual(context_features.contexts, [['this', 'is'], ['is', 'this', 'is', 'is', 'is', 'this']])
        self.assertEqual(context_features.distances, [[-2, -2], [1, 2, 2, 2, 2, 2]])
        context_features = context_builder.get_context_features('word',
                                                                ['word this is word the word word word is word'])
        self.assertEqual(context_features.contexts, [['this', 'is', 'is']])
        context_builder = ContextBuilder(embeddings, vector_size=3, max_distance=5)
        context_a_features = context_builder.get_context_features('word',
                                                                  ['this is the word',
                                                                   'the word is this is is is this'])
        self.assertEqual(context_a_features.contexts, [['this', 'is'], ['is', 'this', 'is', 'is', 'is', 'this']])
        self.assertEqual(context_a_features.distances, [[-3, -2], [1, 2, 3, 4, 5, 5]])
        context_b_features = context_builder.get_context_features('this', ['this is', 'is this', 'this word'])
        batch_features = context_builder.batchify([context_a_features, context_b_features])
        npt.assert_equal(batch_features.number_of_contexts, np.array([2, 3]))
        npt.assert_equal(batch_features.words_per_context, np.array([[2, 6, 0], [1, 1, 1]]))
        npt.assert_equal(batch_features.distances, np.array(
            [
                [[-3, -2, 0, 0, 0, 0], [1, 2, 3, 4, 5, 5], [0, 0, 0, 0, 0, 0]],
                [[1, 0, 0, 0, 0, 0], [-1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]]
            ]
        ))
        npt.assert_equal(batch_features.vectors, np.array(
            [
                [[e_this, e_is, e_null, e_null, e_null, e_null],
                 [e_is, e_this, e_is, e_is, e_is, e_this],
                 [e_null, e_null, e_null, e_null, e_null, e_null]],
                [[e_is, e_null, e_null, e_null, e_null, e_null],
                 [e_is, e_null, e_null, e_null, e_null, e_null],
                 [e_word, e_null, e_null, e_null, e_null, e_null]]
            ]
        ))

    def test_add_distances(self):
        """_add_distances assigns nearest-hit distances, clipped to +/- max_distance."""
        self.assertEqual(_add_distances(['a', 'b', 'c', 'd'], [1], 10),
                         [(-1, 'a'), (0, 'b'), (1, 'c'), (2, 'd')])
        self.assertEqual(_add_distances(['a', 'b', 'b', 'c', 'c', 'c', 'd'], [1, 5], 10),
                         [(-1, 'a'), (0, 'b'), (1, 'b'), (2, 'c'), (-1, 'c'), (0, 'c'), (1, 'd')])
        self.assertEqual(_add_distances(['a', 'b', 'b', 'c', 'c', 'c', 'd'], [1], 10),
                         [(-1, 'a'), (0, 'b'), (1, 'b'), (2, 'c'), (3, 'c'), (4, 'c'), (5, 'd')])
        self.assertEqual(_add_distances(['a', 'b', 'b', 'c', 'c', 'c', 'd'], [1], 3),
                         [(-1, 'a'), (0, 'b'), (1, 'b'), (2, 'c'), (3, 'c'), (3, 'c'), (3, 'd')])
        self.assertEqual(_add_distances(['a', 'b', 'b', 'c', 'c', 'c', 'd'], [6], 3),
                         [(-3, 'a'), (-3, 'b'), (-3, 'b'), (-3, 'c'), (-2, 'c'), (-1, 'c'), (0, 'd')])
|
timoschick/form-context-model | fcm/train.py | <filename>fcm/train.py
import argparse
import io
import os
import datetime
import socket
import form_context_model as fcm
import my_log
from batch_builder import InputProcessor
logger = my_log.get_logger('root')
def main():
    """Command-line entry point: parse arguments, build the input pipeline and train a form-context model.

    Side effects: appends a run header to --log_file (or log/<model>.log) and writes
    the trained model to the path given by --model.
    """
    parser = argparse.ArgumentParser()
    # required parameters
    parser.add_argument('--model', '-m', type=str, required=True,
                        help="The name of the model")
    parser.add_argument('--train_dir', type=str, required=True,
                        help="The directory in which the buckets for training are stored")
    parser.add_argument('--vocab', type=str, required=True,
                        help="The file in which the vocabulary to be used for training is stored."
                             "Each line should be of the form <WORD> <COUNT>")
    parser.add_argument('--emb_file', type=str, required=True,
                        help="The file in which the embeddings for mimicking and context embeddings"
                             "are stored")
    parser.add_argument('--emb_dim', type=int, required=True,
                        help="The number of dimensions for the given embeddings")
    # general model parameters
    parser.add_argument('--batch_size', '-bs', type=int, default=64)
    parser.add_argument('--learning_rate', '-lr', type=float, default=0.01)
    parser.add_argument('--num_epochs', '-e', type=int, default=10)
    parser.add_argument('--combinator', choices=fcm.COMBINATORS, default=fcm.GATED)
    parser.add_argument('--sent_weights', choices=fcm.CONTEXT_WEIGHTS, default=fcm.CLUSTERING)
    parser.add_argument('--min_word_count', '-mwc', type=int, default=100)
    parser.add_argument('--remove_punctuation', action='store_true')
    # context model parameters
    parser.add_argument('--smin', type=int, default=20)
    parser.add_argument('--smax', type=int, default=20)
    parser.add_argument('--distance_embedding', '-de', action='store_true')
    # word model parameters
    parser.add_argument('--nmin', type=int, default=3)
    parser.add_argument('--nmax', type=int, default=5)
    parser.add_argument('--dropout', type=float, default=0)
    # training parameters
    parser.add_argument('--num_buckets', type=int, default=25)
    parser.add_argument('--emb_format', type=str, choices=['text', 'gensim'], default='text')
    parser.add_argument('--log_file', type=str, default=None)
    args = parser.parse_args()
    # NOTE(review): the concatenation assumes train_dir ends with a path separator — confirm with callers
    train_files = [args.train_dir + 'train.bucket' + str(i) + '.txt' for i in range(args.num_buckets)]
    # a form-only model needs no context sampling; a context-only model degrades the n-grams to length 1
    # NOTE(review): args.sample_context_words is not read anywhere in this file — presumably consumed
    # by the model; verify it is still needed
    if args.combinator == fcm.FORM_ONLY:
        args.sample_context_words = 1
    elif args.combinator == fcm.CONTEXT_ONLY:
        args.nmin = 1
        args.nmax = 1
    # line-buffered (buffering=1) so progress is visible while training is still running
    if args.log_file is not None:
        log_file = io.open(args.log_file, 'a', 1)
    else:
        model_basename = os.path.basename(args.model)
        log_file = io.open('log/' + model_basename + '.log', 'a', 1)
    now = datetime.datetime.now()
    # record the screen session (STY) and host so a run can be traced back to its machine
    train_server = str(os.environ.get('STY', 'main')) + '@' + str(socket.gethostname())
    log_file.write('-- Training on ' + train_server + ' at ' + now.isoformat() + ' with args = ' + str(args) + ' --\n')
    batch_builder = InputProcessor(
        word_embeddings_file=args.emb_file,
        word_embeddings_format=args.emb_format,
        train_files=train_files,
        vocab_file=args.vocab,
        vector_size=args.emb_dim,
        nmin=args.nmin,
        nmax=args.nmax,
        ngram_dropout=args.dropout,
        smin=args.smin,
        smax=args.smax,
        min_word_count=args.min_word_count,
        keep_punctuation=not args.remove_punctuation
    )
    model = fcm.FormContextModel(
        batch_builder=batch_builder,
        emb_dim=args.emb_dim,
        combinator=args.combinator,
        sent_weights=args.sent_weights,
        distance_embedding=args.distance_embedding,
        learning_rate=args.learning_rate
    )
    model.train(
        num_epochs=args.num_epochs,
        batch_size=args.batch_size,
        out_path=args.model
    )
    # NOTE(review): log_file is not closed if train() raises — consider try/finally
    log_file.write('\n')
    log_file.close()
    logger.info('Training is complete')


if __name__ == '__main__':
    main()
|
timoschick/form-context-model | fcm/batch_builder.py | from abc import ABC, abstractmethod
from typing import Dict, List
import numpy as np
import random
import re
import jsonpickle
import my_log
import utils
from context_builder import ContextBuilder, BatchedContextFeatures
from context_builder import ContextFeatures
from ngram_builder import NGramBuilder, BatchedNGramFeatures
from ngram_builder import NGramFeatures
logger = my_log.get_logger('root')
class ProcessedInput:
    """A single training example: word-form features, context features and the target embedding."""

    def __init__(self, ngram_features: NGramFeatures, context_features: ContextFeatures, target: np.ndarray):
        # all three components are stored as-is; batching happens later in the builders
        self.ngram_features = ngram_features
        self.context_features = context_features
        self.target = target
class EndOfDatasetException(Exception):
    """Raised when a full batch cannot be produced because all training files are exhausted."""
    pass
class AbstractInputProcessor(ABC):
    """Interface for components that turn raw training data into model-ready batches."""

    @abstractmethod
    def reset(self) -> None:
        """Restart iteration over the underlying dataset."""
        pass

    @abstractmethod
    def generate_batch_from_buffer(self, batch_size: int) -> List[ProcessedInput]:
        """Produce the next training batch of ``batch_size`` examples from buffered data."""
        pass

    @abstractmethod
    def generate_batch_from_input(self, word: str, contexts: List[str]) -> List[ProcessedInput]:
        """Build a single batch for inferring an embedding for ``word`` from ``contexts``."""
        pass
class InputProcessor(AbstractInputProcessor):
    """Turns (word, tab-separated contexts) training files into batched model inputs.

    A buffer of ``ProcessedInput`` examples is filled lazily from the shuffled
    training files and consumed batch by batch.
    """

    def __init__(self, word_embeddings_file: str, word_embeddings_format: str, train_files: List[str], vocab_file: str,
                 vector_size: int, ngram_threshold: int = 4, nmin: int = 3, nmax: int = 5, ngram_dropout: float = 0,
                 max_distance: int = 10, min_word_count: int = 100, max_copies: int = 5, smin: int = 20,
                 smax: int = 20, keep_punctuation=False):
        random.seed(1234)  # fixed seed for reproducible shuffling and context sampling
        self.word_embeddings_file = word_embeddings_file
        self.word_embeddings_format = word_embeddings_format
        self.train_files = train_files
        self.vocab_file = vocab_file
        self.vector_size = vector_size
        self.ngram_threshold = ngram_threshold
        self.nmin = nmin
        self.nmax = nmax
        self.ngram_dropout = ngram_dropout
        self.max_distance = max_distance
        self.min_word_count = min_word_count
        self.max_copies = max_copies
        self.smin = smin
        self.smax = smax
        self.keep_punctuation = keep_punctuation
        # derived state, rebuilt by _setup() (also after unpickling)
        self.word_counts = None  # type: Dict[str,int]
        self.word_embeddings = None  # type: Dict[str, np.ndarray]
        self.ngram_builder = None  # type: NGramBuilder
        self.context_builder = None  # type: ContextBuilder
        self._setup()
        self.buffer = None  # type: List[ProcessedInput]
        self.train_file_idx = 0
        self.reset()

    def _setup(self):
        """(Re)load vocabulary counts, embeddings and the n-gram/context builders."""
        self.word_counts = {}
        with open(self.vocab_file, 'r', encoding='utf8') as file:
            for line in file:
                word, count = line.split()
                self.word_counts[word] = int(count)
        self.word_embeddings = utils.load_embeddings(self.word_embeddings_file, self.word_embeddings_format,
                                                     self.keep_punctuation)
        self.ngram_builder = NGramBuilder(self.vocab_file, ngram_threshold=self.ngram_threshold,
                                          nmin=self.nmin, nmax=self.nmax)
        self.context_builder = ContextBuilder(self.word_embeddings, vector_size=self.vector_size,
                                              max_distance=self.max_distance)

    def reset(self) -> None:
        """Restart iteration: reshuffle the training files and clear the buffer."""
        random.shuffle(self.train_files)
        self.buffer = []
        self.train_file_idx = 0

    def generate_batch_from_buffer(self, batch_size: int):
        """Return a ``(model_input, targets)`` pair built from the next ``batch_size`` buffered examples.

        :raises EndOfDatasetException: if the dataset is exhausted before a full batch is available
        """
        # bug fix: the original looped `while self._fill_buffer(): continue`, but
        # _fill_buffer never returned True, so at most one file was processed per
        # call and an EndOfDatasetException could be raised with unread files left.
        while len(self.buffer) < batch_size and self._fill_buffer():
            pass
        if len(self.buffer) < batch_size:
            raise EndOfDatasetException()
        batch = self.buffer[:batch_size]
        batched_ngram_features = self.ngram_builder.batchify([x.ngram_features for x in batch])
        batched_context_features = self.context_builder.batchify([x.context_features for x in batch])
        batched_targets = [x.target for x in batch]
        del self.buffer[:batch_size]
        return _convert_to_tf_input(batched_ngram_features, batched_context_features), batched_targets

    def generate_batch_from_input(self, word: str, contexts: List[str]):
        """Build a single inference batch (with an all-zero dummy target) for ``word`` and ``contexts``."""
        ngram_features = self.ngram_builder.get_ngram_features(word)
        context_features = self.context_builder.get_context_features(word, contexts)
        batched_ngram_features = self.ngram_builder.batchify([ngram_features])
        batched_context_features = self.context_builder.batchify([context_features])
        target = np.zeros([1, self.vector_size])
        return _convert_to_tf_input(batched_ngram_features, batched_context_features), target

    def _get_occurrences(self, word):
        """Number of training copies for ``word``: scales with corpus frequency, capped at ``max_copies``."""
        if word not in self.word_counts or word not in self.word_embeddings:
            return 0
        word_count = self.word_counts[word]
        return int(max(1, min(word_count / self.min_word_count, self.max_copies)))

    def _fill_buffer(self) -> bool:
        """Process the next training file into the buffer; return False once all files are consumed."""
        if self.train_file_idx == len(self.train_files):
            logger.info('Reached the end of the dataset')
            return False
        logger.info('Processing training file {} of {}'.format(self.train_file_idx + 1, len(self.train_files)))
        train_file = self.train_files[self.train_file_idx]
        self.train_file_idx += 1
        self._fill_buffer_from_file(train_file)
        random.shuffle(self.buffer)
        logger.info('Done processing training file, batch size is {}'.format(len(self.buffer)))
        # bug fix: the declared return type is bool, but the original fell off the
        # end here and returned None, which callers treated as "end of dataset"
        return True

    def _fill_buffer_from_file(self, file):
        # each line of a training file holds one word followed by its tab-separated contexts
        with open(file, 'r', encoding='utf8') as f:
            for line in f:
                self._fill_buffer_from_line(line)

    def _fill_buffer_from_line(self, line):
        """Sample up to ``max_copies`` (word, contexts) examples from a single training line."""
        comps = re.split(r'\t', line)
        word = comps[0]
        all_contexts = comps[1:]
        random.shuffle(all_contexts)
        if len(comps) == 2 and comps[1] == '\n':
            return  # the word has no contexts at all
        occurrences = self._get_occurrences(word)
        for _ in range(occurrences):
            number_of_contexts = random.randint(self.smin, self.smax)
            contexts = all_contexts[:number_of_contexts]
            self._fill_buffer_from_contexts(word, contexts)
            del all_contexts[:number_of_contexts]

    def _fill_buffer_from_contexts(self, word: str, contexts: List[str]):
        """Append one ProcessedInput for ``word``, unless no contexts are left."""
        # check first so no features are computed for examples that are dropped anyway
        # (the original built the features and only then returned)
        if not contexts:
            return
        ngram_features = self.ngram_builder.get_ngram_features(word, dropout_probability=self.ngram_dropout)
        context_features = self.context_builder.get_context_features(word, contexts)
        target = self.word_embeddings[word]
        self.buffer.append(ProcessedInput(ngram_features, context_features, target))

    def __getstate__(self):
        """Exclude large, derived state from serialization; _setup() rebuilds it on load."""
        odict = self.__dict__.copy()
        del odict['word_counts']
        del odict['word_embeddings']
        del odict['ngram_builder']
        del odict['context_builder']
        del odict['buffer']
        return odict

    def __setstate__(self, state):
        # parameter renamed from 'dict' to avoid shadowing the builtin
        self.__dict__.update(state)
        self._setup()

    def save(self, path: str) -> None:
        """Serialize this processor to ``path`` using jsonpickle."""
        with open(path, 'w', encoding='utf8') as f:
            f.write(jsonpickle.encode(self))

    @classmethod
    def load(cls, path: str) -> 'InputProcessor':
        """Deserialize a processor from ``path`` and rebuild its derived state."""
        with open(path, 'r', encoding='utf8') as f:
            batch_builder = jsonpickle.decode(f.read())
            batch_builder._setup()
        return batch_builder
def _convert_to_tf_input(ngram_features: BatchedNGramFeatures, context_features: BatchedContextFeatures) \
        -> Dict[str, np.ndarray]:
    """Flatten batched n-gram and context features into the feed dict expected by the model."""
    feed = {
        'ngrams': ngram_features.ngram_ids,
        'ngram_lengths': ngram_features.ngrams_length,
        'context_vectors': context_features.vectors,
        'context_lengths': context_features.number_of_contexts,
        'words_per_context': context_features.words_per_context,
        'distances': context_features.distances,
    }
    return feed
|
timoschick/form-context-model | fcm/test/test_form_context_model.py | <reponame>timoschick/form-context-model
import os
import re
import unittest
import numpy.testing as npt
import my_log
from batch_builder import InputProcessor
from form_context_model import FormContextModel
logger = my_log.get_logger('root')
class TestFormContextModel(unittest.TestCase):
    """Integration tests for FormContextModel: save/load round-trips and batch inference."""

    def setUp(self):
        vocab_file = os.path.join(os.path.dirname(__file__), 'data', 'vocab_with_count.txt')
        embeddings_file = os.path.join(os.path.dirname(__file__), 'data', 'embeddings.txt')
        config = {
            'learning_rate': 0.01,
            'emb_dim': 3,
            'combinator': 'gated',
            'sent_weights': 'clustering'
        }
        batch_builder = InputProcessor(train_files=[], vector_size=3, vocab_file=vocab_file,
                                       word_embeddings_file=embeddings_file, word_embeddings_format='text')
        self.form_context_model = FormContextModel(batch_builder=batch_builder, **config)

    def test_save_and_load(self):
        """Saving and re-loading the model must not change inferred vectors."""
        vec1 = self.form_context_model.infer_vector('word', ['lord word the sword ...'])
        # bug fix: the original passed the string literal '__file' instead of __file__,
        # which made the model path relative to the current working directory
        model_path = os.path.join(os.path.dirname(__file__), 'data', 'out', 'fcm.model')
        self.form_context_model.save(model_path)
        form_context_model_loaded = FormContextModel.load(model_path)
        vec2 = form_context_model_loaded.infer_vector('word', ['lord word the sword'])
        npt.assert_array_almost_equal(vec1, vec2)

    def test_infer_vectors(self):
        """Inference must run without errors for every (word, contexts) line in the test data."""
        with open(os.path.join(os.path.dirname(__file__), 'data', 'word_contexts.txt'), 'r',
                  encoding='utf-8') as input_file:
            for line in input_file.read().splitlines():
                comps = re.split(r'\t', line)
                word = comps[0]
                context = comps[1:]
                context = [c for c in context if c != '']
                logger.info('Inferring embedding for "{}" with contexts {}'.format(word, context))
                vec = self.form_context_model.infer_vector(word, context)
|
timoschick/form-context-model | fcm/ngram_builder.py | import random
import itertools
import numpy as np
from collections import Counter
from typing import List
import my_log
logger = my_log.get_logger('root')
START_SYMBOL = '<S>'  # padding symbol marking the start/end of a word for n-gram extraction
UNK_TOKEN = 'UNK'  # token used for n-grams that are not in the n-gram vocabulary
PAD_TOKEN = 'PAD'  # token used to pad batched n-gram id sequences to equal length
UNK_ID = 0  # id reserved for UNK_TOKEN
PAD_ID = 1  # id reserved for PAD_TOKEN
class NGramFeatures:
    """The n-gram features of a single word: n-gram strings, their ids and the sequence length."""

    def __init__(self, ngrams: List[str], ngram_ids: List[int]):
        self.ngrams = ngrams
        self.ngram_ids = ngram_ids
        # number of n-grams; used as the (unpadded) sequence length when batching
        self.ngrams_length = len(ngram_ids)

    def __repr__(self):
        return '{}, {}, {}'.format(self.ngrams, self.ngram_ids, self.ngrams_length)
class BatchedNGramFeatures:
    """Padded numpy representation of several NGramFeatures: an id matrix and a length vector."""

    def __init__(self, ngram_ids: np.ndarray, ngrams_length: np.ndarray):
        self.ngram_ids = ngram_ids
        self.ngrams_length = ngrams_length

    def __repr__(self):
        return '{}, {}'.format(self.ngram_ids, self.ngrams_length)
class NGramBuilder:
    """Maps words to character n-gram features based on a fixed vocabulary file."""

    def __init__(self, vocab_file: str, ngram_threshold: int = 4, nmin: int = 3, nmax: int = 5):
        """
        :param vocab_file: file with one word (optionally followed by a count) per line
        :param ngram_threshold: minimum number of occurrences for an n-gram to get its own id
        :param nmin: minimum number of characters per n-gram
        :param nmax: maximum number of characters per n-gram
        """
        self.nmin = nmin
        self.nmax = nmax
        self.ngram2id = {UNK_TOKEN: UNK_ID, PAD_TOKEN: PAD_ID}
        self.id2ngram = [UNK_TOKEN, PAD_TOKEN]
        ngram_counts = Counter()
        with open(vocab_file, 'r', encoding='utf8') as file:
            for line in file:
                word = line.split()[0]
                ngram_counts.update(self.to_n_gram(word, self.nmin, self.nmax))
        for ngram, count in ngram_counts.most_common():
            if count >= ngram_threshold:
                # renamed from 'id', which shadowed the builtin
                ngram_id = len(self.id2ngram)
                self.ngram2id[ngram] = ngram_id
                self.id2ngram.append(ngram)
        logger.info('Found a total of {} ngrams with a minimum count of {} and (nmin,nmax)=({},{})'.format(
            len(self.id2ngram), ngram_threshold, nmin, nmax))

    def get_ngram_features(self, word: str, dropout_probability: float = 0) -> NGramFeatures:
        """Return the n-gram features of ``word``; out-of-vocabulary n-grams map to UNK_ID."""
        ngrams = self.to_n_gram(word, self.nmin, self.nmax, dropout_probability)
        # dict.get replaces the original "x if k in d else default" construct
        ngram_ids = [self.ngram2id.get(ngram, UNK_ID) for ngram in ngrams]
        return NGramFeatures(ngrams, ngram_ids)

    def batchify(self, features: List[NGramFeatures]) -> BatchedNGramFeatures:
        """Pad the given features with PAD_ID and stack them into numpy arrays."""
        ngram_ids = np.array(
            list(itertools.zip_longest(*[x.ngram_ids for x in features], fillvalue=PAD_ID)),
            dtype=np.int32).T
        ngrams_length = np.array([x.ngrams_length for x in features], dtype=np.int32)
        return BatchedNGramFeatures(ngram_ids, ngrams_length)

    @staticmethod
    def to_n_gram(word: str, nmin: int, nmax: int, dropout_probability: float = 0) -> List[str]:
        """
        Turns a word into a list of n-grams.
        :param word: the word
        :param nmin: the minimum number of characters per n-gram
        :param nmax: the maximum number of characters per n-gram
        :param dropout_probability: the probability of randomly removing an n-gram
        :return: the list of n-grams; [UNK_TOKEN] if every n-gram was dropped
        """
        ngrams = []
        # pad with START_SYMBOL so prefixes/suffixes become distinguishable n-grams
        letters = [START_SYMBOL] + list(word) + [START_SYMBOL]
        for i in range(len(letters)):
            for j in range(i + nmin, min(len(letters) + 1, i + nmax + 1)):
                ngrams.append(''.join(letters[i:j]))
        if dropout_probability > 0:
            ngrams = [ngram for ngram in ngrams if random.random() < (1 - dropout_probability)]
        if not ngrams:
            ngrams = [UNK_TOKEN]
        return ngrams

    def get_number_of_ngrams(self) -> int:
        """Return the size of the n-gram vocabulary (including UNK and PAD)."""
        return len(self.id2ngram)
|
timoschick/form-context-model | fcm/utils.py | import io
import random
import re, string
import numpy as np
from gensim.models import Word2Vec
import my_log
logger = my_log.get_logger('root')
PUNCTUATION_PATTERN = re.compile("^[{}]+$".format(re.escape(string.punctuation)))  # tokens made up entirely of punctuation
START_SYMBOL = '<S>'  # marks the beginning and end of a word for n-gram extraction
random.seed(1234)  # fixed seed so that n-gram dropout is reproducible across runs


def to_n_gram(word, ngram_range, dropout_probability=0):
    """
    Turns a word into a list of ngrams.

    The word is padded with ``START_SYMBOL`` on both sides before extraction.

    :param word: the word
    :param ngram_range: a list with two elements: the minimum and maximum number of characters per ngram
    :param dropout_probability: the probability of randomly removing each ngram
        (documented here for the first time; the original docstring omitted it)
    :return: the corresponding list of ngrams; ``['UNK']`` if every ngram was dropped
    """
    assert len(ngram_range) == 2
    n_min, n_max = ngram_range
    ngrams = []
    letters = [START_SYMBOL] + list(word) + [START_SYMBOL]
    for i in range(len(letters)):
        for j in range(i + n_min, min(len(letters) + 1, i + n_max + 1)):
            ngrams.append(''.join(letters[i:j]))
    if dropout_probability > 0:
        ngrams = [ngram for ngram in ngrams if random.random() < (1 - dropout_probability)]
    if not ngrams:
        ngrams = ['UNK']
    return ngrams
def load_embeddings(file_name: str, format: str, keep_punctuation: bool = False):
    """Dispatch embedding loading to the text-file or gensim loader based on ``format``."""
    if format == 'text':
        return get_w2v_model_file(filename=file_name, keep_punctuation=keep_punctuation)
    return get_w2v_model_gensim(filename=file_name, keep_punctuation=keep_punctuation)
def get_w2v_model_gensim(filename, keep_punctuation: bool = False):
    """Load a gensim Word2Vec model from ``filename`` and return only its keyed vectors."""
    if not keep_punctuation:
        raise NotImplementedError('get_w2v_model_gensim is only implemented with keep_punctuation = True')
    logger.info('Loading embeddings from %s', filename)
    model = Word2Vec.load(filename)
    vectors = model.wv
    # drop the full model right away: only the word vectors are needed downstream
    del model
    logger.info('Done loading embeddings')
    return vectors
def get_w2v_model_file(filename, keep_punctuation: bool = False):
    """Load text-format embeddings into a dict-like object that also exposes a ``vocab`` attribute."""
    logger.info('Loading embeddings from %s', filename)
    model = DummyDict()
    for word, vector in _load_vectors(filename, skip=True, keep_punctuation=keep_punctuation):
        model[word] = vector
    model.vocab = model.keys()
    logger.info('Done loading embeddings')
    return model
def _load_vectors(path, skip=False, keep_punctuation: bool = False):
with io.open(path, 'r', encoding='utf-8') as f:
for line in f:
if skip:
skip = False
else:
index = line.index(' ')
word = line[:index]
if not keep_punctuation and _is_punctuation(word):
continue
yield word, np.array([np.float(entry) for entry in line[index + 1:].split()])
def _is_punctuation(word: str):
    # Returns a truthy match object if the word consists solely of punctuation
    # characters, and None otherwise (callers only use the result as a boolean).
    return PUNCTUATION_PATTERN.match(word)
class DummyDict(dict):
    # A plain dict subclass: unlike dict itself, its instances accept arbitrary
    # attribute assignment (used to attach ``vocab`` in get_w2v_model_file).
    pass
|
timoschick/form-context-model | fcm/context_builder.py | from typing import List, Dict
import itertools
import numpy as np
import my_log
logger = my_log.get_logger('root')
class ContextFeatures:
    """Per-word context features: token lists plus their signed distances to the target word."""

    def __init__(self, contexts: List[List[str]], distances: List[List[int]]):
        self.contexts = contexts
        self.distances = distances
        # derived sizes needed for padding when batching
        self.number_of_contexts = len(contexts)
        self.words_per_context = [len(single_context) for single_context in contexts]

    def __repr__(self):
        return '{} , {}'.format(self.contexts, self.distances)
class BatchedContextFeatures:
    """Padded numpy representation of several ContextFeatures, ready to feed to the model."""

    def __init__(self, vectors, distances, number_of_contexts, words_per_context):
        self.vectors = vectors
        self.distances = distances
        self.number_of_contexts = number_of_contexts
        self.words_per_context = words_per_context

    def __repr__(self):
        return '{}, {}, {}, {}'.format(self.vectors, self.distances, self.number_of_contexts,
                                       self.words_per_context)
class ContextBuilder:
def __init__(self, word_embeddings: Dict[str, np.ndarray], vector_size: int, max_distance: int = 10):
self.word_embeddings = word_embeddings
self.vector_size = vector_size
self.max_distance = max_distance
def get_context_features(self, word: str, contexts: List[str]):
if not isinstance(contexts, list):
raise ValueError('A list of contexts must be passed to get_context_features, got {}'.format(contexts))
contexts_as_lists = [] # type: List[List[str]]
distances = [] # type: List[List[int]]
for context in contexts:
words = context.split()
word_indices = [i for i, w in enumerate(words) if w == word]
if not word_indices:
raise ValueError('The word "{}" does not occur in the given context ("{}")'.format(word, context))
context_with_distances = _add_distances(words, word_indices, self.max_distance)
context_with_distances = [(i, w) for i, w in context_with_distances if
w != word and w in self.word_embeddings]
context = [w for i, w in context_with_distances]
distance = [i for i, w in context_with_distances]
if not context:
continue
contexts_as_lists.append(context)
distances.append(distance)
return ContextFeatures(contexts_as_lists, distances)
def batchify(self, features: List[ContextFeatures]) -> BatchedContextFeatures:
number_of_contexts = np.array([x.number_of_contexts for x in features], dtype=np.int32)
max_context_length = np.amax(number_of_contexts, initial=0)
words_per_context = np.array(
list(itertools.zip_longest(*[x.words_per_context for x in features], fillvalue=0)),
dtype=np.int32).T.reshape(len(features), max_context_length)
max_words_per_context = np.amax(words_per_context, initial=0)
vectors_shape = [len(features), max_context_length, max_words_per_context, self.vector_size]
vectors = np.zeros(vectors_shape)
for batch_idx, feature in enumerate(features):
for context_idx, context in enumerate(feature.contexts):
for word_idx, word in enumerate(context):
vectors[batch_idx][context_idx][word_idx] = self.word_embeddings[word]
distances_shape = [len(features), max_context_length, max_words_per_context]
distances = np.zeros(distances_shape)
for batch_idx, feature in enumerate(features):
for context_idx, context_distances in enumerate(feature.distances):
for distance_idx, distance in enumerate(context_distances):
distances[batch_idx][context_idx][distance_idx] = distance
return BatchedContextFeatures(vectors, distances, number_of_contexts, words_per_context)
def _add_distances(list, hit_indices, max_distance):
list_with_distances = [(min([idx - hit_idx for hit_idx in hit_indices], key=abs), x) for idx, x in enumerate(list)]
list_with_distances = [(_squeeze(idx, -max_distance, max_distance), x) for idx, x in list_with_distances]
return list_with_distances
def _squeeze(number, min, max):
if number < min:
return min
if number > max:
return max
return number
|
timoschick/form-context-model | fcm/preprocess.py | <filename>fcm/preprocess.py
import random
from typing import List
from collections import Counter
import nltk
import os
import argparse
import my_log
FILE_NAME = 'train'  # base name for all files created during preprocessing
SHUFFLED_SUFFIX = '.shuffled'  # suffix for the shuffled copy of the input corpus
TOKENIZED_SUFFIX = '.tokenized'  # suffix for the tokenized (and lowercased) corpus
VOCAB_SUFFIX = '.voc'  # suffix for vocab files without counts
VOCAB_WITH_COUNTS_SUFFIX = '.vwc'  # suffix for vocab files with counts
BUCKET_SUFFIX = '.bucket'  # suffix for the per-bucket (word, contexts) training files
logger = my_log.get_logger('root')
def preprocess_training_file(in_path: str, out_dir: str, shuffle=True, tokenize=True, seed: int = 1234,
                             min_count_target: int = 100, min_count_context: int = 0, num_buckets: int = 25,
                             max_contexts_per_word=1000, max_context_size=25,
                             remove_one_word_lines: bool = True) -> None:
    """
    Turn a raw text corpus into the shuffled/tokenized corpus, vocab files and
    training buckets required for training a form-context model.

    :param in_path: path of the raw input corpus (one document per line)
    :param out_dir: existing directory into which all output files are written
    :param shuffle: whether to shuffle the corpus lines first
    :param tokenize: whether to tokenize and lowercase the corpus
    :param seed: random seed used for all shuffling
    :param min_count_target: minimum corpus frequency for a word to become a training target
    :param min_count_context: minimum corpus frequency for a word to be used as a context word
    :param num_buckets: number of buckets the target vocabulary is split into
    :param max_contexts_per_word: maximum number of contexts stored per word
    :param max_context_size: maximum number of context words on each side of a target word
    :param remove_one_word_lines: whether to drop lines that contain only a single word
    :raises FileNotFoundError: if ``out_dir`` does not exist or is not a directory
    """
    if os.path.exists(out_dir) and os.path.isdir(out_dir):
        if os.listdir(out_dir):
            logger.warning("Directory {} is not empty".format(out_dir))
    else:
        # bug fix: corrected the grammar of the original message ("doesn't exists")
        raise FileNotFoundError("Directory {} does not exist".format(out_dir))
    file_root = os.path.join(out_dir, FILE_NAME)
    # intermediate paths depend on which preprocessing steps are enabled
    shuffled_path = file_root + SHUFFLED_SUFFIX if shuffle else in_path
    tokenized_path = file_root + (SHUFFLED_SUFFIX if shuffle else '') + TOKENIZED_SUFFIX if tokenize else shuffled_path
    bucket_path = file_root + BUCKET_SUFFIX
    # step 1: shuffle the file
    if shuffle:
        _shuffle_lines(in_path, shuffled_path, seed, remove_one_word_lines=remove_one_word_lines)
    # step 2: tokenize and lowercase the file
    if tokenize:
        _tokenize_lines(shuffled_path, tokenized_path)
    # step 3: create the vocab files required for various succeeding steps
    _create_vocab(tokenized_path, file_root + VOCAB_SUFFIX + str(min_count_context),
                  with_counts=False, min_count=min_count_context)
    _create_vocab(tokenized_path, file_root + VOCAB_SUFFIX + str(min_count_target),
                  with_counts=False, min_count=min_count_target)
    _create_vocab(tokenized_path, file_root + VOCAB_WITH_COUNTS_SUFFIX + str(min_count_context),
                  with_counts=True, min_count=min_count_context)
    _create_vocab(tokenized_path, file_root + VOCAB_WITH_COUNTS_SUFFIX + str(min_count_target),
                  with_counts=True, min_count=min_count_target)
    vocab_targets = _load_vocab(file_root + VOCAB_SUFFIX + str(min_count_target), with_counts=False)
    # to make all buckets approximately equal in size, we shuffle the vocab before distributing it
    _shuffle_vocab(vocab_targets, seed=seed)
    # step 4: split the target vocab into buckets
    words_per_bucket = int(len(vocab_targets) / num_buckets) + 1
    logger.info(
        'Distributing {} words into {} buckets with {} words'.format(len(vocab_targets), num_buckets,
                                                                     words_per_bucket))
    for i in range(num_buckets):
        bucket_targets = vocab_targets[words_per_bucket * i: words_per_bucket * (i + 1)]
        if not bucket_targets:
            logger.warning('No words (of {}) left for bucket {} of {}'.format(len(vocab_targets), i + 1, num_buckets))
            continue
        logger.info('Creating bucket {} of {} with {} words'.format(i + 1, num_buckets, len(bucket_targets)))
        create_context_file(tokenized_path, bucket_path + str(i) + '.txt', bucket_targets,
                            max_contexts_per_word=max_contexts_per_word,
                            max_context_size=max_context_size)
def _shuffle_vocab(vocab: List[str], seed: int) -> None:
random.seed(seed)
random.shuffle(vocab)
def _shuffle_lines(in_path: str, out_path: str, seed: int, remove_one_word_lines: bool) -> None:
    """Load all lines of ``in_path``, optionally drop one-word lines, shuffle and write to ``out_path``."""
    random.seed(seed)
    logger.info('Loading file {} into memory'.format(in_path))
    with open(in_path, 'r', encoding='utf8') as f:
        lines = f.read().splitlines()
    if remove_one_word_lines:
        kept = [line for line in lines if len(line.split()) > 1]
        logger.info('Removed {} lines that contained only one word'.format(len(lines) - len(kept)))
        lines = kept
    logger.info('Shuffling all {} lines'.format(len(lines)))
    random.shuffle(lines)
    logger.info('Saving shuffled lines to file {}'.format(out_path))
    with open(out_path, 'w', encoding='utf8') as f:
        f.writelines(line + '\n' for line in lines)
def _tokenize_lines(in_path: str, out_path: str) -> None:
    """Tokenize and lowercase ``in_path`` line by line, writing the result to ``out_path``."""
    logger.info('Tokenizing sentences from file {} to {}'.format(in_path, out_path))
    line_count = 0  # stays 0 for an empty input file so the final log is still valid
    with open(in_path, 'r', encoding='utf8') as f_in, open(out_path, 'w', encoding='utf8') as f_out:
        for line_count, line in enumerate(f_in, 1):
            f_out.write(_tokenize_line(line) + '\n')
            if line_count % 10000 == 0:
                logger.info('Done tokenizing {} lines'.format(line_count))
    logger.info('Done with tokenization of {} lines'.format(line_count))
def _tokenize_line(line: str) -> str:
    """Tokenize a single line with nltk and lowercase every token."""
    lowered = [token.lower() for token in nltk.word_tokenize(line)]
    return ' '.join(lowered)
def _create_vocab(in_path: str, out_path: str, with_counts: bool = False, min_count: int = -1):
    """Count all whitespace-separated tokens in ``in_path`` and write those with count >= min_count."""
    logger.info('Creating vocab from file {} (with_counts={}, min_count={})'.format(in_path, with_counts, min_count))
    token_counts = Counter()
    with open(in_path, 'r', encoding='utf8') as f:
        for line in f:
            token_counts.update(line.split())
    # most_common() yields words in descending frequency order
    with open(out_path, 'w', encoding='utf8') as f:
        for word, count in token_counts.most_common():
            if count < min_count:
                continue
            f.write((word + ' ' + str(count) if with_counts else word) + '\n')
    logger.info('Done writing vocab to {}'.format(out_path))
def _load_vocab(in_path: str, with_counts: bool = False):
if with_counts:
vocab = {}
else:
vocab = []
with open(in_path, 'r', encoding='utf8') as f:
for line in f.read().splitlines():
if with_counts:
word, count = line.split()
vocab[word] = count
else:
vocab.append(line)
return vocab
def create_context_file(in_path: str, out_path: str, vocab: List[str], max_contexts_per_word=1000,
                        max_context_size=25, tokenize=False):
    """
    Collect up to ``max_contexts_per_word`` context windows for every vocab word found
    in ``in_path`` and write them as tab-separated lines to ``out_path``.
    """
    vocab = set(vocab)
    contexts_per_word = {entry: set() for entry in vocab}
    with open(in_path, 'r', encoding='utf8') as f:
        line_count = 0
        for composite_line in f:
            # drop the trailing newline, then treat ' .' as a sentence separator
            for sentence in composite_line[:-1].split(' .'):
                if tokenize:
                    sentence = _tokenize_line(sentence)
                words = sentence.split()
                for idx, word in enumerate(words):
                    if word not in vocab:
                        continue
                    collected = contexts_per_word[word]
                    if len(collected) < max_contexts_per_word:
                        window_start = max(0, idx - max_context_size)
                        window_end = idx + max_context_size
                        collected.add(' '.join(words[window_start:window_end]))
            line_count += 1
            if line_count % 100000 == 0:
                logger.info('Done processing {} lines'.format(line_count))
    with open(out_path, 'w', encoding='utf8') as f:
        for word, contexts in contexts_per_word.items():
            f.write(word + '\t' + '\t'.join(contexts) + '\n')
    logger.info('Done writing bucket to {}'.format(out_path))
def main():
    """CLI entry point: extract (word, context) data for training or testing."""
    parser = argparse.ArgumentParser()

    # required arguments
    parser.add_argument('mode', choices=['train', 'test'],
                        help="Whether to preprocess a file for training or testing (i.e. inferring embeddings for novel words)")
    parser.add_argument('--input', type=str, required=True,
                        help="The path of the raw text file from which (word, context) pairs are to be extracted")
    parser.add_argument('--output', type=str, required=True,
                        help="For training: the output directory in which all files required for training are stored. "
                             "For testing: the single file in which (word, context) pairs are stored.")

    # training + testing arguments
    parser.add_argument('--max_contexts_per_word', type=int, default=1000,
                        help="The maximum number of contexts to be stored per word")
    parser.add_argument('--max_context_size', type=int, default=25,
                        help="The maximum number of context words to the left and right of a word to be considered")

    # training arguments
    parser.add_argument('--no_shuffle', action='store_true',
                        help="If set to true, the training dataset is not shuffled.")
    parser.add_argument('--no_tokenize', action='store_true',
                        help="If set to true, the training dataset is not tokenized.")
    parser.add_argument('--seed', type=int, default=1234,
                        help="The seed used for shuffling the training dataset.")
    parser.add_argument('--min_count_target', type=int, default=100,
                        help="The minimum number of occurrences in the --input file for a word to be included in the output")
    parser.add_argument('--min_count_context', type=int, default=0,
                        help="The minimum number of occurrences in the --input file for a word to be used as a context word")
    parser.add_argument('--num_buckets', type=int, default=25,
                        help="The number of training buckets (or chunks) among which the training data is divided")
    parser.add_argument('--keep_one_word_lines', action='store_true',
                        help="If set to true, lines containing only a single word (before tokenization) are kept in the training dataset.")

    # testing arguments
    parser.add_argument('--words', type=str,
                        help="The path to a file containing the words for which (word, context) pairs are to be created. "
                             "Each line must contain exactly one word. Only required if mode == test.")

    args = parser.parse_args()

    if args.mode == 'train':
        preprocess_training_file(args.input, args.output,
                                 shuffle=not args.no_shuffle,
                                 tokenize=not args.no_tokenize,
                                 seed=args.seed,
                                 min_count_target=args.min_count_target,
                                 min_count_context=args.min_count_context,
                                 num_buckets=args.num_buckets,
                                 max_contexts_per_word=args.max_contexts_per_word,
                                 max_context_size=args.max_context_size,
                                 remove_one_word_lines=not args.keep_one_word_lines)
        return

    # mode == 'test'
    if not args.words:
        raise ValueError("--words must be specified when mode == test")
    words = _load_vocab(args.words, with_counts=False)
    create_context_file(args.input, args.output, words,
                        max_contexts_per_word=args.max_contexts_per_word,
                        max_context_size=args.max_context_size,
                        tokenize=not args.no_tokenize)
# Script entry point.
if __name__ == '__main__':
    # _shuffle_lines('test/data/tokenize_test.txt', 'test/data/out/tokenize_test.txt', 1234, True)
    main()
|
timoschick/form-context-model | fcm/form_context_model.py | from abc import ABC, abstractmethod
from typing import List
import numpy as np
import tensorflow as tf
import jsonpickle
import my_log
from batch_builder import InputProcessor, EndOfDatasetException
# logging options
np.set_printoptions(suppress=True)
tf.logging.set_verbosity(tf.logging.DEBUG)
logger = my_log.get_logger('root')
# constants definition
# Strategies for combining the form embedding and the context embedding.
FORM_ONLY = 'form_only'
CONTEXT_ONLY = 'context_only'
SINGLE_PARAMETER = 'single_parameter'
GATED = 'gated'
# Strategies for weighting the individual contexts of a word.
DEFAULT = 'default'
CLUSTERING = 'clustering'
COMBINATORS = [FORM_ONLY, CONTEXT_ONLY, SINGLE_PARAMETER, GATED]
CONTEXT_WEIGHTS = [DEFAULT, CLUSTERING]
class RareWordVectorizer(ABC):
    """Interface for models that produce an embedding for a (rare) word from
    its surface form and/or a set of observed contexts."""

    @abstractmethod
    def train(self, num_epochs: int, batch_size: int,
              log_after_n_steps: int = 100, out_path: str = None) -> None:
        """Fit the vectorizer; optionally checkpoint to `out_path` per epoch."""

    @abstractmethod
    def infer_vector(self, word: str, contexts: List[str]) -> np.ndarray:
        """Return an embedding for `word` given its observed `contexts`."""

    @abstractmethod
    def save(self, path: str) -> None:
        """Persist the vectorizer to disk."""

    @classmethod
    @abstractmethod
    def load(cls, path: str) -> 'RareWordVectorizer':
        """Restore a previously saved vectorizer."""
class FormContextModel(RareWordVectorizer):
    """Form-context model: embeds rare words by combining a surface-form
    (ngram) embedding with an embedding distilled from the word's contexts.

    The two components are merged according to ``combinator`` (one of
    COMBINATORS); contexts are weighted according to ``sent_weights`` (one of
    CONTEXT_WEIGHTS). Built on a TF1-style static graph.
    """

    def __init__(self, batch_builder: InputProcessor, emb_dim: int = None, learning_rate: float = None,
                 combinator: str = None, sent_weights: str = None, distance_embedding: bool = None):
        self.emb_dim = emb_dim
        self.learning_rate = learning_rate
        self.combinator = combinator
        self.sent_weights = sent_weights
        self.distance_embedding = distance_embedding
        self.batch_builder = batch_builder
        self._setup()

    def _setup(self):
        """(Re)build the computation graph, loss, train op and session."""
        tf.reset_default_graph()
        tf.set_random_seed(1234)
        self.features = {
            # shape: batch_size x max_ngrams_per_word
            'ngrams': tf.placeholder(tf.int32, shape=[None, None]),
            # shape: batch_size
            'ngram_lengths': tf.placeholder(tf.int32, shape=[None]),
            # shape: batch_size x max_contexts_per_word x max_words_per_context x emb_dim
            'context_vectors': tf.placeholder(tf.float32, shape=[None, None, None, self.emb_dim]),
            # shape: batch_size
            'context_lengths': tf.placeholder(tf.int32, shape=[None]),
            # shape: batch_size x max_contexts_per_word
            'words_per_context': tf.placeholder(tf.int32, shape=[None, None]),
            # shape: batch_size x max_contexts_per_word x max_words_per_context
            'distances': tf.placeholder(tf.int32, shape=[None, None, None])
        }
        self.form_embedding = self._form_embedding(self.features)
        self.context_embedding = self._context_embedding(self.features, self.form_embedding)
        self.form_context_embedding = self._combine_embeddings(
            self.form_embedding, self.context_embedding, self.features)
        self.targets = tf.placeholder(tf.float32, shape=[None, self.emb_dim])
        # (leftover debug print() calls of targets/emb_dim/learning_rate removed)
        self.loss = tf.losses.mean_squared_error(labels=self.targets, predictions=self.form_context_embedding)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_op = optimizer.minimize(loss=self.loss, global_step=tf.train.get_global_step())
        self.session = tf.Session()
        with self.session.as_default():
            tf.global_variables_initializer().run()
            tf.tables_initializer().run()

    def infer_vector(self, word: str, context: List[str]) -> np.ndarray:
        """Infer an embedding for `word` given a list of context sentences."""
        batch_inputs, _ = self.batch_builder.generate_batch_from_input(word, context)
        feed_dict = {self.features[feature]: batch_inputs[feature] for feature in self.features}
        if batch_inputs['context_vectors'].size == 0:
            # no usable contexts: fall back to the form embedding alone
            # (or zeros if the model has no form component)
            if self.combinator == CONTEXT_ONLY:
                logger.warning('Cannot infer embeddings without contexts, returning zeros instead')
                return np.zeros(self.emb_dim)
            vector = self.session.run(self.form_embedding, feed_dict=feed_dict)
        else:
            vector = self.session.run(self.form_context_embedding, feed_dict=feed_dict)
        vector = np.reshape(vector, [self.emb_dim])
        return vector

    def train(self, num_epochs: int, batch_size: int, log_after_n_steps: int = 100,
              out_path: str = None):
        """Run minibatch training over the batch builder's buffer.

        Saves a checkpoint per epoch when `out_path` is given.
        """
        summed_loss, step = 0, 0
        for epoch in range(num_epochs):
            while True:
                try:
                    batch_inputs, batch_labels = self.batch_builder.generate_batch_from_buffer(batch_size)
                    feed_dict = {self.targets: batch_labels}
                    for feature in self.features:
                        feed_dict[self.features[feature]] = batch_inputs[feature]
                    _, loss_val = self.session.run([self.train_op, self.loss], feed_dict=feed_dict)
                    summed_loss += loss_val
                    step += 1
                    if step > 0 and step % log_after_n_steps == 0:
                        logger.info('Step: %d\tLoss: %.17f', step, (summed_loss / log_after_n_steps))
                        summed_loss = 0
                except EndOfDatasetException:
                    # buffer exhausted: this epoch is done
                    logger.info('Done with epoch %d', epoch)
                    if out_path is not None:
                        self.save('{}.e{}'.format(out_path, epoch))
                    self.batch_builder.reset()
                    break

    def _form_embedding(self, features):
        """Average the embeddings of a word's ngrams (uniform attention)."""
        if self.combinator == CONTEXT_ONLY:
            return tf.zeros([1])
        ngrams = features['ngrams']
        word_lengths = features['ngram_lengths']
        with tf.device("/cpu:0"):
            ngram_embeddings = tf.get_variable("ngram_embeddings",
                                               [self.batch_builder.ngram_builder.get_number_of_ngrams(),
                                                self.batch_builder.vector_size])
            ngrams_embedded = tf.nn.embedding_lookup(ngram_embeddings, ngrams, name="ngrams_embedded")
        # mask out padding ngrams and average over the real ones
        mask = tf.sequence_mask(word_lengths, dtype=tf.float32)
        mask = tf.expand_dims(mask, -1)
        attention_filtered_sums = tf.reduce_sum(mask, axis=1, keep_dims=True)
        attention = mask / attention_filtered_sums
        return tf.reduce_sum(ngrams_embedded * attention, axis=1)

    def _context_embedding_uniform(self, context_vectors, words_per_context):
        """Average all context word vectors uniformly across all contexts."""
        mask = tf.sequence_mask(words_per_context, dtype=tf.float32)
        mask = tf.expand_dims(mask, -1)
        words_per_training_instance = tf.reduce_sum(tf.cast(words_per_context, dtype=tf.float32), axis=1,
                                                    keep_dims=True)
        words_per_training_instance = tf.expand_dims(tf.expand_dims(words_per_training_instance, -1), -1)
        attention = mask / tf.maximum(words_per_training_instance, 1)
        return tf.reduce_sum(context_vectors * attention, axis=[1, 2])

    def _context_embedding(self, features, form_embedding=None):
        """Weighted sum of context word vectors (weights from _context_weights)."""
        if self.combinator == FORM_ONLY:
            return tf.zeros([1])
        elif self.sent_weights == DEFAULT:
            return self._context_embedding_uniform(features['context_vectors'], features['words_per_context'])
        mask = tf.sequence_mask(features['words_per_context'], dtype=tf.float32)
        mask = tf.expand_dims(mask, -1)
        # get weights for each context and expand them to 4 dimensions
        context_weights = self._context_weights(features, form_embedding)
        context_weights = tf.expand_dims(tf.expand_dims(context_weights, -1), -1)
        # get weights for each word and expand them to 4 dimensions
        word_weights = self._word_weights(features)
        word_weights = tf.expand_dims(word_weights, -1)
        weights = mask * (context_weights * word_weights)
        if self.distance_embedding:
            # scale each word vector by a learned embedding of its distance to the target word
            distance_embeddings = tf.get_variable("distance_embeddings_01", [20 + 1, self.batch_builder.vector_size])
            distances_embedded = tf.nn.embedding_lookup(distance_embeddings, features['distances'],
                                                        name="distances_embedded_01")
            vectors_with_distances = features['context_vectors'] * distances_embedded
            context_embedding = tf.reduce_sum(vectors_with_distances * weights, axis=[1, 2])
        else:
            context_embedding = tf.reduce_sum(features['context_vectors'] * weights, axis=[1, 2])
        return context_embedding

    def _context_weights(self, features, form_embedding=None):
        """
        Returns the weight for each context of each training instance
        The shape is batch_size x max_number_of_contexts
        """
        if self.sent_weights == CLUSTERING:
            # get one embedding per context
            if self.distance_embedding:
                logger.info("using distance embeddings")
                distance_embeddings = tf.get_variable("distance_embeddings_02",
                                                      [20 + 1, self.batch_builder.vector_size])
                distances_embedded = tf.nn.embedding_lookup(distance_embeddings, features['distances'],
                                                            name="distances_embedded_02")
                vectors_with_distances = features['context_vectors'] * distances_embedded
                vectors_avg = tf.reduce_sum(vectors_with_distances, axis=2)
            else:
                vectors_avg = tf.reduce_sum(features['context_vectors'], axis=2)
            vectors_avg /= tf.expand_dims(tf.cast(features['words_per_context'], dtype=tf.float32), axis=-1) + 1e-9
            # compute the pairwise match between the embeddings obtained from all contexts
            vectors_avg = tf.layers.dense(vectors_avg, self.emb_dim, use_bias=None,
                                          activation=None, name='vectors_avg_linear')
            match_scores = FormContextModel.attention_fun(vectors_avg, vectors_avg)
            mask = tf.sequence_mask(features['context_lengths'], dtype=tf.float32)
            mask_key = tf.expand_dims(mask, 1)
            mask_query = tf.expand_dims(mask, 2)
            # batch_size x max_context_length x max_context_length
            match_scores = tf.multiply(match_scores, mask_key)
            match_scores = tf.multiply(match_scores, mask_query)
            self.prx = tf.identity(match_scores)
            # batch_size x max_context_length
            match_scores = tf.reduce_sum(match_scores, axis=-1)
            # modify the match_scores to sum to 1 for each context
            with tf.name_scope("match_scores_softmax"):
                match_scores = tf.multiply(match_scores, mask)
                match_scores_summed = tf.reduce_sum(match_scores, axis=1, keepdims=True)
                match_scores = tf.div(match_scores, match_scores_summed + 1e-9)
            return match_scores

    def _word_weights(self, features):
        """
        Returns the weight for each word within a context, i.e. 1/words_per_context
        The shape is batch_size x max_number_of_contexts x max_number_of_words
        """
        weights = tf.ones(tf.shape(features['context_vectors'])[:3], dtype=tf.float32)
        weights = weights / tf.maximum(1.0,
                                       tf.expand_dims(tf.cast(features['words_per_context'], dtype=tf.float32), -1))
        return weights

    def _combine_embeddings(self, form_embedding, context_embedding, features):
        """Merge the form and context embeddings according to self.combinator."""
        form_alpha = 0
        context_alpha = 0
        if self.combinator != FORM_ONLY and self.combinator != CONTEXT_ONLY:
            if self.combinator == SINGLE_PARAMETER:
                # one learned scalar gate shared across all words
                form_alpha = tf.sigmoid(tf.get_variable("alpha_intra", [1]))
                context_alpha = 1 - form_alpha
            elif self.combinator == GATED:
                # per-instance gate computed from both candidate embeddings
                combined_guess = tf.concat([form_embedding, context_embedding], axis=-1)
                alpha_kernel = tf.get_variable("alpha_kernel", [2 * self.emb_dim, 1])
                alpha_bias = tf.get_variable("alpha_bias", [1])
                form_alpha = tf.matmul(combined_guess, alpha_kernel) + alpha_bias
                form_alpha = tf.sigmoid(form_alpha)
                context_alpha = 1 - form_alpha
            else:
                raise ValueError("Combinator {} not implemented".format(self.combinator))
            # normalize the two gates so they sum to (almost exactly) 1
            alpha_sum = context_alpha + form_alpha + 1e-9
            form_alpha /= alpha_sum
            context_alpha /= alpha_sum
        if self.combinator != FORM_ONLY:
            context_embedding = tf.layers.dense(context_embedding, self.emb_dim, use_bias=None,
                                                activation=None, name='context_embedding_linear')
        if self.combinator == FORM_ONLY:
            return form_embedding
        elif self.combinator == CONTEXT_ONLY:
            return context_embedding
        else:
            # NOTE(review): tf.Print emits the gate values on every forward pass;
            # this is debug output — consider removing it for production runs.
            form_alpha = tf.Print(form_alpha, [form_alpha, context_alpha], message='(form, context) = ')
            return form_alpha * form_embedding + context_alpha * context_embedding

    @staticmethod
    def attention_fun(Q, K, scaled_=True):
        """Dot-product attention scores between Q and K, optionally scaled by sqrt(d_k)."""
        attention = tf.matmul(Q, K, transpose_b=True)  # [batch_size, sequence_length, sequence_length]
        if scaled_:
            d_k = tf.cast(tf.shape(K)[-1], dtype=tf.float32)
            attention = tf.divide(attention, tf.sqrt(d_k))  # [batch_size, sequence_length, sequence_length]
        return attention

    def __getstate__(self):
        # TODO solve the other way around (remove unneeded items instead of keeping needed items)
        # Only hyperparameters and the batch builder are pickled; the TF graph,
        # session and variables are rebuilt/restored separately.
        odict = {
            'emb_dim': self.emb_dim,
            'learning_rate': self.learning_rate,
            'combinator': self.combinator,
            'sent_weights': self.sent_weights,
            'distance_embedding': self.distance_embedding,
            'batch_builder': self.batch_builder
        }
        return odict

    def __setstate__(self, dict):
        self.__dict__.update(dict)
        # rebuild the graph and session for the restored hyperparameters
        self._setup()

    def save(self, path: str) -> None:
        """Write the TF checkpoint to `path` and the config to `path`.config.json."""
        logger.info('Saving model to {}'.format(path))
        saver = tf.train.Saver()
        saver.save(self.session, path)
        with open(path + '.config.json', 'w', encoding='utf8') as f:
            f.write(jsonpickle.encode(self))
        logger.info('Done saving model')

    @classmethod
    def load(cls, path: str) -> 'FormContextModel':
        """Restore a model saved with `save` (config + TF checkpoint)."""
        logger.info('Loading model from {}'.format(path))
        with open(path + '.config.json', 'r', encoding='utf8') as f:
            model = jsonpickle.decode(f.read())
        model._setup()
        saver = tf.train.Saver()
        saver.restore(model.session, path)
        logger.info('Done loading model')
        return model
|
timoschick/form-context-model | fcm/infer_vectors.py | <reponame>timoschick/form-context-model
import argparse
import io
import my_log
import re
from form_context_model import FormContextModel
logger = my_log.get_logger('root')
def main():
    """Load a trained FormContextModel and write one embedding per input word.

    Each input line is '<word>' followed by tab-separated contexts; the output
    file receives '<word> <v1> <v2> ...' per line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', '-m', type=str, required=True)
    parser.add_argument('--input', '-i', type=str, required=True)
    parser.add_argument('--output', '-o', type=str, required=True)
    args = parser.parse_args()

    model = FormContextModel.load(args.model)

    processed = 0
    with io.open(args.input, 'r', encoding='utf-8') as input_file, io.open(args.output, 'w',
                                                                           encoding='utf-8') as output_file:
        for line in input_file.read().splitlines():
            word, *contexts = line.split('\t')
            # drop empty context fields (e.g. from trailing tabs)
            contexts = [c for c in contexts if c != '']
            vector = model.infer_vector(word, contexts)
            output_file.write(word + ' ' + ' '.join(str(x) for x in vector) + '\n')
            processed += 1
            if processed % 100 == 0:
                logger.info('Done processing %d words', processed)
# Script entry point.
if __name__ == '__main__':
    main()
|
mj74447/Automation | Automation.py | <reponame>mj74447/Automation
from selenium import webdriver
# Drive a Chrome session through the WHO trial registry search flow:
# open the site, search for "lupus", then advance the results.
browser = webdriver.Chrome()
browser.maximize_window()
browser.get('https://apps.who.int/trialsearch')

# Enter the search term and submit the query.
browser.find_element_by_xpath('//*[@id="TextBox1"]').send_keys('lupus')
browser.find_element_by_xpath('//*[@id="Button1"]').click()

# Click through to the next step of the results.
browser.find_element_by_xpath('//*[@id="Button7"]').click()
|
AlexandrosKyriakakis/MarketDataset | alex/alex/spiders/freska.py | <reponame>AlexandrosKyriakakis/MarketDataset
# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy import Request
from alex.items import AlexItem
#from alex.settings import AB_VARS
class DogSpider(CrawlSpider):
    """Crawls ab.gr category pages and yields one item per product page.

    All category callbacks share `_parse_product`; they differ only in the
    category label, the fallback producer, and how a "missing" producer is
    detected.
    """
    name = 'freska'
    allowed_domains = ['ab.gr']
    #kava_urls = ['https://www.ab.gr/click2shop/Krasia-pota-anapsyktika-nera/c/008?pageNumber={}'.format(x) for x in range(0,54)]
    #oporopoleio_urls = ['https://www.ab.gr/click2shop/Oporopoleio/c/001?pageNumber={}'.format(x) for x in range(0,11)]
    #kreata_urls = ['https://www.ab.gr/click2shop/Fresko-Kreas-and-Psaria/c/002?pageNumber={}'.format(x) for x in range(0,7)]
    #psigeiou_urls = ['https://www.ab.gr/click2shop/Galaktokomika-and-Eidi-Psygeioy/c/003?pageNumber={}'.format(x) for x in range(0,38)]
    #proswpikis_urls = ['https://www.ab.gr/click2shop/Eidi-prosopikis-peripoiisis/c/012?pageNumber={}'.format(x) for x in range(0,90)]
    katoikidia_urls = ['https://www.ab.gr/click2shop/Gia-katoikidia/c/014?pageNumber={}'.format(x) for x in range(0,12)]
    #spitiou_urls = ['https://www.ab.gr/click2shop/Katharistika-Chartika-and-eidi-spitioy/c/013?pageNumber={}'.format(x) for x in range(0,64)]
    #urls = kava_urls + oporopoleio_urls + kreata_urls + psigeiou_urls + katoikidia_urls + spitiou_urls
    start_urls = katoikidia_urls#urls[:]
    rules = (
        Rule(LinkExtractor(allow=("Katharistika/","Axesoyar-katharismoy/","Aporrypantika-piaton/","Aporrypantika-plyntirioy-roychon/","Malaktika-roychon/"),deny=("c/")), callback='parse_AB_spitiou', follow=True),
        Rule(LinkExtractor(allow=("Leykantika-Enischytika/","Aposkliryntika-plyntirioy/","Aporrypantika-cherioy/","Charti-oikiakis-chrisis/","Eidi-oikiakis-chrisis","Epochiaka/"),deny=('c/')), callback='parse_AB_spitiou', follow=True),
        Rule(LinkExtractor(allow=("Entomoktona-Entomoapothitika/","Fylaxi-roychon/","Eidi-sideromatos-aplomatos","Aromatika-Keria/","/Grafiki-yli-Analosima","Dora-Paichnidia/","Eidi-aytokinitoy/"),deny=('c/')), callback='parse_AB_spitiou', follow=True),
        Rule(LinkExtractor(allow=("Gia-gates/","Gia-skyloys/","Ygieini-zoon/"),deny=('c/')), callback='parse_AB_katoikidia', follow=True),
        Rule(LinkExtractor(allow=("/Krasia/","Anapsyktika","Nera","Pota"),deny=('c/')), callback='parse_AB_kava', follow=True),
        Rule(LinkExtractor(allow=("Froyta/","Lachanika/","Nopoi-xiroi-karpoi/"),deny=('c/')), callback='parse_AB_freska', follow=True),
        Rule(LinkExtractor(allow=("Loykanika/","Etoimes-Lyseis/","/Kynigi/","Freska-psaria-and-thalassina/"),deny=('c/')), callback='parse_AB_freska', follow=True),
        Rule(LinkExtractor(allow=("/Gala/","Giaoyrtia/","Kremes-Glykismata/","Voytyro-Margarini/","Kremes-galaktos-and-santigy/","Freskes-zymes-Fylla-Magia/","Freskoi-Zomoi/","Chymoi-psygeioy/","Ayga"),deny=('c/')), callback='parse_AB_psigeiou', follow=True),
        Rule(LinkExtractor(allow=("Fresko-Kreas-and-Psaria","Moschari-gia-tin-katsarola-i-to-foyrno","Moschari-gia-tin-schara-i-to-tigani/","Choirino-gia-tin-schara-i-to-tigani/","Choirino-gia-ti-gastra-tin-katsarola-i-to-foyrno/","Kotopoylo-gia-to-tigani/c","Choirino-gia-ti-gastra-tin-katsarola-i-to-foyrno/","/Kotopoylo-gia-psito-i-vrasto/","Galopoyla/","Kimas/"),deny=('c/')), callback='parse_AB_freska', follow=True),
        Rule(LinkExtractor(allow=("Andriki-peripoiisi","Gynaikeia-peripoiisi","Prosopiki-ygieini/","Frontida-somatos/","Frontida-mallion/","Stomatiki-ygieini/","Antiiliaka","Parafarmakeytika"),deny=('c/')), callback='parse_AB_proswpikis', follow=True),
    )

    def _parse_product(self, response, category, fallback_producer, treat_short_as_missing=False):
        """Extract one product item from a detail page.

        :param category: category label to attach to the item
        :param fallback_producer: value used when the page has no usable producer
        :param treat_short_as_missing: if True, a producer shorter than 2 chars
            counts as missing (instead of the literal "-")
        """
        producer = response.xpath('//p[@class="page-title-info"]/text()').get()
        # Bug fix: test for a missing producer *before* cleaning it up. The
        # original code called re.sub on the value first, which raises
        # TypeError when the xpath yields None and made the None check dead.
        if producer is not None:
            producer = re.sub(r'\n|\s\s', "", producer)
        missing = (producer is None
                   or (len(producer) < 2 if treat_short_as_missing else producer == "-"))
        if missing:
            producer = fallback_producer
        return {
            "category": category,
            "name": response.xpath('//h1/text()').get(),
            "producer": producer,
            "price": re.search(r'(\d+),(\d+)', response.xpath('//span[@class="ultra-bold test-price-property"]/text()').get()).group(0),
            "barcode": re.search(r'(\d+)', response.xpath('//div[@class="col-sm-60 product-description-id"]/p/text()|//div[@class="color_grey_5"]/p/text()').get()).group(0),
            "url": response.url
        }

    def parse_AB_kava(self, response):
        yield self._parse_product(response, "Κάβα", "Τοπικός Παραγωγός")

    def parse_AB_freska(self, response):
        yield self._parse_product(response, "Φρέσκα Προϊόντα", "<NAME>")

    def parse_AB_psigeiou(self, response):
        # NOTE(review): "Ποϊόντα Ψυγείου" looks like a typo for "Προϊόντα
        # Ψυγείου"; kept byte-identical because downstream data may match it.
        yield self._parse_product(response, "Ποϊόντα Ψυγείου", "<NAME>")

    def parse_AB_katoikidia(self, response):
        yield self._parse_product(response, "Κατοικίδια", "<NAME>")

    def parse_AB_spitiou(self, response):
        yield self._parse_product(response, "Είδη Σπιτιού", "<NAME>")

    def parse_AB_proswpikis(self, response):
        yield self._parse_product(response, "Προσωπικής Περιποίησης", "<NAME>",
                                  treat_short_as_missing=True)
|
AlexandrosKyriakakis/MarketDataset | alex/alex/spiders/alexandros.py | # -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy import Request
from alex.items import AlexItem
class AlexandrosSpider(CrawlSpider):
    """Crawls spitogatos.gr listing pages and yields (name, price) items."""
    name = 'alexandros'
    allowed_domains = ['spitogatos.gr']
    start_urls = ['https://www.spitogatos.gr/πώληση_Μεζονέτα_Άνω_Κυψέλη_-_Ευελπίδων__Κέντρο_Αθήνας_-l9297911']
    # NOTE(review): using parse() as a Rule callback conflicts with
    # CrawlSpider, which uses parse internally to drive its rules — consider
    # renaming the callback. Kept as-is to preserve current behavior.
    rules = (
        Rule(LinkExtractor(allow=("spitogatos.gr")), callback='parse', follow=True),
    )

    def parse(self, response):
        # (leftover debug print removed)
        yield {
            "name": response.xpath('//h1/text()').get(),
            "price": response.xpath('//h6[@class="text black inline color bold vertical-middle padding-right-small"]/text()').get(),
        }
# |
lkuligin/cs231n | assignment1/classifiers/linear_svm.py | import numpy as np
from random import shuffle
from past.builtins import xrange
def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    # compute the loss and the gradient
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    # Fix: use the builtin range instead of the Python-2 xrange shim.
    for i in range(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        # number of classes that violated the margin for this example
        wrong_predicted = 0
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            if margin > 0:
                loss += margin
                wrong_predicted += 1
                dW[:, j] += X[i, :]
        # each margin violation pushes the correct-class weights away from X[i]
        dW[:, y[i]] -= wrong_predicted * X[i, :]

    # Right now the loss is a sum over all training examples, but we want it
    # to be an average instead so we divide by num_train.
    loss /= num_train
    # Add L2 regularization to the loss and gradient.
    loss += reg * np.sum(W * W)
    dW = dW / num_train + 2 * reg * W
    return loss, dW
def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive.
    """
    num_train = X.shape[0]
    delta = 1.0

    # scores[k, j] = sum_n x_kn * w_nj
    scores = X.dot(W)
    scores_correct_class = scores[np.arange(num_train), y]
    # margins[k, j] = max(0, s_kj - s_k_(y_k) + delta)
    margins = np.maximum(0, scores - scores_correct_class[:, np.newaxis] + delta)
    # every correct-class entry contributes exactly delta, so subtract them out
    loss = (margins.sum() - delta * num_train) / num_train + reg * np.sum(W * W)

    # indicator of margin violations; replaces the slow
    # np.vectorize(lambda x: 1. if x > 0 else 0) Python-level loop
    margins_binary = (margins > 0).astype(np.float64)
    margins_binary[np.arange(num_train), y] = 0
    margins_binary[np.arange(num_train), y] = -margins_binary.sum(axis=1)
    dW = X.T.dot(margins_binary) / num_train + 2 * reg * W
    return loss, dW
|
lkuligin/cs231n | assignment1/classifiers/softmax.py | import numpy as np
from random import shuffle
from past.builtins import xrange
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)

    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    # Fix: use the builtin range instead of the Python-2 xrange shim.
    for i in range(num_train):
        scores = X[i].dot(W)
        # shift scores for numerical stability (softmax is shift-invariant)
        scores -= np.max(scores)
        correct_class_score = scores[y[i]]
        log_denominator = np.sum(np.exp(scores))
        loss += np.log(log_denominator)
        for j in range(num_classes):
            if j == y[i]:
                loss -= scores[j]
                dW[:, j] += - X[i, :]
            dW[:, j] += (np.exp(scores[j]) / log_denominator) * X[i, :]

    # Average over the batch and add L2 regularization.
    loss = loss / num_train + reg * np.sum(W * W)
    dW = dW / num_train + 2 * reg * W
    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.

    Inputs and outputs are the same as softmax_loss_naive.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    num_train = X.shape[0]

    scores = X.dot(W)
    # shift each row for numerical stability; vectorized axis reductions
    # replace the slow Python-level np.apply_along_axis calls
    scores -= np.max(scores, axis=1, keepdims=True)
    scores_correct_class = scores[np.arange(num_train), y]
    log_denominator = np.sum(np.exp(scores), axis=1)
    loss = -np.sum(scores_correct_class) + np.sum(np.log(log_denominator))
    loss = loss / num_train + reg * np.sum(W * W)

    # dW = X^T (softmax_probabilities - one_hot(y)), averaged over the batch
    dW = X.T.dot(np.exp(scores) / log_denominator[:, np.newaxis])
    correct_classes = np.zeros_like(scores)
    correct_classes[np.arange(num_train), y] = -1.
    dW += X.T.dot(correct_classes)
    dW = dW / num_train + 2 * reg * W
    return loss, dW
|
lkuligin/cs231n | assignment1/classifiers/tests/test_neural_net.py | <gh_stars>0
import os
import sys
import unittest
import numpy as np
sys.path.insert(0,'..')
sys.path.insert(0,'../../../utils')
from neural_net import TwoLayerNet
import test_utils
class NeuralNetTest(test_utils.TestCaseWithParams):
    """Parameterized tests for TwoLayerNet.loss: forward scores, loss value,
    and numerical-vs-analytic gradient agreement."""

    def setUp(self):
        # Fixture values are injected via self.kwargs by TestCaseWithParams.
        self.net = self.kwargs['net']
        self.x = self.kwargs['x']
        self.y = self.kwargs['y']
        self.expected_loss_nt = self.kwargs['expected_loss_nt']
        self.expected_loss_wt = self.kwargs['expected_loss_wt']

    def test_neural_net_loss_notarget(self):
        # Without targets, loss() returns the raw class scores.
        scores = self.net.loss(self.x)
        np.testing.assert_allclose(scores, self.expected_loss_nt, 1e-04)

    def test_neural_net_loss_withtarget(self):
        loss, _ = self.net.loss(self.x, self.y, reg=0.05)
        np.testing.assert_allclose(loss, self.expected_loss_wt, 1e-04)

    def test_neural_net_gradient(self):
        loss, grads = self.net.loss(self.x, self.y, reg=0.0)
        f = lambda w: self.net.loss(self.x, self.y, reg=0.0)[0]
        # Bug fix: dict.iteritems() is Python-2-only and raises AttributeError
        # on Python 3; .items() behaves identically here.
        for param_name, grad in grads.items():
            grad_numerical = test_utils.eval_numerical_gradient(f, self.net.params[param_name], verbose=False)
            np.testing.assert_allclose(grad_numerical, grad, 1e-04)
if __name__ == '__main__':
    # Deterministic fixtures: fixed seeds for the weights and the inputs.
    np.random.seed(0)
    net_under_test = TwoLayerNet(4, 10, 3, std=1e-1)
    np.random.seed(1)
    inputs = 10 * np.random.randn(5, 4)
    labels = np.array([0, 1, 2, 2, 1])
    # Reference scores recorded from a known-good forward pass.
    scores_expected = np.asarray([
        [-0.81233741, -1.27654624, -0.70335995],
        [-0.17129677, -1.18803311, -0.47310444],
        [-0.51590475, -1.01354314, -0.8504215],
        [-0.15419291, -0.48629638, -0.52901952],
        [-0.00618733, -0.12435261, -0.15226949],
    ])
    params = {'net': net_under_test, 'x': inputs, 'y': labels,
              'expected_loss_nt': scores_expected, 'expected_loss_wt': 1.303788}
    suite = unittest.TestSuite()
    suite.addTest(test_utils.TestCaseWithParams.get_suite(NeuralNetTest, kwargs=params))
    unittest.TextTestRunner(verbosity=2).run(suite)
|
lkuligin/cs231n | assignment1/classifiers/tests/test_linear_svm.py | import os
import sys
import unittest
import numpy as np
sys.path.insert(0,'..')
sys.path.insert(0,'../../../utils')
from linear_svm import svm_loss_naive, svm_loss_vectorized
import test_utils
class LinearSvmTest(test_utils.TestCaseWithParams):
    """Parametrized tests comparing the naive and vectorized SVM losses."""

    def setUp(self):
        self.weights = self.kwargs['W']
        self.x = self.kwargs['x']
        self.y = self.kwargs['y']
        self.reg = self.kwargs['reg']
        self.expected = self.kwargs['expected']

    def test_svm_loss_naive_loss(self):
        loss, _ = svm_loss_naive(self.weights, self.x, self.y, self.reg)
        np.testing.assert_allclose(loss, self.expected)

    def test_svm_loss_vectorized_loss(self):
        loss, _ = svm_loss_vectorized(self.weights, self.x, self.y, self.reg)
        np.testing.assert_allclose(loss, self.expected)

    def test_svm_loss_naive_gradient(self):
        _, grad = svm_loss_naive(self.weights, self.x, self.y, self.reg)
        # BUGFIX: evaluate the loss at the array the checker perturbs (w), not
        # at a captured self.weights. The old lambda ignored its argument and
        # only worked because grad_check_sparse mutates the array in place.
        f = lambda w: svm_loss_naive(w, self.x, self.y, self.reg)[0]
        test_utils.grad_check_sparse(f, self.weights, grad)

    def test_svm_loss_vectorized(self):
        # Naive and vectorized implementations must agree exactly.
        loss_naive, grad_naive = svm_loss_naive(self.weights, self.x, self.y, self.reg)
        loss_vect, grad_vect = svm_loss_vectorized(self.weights, self.x, self.y, self.reg)
        np.testing.assert_allclose(loss_naive, loss_vect)
        np.testing.assert_allclose(grad_naive, grad_vect)
if __name__ == '__main__':
    # Shared fixture: 4 samples with 4 features each, 3 classes.
    weights = np.array([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4], [0.3, 0.4, 0.5], [0.5, 0.6, 0.7]])
    features = np.array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [5, 6, 7, 8]])
    labels = np.array([0, 2, 1, 2])
    suite = unittest.TestSuite()
    # Same case run once without and once with regularization.
    for reg_strength, expected_loss in ((0.0, 1.95), (0.1, 2.153)):
        case_kwargs = {'W': weights, 'x': features, 'y': labels,
                       'reg': reg_strength, 'expected': expected_loss}
        suite.addTest(test_utils.TestCaseWithParams.get_suite(LinearSvmTest, kwargs=case_kwargs))
    unittest.TextTestRunner(verbosity=2).run(suite)
|
lkuligin/cs231n | assignment2/classifiers/cnn.py | from builtins import object
import numpy as np
from layers import *
from fast_layers import *
from layer_utils import *
class ThreeLayerConvNet(object):
    """
    A three-layer convolutional network with the following architecture:
    conv - relu - 2x2 max pool - affine - relu - affine - softmax
    The network operates on minibatches of data that have shape (N, C, H, W)
    consisting of N images, each with height H and width W and with C input
    channels.
    """
    def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
                 hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
                 dtype=np.float32):
        """
        Initialize a new network.
        Inputs:
        - input_dim: Tuple (C, H, W) giving size of input data
        - num_filters: Number of filters to use in the convolutional layer
        - filter_size: Size of filters to use in the convolutional layer
        - hidden_dim: Number of units to use in the fully-connected hidden layer
        - num_classes: Number of scores to produce from the final affine layer.
        - weight_scale: Scalar giving standard deviation for random initialization
          of weights.
        - reg: Scalar giving L2 regularization strength
        - dtype: numpy datatype to use for computation.
        """
        self.params = {}
        self.reg = reg
        self.dtype = dtype
        C, H, W = input_dim
        self.params['W1'] = np.random.normal(0, weight_scale, [num_filters, C, filter_size, filter_size])
        self.params['b1'] = np.zeros(num_filters)
        # BUGFIX: the affine input size is (H//2) * (W//2) * num_filters after
        # the size-preserving conv and the 2x2 pool. The old np.int(H/2)**2
        # assumed square inputs (H == W) and np.int was removed in NumPy 1.24.
        # NOTE(review): assumes H and W are even and filter_size is odd so the
        # padded conv preserves the spatial size — confirm against callers.
        self.params['W2'] = np.random.normal(0, weight_scale, [(H // 2) * (W // 2) * num_filters, hidden_dim])
        self.params['b2'] = np.zeros(hidden_dim)
        self.params['W3'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])
        self.params['b3'] = np.zeros(num_classes)
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Evaluate loss and gradient for the three-layer convolutional network.
        Input / output: Same API as TwoLayerNet in fc_net.py.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        W3, b3 = self.params['W3'], self.params['b3']
        # pass conv_param to the forward pass for the convolutional layer;
        # this padding/stride keeps the spatial size unchanged for odd filters.
        filter_size = W1.shape[2]
        conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}
        # pass pool_param to the forward pass for the max-pooling layer
        pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
        # Forward: conv-relu-pool -> affine -> relu -> affine.
        layer1_output, layer1_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
        layer2_activation_input, layer2_activation_cache = affine_forward(layer1_output, W2, b2)
        layer2_output, layer2_relu_cache = relu_forward(layer2_activation_input)
        layer3_out, layer3_cache = affine_forward(layer2_output, W3, b3)
        if y is None:
            # Test-time: return raw class scores.
            return layer3_out
        grads = {}
        loss, grad_loss = softmax_loss(layer3_out, y)
        grad_x3, grad_w3, grad_b3 = affine_backward(grad_loss, layer3_cache)
        grads['W3'], grads['b3'] = grad_w3 + self.reg * W3, grad_b3
        grad_layer2_relu = relu_backward(grad_x3, layer2_relu_cache)
        grad_x2, grad_w2, grad_b2 = affine_backward(grad_layer2_relu, layer2_activation_cache)
        grads['W2'], grads['b2'] = grad_w2 + self.reg * W2, grad_b2
        _, dw1, db1 = conv_relu_pool_backward(grad_x2, layer1_cache)
        grads['W1'], grads['b1'] = dw1 + self.reg * W1, db1
        # L2 regularization (0.5 factor matches the reg*W gradient terms above).
        loss += self.reg * 0.5 * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
        return loss, grads
|
lkuligin/cs231n | utils/test_utils.py | from random import randrange
import unittest
import numpy as np
from past.builtins import xrange
class TestCaseWithParams(unittest.TestCase):
""" TestCase classes that want to be parametrized should
inherit from this class.
"""
def __init__(self, methodName='runTest', kwargs={}):
super(TestCaseWithParams, self).__init__(methodName)
self.kwargs = kwargs
@staticmethod
def get_suite(testcase_class, kwargs={}):
""" Create a suite containing all tests taken from the given
subclass, passing them the parameter 'param'.
"""
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_class)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_class(name, kwargs=kwargs))
return suite
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5, printable=False):
    """
    Spot-check an analytic gradient against a centered numerical estimate.

    Samples `num_checks` random coordinates of `x`, perturbs each by +-h and
    compares (f(x+h) - f(x-h)) / (2h) with analytic_grad at that coordinate.
    `x` is perturbed and restored in place, so `f` may read `x` directly.
    If `printable` is True the comparison is only printed; otherwise an
    assertion error is raised on mismatch.
    """
    # BUGFIX: use the builtin range; xrange required the Py2-compat `past`
    # package and fails on a plain Python 3 install.
    for _ in range(num_checks):
        ix = tuple([randrange(m) for m in x.shape])
        oldval = x[ix]
        x[ix] = oldval + h  # increment by h
        fxph = f(x)  # evaluate f(x + h)
        x[ix] = oldval - h  # decrement by h
        fxmh = f(x)  # evaluate f(x - h)
        x[ix] = oldval  # reset
        grad_numerical = (fxph - fxmh) / (2 * h)
        grad_analytic = analytic_grad[ix]
        # NOTE(review): divides by |num| + |analytic| — zero if both gradients
        # vanish at this coordinate; confirm inputs avoid that case.
        rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))
        if printable:
            print('numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error))
        else:
            np.testing.assert_allclose(grad_numerical, grad_analytic, 1e-04)
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
    """
    Naive centered-difference numerical gradient of f at x.
    - f should be a function that takes a single argument
    - x is the point (numpy array) to evaluate the gradient at; it is
      perturbed in place and restored before returning.
    """
    f(x)  # evaluate once at the original point (call kept from the original)
    grad = np.zeros_like(x)
    # Visit every coordinate of x.
    for ix in np.ndindex(*x.shape):
        saved = x[ix]
        x[ix] = saved + h
        f_plus = f(x)   # f(x + h)
        x[ix] = saved - h
        f_minus = f(x)  # f(x - h)
        x[ix] = saved   # restore
        # Centered difference: the slope along this coordinate.
        grad[ix] = (f_plus - f_minus) / (2 * h)
        if verbose:
            print(ix, grad[ix])
    return grad
def eval_numerical_gradient_array(f, x, df, h=1e-5):
    """
    Evaluate a numeric gradient for a function that accepts a numpy
    array and returns a numpy array; df is the upstream gradient.
    """
    grad = np.zeros_like(x)
    # Visit every coordinate of x, perturbing it in place.
    for ix in np.ndindex(*x.shape):
        saved = x[ix]
        x[ix] = saved + h
        f_plus = f(x).copy()
        x[ix] = saved - h
        f_minus = f(x).copy()
        x[ix] = saved
        # Chain rule: project the output difference onto df.
        grad[ix] = np.sum((f_plus - f_minus) * df) / (2 * h)
    return grad
|
lkuligin/cs231n | assignment2/classifiers/fc_net.py | from builtins import range
from builtins import object
import numpy as np
from layers import *
from layer_utils import *
class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network with ReLU nonlinearity and
    softmax loss that uses a modular layer design. We assume an input dimension
    of D, a hidden dimension of H, and perform classification over C classes.
    The architecure should be affine - relu - affine - softmax.
    Note that this class does not implement gradient descent; instead, it
    will interact with a separate Solver object that is responsible for running
    optimization.
    The learnable parameters of the model are stored in the dictionary
    self.params that maps parameter names to numpy arrays.
    """
    def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
                 weight_scale=1e-3, reg=0.0):
        """
        Initialize a new network.
        Inputs:
        - input_dim: An integer giving the size of the input
        - hidden_dim: An integer giving the size of the hidden layer
        - num_classes: An integer giving the number of classes to classify
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - reg: Scalar giving L2 regularization strength.
        """
        self.params = {}
        self.reg = reg
        self.params['W1'] = np.random.normal(0, weight_scale, [input_dim, hidden_dim])
        self.params['b1'] = np.zeros(hidden_dim)
        self.params['W2'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])
        self.params['b2'] = np.zeros(num_classes)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for a minibatch of data.
        Inputs:
        - X: Array of input data of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,). y[i] gives the label for X[i].
        Returns:
        If y is None, a test-time forward pass returning scores of shape (N, C).
        Otherwise a tuple (loss, grads) where grads maps each key of
        self.params to the gradient of the loss w.r.t. that parameter.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        X_flattened = np.reshape(X, [X.shape[0], -1])
        N, _ = X_flattened.shape
        # Forward pass: affine - relu - affine.
        hidden_in = X_flattened.dot(W1) + b1
        hidden_out = np.maximum(hidden_in, 0.)
        scores = hidden_out.dot(W2) + b2
        # If the targets are not given then jump out, we're done.
        if y is None:
            return scores
        # Softmax loss; subtract the per-row max for numerical stability
        # (vectorized, replacing the slow apply_along_axis).
        shifted = scores - np.max(scores, axis=1, keepdims=True)
        correct_class = shifted[np.arange(N), y]
        log_denominator = np.sum(np.exp(shifted), axis=1)
        loss = (-np.sum(correct_class) + np.sum(np.log(log_denominator))) / N
        loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
        # Backward pass: compute gradients.
        grads = {}
        dscores = np.exp(shifted) / log_denominator[:, np.newaxis]
        dscores[np.arange(N), y] -= 1.
        dscores /= N
        # BUGFIX: the regularization term in the loss is 0.5*reg*||W||^2, so
        # its gradient is reg*W. The previous code added 2*reg*W, which made
        # the analytic gradient inconsistent with the loss (gradient checks
        # failed whenever reg > 0).
        grads['W2'] = hidden_out.T.dot(dscores) + self.reg * W2
        grads['b2'] = np.sum(dscores, axis=0)
        dhidden = dscores.dot(W2.T)
        dhidden[hidden_in <= 0] = 0.  # ReLU gate
        grads['W1'] = X_flattened.T.dot(dhidden) + self.reg * W1
        grads['b1'] = np.sum(dhidden, axis=0)
        return loss, grads
class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden layers,
    ReLU nonlinearities, and a softmax loss function. This will also implement
    dropout and batch normalization as options. For a network with L layers,
    the architecture will be
    {affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
    where batch normalization and dropout are optional, and the {...} block is
    repeated L - 1 times.
    Similar to the TwoLayerNet above, learnable parameters are stored in the
    self.params dictionary and will be learned using the Solver class.
    """
    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=0, use_batchnorm=False, reg=0.0,
                 weight_scale=1e-2, dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.
        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
          the network should not use dropout at all.
        - use_batchnorm: Whether or not the network should use batch normalization.
        - reg: Scalar giving L2 regularization strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed using
          this datatype. float32 is faster but less accurate, so you should use
          float64 for numeric gradient checking.
        - seed: If not None, then pass this random seed to the dropout layers. This
          will make the dropout layers deteriminstic so we can gradient check the
          model.
        """
        self.use_batchnorm = use_batchnorm
        self.use_dropout = dropout > 0
        self.reg = reg
        # BUGFIX: the network has len(hidden_dims) hidden layers PLUS the final
        # scoring layer. The previous value len(hidden_dims) silently dropped
        # the last hidden dimension (a one-hidden-layer spec degenerated to a
        # plain linear classifier).
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}
        # Hidden layers W0..W{L-2} (plus optional batchnorm scale/shift).
        next_layer_input_dim = input_dim
        for i in range(self.num_layers - 1):
            self.params['W{0}'.format(i)] = np.random.normal(0, weight_scale, [next_layer_input_dim, hidden_dims[i]])
            self.params['b{0}'.format(i)] = np.zeros([hidden_dims[i]])
            next_layer_input_dim = hidden_dims[i]
            if self.use_batchnorm:
                self.params['beta{0}'.format(i)] = np.zeros([hidden_dims[i]])
                self.params['gamma{0}'.format(i)] = np.ones([hidden_dims[i]])
        # Final scoring layer W{L-1}: last hidden dim -> num_classes.
        self.params['W{0}'.format(self.num_layers-1)] = np.random.normal(0, weight_scale, [next_layer_input_dim, num_classes])
        self.params['b{0}'.format(self.num_layers-1)] = np.zeros(num_classes)
        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the mode
        # (train / test). You can pass the same dropout_param to each dropout layer.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed
        # With batch normalization we need to keep track of running means and
        # variances, so we need to pass a special bn_param object to each batch
        # normalization layer. You should pass self.bn_params[0] to the forward pass
        # of the first batch normalization layer, self.bn_params[1] to the forward
        # pass of the second batch normalization layer, etc.
        self.bn_params = []
        if self.use_batchnorm:
            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
        # Cast all parameters to the correct datatype
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.
        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'
        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.use_batchnorm:
            for bn_param in self.bn_params:
                bn_param['mode'] = mode
        scores = None
        # Per-layer caches for the backward pass, keyed by layer index.
        activation_cache = {}
        relu_cache = {}
        batchnorm_cache = {}
        dropout_cache = {}
        layer_output = X
        for i in range(self.num_layers):
            W, b = self.params['W{0}'.format(i)], self.params['b{0}'.format(i)]
            if i == self.num_layers-1:
                # Final affine layer produces the class scores (no relu).
                scores, activation_cache[i] = affine_forward(layer_output, W, b)
            else:
                layer_activation_input, activation_cache[i] = affine_forward(layer_output, W, b)
                if self.use_batchnorm:
                    layer_activation_input, batchnorm_cache[i] = batchnorm_forward(layer_activation_input,
                                                                                  self.params['gamma{0}'.format(i)],
                                                                                  self.params['beta{0}'.format(i)],
                                                                                  self.bn_params[i])
                layer_output, relu_cache[i] = relu_forward(layer_activation_input)
                if self.use_dropout:
                    layer_output, dropout_cache[i] = dropout_forward(layer_output, self.dropout_param)
        # If test mode return early
        if mode == 'test':
            return scores
        loss, grads = 0.0, {}
        layer_grad = scores
        grad_x, grad_w, grad_b = None, None, None
        adjust_loss_for_reg = lambda w: 0.5 * self.reg * (np.sum(np.square(w)))
        # Backward pass, mirroring the forward order in reverse.
        for i in range(self.num_layers-1, -1, -1):
            W, b = self.params['W{0}'.format(i)], self.params['b{0}'.format(i)]
            if i == self.num_layers-1:
                loss, layer_grad = softmax_loss(layer_grad, y)
                grad_x, grad_w, grad_b = affine_backward(layer_grad, activation_cache[i])
            else:
                if self.use_dropout:
                    grad_x = dropout_backward(grad_x, dropout_cache[i])
                layer_grad = relu_backward(grad_x, relu_cache[i])
                if self.use_batchnorm:
                    layer_grad, grad_gamma, grad_beta = batchnorm_backward(layer_grad, batchnorm_cache[i])
                    grads['beta{0}'.format(i)] = grad_beta
                    grads['gamma{0}'.format(i)] = grad_gamma
                grad_x, grad_w, grad_b = affine_backward(layer_grad, activation_cache[i])
            grads['W{0}'.format(i)] = grad_w + self.reg * W
            grads['b{0}'.format(i)] = grad_b
            loss += adjust_loss_for_reg(W)
        return loss, grads
|
lkuligin/cs231n | assignment1/classifiers/tests/test_knn.py | <reponame>lkuligin/cs231n
import numpy as np
import os
import sys
import unittest
sys.path.insert(0,'..')
sys.path.insert(0,'../../../utils')
import knn
import test_utils
class KNearestNeighborTest(test_utils.TestCaseWithParams):
    """Checks that all distance implementations agree and labels predict."""

    def setUp(self):
        train_x = np.array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
        train_y = np.array([0, 1, 1])
        self.classifier = knn.KNearestNeighbor()
        self.classifier.train(train_x, train_y)

    def _check_distances(self, compute):
        # Every implementation must reproduce the reference distance matrix.
        np.testing.assert_allclose(compute(self.kwargs['input']), self.kwargs['dists'])

    def test_compute_distances_two_loops(self):
        self._check_distances(self.classifier.compute_distances_two_loops)

    def test_compute_distances_one_loop(self):
        self._check_distances(self.classifier.compute_distances_one_loop)

    def test_compute_distances_no_loops(self):
        self._check_distances(self.classifier.compute_distances_no_loops)

    def test_predict_labels(self):
        for k, expected_key in ((1, 'pred_k1'), (2, 'pred_k2')):
            pred = self.classifier.predict_labels(self.kwargs['dists'], k)
            np.testing.assert_allclose(pred, self.kwargs[expected_key])
if __name__ == '__main__':
    # Two query points against the 3-sample training fixture in setUp.
    fixture = {
        'dists': np.array([[2., 0, 2], [6, 4, 2]]),
        'input': np.array([[2, 3, 4, 5], [4, 5, 6, 7]]),
        'pred_k1': np.array([1., 1]),
        'pred_k2': np.array([0., 1]),
    }
    suite = unittest.TestSuite()
    suite.addTest(test_utils.TestCaseWithParams.get_suite(KNearestNeighborTest, kwargs=fixture))
    unittest.TextTestRunner(verbosity=2).run(suite)
|
sclel016/ict_py | ict/RigolDG.py | <filename>ict/RigolDG.py
from ict.Interface import Interface
import numpy as np
import re
class AwgChannel:
    """
    Class designed to interface with individual channels on the Rigol DG1062Z AWG
    """
    def __init__(self, awg, ch_idx):
        self.__awg = awg
        self.ch_idx = ch_idx + 1  # SCPI channel numbers are 1-based

    # ENABLE
    @property
    def enabled(self):
        """Output state of this channel: 1 when ON, 0 when OFF."""
        ret = self.__awg.query("OUTP%i?" % self.ch_idx)
        if "OFF" in ret:
            return 0
        elif "ON" in ret:
            return 1
        else:
            raise ValueError("Unexpected return string: %s", ret)

    @enabled.setter
    def enabled(self, val):
        # NOTE(review): values below 0 are silently ignored (original behavior).
        if val >= 1:
            self.__awg.write("OUTP%i ON" % self.ch_idx)
        elif val == 0:
            self.__awg.write("OUTP%i OFF" % self.ch_idx)

    # Channel Mode
    @property
    def mode(self):
        """First token of the APPL? reply, e.g. 'SIN' or 'SQU'."""
        # Raw string for the regex (avoids invalid-escape warnings).
        return re.search(r"(\w+)\,", self.__awg.query(":SOUR%i:APPL?" % self.ch_idx)).group(1)

    # SAMPLE RATE (Samp/s)
    @property
    def sample_rate(self):
        ret_str = self.__awg.query(":SOUR%i:FUNC:ARB:SRAT?" % self.ch_idx)
        return self.__awg.parse_sci(ret_str)

    @sample_rate.setter
    def sample_rate(self, val):
        self.__awg.write(":SOUR%i:FUNC:ARB:SRAT %i" % (self.ch_idx, int(val)))

    # Voltage Offset (V_DC)
    @property
    def v_off(self):
        ret_str = self.__awg.query(":SOUR%i:VOLT:OFFS?" % self.ch_idx)
        return self.__awg.parse_sci(ret_str)

    @v_off.setter
    def v_off(self, val):
        self.__awg.write(":SOUR%i:VOLT:OFFS %f" % (self.ch_idx, val))

    # Maximum Voltage (V_DC)
    @property
    def v_high(self):
        ret_str = self.__awg.query(":SOUR%i:VOLT:HIGH?" % self.ch_idx)
        return self.__awg.parse_sci(ret_str)

    @v_high.setter
    def v_high(self, val):
        self.__awg.write(":SOUR%i:VOLT:HIGH %f" % (self.ch_idx, val))

    # Minimum Voltage (V_DC)
    @property
    def v_low(self):
        ret_str = self.__awg.query(":SOUR%i:VOLT:LOW?" % self.ch_idx)
        return self.__awg.parse_sci(ret_str)

    @v_low.setter
    def v_low(self, val):
        self.__awg.write(":SOUR%i:VOLT:LOW %f" % (self.ch_idx, val))

    # Voltage Amplitude (V_pp)
    @property
    def amplitude(self):
        ret_str = self.__awg.query(":SOUR%i:VOLT?" % self.ch_idx)
        return self.__awg.parse_sci(ret_str)

    @amplitude.setter
    def amplitude(self, val):
        self.__awg.write(":SOUR%i:VOLT %f" % (self.ch_idx, val))

    def transfer_wave(self, wave, Fs):
        """ Set the AWG to output a arbitrary waveform
        :param
            wave: Arbitrary wave samples (V)
            Fs: Sample Rate (Samp/s)
        :return:
        """
        # Set channel to ARB mode
        self.__awg.write(":SOUR%i:APPL:ARB" % self.ch_idx)
        # Set Sample Rate
        self.sample_rate = Fs
        # Configure optimal voltage range
        self.v_high = wave.max()
        self.v_low = wave.min()
        # Generate uint16 codes (14-bit DAC range: 0..16383)
        codes = (wave - wave.min()) * (float(16383) / (wave.max() - wave.min()))
        codes = np.round(codes)
        # Send data in chunks; the final chunk is flagged END, others CON.
        header_str = ':SOUR%i:TRAC:DATA:DAC16 VOLATILE,' % self.ch_idx
        NUM_PTS_PER_WRITE = 16384
        num_writes = int(np.ceil(codes.size / NUM_PTS_PER_WRITE))
        for slc_idx in np.arange(0, num_writes):
            if slc_idx == num_writes - 1:
                # BUGFIX: removed stray debug print of the slice offset.
                codes_slc = codes[slc_idx * NUM_PTS_PER_WRITE:]
                self.__awg.write_binary_values(header_str + 'END,', codes_slc.astype("uint16"), datatype='H')
            else:
                codes_slc = codes[slc_idx * NUM_PTS_PER_WRITE:(slc_idx + 1) * NUM_PTS_PER_WRITE]
                self.__awg.write_binary_values(header_str + 'CON,', codes_slc.astype("uint16"), datatype='H')

    def set_sine(self, **kwargs):
        """ Set the AWG to output a sine wave
        :param
            freq: Frequency (Hz)
            vpp: Peak to Peak Voltage (V)
            dc_offs: DC Voltage Offset (V)
            phase: Phase Offset (Degrees)
        :return:
        """
        opt = dict()
        opt['freq'] = kwargs.get('freq', 1e3)
        opt['vpp'] = kwargs.get('vpp', 5)
        opt['dc_offs'] = kwargs.get('dc_offs', 0)
        opt['phase'] = kwargs.get('phase', 0)
        self.__awg.write(":SOUR%i:APPL:SIN %f,%f,%f,%f" % (self.ch_idx, opt['freq'], opt['vpp'],
                                                           opt['dc_offs'], opt['phase']))

    def set_square(self, **kwargs):
        """ Set the AWG to output a square wave
        :param
            freq: Frequency (Hz)
            vpp: Peak to Peak Voltage (V)
            dc_offs: DC Voltage Offset (V)
            phase: Phase Offset (Degrees)
        :return:
        """
        # Consistency fix: local dict renamed Opt -> opt to match set_sine.
        opt = dict()
        opt['freq'] = kwargs.get('freq', 1e3)
        opt['vpp'] = kwargs.get('vpp', 5)
        opt['dc_offs'] = kwargs.get('dc_offs', 0)
        opt['phase'] = kwargs.get('phase', 0)
        self.__awg.write(":SOUR%i:APPL:SQU %f,%f,%f,%f" % (self.ch_idx, opt['freq'], opt['vpp'],
                                                           opt['dc_offs'], opt['phase']))

    def get_output(self):
        """ Get info on the current transmission mode
        :param
        :return: Dictionary with
            type: Type of waveform
            freq: Frequency (Hz)
            vpp: Peak to Peak Voltage (V)
            dc_offs: DC Voltage Offset (V)
            phase: Phase Offset (Degrees)
        """
        ret_str = self.__awg.query("SOUR%i:APPL?" % self.ch_idx)
        # Reply looks like "SIN,1000.0,5.0,0.0,0.0" wrapped in quotes.
        ret_str = ret_str.rstrip("\"\n")
        ret_str = ret_str.lstrip("\"")
        ret_list = ret_str.split(",")
        ret_dict = dict()
        ret_dict['type'] = ret_list[0]
        ret_dict['freq'] = float(ret_list[1])
        ret_dict['vpp'] = float(ret_list[2])
        ret_dict['dc_offs'] = float(ret_list[3])
        ret_dict['phase'] = float(ret_list[4])
        return ret_dict
class RigolDG(Interface):
    """
    Driver for the Rigol DG1000Z-series arbitrary waveform generators,
    exposing one AwgChannel per hardware channel via self.ch.
    """
    NUM_CHAN = 2

    def __init__(self, ip):
        super().__init__(ip)
        # self.inst.write_termination = '\n'
        self.inst.chunk_size = 16e6 * 16 + 1e3
        # NOTE(review): unknown models leave MAX_SAMPLE_RATE unset (original
        # behavior) — confirm whether other models should be supported.
        if "DG1022Z" in self.ident:
            self.MAX_SAMPLE_RATE = int(20E6)
        elif "DG1062Z" in self.ident:
            self.MAX_SAMPLE_RATE = int(60E6)
        # BUGFIX: `ch` used to be a mutable class attribute, so every RigolDG
        # instance appended to (and shared) the same channel list. Build a
        # fresh per-instance list instead.
        self.ch = [AwgChannel(self, ii) for ii in range(self.NUM_CHAN)]

    # Couple Channels
    @property
    def couple(self):
        """Channel-coupling state: 1 when ON, 0 when OFF."""
        ret_str = self.query(":COUP?")
        if "OFF" in ret_str:
            return 0
        elif "ON" in ret_str:
            return 1
        else:
            raise ValueError("Unexpected return string: %s", ret_str)

    @couple.setter
    def couple(self, val):
        if val >= 1:
            self.write("COUP ON")
        elif val == 0:
            self.write("COUP OFF")
|
sclel016/ict_py | ict/__init__.py | from .Interface import Interface
from .RigolDG import RigolDG
from .SiglentSDS import SiglentSDS
from .utilities import Waveform
# from .utilities import |
sclel016/ict_py | ict/SiglentSDS.py | from ict.Interface import Interface
from ict.utilities import Waveform
import numpy as np
import re
class ScopeChannel:
    """
    Class designed to interface with individual channels on the Siglent SDS 1202X-E Oscilloscope
    """
    def __init__(self, scope, ch_idx):
        self._scope = scope
        self.ch_idx = ch_idx + 1  # SCPI channel numbers are 1-based

    # Enable channel
    @property
    def enabled(self):
        """Trace state of this channel: 1 when ON, 0 when OFF."""
        ret = self._scope.query("C%i:TRA?" % self.ch_idx)
        if "OFF" in ret:
            return 0
        elif "ON" in ret:
            return 1
        else:
            raise ValueError("Unexpected return string: %s", ret)

    @enabled.setter
    def enabled(self, val):
        if val >= 1:
            self._scope.write("C%i:TRA ON" % self.ch_idx)
        elif val == 0:
            self._scope.write("C%i:TRA OFF" % self.ch_idx)

    # DC Offset (V_DC)
    @property
    def v_off(self):
        ret_str = self._scope.query("C%i:OFST?" % self.ch_idx)
        return self._scope.parse_sci(ret_str)

    @v_off.setter
    def v_off(self, val):
        self._scope.write("C%i:OFST %f" % (self.ch_idx, val))

    # Channel Skew (+-100 NS)
    @property
    def skew(self):
        ret_str = self._scope.query("C%i:SKEW?" % self.ch_idx)
        return self._scope.parse_sci(ret_str)

    @skew.setter
    def skew(self, val):
        self._scope.write("C%i:SKEW %E" % (self.ch_idx, val))

    # V_DIV (V_pp)
    @property
    def v_div(self):
        ret_str = self._scope.query("C%i:VOLT_DIV?" % self.ch_idx)
        return self._scope.parse_sci(ret_str)

    @v_div.setter
    def v_div(self, val):
        self._scope.write("C%i:VOLT_DIV %E" % (self.ch_idx, val))

    @property
    def all(self):
        """All PAVA measurements for this channel as a {NAME: float} dict."""
        ret_str = self._scope.query("C%i:PAVA? ALL" % self.ch_idx)
        # BUGFIX: str.lstrip() strips a character SET, not a prefix, which
        # mangled the first measurement name (e.g. 'AMPL' -> 'MPL'). Remove
        # the echoed command prefix explicitly instead.
        prefix = 'C%i:PAVA ' % self.ch_idx
        if ret_str.startswith(prefix):
            ret_str = ret_str[len(prefix):]
        ret_str = ret_str.rstrip('\n')
        # Capture name and value
        name_expr = r"(?P<name>[A-Z]{3,})"
        val_expr = r"(?P<val>[+-]?\d+\.\d+[eE]?[+-]?\d+)"
        r = re.compile(name_expr + "," + val_expr)
        # Convert to dict() of floats
        ret_dict = dict(r.findall(ret_str))
        for key in ret_dict:
            ret_dict[key] = float(ret_dict[key])
        return ret_dict

    def _get_waveform(self):
        """Raw signed 8-bit sample codes for this channel."""
        wav_resp = self._scope.query_binary_values("C%i:WF? DAT2" % self.ch_idx, datatype='b')
        return np.asarray(wav_resp)

    def get_waveform(self):
        """Capture the current trace as a populated Waveform object."""
        # BUGFIX: `wave = Waveform` bound the CLASS itself, so every call (and
        # every channel) wrote into shared class attributes. Instantiate it.
        wave = Waveform()
        # Y axis: codes span +-(25 * v_div / div), offset removed.
        wave.codes = self._get_waveform()
        wave.v_off = self.v_off
        wave.v_div = self.v_div
        wave.y_values = wave.codes * (wave.v_div / 25) - wave.v_off
        wave.y_units = "V"
        wave.y_name = "Samples"
        # X axis: sample index scaled by the scope sample rate, trigger-aligned.
        wave.sample_rate = self._scope.sample_rate
        wave.trg_offset = self._scope.trg_offset
        wave.x_values = np.arange(0, wave.codes.size) * (1 / wave.sample_rate) + wave.trg_offset
        wave.x_units = "s"
        wave.x_name = "Time"
        return wave
class SiglentSDS(Interface):
    """
    Class designed to interface with the Siglent SDS 1202X-E Oscilloscope,
    exposing one ScopeChannel per hardware channel via self.ch.
    """
    NUM_CHAN = 2

    def __init__(self, ip):
        super().__init__(ip)
        self.inst.chunk_size = 14e6 * 8 + 1e3  # 14Mpts * 8bits/pt + overhead
        # BUGFIX: `ch` used to be a mutable class attribute, so every
        # SiglentSDS instance appended to (and shared) the same channel list.
        self.ch = [ScopeChannel(self, ii) for ii in range(self.NUM_CHAN)]

    # WaveForm SetUp
    @property
    def wfsu(self):
        # BUGFIX: was self._scope.query(...), but this class has no _scope
        # attribute (that name belongs to ScopeChannel) and always raised
        # AttributeError; query the instrument directly.
        ret_str = self.query("WFSU?")
        return ret_str.split()

    # SAmple RAte (Samp/s)
    @property
    def sample_rate(self):
        ret_str = self.query("SARA?")
        return self.parse_sci(ret_str)

    # Trigger Offset (s)
    @property
    def trg_offset(self):
        ret_str = self.query("TRDL?")
        return self.parse_sci(ret_str)

    @trg_offset.setter
    def trg_offset(self, val):
        self.write("TRDL %E" % val)

    # Time Division (s)
    @property
    def time_div(self):
        ret_str = self.query("TDIV?")
        return self.parse_sci(ret_str)

    @time_div.setter
    def time_div(self, val):
        self.write("TDIV %E" % val)

    def get_phase_delay(self):
        """ Measures the phase delay between CH1 and CH2
        :return: Phase Delay (Degrees)
        """
        ret_str = self.query("C1-C2:MEAD? PHA")
        expr = r"PHA,(?P<val>[+-]?\d+(.\d+)?)degree"
        return float(re.search(expr, ret_str).group('val'))

    def get_time_delay(self):
        """ Measures the time delay between CH1 and CH2
        :return: Time Delay (s)
        """
        ret_str = self.query("C1-C2:MEAD? FRR")
        return self.parse_sci(ret_str)
|
sclel016/ict_py | ict/Interface.py | <reponame>sclel016/ict_py<gh_stars>0
import pyvisa
import re
class Interface:
    """Thin TCP/IP VISA wrapper shared by the instrument drivers.

    Opens a pyvisa resource at construction and delegates the common
    read/write/query calls to it.
    """
    inst = ''
    ip = ''
    ident = ''
    # NOTE(review): a single ResourceManager is created at class-definition
    # time and shared by all instruments — confirm this is intentional.
    rm = pyvisa.ResourceManager()

    def __init__(self, ip):
        self.ip = ip
        self.inst = self.rm.open_resource('TCPIP0::%s::INSTR' % self.ip)
        self.ident = self.inst.query("*IDN?")

    def query(self, cmd):
        return self.inst.query(cmd)

    def write(self, cmd):
        # Commands are echoed to stdout as a simple debug trace.
        print(cmd)
        self.inst.write(cmd)

    # BUGFIX: `read` was defined twice; the first definition (delegating to
    # query_ascii_values) was dead code silently shadowed by this one.
    # ASCII reads remain available through query_ascii_values below.
    def read(self, *args, **kwargs):
        return self.inst.read(*args, **kwargs)

    def write_binary_values(self, *args, **kwargs):
        self.inst.write_binary_values(*args, **kwargs)

    def query_binary_values(self, *args, **kwargs):
        return self.inst.query_binary_values(*args, **kwargs)

    def query_ascii_values(self, *args, **kwargs):
        return self.inst.query_ascii_values(*args, **kwargs)

    def read_raw(self, *args, **kwargs):
        return self.inst.read_raw(*args, **kwargs)

    def read_bytes(self, *args, **kwargs):
        return self.inst.read_bytes(*args, **kwargs)

    def parse_sci(self, in_str):
        """Parse the first scientific-notation float found in in_str."""
        expr = r"[+-]?\d+\.\d+([eE][+-]?\d+)?"
        return float(re.search(expr, in_str).group())
|
sclel016/ict_py | ict/utilities.py | <gh_stars>0
class Waveform:
    """Container for one captured trace: x/y sample arrays plus axis labels."""
    # Class-level defaults; callers overwrite these per capture.
    x_values = []
    x_units = ''
    x_name = ''
    y_values = []
    y_units = ''
    y_name = ''
    user_field = ''

    def __init__(self):
        pass

    @property
    def x_label(self):
        # BUGFIX: removed stray debug print of the label.
        return "%s (%s)" % (self.x_name, self.x_units)

    @property
    def y_label(self):
        return "%s (%s)" % (self.y_name, self.y_units)
def freq_response(scope, awg, freqs):
    """Stub for a frequency-response sweep: enables both channels on the
    scope and the AWG. The sweep over `freqs` is not implemented yet."""
    # BUGFIX: the channel attribute is `enabled` (a property that issues the
    # SCPI command); assigning `.enable` just created an unused attribute.
    scope.ch[0].enabled = 1
    scope.ch[1].enabled = 1
    # BUGFIX: awg.ch[0] was enabled twice; enable both AWG channels instead.
    awg.ch[0].enabled = 1
    awg.ch[1].enabled = 1
    # scope.t_div
    pass
|
sclel016/ict_py | main.py |
# Manual bench script: exercises the AWG and scope drivers interactively
# (the #%% markers are IDE cell separators). Requires both instruments to be
# reachable at the hard-coded LAN addresses below.
from ict import RigolDG
from ict import SiglentSDS
from ict import Waveform
import matplotlib.pyplot as plt
import numpy as np
# if __name__ == '__main__':
#%%
# Connect to the arbitrary waveform generator.
awg = RigolDG('192.168.1.14')
#
# awg.ch[0].set_sine(freq = 1e3,vpp=4)
# awg.ch[0].set_square(freq = 1e4,vpp=4)
# #
# A = 1
# N = 20e3
# Fs = 20e6
# f = 993
# awg.ch[0].sample_rate = Fs
# # # # test = np.sin(np.arange(0,10))
# # # # awg.ch[0].transfer_wave(test)
# awg.ch[0].transfer_wave(A*np.sin(2*np.pi * np.arange(0, N) * Fs / f),Fs)
# # awg.ch[0].transfer_wave(np.array([1,2,3,2,1,-1,-4]),Fs)
#%%
# Connect to the oscilloscope and read the CH1-CH2 phase measurement.
scope = SiglentSDS('192.168.1.16')
scope.get_phase_delay()
# scope.time_div
# scope.time_div = 100e-6
# scope.measure_phase()
# Dump all automatic measurements for channel 1.
print(scope.ch[0].all)
#
# wave = scope.ch[0].get_waveform()
#
# plt.plot(wave.x_values, wave.y_values)
#%%
|
Atrusberlin/Learning | unittesting/python/TestCaseTest.py | <reponame>Atrusberlin/Learning<filename>unittesting/python/TestCaseTest.py
from WasRun import WasRun
from TestCase import *
from TestSuite import *
class TestCaseTest(TestCase):
    """Self-tests for the minimal xUnit TestCase/TestResult/TestSuite."""

    def setUp(self):
        # Fresh result per test so counts don't leak between tests.
        self.result= TestResult()

    def testTemplateMethod(self):
        test= WasRun("testMethod")
        test.run(self.result)
        #print(self.test.log)
        assert("setUp testMethod tearDown " == test.log)

    def testResult(self):
        test= WasRun("testMethod")
        test.run(self.result)
        #print(result.summary())
        assert("1 run, 0 failed" == self.result.summary())

    def testFailedResult(self):
        test= WasRun("testBrokenMethod")
        test.run(self.result)
        # BUGFIX: was `assert("1 run, 1 failed", result.summary())`, which
        # asserts a non-empty tuple (always true) and read the module-level
        # `result` -- the test could never fail.  Compare the string
        # against this test's own result instead.
        assert("1 run, 1 failed" == self.result.summary())

    def testFailedResultFormatting(self):
        self.result.testStarted()
        self.result.testFailed()
        assert("1 run, 1 failed" == self.result.summary())

    def testSuite(self):
        suite= TestSuite()
        suite.add(WasRun("testMethod"))
        suite.add(WasRun("testBrokenMethod"))
        suite.run(self.result)
        assert("2 run, 1 failed" == self.result.summary())
# Module-level driver: run this file's own tests and print the summary.
suite= TestSuite()
suite.add(TestCaseTest("testTemplateMethod"))
suite.add(TestCaseTest("testResult"))
suite.add(TestCaseTest("testFailedResultFormatting"))
suite.add(TestCaseTest("testFailedResult"))
suite.add(TestCaseTest("testSuite"))
result= TestResult()
suite.run(result)
print (result.summary())
|
class TestCase:
    """Minimal xUnit-style test case (Kent Beck's TDD example).

    Subclasses define test methods; run() executes the method named at
    construction between setUp()/tearDown() and records the outcome on
    the supplied TestResult.
    """

    def __init__(self, name):
        # Name of the test method run() will invoke via getattr.
        self.name= name

    def setUp(self):
        pass

    def run(self, result):
        result.testStarted()
        # NOTE(review): setUp is outside the try, so a raising setUp
        # propagates and tearDown is skipped -- matches the original.
        self.setUp()
        try:
            method= getattr(self, self.name)
            method()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are not swallowed and recorded as failures.
            result.testFailed()
        self.tearDown()

    def tearDown(self):
        pass
class TestResult:
    """Accumulates how many tests ran and how many of them failed."""

    def __init__(self):
        self.runCount = 0
        self.errorCount = 0

    def testStarted(self):
        # Called once per test, before it executes.
        self.runCount += 1

    def testFailed(self):
        # Called when a test raises.
        self.errorCount += 1

    def summary(self):
        # Human-readable one-line report, e.g. "2 run, 1 failed".
        return "{0} run, {1} failed".format(self.runCount, self.errorCount)
SubhamRajgaria/Operating-Systems | Assn5/plot.py | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
# Plot the first 10k page references from a trace file where each line
# is "<time> <page_no>", then save the figure as tenk.png.
with open("instructions2.txt") as f:
    data = f.read()
data = data.split('\n')
# NOTE(review): x/y remain strings here; matplotlib treats string data
# categorically -- convert to int/float if numeric axes are intended.
x = [row.split(' ')[0] for row in data[0:10000]]
y = [row.split(' ')[1] for row in data[0:10000]]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("References ")
ax1.set_xlabel('Time')
ax1.set_ylabel('Page No')
ax1.plot(x,y,'ro', label='The page reference')
leg = ax1.legend()
plt.show()
fig.savefig("tenk.png")
shrikant9867/mycfo | mycfo/checklist/doctype/checklist/checklist.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import itertools
from frappe.model.document import Document
class Checklist(Document):
	"""Controller for the Checklist DocType.

	No custom server-side behaviour at present; see the commented-out
	validate/notification hooks below for previously planned logic.
	"""
	pass
# def validate(self):
# self.build_notification()
# self.make_todo()
# def build_notification(self):
# if self.task:
# for t in self.task:
# print t.assignee
# user_list = frappe.db.sql("""select t1.email from `tabUser` t1,`tabUserRole` t2
# where t1.name = t2.parent and t2.role = '{0}'""".format(t.assignee),as_list =1)
# chain = itertools.chain(*user_list)
# user = list(chain)
# self.notify_employee(user,"Checklist Created","Checklist Assigned")
# def notify_employee(self,receiver,subj,msg):
# """
# send mail notification
# """
# frappe.sendmail(receiver, subject=subj, message =msg)
|
shrikant9867/mycfo | mycfo/hooks.py | <reponame>shrikant9867/mycfo
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Basic app metadata read by Frappe's app registry and installer.
app_name = "mycfo"
app_title = "mycfo"
app_publisher = "Indictrans"
app_description = "Financial Service"
app_icon = "icon-th"
app_color = "grey"
app_email = "<EMAIL>"
app_version = "0.0.1"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
app_include_css = ["/assets/mycfo/css/jquery.rateyo.min.css", "/assets/mycfo/css/mycfo.css", "/assets/mycfo/css/select2.min.css"]
# NOTE(review): "assets/js/mycfo_report.min.js" lacks the leading slash
# the other entries have -- confirm it still resolves on every page.
app_include_js = ["/assets/mycfo/js/jquery.rateyo.min.js","/assets/mycfo/js/jquery.twbsPagination.min.js", "assets/js/mycfo_report.min.js", "/assets/mycfo/js/select2.full.js"]
# include js, css files in header of web template
# web_include_css = "/assets/mycfo/css/mycfo.css"
# web_include_js = "/assets/mycfo/js/mycfo.js"
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "mycfo.install.before_install"
# after_install = "mycfo.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "mycfo.notifications.get_notification_config"
# Session lifecycle hooks: record login/logout times in a login log.
on_session_creation = "mycfo.mycfo_utils.make_login_log"
on_logout = "mycfo.mycfo_utils.update_login_log"
# Records exported with the app and re-imported on install/migrate.
fixtures = ['Custom Field', 'Property Setter', "Role","Discussion Category", "Print Format"]
# Permissions
# -----------
# Permissions evaluated in scripted ways
# Per-DocType SQL fragments appended to list-view queries to restrict
# which records each user can see.
permission_query_conditions = {
	"IP File": "mycfo.ip_library.doctype.ip_file.ip_file.get_permission_query_conditions",
	"IP Approver":"mycfo.ip_library.doctype.ip_approver.ip_approver.get_permission_query_conditions",
	"IP Download Approval":"mycfo.ip_library.doctype.ip_download_approval.ip_download_approval.get_permission_query_conditions",
	"Customer":"mycfo.mycfo.doctype.project_commercial.project_commercial.get_permission_query_conditions_for_customer",
	"Project Commercial":"mycfo.mycfo.doctype.project_commercial.project_commercial.get_permission_query_conditions_for_project",
	"Operational Matrix":"mycfo.mycfo.doctype.project_commercial.project_commercial.get_permission_query_conditions_for_om",
	"Checklist Task":"mycfo.checklist.doctype.checklist_task.checklist_task.get_permission_query_conditions",
	"Answer Sheet":"mycfo.trainings.doctype.answer_sheet.answer_sheet.get_permission_query_conditions",
	"Assessment":"mycfo.trainings.doctype.assessment.assessment.get_permission_query_conditions",
	"Training":"mycfo.trainings.doctype.training.training.get_permission_query_conditions",
	"KPI":"mycfo.mycfo.doctype.project_commercial.project_commercial.get_permission_query_conditions_for_kpi",
}
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# Background jobs run by the Frappe scheduler.
scheduler_events = {
	"hourly": [
		"mycfo.ip_library.scheduler_tasks.update_ip_download_approval_status"
	],
	"daily":[
		"mycfo.ip_library.scheduler_tasks.send_notification_for_expiry_of_document",
		"mycfo.ip_library.scheduler_tasks.archive_document",
		# "mycfo.discussion_forum.doctype.discussion_topic.discussion_topic.mail_topic_list",
		"mycfo.mycfo.doctype.el_sign_off_details.el_sign_off_details.send_notification_to_el_sign_off"
	]
}
# permission_query_conditions = {
# "Customer":"mycfo.mycfo.doctype.project_commercial.project_commercial.get_permission_query_conditions"
# }
# Testing
# -------
# before_tests = "mycfo.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "mycfo.event.get_events"
# }
|
shrikant9867/mycfo | mycfo/checklist/doctype/checklist_task/checklist_task.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import itertools
from frappe import _
import json
import datetime
from datetime import date
from datetime import datetime, timedelta
from frappe.utils import getdate, date_diff, add_days, cstr
from frappe.model.document import Document
from frappe.model.naming import make_autoname
class ChecklistTask(Document):
	"""Controller for the Checklist Task DocType.

	Mirrors the `ct_reassign` child rows into standalone Checklist Task
	documents, keeps dependent-task statuses in sync, and on submit
	rolls completion up to the parent Checklist Requisition.
	"""

	def validate(self):
		self.validate_status()
		self.validate_dates()
		self.sync_tasks()
		# Quick-view rows are rebuilt in onload(); never persist them.
		self.ct_reassign = []

	def autoname(self):
		# Sub-tasks are numbered under their parent task's series.
		if self.checklist_task:
			self.name = make_autoname(self.checklist_task + '-' + '.#')
		else:
			self.name = make_autoname('CLT' + '.####')

	def onload(self):
		"""Load project tasks for quick view"""
		task_id_list = []
		if self.ct_depend_task:
			for i in self.ct_depend_task:
				task_id_list.append(i.task)
		if self.depends_on_task:
			# NOTE(review): assumes the (title, start date, project) lookup
			# matches at least one row; depends_task[0] raises IndexError
			# otherwise -- confirm against callers.
			depends_task = frappe.db.get_values("Checklist Task",
				{"title":self.depends_on_task,
				"expected_start_date":self.expected_start_date,
				"project":self.project},
				["status","name","title"], as_dict=True)
			if depends_task[0]['name'] not in task_id_list:
				self.append("ct_depend_task", {
					"task": depends_task[0]['name'],
					"status": depends_task[0]['status'],
					"title": depends_task[0]['title']
				})
		if not self.get("ct_reassign"):
			for task in self.get_tasks():
				self.append("ct_reassign", {
					"task_name": task.title,
					"status": task.status,
					"start_date": task.expected_start_date,
					"end_date": task.expected_end_date,
					"task_id": task.name,
					"des": task.des,
					"assignee": task.assignee,
					"user":task.user,
					"actual_end_date":task.end_date,
					"count":task.count,
					"tat":task.tat
					# "actual_time":task.actual_time
				})
		self.validate_depends_on_status()

	def validate_depends_on_status(self):
		# Refresh the cached status of each dependency row from the DB.
		for d in self.get("ct_depend_task"):
			depends_task = frappe.db.get_values("Checklist Task", {"name":d.task}, ["status"], as_dict=True)
			if depends_task:
				d.status = depends_task[0]["status"]

	def get_tasks(self):
		# All sub-tasks of this task within the same project.
		return frappe.get_all("Checklist Task", "*", {"project": self.project,"checklist_task":self.name}, order_by="expected_start_date asc")

	def sync_tasks(self):
		# Create/update one standalone Checklist Task per ct_reassign row.
		if self.flags.dont_sync_tasks: return
		task_names = []
		for t in self.ct_reassign:
			if t.task_id:
				task = frappe.get_doc("Checklist Task", t.task_id)
			else:
				task = frappe.new_doc("Checklist Task")
				task.project = self.project
				task.checklist_task=self.name
			task.update({
				# ct:cr
				"title": t.task_name,
				"status": t.status,
				"expected_start_date": t.start_date,
				"expected_end_date": t.end_date,
				"des": t.des,
				"assignee":t.assignee,
				"user":t.assignee,
				"to_be_processed_for":self.to_be_processed_for,
				"process_description":self.process_description,
				"checklist_name":self.checklist_name,
				"tat":t.tat
			})
			task.flags.ignore_links = True
			task.flags.from_project = True
			task.save(ignore_permissions = True)
			t.task_id = task.name

	def on_submit(self):
		# Only fully closed tasks may be submitted.
		if(self.status != "Closed"):
			frappe.throw(_("Task Status is Not Closed So Cannot Submit Task."))
		self.get_status_of_all()
		self.get_task_closed()

	def validate_dates(self):
		# NOTE(review): the first message calls .format(self.name) but the
		# string has no {0} placeholder, so the task name is never shown.
		if self.expected_start_date and self.expected_end_date and getdate(self.expected_start_date) > getdate(self.expected_end_date):
			frappe.throw(_("'Expected Start Date' can not be greater than 'Expected End Date' For Task").format(self.name))
		if self.actual_start_date and self.actual_end_date and getdate(self.actual_start_date) > getdate(self.actual_end_date):
			frappe.throw(_("'Actual Start Date' can not be greater than 'Actual End Date'"))

	def validate_status(self):
		# A task may only move to Closed once all dependencies are Closed.
		if self.status!=self.get_db_value("status") and self.status == "Closed":
			for d in self.ct_depend_task:
				if frappe.db.get_value("Checklist Task", d.task, "status") != "Closed":
					frappe.throw(_("Cannot close task as its dependant task {0} is not closed.").format(d.task))
					# NOTE(review): unreachable -- frappe.throw raises.
					self.status = "Open"

	# def update_time(self):
	# 	tl = frappe.db.sql("""select min(from_time) as start_date, max(to_time) as end_date,
	# 		sum(hours) as time from `tabChecklist Time Log` where task = %s and docstatus=1"""
	# 		,self.name, as_dict=1)[0]
	# 	if self.status == "Open":
	# 		self.status = "WIP"
	# 	self.actual_time= tl.time
	# 	self.actual_start_date= tl.start_date
	# 	self.actual_end_date= tl.end_date

	def get_status_of_all(self):
		"""
		1.on_submit of task update checklist requisition
		"""
		checklist_requisition = frappe.get_doc("Checklist Requisition",self.project)
		status_list = []
		for task in checklist_requisition.cr_task:
			status_list.append(task.status)
		if(status_list.count("Closed") == len(checklist_requisition.cr_task)):
			checklist_requisition.checklist_status = "Closed"
			frappe.db.set_value("Checklist Requisition",self.project,"checklist_status","Closed")
			Date = datetime.now()
			# BUGFIX: was `checklist_requisition.end_date == Date.now()`,
			# a no-op comparison; assign so the in-memory document matches
			# the value persisted via set_value below.
			checklist_requisition.end_date = Date.now()
			frappe.db.set_value("Checklist Requisition",self.project,"end_date",Date.now())
			tot_holiday = frappe.db.sql("""select count(*) from `tabHoliday List` h1,`tabHoliday` h2
				where h2.parent = h1.name and h1.name = 'Mycfo' and h2.holiday_date >= %s and h2.holiday_date <= %s""",(checklist_requisition.expected_start_date,Date.now()))
			d1 = Date
			d = checklist_requisition.expected_start_date
			d2 = datetime.combine(d, datetime.min.time())
			# Elapsed working days = calendar days - 1 - holidays in range.
			actual_time = abs((d2 - d1).days) - 1 - tot_holiday[0][0]
			checklist_requisition.count = actual_time
			frappe.db.set_value("Checklist Requisition",self.project,"count",actual_time)

	def get_task_closed(self):
		"""
		on_submit of each task
		For update Field name Task Closed IN checklist requisition
		"""
		checklist_requisition = frappe.get_doc("Checklist Requisition",self.project)
		counter = len(checklist_requisition.cr_task)
		# NOTE(review): len(filter(...)) works on Python 2 only (filter
		# returns an iterator on Python 3).
		closed_count = len(filter(lambda x: x.status=="Closed",checklist_requisition.cr_task))
		closed_task = "{1} / {0} Closed".format(counter,closed_count)
		checklist_requisition.task_closed = closed_task
		frappe.db.set_value("Checklist Requisition",self.project,"task_closed",closed_task)
def get_permission_query_conditions(user):
	"""List-view filter for Checklist Task: non-administrators only see
	their own tasks; Administrator gets no extra condition (None)."""
	user = user or frappe.session.user
	if user == "Administrator":
		return
	return """(`tabChecklist Task`.user = '{0}')""".format(user)
@frappe.whitelist()
def valid_hours(doc):
	# Count holidays in the 'Mycfo' holiday list falling between a task's
	# expected start date and its end date.  *doc* is the JSON-serialized
	# form document sent from the client.  Returns the holiday count, or
	# implicitly None when either date is missing.
	current_doc = json.loads(doc)
	if current_doc.get('expected_start_date') and current_doc.get('end_date'):
		from_date = datetime.strptime(current_doc.get('expected_start_date'), '%Y-%m-%d')
		to_date = datetime.strptime(current_doc.get('end_date'), '%Y-%m-%d')
		# to_date = datetime.strptime(current_doc.get('end_date')[:-7], '%Y-%m-%d %H:%M:%S')
		holiday_count = frappe.db.sql("""select count(*) from `tabHoliday List` h1,`tabHoliday` h2
			where h2.parent = h1.name and h1.name = 'Mycfo' and h2.holiday_date >= %s and h2.holiday_date <= %s""",(from_date,to_date),as_list=1)
		return holiday_count[0][0]
shrikant9867/mycfo | mycfo/mycfo/doctype/financial_data/financial_data.py | <reponame>shrikant9867/mycfo
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class FinancialData(Document):
	"""Yearly financial data captured per customer.

	Validation enforces unique shareholder names, one record per
	customer per financial year, and fund details for PE/VC-funded
	customers.
	"""

	def validate(self):
		#self.validate_fund_name()
		self.validate_shareholders_name()
		self.validate_yearlydata_against_customer()
		if self.is_pe_vc == 'Yes':
			self.validate_fund_type()
		# self.validate_fiscal_year()

	def validate_fund_name(self):
		# Currently disabled (see validate); each fund row must carry a
		# name and fund types may not repeat.
		fund_list = []
		if self.get('name_of_fund'):
			for d in self.get('name_of_fund'):
				if d.fund_type:
					if not d.name_of_fund:
						frappe.msgprint("Please specify Fund Name against the Fund Type '%s'"%d.fund_type,raise_exception=1)
					if d.fund_type not in fund_list:
						fund_list.append(d.fund_type)
					else:
						frappe.msgprint("No duplicate fund type is allowed",raise_exception=1)
						break

	def validate_shareholders_name(self):
		# Reject repeated shareholder names in the child table.
		shareholder_list = []
		if self.get('shareholders_detail'):
			for d in self.get('shareholders_detail'):
				if d.shareholder_name not in shareholder_list:
					shareholder_list.append(d.shareholder_name)
				else:
					frappe.msgprint("No duplicate shareholder name is allowed",raise_exception=1)
					break

	def validate_yearlydata_against_customer(self):
		# BUGFIX/SECURITY: previously built with %-interpolated strings
		# (SQL-injection prone) and executed the identical query twice;
		# now a single parameterized query.
		duplicate = frappe.db.sql("""select name from `tabFinancial Data`
			where name!=%s and customer=%s and financial_year=%s""",
			(self.name, self.customer, self.financial_year))
		if duplicate:
			frappe.msgprint(" Entry for Financial Year '%s' against the Customer = '%s' is already created"%(self.financial_year,self.customer),raise_exception=1)

	def validate_fund_type(self):
		# At least one fund row is mandatory for PE/VC-funded customers.
		# BUGFIX: previously `len(self.get('name_of_fund'))` raised
		# TypeError when the child table was absent (None).
		if not self.get('name_of_fund'):
			frappe.msgprint("At least one Fund Details entry is mandatory in Fund Child table.",raise_exception=1)

	def validate_fiscal_year(self):
		#pass
		# Currently disabled (see validate): blocks entries for the
		# current or a future fiscal year.
		fiscal_year = frappe.db.sql("""select value from `tabSingles` where doctype='Global Defaults' and field='current_fiscal_year'""",as_list=1)
		if fiscal_year:
			if self.financial_year >= fiscal_year[0][0]:
				frappe.msgprint("No permission to create Financial Data for Current and Future Fiscal Year also.")
def get_shareholders(doctype, txt, searchfield, start, page_len, filters):
	# Link-field query: FFWW records with a contact that carry the
	# 'Share Holder' designation.  The signature follows frappe's
	# custom-query contract; txt/start/page_len are currently ignored
	# (no search filtering or pagination is applied).
	# from frappe.desk.reportview import get_match_cond
	# txt = "%{}%".format(txt)
	return frappe.db.sql("""select distinct f.name
		from `tabFFWW` f , `tabFFWW Designation` d
		where f.docstatus < 2
		and f.contact is not null
		and f.name = d.parent
		and f.name in (select parent from `tabFFWW Designation` where designation='Share Holder')""",as_list=1)
def get_promoters(doctype, txt, searchfield, start, page_len, filters):
	# Link-field query: FFWW records with a contact and the 'Promoter'
	# designation.  NOTE(review): `get_match_cond` and the formatted
	# `txt` are computed but never used by the query, and unlike
	# get_shareholders this returns tuples (no as_list=1) -- confirm
	# callers expect that.
	from frappe.desk.reportview import get_match_cond
	txt = "%{}%".format(txt)
	return frappe.db.sql("""select distinct f.name
		from `tabFFWW` f , `tabFFWW Designation` d
		where f.docstatus < 2
		and f.contact is not null
		and f.name = d.parent
		and f.name in (select parent from `tabFFWW Designation` where designation='Promoter')""")
shrikant9867/mycfo | mycfo/kpi/report/resource_pool/resource_pool.py | # Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
def execute(filters=None):
	"""Report entry point: return the (columns, data) pair frappe's
	query-report runner expects."""
	return get_colums(), get_data(filters)
def get_data(filters):
	"""Return skill-matrix rows filtered by employee status.

	Rows whose status column (index 7) equals the requested status
	('Active' or 'Left') are returned; any other status value yields
	None, matching the original behaviour.
	"""
	# The dead `if 1==1:` wrapper and its unreachable else branch were
	# removed, along with `debug=1` (which dumped the query to the log).
	result = frappe.db.sql("""select emp,skill,sub_skill,
		none_field,beginner,imtermediatory,expert,emp_status
		from (select
		CASE WHEN skill !='' then (select employee_name from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)\
		else null\
		END AS emp,\
		CASE WHEN skill !='' then (select industry from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)\
		else null\
		END AS ind,\
		skill,sub_skill,none_field,beginner,imtermediatory,expert,
		CASE WHEN skill !='' then (select employee from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)\
		else null\
		END AS emp_s,
		CASE WHEN skill !='' then (select status from `tabEmployee` where name=emp_s)\
		else null\
		END AS emp_status\
		from `tabSkill Mapping Details` where skill is not null) as innerTable order by emp,skill,sub_skill""",as_list=1)
	status = filters.get("status")
	if status in ("Active", "Left"):
		return [row for row in result if row[7] == status]
def get_total_item():
	# Hard-coded placeholder; appears unused by execute()/get_data().
	return "11"
def get_colums():
	"""Column definitions for the Resource Pool report, in frappe's
	"Label:Type:Width" string format.  (Name kept -- callers use the
	existing `get_colums` spelling.)"""
	return [
		"Employee::120",
		"Skill Matrix fc00:db20:35b:7399::5",
		"Skill Matrix fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b",
		"None::130",
		"Beginner::130",
		"Intermediatory::130",
		"Expert::130",
		"Employee Status::130",
	]
# elif filters.get("status") == "Left":
# print filters.get("status")
# print "in left"
# result = frappe.db.sql("""select * from (select
# CASE WHEN skill !='' then (select employee_name from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)
# else "" END AS emp,
# CASE WHEN skill !='' then (select industry from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)
# else ""
# END AS ind,
# skill as "Skill Matrix fdf8:f53e:61e4::18",
# sub_skill as "Skill Matrix fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
# none_field as "None::130",
# beginner as "Beginner::130",
# imtermediatory as "Intermediatory::130",
# expert as "Expert::130",
# CASE WHEN skill !='' then (select employee from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)
# else "null"
# END AS emp_s,
# CASE WHEN skill !='' then (select status from `tabEmployee` where name=emp_s)
# else "null"
# END AS emp_status
# from `tabSkill Mapping Details` where skill is not null
# order by sub_skill) as innerTable where emp_status = 'Left'""",as_list=1,debug=1)
# if filters.get("status") == "Left":
# result = frappe.db.sql("""select * from (select
# CASE WHEN skill !='' then (select employee_name from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)\
# else null\
# END AS emp,\
# CASE WHEN skill !='' then (select industry from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)\
# else null\
# END AS ind,\
# skill as "Skill Matrix fdf8:f53e:61e4::18",
# sub_skill as "Skill Matrix fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
# none_field as "None::130",
# beginner as "Beginner::130",
# imtermediatory as "Intermediatory::130",
# expert as "Expert::130",
# CASE WHEN skill !='' then (select employee from `tabSkill Mapping` where name=`tabSkill Mapping Details`.parent)\
# else null\
# END AS emp_s,\
# CASE WHEN skill !='' then (select status from `tabEmployee` where name=emp_s)\
# else null\
# END AS emp_status
# from `tabSkill Mapping Details` where skill is not null
# order by sub_skill) as innerTable where emp_status = 'Left'""",as_list=1,debug=1)
# result = frappe.db.sql("""select
# item_code "Item:Link/Item:150",
# inventory_maintained_by as "Inventory Maintained By::100",
# c_barcode as "Barcode::100",
# shortcode as "Shortcode::100",
# category as "Category::120",
# sub_category as "Sub Category::120",
# brand as "Brand::80",
# c.mrp as "MRP:Currency:100",
# c.retail_rate as "Retail Price:Currency:100",
# c.wholesale_rate as "Wholesale Price:Currency:100"
# from
# `tabItem`b LEFT JOIN `tabItem Master Rate`c ON c.parent=b.name
# order by item_code""",as_list=1,debug=1)
# def get_conditions(filters):
# cond = ''
# if filters.get('checklist_requisition') and filters.get('status') and filters.get('user'):
# cond = "where project = '{0}' and status = '{1}' and user = '{2}'".format(filters.get('checklist_requisition'),filters.get('status'),filters.get('user'))
# elif filters.get('checklist_requisition') and filters.get('status'):
# cond = "where project = '{0}' and status = '{1}'".format(filters.get('checklist_requisition'),filters.get('status'))
# elif filters.get('checklist_requisition') and filters.get('user'):
# cond = "where project = '{0}' and user = '{1}'".format(filters.get('checklist_requisition'),filters.get('user'))
# elif filters.get('status') and filters.get('user'):
# cond = "where status = '{0}' and user = '{1}'".format(filters.get('status'),filters.get('user'))
# elif filters.get('user'):
# cond = "where user = '{0}'".format(filters.get('user'))
# elif filters.get('checklist_requisition'):
# cond = "where project = '{0}' ".format(filters.get("checklist_requisition"))
# elif filters.get('status'):
# cond = "where status='{0}'".format(filters.get("status"))
# return cond |
shrikant9867/mycfo | mycfo/discussion_forum/page/discussion_forum/discussion_forum.py | <filename>mycfo/discussion_forum/page/discussion_forum/discussion_forum.py
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe.website.render import resolve_path
from frappe import _
from frappe.website.render import clear_cache
from frappe.utils import today, cint, global_date_format, get_fullname, strip_html_tags,flt
from frappe.website.utils import find_first_image
from markdown2 import markdown
import datetime
import math
from mycfo.mycfo_utils import get_central_delivery
from frappe.utils import get_url
STANDARD_USERS = ("Guest", "Administrator")
no_cache = 1
no_sitemap = 1
@frappe.whitelist(allow_guest=True)
def get_data(category=None,user=None,assigned_to_me=None,page_no=0,limit=3):
	"""Return one page of published discussion topics with comment
	counts, optionally filtered by author, category, or topics assigned
	to the session user.  Returns (posts, total_pages, next_page_no,
	paginate)."""
	conditions = []
	assign_condition = []
	todo_owner = ''
	if page_no:
		offset = (cint(page_no) * cint(limit))
	else:
		offset = 0
	#next_start = cint(limit_start) + cint(limit_page_length)
	if user:
		conditions.append('t1.post_by="%s"' % frappe.db.escape(user))
	if category:
		conditions.append('t1.blog_category="%s"' % frappe.db.escape(category))
	if assigned_to_me:
		# Join ToDo so only topics assigned to the session user match.
		assign_condition.append('left join `tabToDo`t3 on t1.name = t3.reference_name ')
		todo_owner = ' and t3.owner="%s" '% frappe.db.escape(frappe.session.user)
	limit_query = " limit %(start)s offset %(page_len)s"%{"start": limit, "page_len":offset }
	query = """\
		select
			distinct t1.name,t1.title, t1.blog_category, t1.published_on,
			t1.published_on as creation,
			ifnull(t1.intro, t1.content) as content,
			t2.employee_name,t1.post_by,
			(select count(name) from `tabComment` where
				comment_doctype='Discussion Topic' and comment_docname=t1.name and comment_type="Comment") as comments
		from `tabDiscussion Topic` t1 left join
			`tabEmployee` t2
			on t1.post_by = t2.name
			{assign_condition}
		where ifnull(t1.published,0)=1
		{condition} {to_do_and}
		order by published_on desc, name asc""".format(
			condition= (" and " + " and ".join(conditions)) if conditions else "",
			to_do_and = todo_owner,
			assign_condition = (" and ".join(assign_condition)) if assign_condition else "",
		)
	posts = frappe.db.sql(query+ limit_query, as_dict=1)
	for post in posts:
		post.published = global_date_format(post.creation)
		# Strip markup and truncate for the listing snippet.
		post.content = strip_html_tags(post.content[:140])
		post.assigned = check_if_assigned(post)
		if not post.comments:
			post.comment_text = _('No comments yet')
		elif post.comments==1:
			post.comment_text = _('1 comment')
		else:
			post.comment_text = _('{0} comments').format(str(post.comments))
	total_records = get_total_topics(query)
	paginate = True if total_records > limit else False
	total_pages = math.ceil(total_records/flt(limit))
	# NOTE(review): by ternary precedence the `if posts else {}` applies
	# only to `paginate` (the last tuple element), so a 4-tuple is always
	# returned -- confirm callers don't expect `{}` for the whole result.
	return posts,total_pages,int(page_no)+1,paginate if posts else {}
def check_if_assigned(post):
	"""Return 1 when the session user has a ToDo pointing at *post*
	(a Discussion Topic), else 0."""
	todo_name = frappe.db.get_value(
		"ToDo",
		{"owner": frappe.session.user,
		 "reference_type": "Discussion Topic",
		 "reference_name": post.name},
		"name")
	return 1 if todo_name else 0
def get_total_topics(query):
	"""Run the (unpaginated) listing *query* and count non-empty rows;
	used to compute the total page count."""
	rows = frappe.db.sql(query, as_list=1)
	return sum(1 for row in rows if row)
# def get_total_topics(conditions):
# condition = (" and " + " and ".join(conditions)) if conditions else ""
# return frappe.db.sql("""select count(*) from `tabDiscussion Topic` as t1
# where ifnull(t1.published,0)=1 {0}""".format(condition),as_list=1)[0][0] or 0
@frappe.whitelist(allow_guest=True)
def get_post(topic_name):
	# Return a Discussion Topic as a dict, augmented with its comment
	# list (first page) and the posting employee's full name.
	topic = frappe.get_doc("Discussion Topic",topic_name).as_dict()
	topic.update({
		"comment_list":get_comments(topic_name),
		"employee_name":frappe.db.get_value("Employee",topic.post_by,"employee_name")
	})
	return topic
@frappe.whitelist(allow_guest=True)
def get_comments(topic_name,page_no=0,is_sorted='false', limit=3):
	"""Return one page of comments for a topic plus paging metadata.

	Returns (comment_list, total_pages, next_page_no, paginate,
	is_sorted); each comment is annotated with rating details,
	attachments, and a <br>-formatted body.
	"""
	if is_sorted=="true":
		comment_list = get_sorted_comment_list("Discussion Topic",topic_name,page_no,limit)
	else:
		comment_list = get_comment_list("Discussion Topic",topic_name,page_no,limit)
	total_records = get_comment_count(topic_name)
	paginate = True if total_records > limit else False
	total_pages = math.ceil(total_records/flt(limit))
	page_no = int(page_no) + 1
	for comment in comment_list:
		ratings = get_rating_details(comment)
		comment["creation"] = comment.creation.strftime('%d-%m-%Y,%I:%M %p')
		comment.update({
			"average_rating":ratings.get("avg",0.0),
			"ratings":ratings.get("ratings",0),
			"user_rating":ratings.get("user_rating"),
			"no_of_users":ratings.get("number_of_users"),
			"get_attachments": get_attachments("Comment",comment['name']) ,
			"comment": comment['comment'].replace("\n","<br>")
		})
	# BUGFIX: removed leftover debug `print frappe.request.url`
	# (it also fails under Python 3's print function).
	return comment_list,total_pages,page_no,paginate,is_sorted
@frappe.whitelist(allow_guest=True)
def get_attachments(dt, dn):
	"""Return File rows (name, file_name, file_url) attached to the
	document *dn* of DocType *dt*."""
	# BUGFIX: removed leftover debug `print "in atachment"` (Py2-only
	# syntax and noise on every call).
	return frappe.get_all("File", fields=["name", "file_name", "file_url"],
		filters = {"attached_to_name": dn, "attached_to_doctype": dt})
@frappe.whitelist(allow_guest=True)
def sort_comments(topic_name,page_no=0,limit=3):
	# One page of comments ordered by average rating (highest first),
	# plus paging metadata.  NOTE(review): near-duplicate of
	# get_comments(is_sorted='true') -- consider consolidating; unlike
	# get_comments this omits the "comment" <br>-formatting key and
	# re-sorts the page in Python after the SQL ordering.
	comment_list = get_sorted_comment_list("Discussion Topic",topic_name,page_no,limit)
	total_records = get_comment_count(topic_name)
	paginate = True if total_records > limit else False
	total_pages = math.ceil(total_records/flt(limit))
	page_no = int(page_no) + 1
	for comment in comment_list:
		ratings = get_rating_details(comment)
		comment["creation"] = comment.creation.strftime('%d-%m-%Y,%I:%M %p')
		comment.update({
			"average_rating":ratings.get("avg",0.0),
			"ratings":ratings.get("ratings",0),
			"user_rating":ratings.get("user_rating"),
			"no_of_users":ratings.get("number_of_users"),
			"get_attachments": get_attachments("Comment",comment['name'])
		})
	comment_list.sort(key=lambda x: x['average_rating'],reverse=True)
	return comment_list,total_pages,page_no,paginate
def get_comment_count(topic_name):
	# Total number of comments on the topic; drives pagination math.
	return frappe.get_list("Comment",fields=["count(*)"],
		filters={"comment_type":"Comment","comment_docname":topic_name},as_list=1,ignore_permissions=1)[0][0] or 0
def get_sorted_comment_list(doctype, name,page_no,limit):
	# One page of comments for a document ordered by their average
	# `tabTopic Ratings` score, descending.
	if page_no:
		offset = (cint(page_no) * cint(limit))
	else:
		offset = 0
	# NOTE(review): `CASE WHEN 5!=6` is always true -- it only exists to
	# host the correlated subquery via the `cname` alias; the ELSE
	# branch is dead.
	return frappe.db.sql("""select
		name,comment, comment_by_fullname, creation, comment_by, name as cname,
		CASE WHEN 5!=6 then (select avg(ratings) from `tabTopic Ratings` where comment=cname)
		ELSE " "
		END AS ratings
		from `tabComment` where comment_doctype=%s
		and ifnull(comment_type, "Comment")="Comment"
		and comment_docname=%s order by ratings desc limit %s offset %s""",(doctype,name,limit,offset), as_dict=1) or []
def get_comment_list(doctype, name, page_no, limit):
	"""One page of comments for a document, newest first."""
	offset = cint(page_no) * cint(limit) if page_no else 0
	rows = frappe.db.sql("""select
		name,comment, comment_by_fullname, creation, comment_by
		from `tabComment` where comment_doctype=%s
		and ifnull(comment_type, "Comment")="Comment"
		and comment_docname=%s order by creation desc limit %s offset %s""",
		(doctype, name, limit, offset), as_dict=1)
	return rows or []
def get_rating_details(comment):
	# Aggregate rating info for one comment: average score, vote count,
	# the session user's own rating, and the number of distinct raters.
	# *comment* is a dict-like row; its "name" key (when present) is the
	# Comment document name.
	# SECURITY(review): two queries below interpolate `comment` via
	# str.format rather than parameters -- fine for internally generated
	# names, but worth parameterizing.
	ratings = {}
	if comment.get("name"):
		comment = comment.get("name")
	ratings["avg"] = round(frappe.get_list("Topic Ratings", fields=["ifnull(avg(ratings),0.0)"],
		filters={ "comment":comment}, as_list=True)[0][0],2)
	ratings["ratings"] = frappe.db.sql("""select count(*) from
		`tabTopic Ratings` where comment='{0}'""".format(comment),as_list=1)[0][0]
	ratings["user_rating"] = frappe.db.get_value("Topic Ratings",{"comment":comment,"user":frappe.session.user},"ratings")
	ratings['number_of_users'] = frappe.db.sql("""select count(distinct user) from `tabTopic Ratings` where comment = '{0}'""".format(comment),as_list=1)[0][0]
	return ratings
@frappe.whitelist(allow_guest=True)
def add_comment(comment,topic_name):
	"""Insert *comment* on the given Discussion Topic as the session user."""
	# BUGFIX: removed leftover debug `print comment,"comment"` (Py2-only
	# syntax) and the unused local `import datetime`.
	frappe.get_doc({
		"doctype":"Comment",
		"comment_by": frappe.session.user,
		"comment_doctype":"Discussion Topic",
		"comment_docname": topic_name,
		"comment": comment,
		"comment_type":"Comment"
	}).insert(ignore_permissions=True)
@frappe.whitelist(allow_guest=True)
def add_rating(rating,comment,topic_name):
	"""Record the session user's *rating* for *comment* and return the
	refreshed aggregate rating details.

	Raises (via frappe.throw) when users try to rate their own comment.
	"""
	comment_doc = frappe.get_doc("Comment",comment)
	if(comment_doc.comment_by==frappe.session.user):
		frappe.throw("You can not rate your own comments")
	# (unused local `import datetime` removed)
	frappe.get_doc({
		"doctype":"Topic Ratings",
		"user": frappe.session.user,
		"comment":comment,
		"ratings":flt(rating,1)
	}).insert(ignore_permissions=True)
	ratings = get_rating_details({"name":comment})
	comments = {}
	comments.update({
		"average_rating":ratings.get("avg",0.0),
		"ratings":ratings.get("ratings",0),
		"user_rating":ratings.get("user_rating")
	})
	return comments
@frappe.whitelist()
def assign_topic(args=None):
	"""add in someone's to do list
		args = {
			"assign_to": ,
			"doctype": ,
			"name": ,
			"description":
		}
	"""
	if not args:
		args = frappe.local.form_dict
	from frappe.utils import nowdate
	# SECURITY(review): `args['assign_to']` comes straight from the HTTP
	# request (frappe.local.form_dict) and is passed to eval(), which can
	# execute arbitrary Python.  Should be json.loads/frappe.parse_json;
	# flagged here rather than changed to avoid breaking callers posting
	# Python-literal payloads.
	emp_list = eval(args['assign_to'])
	for employee in emp_list:
		d = frappe.get_doc({
			"doctype":"ToDo",
			"owner": employee,
			"reference_type": args['doctype'],
			"reference_name": args['name'],
			"description": args.get('description'),
			"priority": args.get("priority", "Medium"),
			"status": "Open",
			"date": args.get('date', nowdate()),
			"assigned_by": frappe.session.user,
		}).insert(ignore_permissions=True)
	# set assigned_to if field exists
	if frappe.get_meta(args['doctype']).get_field("assigned_to"):
		for employee in emp_list:
			frappe.db.set_value(args['doctype'], args['name'], "assigned_to", employee)
	# notify
	if not args.get("no_notification"):
		from frappe.desk.form.assign_to import notify_assignment
		# NOTE(review): `d` is the ToDo from the *last* loop iteration, so
		# only one assignment notification is sent even for many assignees.
		notify_assignment(d.assigned_by, d.owner, d.reference_type, d.reference_name, action='ASSIGN', description=args.get("description"), notify=1)
	send_mail_to_mycfo_users(emp_list, args["name"])
	return
# def user_query(doctype, txt, searchfield, start, page_len, filters):
# from frappe.desk.reportview import get_match_cond
# txt = "%{}%".format(txt)
# return frappe.db.sql("""select name, concat_ws(' ', first_name, middle_name, last_name)
# from `tabUser`
# where ifnull(enabled, 0)=1
# and docstatus < 2
# and name not in ({standard_users})
# and user_type != 'Website User'
# and name in (select parent from `tabUserRole` where role='Employee')
# and ({key} like %s
# or concat_ws(' ', first_name, middle_name, last_name) like %s)
# {mcond}
# order by
# case when name like %s then 0 else 1 end,
# case when concat_ws(' ', first_name, middle_name, last_name) like %s
# then 0 else 1 end,
# name asc
# limit %s, %s""".format(standard_users=", ".join(["%s"]*len(STANDARD_USERS)),
# key=searchfield, mcond=get_match_cond(doctype)),
# tuple(list(STANDARD_USERS) + [txt, txt, txt, txt, start, page_len]))
def users_query(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: users that can still be assigned the Discussion Topic.

    Excludes the user in ``filters['doc']``, the session user, and anyone who
    already has an open ToDo referencing ``filters['doc_name']``.

    Fix: all values (including the search text) are now bound as SQL
    parameters instead of being interpolated into the query string, closing
    the SQL-injection hole of the old ``str.format`` version.
    """
    return frappe.db.sql("""select usr.name, usr.first_name
        from `tabUser` usr
        where usr.name != %(doc)s
        and usr.name != %(session_user)s
        and usr.name not in ( select owner from `tabToDo`
        where reference_type= "Discussion Topic" and reference_name= %(doc_name)s and status="Open")
        and (usr.name like %(txt)s
        or usr.first_name like %(txt)s )
        limit 20
        """, {
            "doc": filters['doc'],
            "session_user": frappe.session.user,
            "doc_name": filters['doc_name'],
            "txt": "%%%s%%" % txt,
        }, as_list=1)
@frappe.whitelist(allow_guest=True)
def get_categories():
    """Return name and title of every Discussion Category (permissions bypassed)."""
    category_fields = ["name", "title"]
    return frappe.get_list("Discussion Category", fields=category_fields, ignore_permissions=1)
def send_mail_to_mycfo_users(email_ids, title_name):
    """Notify *email_ids* that Discussion Topic *title_name* was assigned to them,
    cc'ing the Central Delivery group."""
    title, category, owner = frappe.db.get_value("Discussion Topic", title_name, ["title", "blog_category", "owner"])
    owner_full_name = frappe.db.get_value("User", owner, [" concat(first_name, ' ', last_name) "])
    assignee_full_name = frappe.db.get_value("User", frappe.session.user, ["concat(first_name, ' ', ifnull(last_name,'') )"])
    template_args = {
        "assignee": assignee_full_name,
        "subject": title,
        "category": category,
        "host_url": get_url(),
        "owner": owner_full_name,
    }
    body = frappe.get_template("/templates/discussion_forum_templates/topic_assign_notification.html").render(template_args)
    frappe.sendmail(recipients=email_ids, sender=None, subject="Discussion Topic Assigned to You",
        message=body, cc=get_central_delivery())
|
shrikant9867/mycfo | mycfo/trainings/page/training_test/training_test.py | from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint, time_diff_in_hours, now, time_diff, add_days, formatdate
from frappe import _
import json
import math
import re
@frappe.whitelist()
def get_question_list(ans_sheet):
    """Return the question rows of Answer Sheet *ans_sheet* plus resume metadata
    (last attempted question, status, totals) for the test-taking page.

    Fix: guards against an empty question list — the old code raised
    IndexError on ``qtn_list[0]`` when the sheet had no questions and no
    last_attempted_question.
    """
    qtn_list = frappe.get_all("Assessment Answers", filters={"parent": ans_sheet}, order_by="name asc", fields="*")
    qtn_dict = format_question_list(qtn_list)
    ans_sheet_data = frappe.db.get_value("Answer Sheet", {"name": ans_sheet},
        ["last_attempted_question", "answer_sheet_status", "training_name", "total_marks"], as_dict=1)
    # Resume from the last attempted question; fall back to the first one.
    last_attempted_qtn = ans_sheet_data.get("last_attempted_question")
    if not last_attempted_qtn:
        last_attempted_qtn = qtn_list[0].get("name") if qtn_list else None
    qtn_keys = [qtn.get("name") for qtn in qtn_list]
    return {
        "question_dict": qtn_dict, "questions_count": len(qtn_list),
        "last_qtn": last_attempted_qtn, "qtn_keys": qtn_keys,
        "ans_sheet_status": ans_sheet_data.get("answer_sheet_status"),
        "total_marks": ans_sheet_data.get("total_marks"),
        "training_name": ans_sheet_data.get("training_name")
    }
def format_question_list(qtn_list):
    """Index the question rows by their document name for O(1) lookup."""
    return {question.get("name"): question for question in qtn_list}
@frappe.whitelist()
def update_user_answer(request_data):
    """Persist the user's answer for one question, then remember which
    question they navigated to so the test can be resumed."""
    payload = json.loads(request_data)
    common_update_answer(payload)
    frappe.db.sql(""" update `tabAnswer Sheet`
        set last_attempted_question = %(qtn)s where name = %(ans_sheet)s """,
        {"qtn": payload.get("new_qtn_id"), "ans_sheet": payload.get("ans_sheet")})
    frappe.db.commit()
def common_update_answer(request_data):
    """Write the user's answer into the Assessment Answers row named by
    ``request_data['qtn_id']`` and mark it visited.

    The target column depends on the question type: objective answers go to
    ``user_answer``, subjective ones to ``user_subjective_answer``.

    Fix: the answer text and row id are now bound as SQL parameters — the old
    code ``str.format``-ed the raw user answer straight into the statement
    (any quote in the answer broke or injected into the query).  Only the
    column name, chosen from a fixed mapping, is still formatted in.
    """
    answer = request_data.get("user_answer") or ""
    request_data["user_answer"] = answer
    column_map = {"Objective": "user_answer", "Subjective": "user_subjective_answer"}
    frappe.db.sql(""" update `tabAssessment Answers`
        set {col_nm} = %(answer)s, visited_flag = 1
        where name = %(row_id)s """.format(col_nm=column_map.get(request_data.get("qtn_type"))),
        {"answer": answer, "row_id": request_data.get("qtn_id")})
@frappe.whitelist()
def update_ans_sheet_status(ans_sheet):
    """Mark the given Answer Sheet as 'Pending' (test in progress)."""
    sheet = frappe.get_doc("Answer Sheet", ans_sheet)
    sheet.answer_sheet_status = "Pending"
    sheet.save(ignore_permissions=True)
@frappe.whitelist()
def end_test(request_data):
    """Finish a test: save the last answer, mark the sheet 'Open', submit it
    when it contains no subjective questions, and return a result message.

    request_data is a JSON string with at least: qtn_id, qtn_type,
    user_answer, ans_sheet (see common_update_answer).
    """
    request_data = json.loads(request_data)
    common_update_answer(request_data)
    ansr_sheet = frappe.get_doc("Answer Sheet", request_data.get("ans_sheet"))
    ansr_sheet.answer_sheet_status = "Open"
    ansr_sheet.save(ignore_permissions=True)
    marks_message = """ Congratulations, You have successfully completed test."""
    # Score can only be shown immediately if at least one objective (auto-
    # scored) question exists.
    if not check_if_all_subjective_questions(ansr_sheet):
        marks_message += """ You have got {0} marks out of {1} & your percentage score is {2}%.
        """.format(ansr_sheet.marks_obtained, ansr_sheet.total_marks,
            ansr_sheet.percentage_score)
    # subjective_flag is set on the Assessment when any subjective question
    # exists; sheets without one can be submitted (finalised) right away.
    if not ansr_sheet.subjective_flag == "Yes":
        ansr_sheet.submit()
    else:
        marks_message += "Subjective questions will be evaluated later by evaluator."
    return marks_message
def check_if_all_subjective_questions(doc):
    """True when no row in doc.table_5 is an 'Objective' question
    (i.e. the sheet is entirely subjective — vacuously true when empty)."""
    return all(row.question_type != "Objective" for row in doc.table_5)
shrikant9867/mycfo | mycfo/trainings/page/training_dashboard/training_dashboard.py | <filename>mycfo/trainings/page/training_dashboard/training_dashboard.py
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint, time_diff_in_hours, now, time_diff, add_days, formatdate
from frappe import _
import json
import math
import re
@frappe.whitelist()
def get_global_search_suggestions(filters):
    """Autocomplete: names matching *filters* across published Trainings,
    both Skill Matrix doctypes and Document Type.

    Fix: the search text is now bound as a SQL parameter instead of being
    ``str.format``-ed into the query, closing the SQL-injection hole.
    """
    pattern = "%%%s%%" % filters
    query = """ select name from `tabTraining` where training_status = 'Published'
        and name like %(txt)s
        union select name from `tabSkill Matrix 18` where name like %(txt)s
        union select name from `tabSkill Matrix 120` where name like %(txt)s
        union select name from `tabDocument Type` where name like %(txt)s
        """
    rows = frappe.db.sql(query, {"txt": pattern}, as_list=1)
    return [row[0] for row in rows]
@frappe.whitelist()
def get_published_trainings(search_filters):
    """Paginated (5 per page) search over published Trainings, annotated with
    download count, average rating, review comments and the caller's test
    status.

    search_filters is a JSON string: {"filters": <search text>, "page_no": <int>}.

    SECURITY(review): the search text is str.format-ed into the LIKE clauses;
    parameterizing is non-trivial because get_total_records() rewrites and
    re-executes the same query string — confirm and fix together.
    """
    search_filters = json.loads(search_filters)
    limit_query = "LIMIT 5 OFFSET {0}".format(search_filters.get("page_no") * 5)
    # Correlated sub-queries evaluated per training row (aliased `tr`).
    downld_query, avg_rat_query = get_ratings_and_downloads_query()
    my_query = """ select * , ({1}) as download_count , ({2}) as avg_ratings from `tabTraining` tr
        where tr.training_status = 'Published' and
        ( tr.skill_matrix_18 like '%{0}%' or tr.name like '%{0}%'
        or tr.skill_matrix_120 like '%{0}%' or tr.document_type like '%{0}%'
        or tr.industry like '%{0}%' or tr.description like '%{0}%' ) order by tr.creation desc """.format(search_filters.get("filters"), downld_query, avg_rat_query)
    # Count first (query with `*` swapped for count(*)), then fetch one page.
    total_records = get_total_records(my_query)
    response_data = frappe.db.sql(my_query + limit_query, as_dict=True)
    # Side effect: also annotates each row with comments / ans_status.
    assessment_status = get_request_download_status(response_data)
    total_pages = math.ceil(total_records[0].get("count", 0) / 5.0)
    return {"response_data": response_data, "total_pages": total_pages, "test_status": assessment_status}
def get_total_records(query):
    """Run *query* with its select list replaced by ``count(*) as count``.

    NOTE: only the *first* ``*`` in the string is substituted, so the query's
    own correlated sub-selects keep their column lists intact.
    """
    count_query = query.replace("*", "count(*) as count", 1)
    return frappe.db.sql(count_query, as_dict=1)
def get_request_download_status(response_data):
    """Annotate each training row in *response_data* (mutated in place) with
    its reviews and the caller's latest answer-sheet status, then return the
    name of any open/new Answer Sheet of the session user ('' if none).
    """
    for response in response_data:
        # All reviews of this training, joined with the reviewer's full name.
        response["comments"] = frappe.db.sql(""" select tr.user_id, tr.comments, tr.ratings, concat(usr.first_name , ' ' ,usr.last_name) as full_name
            from `tabTraining Review` tr left join `tabUser` usr
            on usr.name = tr.user_id
            where tr.training_name = %s""", (response.get("training_name")), as_dict=1)
        # Most recent answer sheet of the session user for this training.
        ans_sheet = frappe.db.sql(""" select answer_sheet_status from
            `tabAnswer Sheet`
            where student_name = %s and training_name = %s
            order by creation desc limit 1 """, (frappe.session.user, response.get("training_name")), as_dict=1)
        response["ans_status"] = ans_sheet[0].get("answer_sheet_status") if ans_sheet else ""
    # Any in-progress sheet blocks starting another test.
    result = frappe.db.get_value("Answer Sheet", {"student_name": frappe.session.user, "answer_sheet_status": ["in", ["New", "Pending"]]}, 'name')
    return result if result else ""
def get_ratings_and_downloads_query():
    """Correlated sub-queries (evaluated per training row aliased `tr`) for
    the download count and the average review rating, in that order."""
    return (
        " select count(name) from `tabTraining Download Log` tdl where tdl.training_name = tr.name ",
        " select ifnull(avg(ratings), 0.0) from `tabTraining Review` rvw where rvw.training_name = tr.name ",
    )
@frappe.whitelist()
def create_training_review(request_data):
    """Create the session user's review of a training, or update the ratings
    and comments of an existing one (one review per user per training)."""
    payload = json.loads(request_data)
    review_filters = {"user_id": frappe.session.user, "training_name": payload.get("training_name")}
    if frappe.db.get_value("Training Review", review_filters, "name"):
        review = frappe.get_doc("Training Review", review_filters)
    else:
        review = frappe.new_doc("Training Review")
        review.user_id = frappe.session.user
        review.training_name = payload.get("training_name")
    review.ratings = flt(payload.get("ratings"))
    review.comments = payload.get("comments")
    review.save(ignore_permissions=True)
@frappe.whitelist()
def make_training_subscription_form(request_data):
    """Create and submit an 'Unforced Training' subscription request for the
    session user, copying the training's classification fields."""
    payload = json.loads(request_data)
    training_data = frappe.db.get_value("Training", {"name": payload.get("tr_name")}, "*", as_dict=True)
    subscription = frappe.new_doc("Training Subscription Approval")
    subscription.request_type = "Unforced Training"
    subscription.training_requester = frappe.session.user
    subscription.update(get_subscription_form_dict(training_data))
    subscription.save(ignore_permissions=True)
    subscription.submit()
    # send_mail_of_training_request(training_data.get("name"))
@frappe.whitelist()
def assign_forced_training(request_data):
    """Create and submit a 'Forced Training' subscription for each
    employee/training pair in *request_data* (a JSON list of rows)."""
    rows = json.loads(request_data)
    for row in rows:
        training_data = frappe.db.get_value("Training", {"name": row.get("training_name")}, "*", as_dict=True)
        subscription = frappe.new_doc("Training Subscription Approval")
        subscription.request_type = "Forced Training"
        # The requester is the assignee's user account, not the session user.
        subscription.training_requester = frappe.db.get_value("Employee", {"name": row.get("employee")}, "user_id")
        subscription.update(get_subscription_form_dict(training_data))
        subscription.save(ignore_permissions=True)
        subscription.submit()
def send_mail_of_training_request(training_name):
    """Tell the Central Delivery group that the session user requested
    *training_name*.

    Fix: the computed full name (``user_name``) was built but never used —
    the template was always handed only the first name.
    """
    template = "/templates/training_templates/training_request.html"
    cd = get_central_delivery()
    first_name, last_name = frappe.db.get_value("User", {"name": frappe.session.user}, ["first_name", "last_name"])
    user_name = ' '.join([first_name, last_name]) if last_name else first_name
    args = {"user_name": user_name, "training_name": training_name}
    frappe.sendmail(recipients=cd, sender=None, subject="Training Document Notification",
        message=frappe.get_template(template).render(args))
def get_subscription_form_dict(training_data):
    """Map a Training record onto the fields of a new Training Subscription
    Approval, with the fixed initial workflow state."""
    form = {
        "training_name": training_data.get("name"),
        "request_status": "Open",
        "central_delivery_status": "Accepted",
        "central_delivery": "Administrator",
    }
    for field in ("document_type", "industry", "skill_matrix_120", "skill_matrix_18", "assessment"):
        form[field] = training_data.get(field)
    return form
def get_central_delivery():
    """User ids (other than Administrator) holding the 'Central Delivery' role."""
    role_rows = frappe.get_list("UserRole",
        filters={"role": "Central Delivery", "parent": ["!=", "Administrator"]},
        fields=["parent"])
    return [row.get("parent") for row in role_rows]
@frappe.whitelist()
def validate_if_current_user_is_author():
    """'success' for Central Delivery members; otherwise the name of the
    session user's own published Training (None when they have none)."""
    if "Central Delivery" in frappe.get_roles():
        return "success"
    return frappe.db.get_value("Training", {"owner": frappe.session.user, "training_status": "Published"}, "name")
@frappe.whitelist()
def get_training_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query over published Trainings; non-Central-Delivery users
    only see trainings they own.

    Fix: the two branches duplicated the query call and the Central Delivery
    branch dropped ``as_list=1``, so its rows came back in a different shape
    than the other branch's.  Both now go through one call.
    """
    cond = ''
    if "Central Delivery" not in frappe.get_roles():
        cond = "and owner = '{0}'".format(frappe.session.user)
    return frappe.db.sql(get_training_query(cond, txt), as_list=1)
def get_training_query(cond, txt):
    """Build the SQL for get_training_list: published trainings whose name
    matches *txt*, with *cond* an extra pre-built "and ..." clause ('' for
    none).

    SECURITY(review): *txt* is str.format-ed into the LIKE clause — confirm
    it is link-query text from trusted UI and/or parameterize with the caller.
    """
    return """ select name from
        `tabTraining`
        where training_status = 'Published'
        and name like '{txt}'
        {cond} limit 20 """.format(cond=cond, txt="%%%s%%" % txt)
@frappe.whitelist()
def get_employee_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: employees eligible for the training named in
    filters['training_name'] (see get_emp_query for the exclusion rules)."""
    query = get_emp_query(filters.get("training_name"), txt)
    return frappe.db.sql(query, as_list=1)
def get_emp_query(training_name, txt):
    """Build the SQL for get_employee_list: employees matching *txt* who do
    not already have an unfinished answer sheet for *training_name*
    (NOT EXISTS over get_sub_query), excluding Administrator and the session
    user.

    SECURITY(review): *txt* and the session user are str.format-ed into the
    query — confirm trusted origin or parameterize with the caller.
    """
    return """ select name, employee_name from
        `tabEmployee` emp
        where NOT EXISTS ({cond})
        and user_id not in ('Administrator', '{usr}')
        and (name like '{txt}' or employee_name like '{txt}')
        limit 20 """.format(cond=get_sub_query(training_name), txt="%%%s%%" % txt, usr=frappe.session.user)
def get_sub_query(training_name):
    """Correlated sub-query (references the outer `emp` alias) selecting the
    employee's latest unfinished Answer Sheet for *training_name*; used
    inside NOT EXISTS by get_emp_query."""
    return """ select *
        from `tabAnswer Sheet` ans
        where ans.answer_sheet_status in ("New", "Open", "Pending")
        and ans.student_name = emp.user_id
        and ans.training_name = '{0}'
        order by ans.creation desc limit 1 """.format(training_name)
# def get_sub_query(training_name):
# return """ select ( select tsa.training_requester from `tabTraining Subscription Approval` tsa
# where tsa.request_status in ("Open", "Accepted") and tsa.training_name = '{0}'
# and tsa.training_requester= emp.user_id order by creation desc limit 1) as emp_user_id from
# `tabEmployee` emp """.format(training_name)
def get_filtered_employee(cond, txt):
    """Build SQL selecting employees matching *txt* whose user_id is NOT in
    the comma-separated, pre-quoted id list *cond*; Administrator and the
    session user are always excluded.

    An empty *cond* is replaced with the literal '' so the IN (...) clause
    stays syntactically valid.
    """
    cond = cond if cond else "''"
    return """ select name, employee_name from
        `tabEmployee`
        where user_id not in ({cond})
        and user_id not in ('Administrator', '{usr}')
        and (name like '{txt}' or employee_name like '{txt}')
        limit 20 """.format(cond=cond, txt="%%%s%%" % txt, usr=frappe.session.user)
@frappe.whitelist()
def create_training_download_log(training_name, ans_sheet):
    """Record that the session user downloaded *training_name*, linked to the
    answer sheet that entitles them to take the test."""
    download_log = frappe.new_doc("Training Download Log")
    download_log.update({
        "user_id": frappe.session.user,
        "training_name": training_name,
        "downloaded_datetime": now(),
        "answer_sheet_link": ans_sheet,
    })
    download_log.save(ignore_permissions=True)
@frappe.whitelist()
def get_my_trainings():
    """All answer sheets of the session user joined with their training,
    newest first, annotated with download/feedback metadata (see
    get_meta_data_of_response, which mutates the rows in place)."""
    response_data = frappe.db.sql(""" select tr.training_name, tr.training_path,
        ans.answer_sheet_status, ans.name as ans_sheet, ans.creation,
        ans.percentage_score from
        `tabAnswer Sheet` ans join `tabTraining` tr
        on ans.training_name = tr.name
        where student_name = %s order by ans.creation desc""", (frappe.session.user), as_dict=1)
    get_meta_data_of_response(response_data)
    return response_data
@frappe.whitelist()
def get_meta_data_of_response(response_data):
    """Annotate each answer-sheet row (mutated in place) with display fields:
    download-log flag, human-readable assessment status, tooltip, formatted
    submission date and any feedback form already filed."""
    # Internal status -> label shown on the dashboard.
    mapper = {"New": "Not Completed", "Pending": "Partial Completed", "Open": "Test Completed", "Closed": "Result Declared"}
    for response in response_data:
        # Truthy when the user downloaded this training under this sheet.
        response["download_flag"] = frappe.db.get_value("Training Download Log", {"training_name": response.get("training_name"),
            "user_id": frappe.session.user, "answer_sheet_link": response.get("ans_sheet")}, "name")
        response["assessment_status"] = mapper.get(response.get("answer_sheet_status")) if response.get("answer_sheet_status") else ""
        response["tooltip_title"] = "{0} test Completed".format(response.get("training_name")) if response.get("answer_sheet_status") in ["Open", "Closed"] else " Test allowed after training download !!!!"
        response["sub_date"] = formatdate(response.get("creation"))
        response["feedback_form"] = frappe.db.get_value("Training Feedback", {"user": frappe.session.user, "answer_sheet": response.get("ans_sheet"), "training": response.get("training_name")}, "name")
@frappe.whitelist()
def check_answer_sheet_status(ans_sheet):
    """Return the current status of the named Answer Sheet."""
    status = frappe.db.get_value("Answer Sheet", {"name": ans_sheet}, 'answer_sheet_status')
    return status
@frappe.whitelist()
def get_feedback_questionnaire():
    """Active questions of the 'Training Questionnaire' feedback form."""
    questionnaire_filters = {"parent": "Training Questionnaire", "status": 1}
    return frappe.get_all("IP Questionnaire", filters=questionnaire_filters, fields=["*"])
@frappe.whitelist()
def create_feedback_questionnaire_form(answer_dict, ans_sheet, training):
    """Store the session user's feedback answers for a completed training."""
    feedback = frappe.get_doc({
        "doctype": "Training Feedback",
        "user": frappe.session.user,
        "user_answers": json.loads(answer_dict),
        "training": training,
        "answer_sheet": ans_sheet
    })
    feedback.flags.ignore_permissions = True
    feedback.insert()
    return "success"
shrikant9867/mycfo | mycfo/mycfo_utils.py | from __future__ import unicode_literals
import frappe
import shutil
import json
import os
from frappe.utils import get_datetime, now
import subprocess
from openpyxl import load_workbook
def get_central_delivery():
    """E-mail addresses of all non-Administrator users holding the
    'Central Delivery' role."""
    rows = frappe.db.sql(""" select distinct usr.email from `tabUser` usr
        left join `tabUserRole` usr_role
        on usr_role.parent = usr.name
        where usr.name != "Administrator"
        and usr_role.role = "Central Delivery" """, as_dict=1)
    emails = []
    for row in rows:
        if row.get("email"):
            emails.append(row.get("email"))
    return emails
def make_login_log():
    """Record a login event for the current session (user, time, session id)."""
    entry = frappe.new_doc("Login Log")
    entry.update({
        "user": frappe.session.user,
        "login_time": now(),
        "sid": frappe.session.sid,
    })
    entry.save(ignore_permissions=True)
def update_login_log():
    """Stamp the logout time on the current session's Login Log row, if one
    exists (no-op otherwise)."""
    log_name = frappe.db.get_value("Login Log", {"sid": frappe.session.sid}, "name")
    if not log_name:
        return
    frappe.db.sql(""" update `tabLogin Log` set log_out_time = %s where name = %s """, (now(), log_name))
    frappe.db.commit()
def get_mycfo_users():
    """Enabled 'Mycfo User' accounts that do not also hold the 'Customer'
    role (Administrator excluded).

    Fixes: (1) when no customer users existed the old code produced the
    invalid SQL ``not in ()``; the exclusion clause is now only added when
    there are names to exclude.  (2) names are bound as SQL parameters
    instead of being hand-quoted and joined into the statement.
    """
    customer_rows = frappe.db.sql(""" select distinct usr.name from `tabUser` usr
        left join `tabUserRole` usr_role
        on usr_role.parent = usr.name
        where usr.name != "Administrator"
        and usr_role.role = "Customer"
        and usr.enabled = 1 """, as_dict=1)
    customers = [row.get("name") for row in customer_rows if row.get("name")]
    exclusion = ""
    if customers:
        placeholders = ', '.join(['%s'] * len(customers))
        exclusion = " and usr.name not in ({0}) ".format(placeholders)
    mycfo_rows = frappe.db.sql(""" select distinct usr.name from `tabUser` usr
        left join `tabUserRole` usr_role
        on usr_role.parent = usr.name
        where usr.name != "Administrator"
        {0}
        and usr_role.role = "Mycfo User"
        and usr.enabled = 1 """.format(exclusion), tuple(customers), as_dict=1)
    return [row.get("name") for row in mycfo_rows if row.get("name")]
def init_ip_file_conversion():
    """Scheduler task: run every pending IP File conversion (up to 7 retries
    each), record per-file success/failure, and write one batch log row.

    Each converter row carries a JSON-encoded external command in `command`
    that produces the viewer output; xlsm sources are first re-saved as xlsx.
    """
    conv_details = frappe.get_all("IP File Converter", fields=["*"], filters={"converter_status": 0, "attempt_count": ["<=", "7"]})
    status_list = []
    errors = 0
    for row in conv_details:
        my_dict = {}
        try:
            # Macro workbooks must be converted to plain xlsx first.
            if row.get("file_extension") == "xlsm":
                convert_xlsm_to_xlsx(row.get("xlsm_path"), row.get("file_path"))
            # Clear any stale output directory from a previous attempt.
            if os.path.isdir(row.get("dir_path", "")):
                shutil.rmtree(row.get("dir_path", ""), ignore_errors=True)
            # The conversion command is stored as a JSON argv list.
            args = json.loads(row.get("command"))
            subprocess.check_call(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            ip_file_cond = " file_viewer_path = '%s' " % frappe.db.escape(row.get("file_viewer_path"))
            update_ip_file(row.get("ip_file"), ip_file_cond)
            my_dict = {"converter_status": 1, "attempt_count": int(row.get("attempt_count", 0)) + 1}
            status_list.append({"ip_file": row.get("ip_file"), "status": "Conversion Success", "errors": ""})
            # The intermediate edited copy is only needed during conversion.
            if os.path.isfile(row.get("edited_file_path")):
                os.remove(row.get("edited_file_path"))
        except Exception, e:
            # Failure: keep converter_status 0 so the row is retried, but
            # record the traceback and bump the attempt counter.
            my_dict = {"traceback": frappe.get_traceback(), "attempt_count": int(row.get("attempt_count", 0)) + 1}
            status_list.append({"ip_file": row.get("ip_file"), "status": "Conversion Unsuccessful", "errors": e.message})
            errors += 1
        finally:
            update_ip_file_converter(row.get("name"), my_dict)
    create_ip_file_converter_log(len(conv_details), errors, status_list)
def update_ip_file(ip_file, ip_file_cond):
    """Apply the pre-built SET clause *ip_file_cond* to the named IP File row
    and commit.

    Fix: the document name is now bound as a SQL parameter; the old code
    pasted a manually-escaped name into the statement.  *ip_file_cond* is
    still trusted SQL assembled by the caller.
    """
    query = """ update `tabIP File` set {0} where name = %s """.format(ip_file_cond)
    frappe.db.sql(query, (ip_file,))
    frappe.db.commit()
def update_ip_file_converter(ipc_name, ipc_dict):
    """Merge *ipc_dict* into the IP File Converter document and persist it."""
    converter = frappe.get_doc("IP File Converter", ipc_name)
    converter.update(ipc_dict)
    converter.save(ignore_permissions=True)
def create_ip_file_converter_log(total_count, error_count, converter_log_dict):
    """Persist one summary row for a conversion batch run."""
    log_entry = frappe.new_doc("IP File Converter Log")
    log_entry.update({
        "total_count": total_count,
        "error_count": error_count,
        "converter_log_dict": json.dumps(converter_log_dict),
    })
    log_entry.save(ignore_permissions=True)
def convert_xlsm_to_xlsx(xlsm_path, xls_path):
    """Re-save a macro-enabled workbook (.xlsm) as a plain workbook at
    *xls_path*.

    NOTE(review): load_workbook() is called without keep_vba=True, so macros
    are dropped — presumably intended for the viewer pipeline; confirm.
    """
    wb = load_workbook(xlsm_path)
    ws = wb.active
    # NOTE(review): writing the constant 42 into cell D2 mutates user data
    # and matches the openpyxl docs sample line — confirm this is intentional
    # and not leftover example code.
    ws['D2'] = 42
    wb.save(xls_path)
shrikant9867/mycfo | mycfo/trainings/doctype/training_subscription_approval/training_subscription_approval.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from mycfo.mycfo_utils import get_central_delivery
class TrainingSubscriptionApproval(Document):
    """Approval workflow for a training subscription, either self-requested
    ('Unforced Training') or assigned by Central Delivery ('Forced Training')."""

    def validate(self):
        pass

    def before_submit(self):
        self.validate_for_central_delivery_status()
        self.initiate_for_request_submission()

    def validate_for_central_delivery_status(self):
        """Submission is meaningless without an Accept/Reject decision."""
        if not self.central_delivery_status:
            frappe.throw("Central delivery Status is mandatory to submit training request.")

    def initiate_for_request_submission(self):
        """Dispatch to accept/reject handling based on the decision."""
        mapper = {"Accepted": self.accept_request, "Rejected": self.reject_request}
        mapper.get(self.central_delivery_status)()

    def accept_request(self):
        """Accept: create the requester's answer sheet and send the
        notification appropriate to the request type."""
        self.request_status = "Accepted"
        # Forced trainings notify the assignee; self-requested ones notify
        # the Central Delivery group.
        request_type_dict = {
            "Forced Training": ["/templates/training_templates/assigned_training_notification.html",
                [frappe.db.get_value("User", {"name": self.training_requester}, "email")]],
            "Unforced Training": ["/templates/training_templates/training_request_notification.html",
                get_central_delivery()],
        }
        template = request_type_dict.get(self.request_type)[0]
        recipients = request_type_dict.get(self.request_type)[1]
        self.create_answer_sheet()
        self.send_mail(template, recipients)

    def create_answer_sheet(self):
        """Copy the linked Assessment into a fresh Answer Sheet for the requester."""
        as_data = frappe.get_doc("Assessment", {"name": self.assessment})
        ans_key = frappe.new_doc("Answer Sheet")
        ans_key.answer_sheet_status = "New"
        ans_key.student_name = self.training_requester
        ans_key.training_subscription = self.name
        ans_key.update(self.get_assessment_dict(as_data))
        ans_key.save(ignore_permissions=1)

    def get_assessment_dict(self, as_data):
        """Fields copied verbatim from the Assessment onto the Answer Sheet."""
        return {
            "total_questions": as_data.get("total_questions"),
            "total_marks": as_data.get("total_marks"),
            "table_5": as_data.get("table_5"),
            "training_name": as_data.get("training_name"),
            "assessment_evaluator": as_data.get("assessment_evaluator"),
            "subjective_flag": as_data.get("subjective_flag")
        }

    def reject_request(self):
        """Reject the request and notify the requester.

        BUGFIX: the old code called ``self.send_mail(template)`` without the
        required ``recipients`` argument, raising TypeError on every
        rejection.  The requester's e-mail address is now passed.
        """
        self.request_status = "Rejected"
        template = "/templates/training_templates/training_request_notification.html"
        recipients = [frappe.db.get_value("User", {"name": self.training_requester}, "email")]
        self.send_mail(template, recipients)

    def send_mail(self, template, recipients):
        """Render *template* with the request context and mail it to *recipients*."""
        subject = "Training Document Notification"
        first_nm, last_nm = frappe.db.get_value("User", {"name": self.training_requester}, ["first_name", "last_name"])
        args = {"training_name": self.training_name, "cd": frappe.session.user, "first_name": first_nm,
            "last_name": last_nm if last_nm else "", "comments": self.central_delivery_comments, "status": self.request_status}
        frappe.sendmail(recipients=recipients, sender=None, subject=subject,
            message=frappe.get_template(template).render(args))
|
shrikant9867/mycfo | mycfo/mycfo/report/el_sign_off_report/el_sign_off_report.py | <reponame>shrikant9867/mycfo
# Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, data) for the EL Sign Off report."""
    return get_columns(), get_data(filters)
def get_data(filters):
    """Rows for the report; non-Administrator users see only their own
    sign-off records.

    Fixes: the session user is now bound as a SQL parameter (the old code
    %-formatted it into the query string), and the stray
    ``frappe.errprint`` debug call was removed.
    """
    if frappe.session.user != "Administrator":
        return frappe.db.sql("""select customer, sign_off_datetime, el_user, user_id from
            `tabEL Sign Off Details` where owner = %s order by creation """, (frappe.session.user,), as_list=1)
    return frappe.db.sql("""select customer, sign_off_datetime, el_user, user_id from
        `tabEL Sign Off Details` order by creation """, as_list=1)
def get_columns():
return [
_("Customer") + ":Link/Customer:250",
_("Sign Off Datetime") + ":Datetime:200",
_("EL User Name") + ":Data:200",
_("EL User ID") + ":Link/User:250"
] |
shrikant9867/mycfo | mycfo/trainings/doctype/assessment/assessment.py | <filename>mycfo/trainings/doctype/assessment/assessment.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Assessment(Document):
    """A question paper: validates its question rows (child table `table_5`)
    and derives total marks / question count."""

    def validate(self):
        self.validate_for_answers()
        self.set_subjective_flag()
        self.validate_for_questions()
        # Derived totals over the child question rows.
        self.total_marks = sum([row.total_marks for row in self.table_5 if row.total_marks])
        self.total_questions = len(self.table_5)

    def set_subjective_flag(self):
        # "Yes" when at least one subjective question exists; the for/else
        # sets "No" only if the loop finishes without a break.
        for row in self.table_5:
            if row.question_type == 'Subjective':
                self.subjective_flag = "Yes"
                break
        else:
            self.subjective_flag = "No"

    def validate_for_answers(self):
        """Per-row sanity checks: objective questions need at least options
        A/B and a valid answer letter; options must be distinct; marks must
        be positive."""
        # Answer letter -> option fieldname.
        mapper = {"A": "option_a", "B": "option_b", "C": "option_c", "D": "option_d", "E": "option_e"}
        for row in self.table_5:
            options = [row.get(option).strip() for option in ["option_a", "option_b", "option_c", "option_d", "option_e"] if row.get(option)]
            if row.question_type == "Objective":
                if not row.option_a or not row.option_b:
                    frappe.throw("Option A and option B are mandatory for objective questions for row no {0}".format(row.idx))
                # The chosen answer letter must map to a filled-in option.
                if not row.objective_answer or not row.get(mapper.get(row.objective_answer)):
                    frappe.throw("Please provide valid answer for row no {0}".format(row.idx))
            if len(set(options)) != len(options):
                frappe.throw("Duplicate answer not allowed for row no {0}".format(row.idx))
            if row.total_marks <= 0:
                frappe.throw("Total Marks for question should be non-zero digit for row no {0}".format(row.idx))

    def validate_for_questions(self):
        # An assessment without questions cannot be saved.
        if not self.table_5:
            frappe.throw("Questions are mandatory to save assessment.")
def get_permission_query_conditions(user):
    """List-view filter: restrict non-Central-Delivery, non-Administrator
    users to assessments they own (None = no restriction)."""
    roles = frappe.get_roles()
    if "Central Delivery" in roles or frappe.session.user == "Administrator":
        return None
    return """(`tabAssessment`.owner = "{user}" )""".format(user=frappe.session.user)
shrikant9867/mycfo | mycfo/kpi/page/resourcepool/resourcepool.py | from __future__ import unicode_literals
import frappe
from frappe.utils import cstr,now,add_days
import json
@frappe.whitelist()
def get_sample_data():
    """Resource-pool page data: every Skill Mapping Details row (skill,
    sub-skill and proficiency columns) with the employee name and industry
    of the parent Skill Mapping looked up via correlated sub-queries."""
    return {
        "get_sample_data": frappe.db.sql("""select skill,sub_skill,none_field,beginner,imtermediatory,expert, parent as a,CASE WHEN skill !='' then (select employee_name from `tabSkill Mapping` where name=a)\
            else null\
            END AS Name,\
            CASE WHEN skill !='' then (select industry from `tabSkill Mapping` where name=a)\
            else null\
            END AS ind\
            from `tabSkill Mapping Details` where skill is not null order by sub_skill""", as_list=1)
    }
shrikant9867/mycfo | mycfo/ip_library/scheduler_tasks.py | from __future__ import unicode_literals
from frappe.utils import now, today
import frappe
from mycfo.mycfo_utils import get_central_delivery
def update_ip_download_approval_status():
    """Scheduler task: expire every submitted IP Download Approval whose
    validity window has ended (clear the end date, set status 'Expired').

    Fix: the document names and timestamps are now bound as SQL parameters;
    the old code hand-quoted and joined the names into the statement.
    """
    rows = frappe.db.sql(""" select name from `tabIP Download Approval`
        where docstatus=1 and validity_end_date <= %s """, (now(),), as_list=1)
    names = [row[0] for row in rows if row]
    if names:
        placeholders = ', '.join(['%s'] * len(names))
        query = """ update `tabIP Download Approval`
            set validity_end_date = null , approval_status = 'Expired',
            modified = %s
            where name in ({0}) """.format(placeholders)
        frappe.db.sql(query, tuple([now()] + names))
def send_notification_for_expiry_of_document():
    """Warn owners about published IP Files expiring exactly 7 and 3 days
    from today."""
    for days_left in (7, 3):
        expiring = frappe.db.sql(""" select name, owner, file_name, validity_end_date from `tabIP File`
            where published_flag = 1
            and DATEDIFF(validity_end_date, CURDATE()) = %s """, (days_left), as_dict=1)
        send_mail(expiring)
def send_mail(result):
    """Mail each IP File owner in *result* about the upcoming expiry of
    their document, cc'ing the Central Delivery group.

    Fix: removed the leftover Python-2 debug ``print`` statement.
    """
    subject = "IP Document Expiry Notification"
    template = "/templates/ip_library_templates/ip_library_expiry_notification.html"
    central_delivery = get_central_delivery()
    for response in result:
        first, last, email = frappe.db.get_value("User", {"name": response.get("owner")}, ["first_name", "last_name", "email"])
        response["first_name"] = first
        response["last_name"] = last
        frappe.sendmail(recipients=email, sender=None, subject=subject,
            message=frappe.get_template(template).render(response), cc=central_delivery)
# def get_central_delivery():
# central_delivery = frappe.get_list("UserRole", filters={"role":"Central Delivery","parent":["!=", "Administrator"]}, fields=["parent"])
# central_delivery = [user.get("parent") for user in central_delivery]
# return central_delivery
def archive_document():
    """Scheduler task: archive (and unpublish) every published IP File whose
    validity_end_date has passed.

    Fix: the document names are now bound as SQL parameters; the old code
    hand-quoted and joined them into the IN (...) clause.
    """
    rows = frappe.db.sql(""" select name,file_name, validity_end_date from `tabIP File`
        where published_flag = 1
        and validity_end_date < CURDATE() """, as_dict=1)
    names = [row.get("name") for row in rows if row]
    if names:
        placeholders = ', '.join(['%s'] * len(names))
        query = """ update `tabIP File` set file_status = 'Archived', published_flag = 0
            where name in ({0}) """.format(placeholders)
        frappe.db.sql(query, tuple(names))
shrikant9867/mycfo | mycfo/ip_library/doctype/quick_survey/test_quick_survey.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Quick Survey')
class TestQuickSurvey(unittest.TestCase):
    """Placeholder test case for the Quick Survey doctype; no tests yet."""
    pass
|
shrikant9867/mycfo | mycfo/mycfo/doctype/country_code/country_code.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CountryCode(Document):
    """Master record mapping a country name to its dialing code; a code may
    belong to only one country."""

    def validate(self):
        self.validate_country_code()

    def validate_country_code(self):
        """Reject a code already assigned to another Country Code record.

        Fix: values are bound as SQL parameters instead of being %-formatted
        into the query (a quote in either value previously broke or injected
        into the statement).
        """
        duplicate = frappe.db.sql("""select name from `tabCountry Code` where country_code=%s and name!=%s
            """, (self.country_code, self.name), as_list=1)
        if duplicate:
            frappe.msgprint("Country code '%s' is already specified for the country '%s' " % (self.country_code, self.country_name), raise_exception=1)
|
shrikant9867/mycfo | mycfo/mycfo/page/operational_matrix/operational_matrix.py | <reponame>shrikant9867/mycfo
import frappe
from frappe.model.document import Document
from frappe.utils import nowdate, cstr, flt, now, getdate, add_months
from frappe.model.mapper import get_mapped_doc
import datetime
import json
@frappe.whitelist()
def get_operational_matrix_data(customer=None):
    """Active 'Operation And Project Commercial' records of *customer*, each
    annotated with its child rows under 'child_records'.

    Returns {"final_data": [...]}, where each element is itself a one-element
    list of dicts (shape apparently expected by the page JS — confirm).

    SECURITY(review): *customer* is %-formatted straight into the SQL —
    injection risk; should be parameterized.
    NOTE(review): falls through and implicitly returns None when the loop
    runs but final_data stays empty — confirm callers handle that.
    """
    om_name = frappe.db.sql("""select name from `tabOperation And Project Commercial` where customer='%s' and operational_matrix_status='Active' order by creation desc"""%customer,as_list=1)
    final_data = []
    if len(om_name)>0:
        for name in om_name:
            om_data = frappe.db.sql("""select * from `tabOperation And Project Commercial` where name='%s'"""%(name[0]),as_dict=1)
            om_child_table = frappe.db.sql("""select role,user_name,email_id,contact from `tabOperation And Project Details` where parent='%s'"""%name[0],as_dict=1)
            if om_child_table:
                # Attach the child rows to the parent record.
                if len(om_child_table)>0:
                    om_data[0]['child_records'] = om_child_table
                else:
                    final_data.append(om_data)
                if len(om_data)>0:
                    final_data.append(om_data)
            else:
                final_data.append(om_data)
    else:
        return {"final_data": final_data}
    if len(final_data)>0:
        return {"final_data": final_data}
@frappe.whitelist()
def get_operational_matrix_details(customer=None,operational_matrix=None,target_doc=None,ignore_permissions=True):
    """Link *operational_matrix* to *customer*: copy it into a new 'Operation
    And Project Commercial' if no link exists, re-activate a deactivated
    link, or report that it is already linked.  Returns the refreshed data
    via get_operational_matrix_data().
    """
    if not frappe.db.get_value("Operation And Project Commercial",{'operational_id':operational_matrix,'customer':customer},'name'):
        # No link yet: map the Operational Matrix (and its child table) into
        # a new commercial record for this customer.
        doclist = get_mapped_doc("Operational Matrix", operational_matrix, {
            "Operational Matrix": {
                "doctype": "Operation And Project Commercial"
            },
            "Operation Details": {
                "doctype": "Operation And Project Details",
            }
        }, target_doc, ignore_permissions=ignore_permissions)
        doclist.customer = customer
        # NOTE(review): ignore_permissions is passed positionally to save() —
        # confirm save()'s first positional parameter is ignore_permissions
        # in the frappe version in use.
        doclist.save(ignore_permissions)
    elif frappe.db.get_value("Operation And Project Commercial",{'operational_id':operational_matrix,'customer':customer,'operational_matrix_status':'Deactive'},'name'):
        # A deactivated link exists: flip it back to Active.
        name = frappe.db.get_value("Operation And Project Commercial",{'operational_id':operational_matrix,'customer':customer,'operational_matrix_status':'Deactive'},'name')
        frappe.db.sql("""update `tabOperation And Project Commercial` set operational_matrix_status='Active'
            where name='%s'"""%name)
        frappe.db.commit()
        frappe.msgprint("Specified operation matrix '%s' is get linked to customer '%s' please check below records."%(operational_matrix,customer))
    else:
        frappe.msgprint("Specified operation matrix '%s' is already linked to customer '%s'."%(operational_matrix,customer))
    last_final_data = get_operational_matrix_data(customer)
    return {"final_data": last_final_data['final_data']}
@frappe.whitelist()
def get_filtered_data(customer=None, operational_matrix=None):
    """Return Active "Operation And Project Commercial" records matching the
    optional customer / operational-matrix filters, children attached under
    'child_records'.

    Always returns {"final_data": [...]} — the original returned None when
    nothing matched, forcing every caller to special-case it. Per-record
    lookups are parameterized; the filter fragment still comes from
    get_item_conditions (values are interpolated there — trusted internal
    callers only).
    """
    final_data = []
    om_names = frappe.db.sql(
        """select name from `tabOperation And Project Commercial`
           where operational_matrix_status='Active' and %s """
        % get_item_conditions(customer, operational_matrix), as_list=1)
    for name in om_names:
        om_data = frappe.db.sql(
            """select * from `tabOperation And Project Commercial` where name=%s""",
            (name[0],), as_dict=1)
        om_child_table = frappe.db.sql(
            """select role,user_name,email_id,contact
               from `tabOperation And Project Details` where parent=%s""",
            (name[0],), as_dict=1)
        if om_data and om_child_table:
            om_data[0]['child_records'] = om_child_table
        final_data.append(om_data)
    return {"final_data": final_data}
def get_item_conditions(customer, operational_matrix):
    """Build the SQL condition fragment for the optional customer /
    operational-matrix filters.

    Returns "" when neither filter is given, otherwise a leading-space
    fragment such as " customer='X' and operational_id='Y'".
    """
    candidates = [
        ("customer", customer),
        ("operational_id", operational_matrix),
    ]
    clauses = ["{0}='{1}'".format(column, value) for column, value in candidates if value]
    if not clauses:
        return ""
    return " " + " and ".join(clauses)
@frappe.whitelist()
def deactivate_records(operational_record=None, customer=None):
    """Mark one "Operation And Project Commercial" record Deactive, then
    return the customer's refreshed active-matrix data.

    Fix: the record name was %-interpolated into the UPDATE statement; it is
    now passed as a bound parameter.

    Returns {"final_data": [...]} when *customer* yields data, else None
    (same contract as before).
    """
    if operational_record:
        frappe.db.sql(
            """update `tabOperation And Project Commercial`
               set operational_matrix_status='Deactive' where name=%s""",
            (operational_record,))
        frappe.db.commit()
    if customer:
        last_final_data = get_operational_matrix_data(customer)
        if last_final_data:
            return {"final_data": last_final_data['final_data']}
        else:
            return None
@frappe.whitelist()
def get_operational_matrix(operational_matrix=None):
    """Return the role/contact child rows of an Operational Matrix template.

    Fix: the template name was %-interpolated into the SQL string; it is now
    a bound parameter. Returns {'final_data': rows} when rows exist, else
    None (unchanged contract).
    """
    om_child_table = frappe.db.sql(
        """select role,user_name,email_id,contact
           from `tabOperation Details` where parent=%s""",
        (operational_matrix,), as_dict=1)
    if om_child_table:
        return {'final_data': om_child_table}
@frappe.whitelist()
def get_unlinked_operation_matrix(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search query: Operational Matrix templates NOT already
    actively linked to filters["customer"], matched by name or title against
    *txt*.

    Uses the standard frappe search-query signature; start/page_len are
    unused — results are simply capped at 20. Query is parameterized.
    """
    return frappe.db.sql(""" select om.name, om.operational_matrix_title
        from `tabOperational Matrix` om
        where om.name not in ( select operational_id from
        `tabOperation And Project Commercial`
        where operational_matrix_status = 'Active' and customer = %(customer)s )
        and ( om.name like %(txt)s or om.operational_matrix_title like %(txt)s ) limit 20 """,
        { 'txt': "%%%s%%" % txt, 'customer':filters.get("customer") } ,as_list=1 )
|
shrikant9867/mycfo | mycfo/mycfo/report/customer_skill_mapping_report/customer_skill_mapping_report.py | <filename>mycfo/mycfo/report/customer_skill_mapping_report/customer_skill_mapping_report.py
# Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Query-report entry point: return (columns, data) for the Customer
    Skill Mapping report."""
    return get_columns(), get_data(filters)
def get_data(filters):
    """Aggregate skill scores per Skill Matrix 120 bucket.

    For each 120-level skill linked to Customer Skill Mapping detail rows,
    returns [skill_matrix_18, skill_matrix_120, row_count, weighted_sum,
    average], where the weighted sum scores beginner=1, intermediate=2,
    expert=3. *filters* is currently ignored.
    """
    result = []
    result = frappe.db.sql("""
        select tmp.skill_matrix_18, tmp.skill_matrix_120,
        tmp.total_skill, tmp.sum_total_skill, (tmp.sum_total_skill / tmp.total_skill) as average_Skill
        from (
        select skmt.skill_matrix_18, skmt.name as skill_matrix_120,
        count(smd.name) as total_skill,
        ( sum(smd.beginner) * 1 + sum(smd.imtermediatory) * 2 + sum(smd.expert) * 3) as sum_total_skill
        from `tabSkill Matrix 120` skmt
        join `tabSkill Mapping Details` smd
        on skmt.name = smd.sub_skill
        and smd.parenttype = "Customer Skill Mapping"
        group by skmt.name order by skmt.skill_matrix_18 ) as tmp """, as_list=1)
    return result
def get_columns():
    """Report column definitions in frappe "Label:Type/Options:Width" form."""
    specs = [
        ("Skill Matrix 18", ":Link/:200"),
        ("Skill Matrix 120", ":Link/:300"),
        ("Count of Total Skill", ":Int:150"),
        ("Sum of Total Skill", ":Int:150"),
        ("Average Total Skill", ":Float:150"),
    ]
    return [_(label) + layout for label, layout in specs]
|
shrikant9867/mycfo | mycfo/ip_library/doctype/ip_file/ip_file.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import cint, getdate, today, getdate
import base64
import json
import os
from mycfo.mycfo_utils import get_central_delivery
class IPFile(Document):
    """Controller for the "IP File" doctype: an intellectual-property document
    that is uploaded, stored on disk, and routed through an "IP Approver"
    workflow; archiving and validity extension go through separate requests.

    NOTE(review): written for Python 2 / legacy Frappe (`except Exception,e`,
    `tabUserRole`) — keep that in mind before modernising.
    """

    def validate(self):
        # Save-time pipeline: field checks, file persistence, then
        # creation/refresh of the linked IP Approver request.
        self.validate_duplicate_tag()
        self.validate_for_duplicate_file_name()
        self.validity_for_cd_users()
        self.validate_for_file_data()
        self.store_document()
        self.create_request_for_ip_approval()

    def validate_for_duplicate_file_name(self):
        """Reject a brand-new IP File whose file_name is already taken."""
        if cint(self.get("__islocal")):
            if frappe.db.get_value("IP File", {"name":self.file_name}, "name"):
                frappe.throw("Ip File with same name already exists.")

    def validity_for_cd_users(self):
        """Block saving when no Central Delivery user exists — the approval
        workflow could never progress."""
        if not len(get_central_delivery()):
            frappe.throw("There are no Central Delivery users in system.Please upload IP File later.")

    def init_for_add_comment(self):
        # Audit-trail comment recording the status/request transition.
        self.add_comment("File status Changed to {0} for request_type {1}".format(self.file_status, self.request_type))

    def validate_for_file_data(self):
        """A new record must carry an uploaded file payload."""
        if not self.file_data and cint(self.get("__islocal")):
            frappe.throw("Please upload the IP document for publishing.")

    def store_document(self):
        """Decode the base64 upload and write it under
        files/mycfo/edited_file/<document_type>/, recording the relative path
        in self.new_file_path. Skipped for Archive / Upgrade Validity
        requests, which carry no new file."""
        self.create_directory()
        try:
            if self.file_data and self.request_type not in ["Archive", "Upgrade Validity"]:
                # file_data["file_data"] is a data URL
                # ("data:...;base64,<payload>"); keep only the payload part.
                base64_data = self.file_data.get("file_data").encode("utf8")
                base64_data = base64_data.split(',')[1]
                base64_data = base64.b64decode(base64_data)
                extension = "." + self.file_extension if self.file_extension else ""
                file_path = frappe.get_site_path("public","files", "mycfo", "edited_file", self.document_type, self.file_name + extension)
                with open(file_path, "wb+") as fi_nm:
                    fi_nm.write(base64_data)
                self.new_file_path = '/'.join(["files", "mycfo", "edited_file", self.document_type, self.file_name + extension])
        except Exception,e:
            # NOTE(review): swallows the original error; logging `e` would
            # make failures diagnosable.
            frappe.throw("File Upload Error")

    def create_directory(self):
        """Ensure the edited_file / published_file directory trees exist for
        this document_type."""
        if not os.path.exists(frappe.get_site_path("public", "files", "mycfo")):
            os.makedirs(frappe.get_site_path("public", "files", "mycfo", "edited_file"))
            os.mkdir(frappe.get_site_path("public", "files", "mycfo", "published_file"))
        if not os.path.exists(frappe.get_site_path("public", "files", "mycfo", "edited_file", self.document_type)):
            os.makedirs(frappe.get_site_path("public", "files", "mycfo", "edited_file", self.document_type))
            os.mkdir(frappe.get_site_path("public", "files", "mycfo", "published_file", self.document_type))

    def create_request_for_ip_approval(self):
        """Create (first upload) or refresh (re-upload) the linked IP Approver
        record for New/Edit requests and set the matching pending status."""
        status_dict = {"New":"New Upload Pending", "Edit":"Edit Pending"}
        if not self.approver_link and self.file_data and self.request_type not in ["Archive", "Upgrade Validity"]:
            # First upload: open a new approver request.
            ipa = self.create_ip_approver_form(self.validity_end_date, self.new_file_path)
            self.approver_link = ipa.name
            self.file_status = status_dict.get(self.request_type)
            if "Central Delivery" not in frappe.get_roles():
                self.run_post_ip_approver_method()
        elif self.file_data and self.approver_link:
            # Re-upload: point the existing approver request at the new file.
            ipa = frappe.get_doc("IP Approver", self.approver_link)
            ipa.file_path = self.new_file_path
            ipa.save(ignore_permissions=True)
            if "Central Delivery" not in frappe.get_roles():
                self.run_post_ip_approver_method()
            self.file_status = status_dict.get(self.request_type)
        # Never persist the raw upload payload on the document itself.
        self.file_data = ""

    def validate_duplicate_tag(self):
        """Each tag may appear at most once in the ip_file_tags child table."""
        tags = []
        for d in self.get("ip_file_tags"):
            if d.ip_tags not in tags:
                tags.append(d.ip_tags)
            else:
                frappe.throw("Please remove duplicate tags first..")

    def init_for_validity_upgradation(self):
        """Raise an "Upgrade Validity" request from self.new_validity
        (dd-mm-YYYY). Auto-approved and submitted when the requester is
        Central Delivery; otherwise left pending and CD is notified."""
        from datetime import datetime
        self.request_type = "Upgrade Validity"
        validity = datetime.strptime(self.new_validity, '%d-%m-%Y').strftime('%Y-%m-%d')
        ipa = self.create_ip_approver_form(validity, self.file_path)
        if "Central Delivery" in frappe.get_roles():
            ipa.central_delivery = frappe.session.user
            ipa.central_delivery_status = "Approved"
            ipa.submit()
        else:
            self.file_status = "Upgrade Validity Pending"
            self.new_validity = ""
            self.init_for_add_comment()
            self.prepare_for_cd_notification()
        self.save()

    def create_ip_approver_form(self, validity_end_date, file_path):
        """Build, save and return an IP Approver record mirroring this file's
        metadata.

        NOTE(review): the `file_path` argument is never used — the form
        always takes self.new_file_path (see below). Confirm whether the
        Upgrade Validity caller, which passes self.file_path, expects that.
        """
        ipa = frappe.new_doc("IP Approver")
        ipa.request_type = self.request_type
        ipa.current_status = "Open"
        ipa.file_name = self.file_name
        ipa.file_extension = self.file_extension
        ipa.file_description = self.description
        ipa.file_type = self.document_type
        ipa.customer = self.customer
        ipa.source = self.source
        ipa.industry = self.industry
        ipa.skill_matrix_18 = self.skill_matrix_18
        ipa.skill_matrix_120 = self.skill_matrix_120
        if self.request_type != "Upgrade Validity":
            ipa.approver = self.file_approver or ""
            ipa.employee_name = self.employee_name
        else:
            # Validity upgrades go straight to CD; no named approver.
            ipa.approver = ""
        ipa.validity_end_date = validity_end_date
        ipa.file_path = self.new_file_path
        ipa.ip_file_requester = frappe.session.user
        ipa.ip_file = self.name
        ipa.level_of_approval = self.security_level
        ipa.flags.ignore_mandatory = True
        ipa.save(ignore_permissions=True)
        return ipa

    def on_update(self):
        self.submit_ip_approver_form_for_central_delivery_role()

    def submit_ip_approver_form_for_central_delivery_role(self):
        """Central Delivery's own New/Edit uploads are self-approved: submit
        the linked approver form immediately after save."""
        if "Central Delivery" in frappe.get_roles() and self.request_type in ["New", "Edit"] and self.approver_link:
            ip_approver_form = frappe.get_doc("IP Approver", self.approver_link)
            ip_approver_form.central_delivery = frappe.session.user
            ip_approver_form.central_delivery_status = "Approved"
            ip_approver_form.level_of_approval = self.security_level
            ip_approver_form.validity_end_date = self.validity_end_date
            ip_approver_form.submit()
            frappe.msgprint("Please reload the document.")

    def run_post_ip_approver_method(self):
        # Notify approver + CD, then leave an audit comment.
        self.prepare_for_approver_notification()
        self.init_for_add_comment()

    def prepare_for_cd_notification(self):
        """Email Central Delivery (cc: file owner) about a validity-upgrade
        request."""
        template = "/templates/ip_library_templates/upgrade_validity_request_notification.html"
        subject = "IP Document Upgrade Validity Notification"
        central_delivery = self.get_central_delivery()
        cc = [ frappe.db.get_value("User", {"name":self.owner } ,"email") ] if frappe.session.user != self.owner else []
        args = {"user_name":frappe.session.user, "file_name":self.file_name }
        frappe.sendmail(recipients=central_delivery, sender=None, subject=subject,
            message=frappe.get_template(template).render(args), cc=cc)

    def prepare_for_approver_notification(self):
        """Email Central Delivery plus the designated approver (if any) about
        an upload/edit request."""
        user_id = ""
        if self.file_approver:
            full_name, user_id = frappe.db.get_value("Employee", self.file_approver, ["employee_name", "user_id"])
        central_delivery = self.get_central_delivery()
        # Conditional expression used as a statement: append the approver's
        # email when their user account is known.
        central_delivery.append( frappe.db.get_value("User",{"name":user_id}, "email") ) if user_id else ""
        self.send_mail(central_delivery)

    def get_central_delivery(self):
        """Return the e-mail addresses of all Central Delivery users
        (Administrator excluded)."""
        central_delivery = frappe.db.sql(""" select distinct usr.email from `tabUser` usr
            left join `tabUserRole` usr_role
            on usr_role.parent = usr.name
            where usr.name != "Administrator"
            and usr_role.role = "Central Delivery" """, as_dict=1)
        central_delivery = [user.get("email") for user in central_delivery if user.get("email")]
        return central_delivery

    def send_mail(self, email):
        """Send the approver-notification template to *email* (list of
        addresses)."""
        template = "/templates/ip_library_templates/Ip_file_approver_notification.html"
        subject = "IP File upload request"
        args = {"user_name":frappe.session.user, "file_name":self.file_name,"customer":self.customer}
        frappe.sendmail(recipients=email, sender=None, subject=subject,
            message=frappe.get_template(template).render(args))
@frappe.whitelist()
def get_approver_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search query for the file-approver field: Employees playing
    the "EL" role on the customer's active Operation And Project Commercial,
    excluding the current user, matched by name/employee_name against *txt*.

    Standard frappe search-query signature; start/page_len unused (capped at
    20). Query is parameterized.
    """
    return frappe.db.sql(""" select distinct(opd.user_name), emp.employee_name
        from `tabOperation And Project Details` opd
        join `tabOperation And Project Commercial` opc
        on opd.parent = opc.name
        join `tabEmployee` emp
        on emp.name = opd.user_name
        where opd.role in ("EL")
        and opd.email_id != %(user)s
        and opc.customer = %(customer)s
        and opc.operational_matrix_status = "Active"
        and (emp.name like %(txt)s
        or emp.employee_name like %(txt)s)
        limit 20
        """, {
            'txt': "%%%s%%" % txt,
            'customer':filters.get("customer"),
            "user":frappe.session.user}, as_list=1)
@frappe.whitelist()
def init_for_archive_request(doc):
    """Client entry point: raise an IP Archiver request for the given IP File
    (passed as a JSON string of the document).

    Central Delivery requesters are auto-approved (form submitted at once);
    everyone else's request stays open, CD is e-mailed, and an audit comment
    is added to the IP File.
    """
    doc = json.loads(doc)
    validate_for_archive_request(doc)
    ip_arch = create_archive_request(doc)
    if "Central Delivery" in frappe.get_roles():
        ip_arch.central_delivery = frappe.session.user
        ip_arch.central_delivery_status = "Approved"
        ip_arch.submit()
    else:
        send_archive_notification(doc)
        comment = "File status Changed to Archive Pending for request type Archive."
        # NOTE(review): looks the IP File up by file_name — relies on the
        # document name equalling file_name (enforced on insert); confirm.
        frappe.get_doc("IP File", doc.get("file_name")).add_comment(comment)
def validate_for_archive_request(doc):
    """Only a published (or otherwise terminal-state) IP File may be archived."""
    archivable_states = (
        "Published",
        "Republished",
        "Rejected by CD (Archive)",
        "Rejected by CD (Edit)",
        "Rejected by CD (Validity)",
        "Validity Upgraded",
    )
    if doc.get("file_status") not in archivable_states:
        frappe.throw("File Status must be published or republished to archive document.")
def create_archive_request(doc):
    """Build, save and return an "IP Archiver" record mirroring the IP File's
    metadata (doc is the deserialized IP File dict)."""
    ip_arch = frappe.new_doc("IP Archiver")
    ip_arch.request_type = "Archive"
    ip_arch.current_status = "Open"
    ip_arch.file_name = doc.get("file_name")
    ip_arch.file_description = doc.get("description")
    ip_arch.file_type = doc.get("document_type")
    ip_arch.customer = doc.get("customer")
    ip_arch.source = doc.get("source")
    ip_arch.industry = doc.get("industry")
    ip_arch.skill_matrix_18 = doc.get("skill_matrix_18")
    ip_arch.skill_matrix_120 = doc.get("skill_matrix_120")
    ip_arch.validity_end_date = doc.get("validity_end_date")
    ip_arch.level_of_approval = doc.get("security_level")
    ip_arch.file_path = doc.get("file_path")
    ip_arch.ip_file_owner = doc.get("owner")
    ip_arch.ip_file = doc.get("name")
    ip_arch.archive_requester = frappe.session.user
    # Mandatory fields may be filled later in the approval flow.
    ip_arch.flags.ignore_mandatory = True
    ip_arch.save(ignore_permissions=True)
    return ip_arch
def send_archive_notification(doc):
    """Email all Central Delivery users (cc: file owner, unless the owner is
    the requester) that an archive request was raised for *doc*."""
    template = "/templates/ip_library_templates/archive_request_notification.html"
    subject = "IP Document Archive Request Notification"
    central_delivery = frappe.db.sql(""" select distinct usr.email from `tabUser` usr
        left join `tabUserRole` usr_role
        on usr_role.parent = usr.name
        where usr.name != "Administrator"
        and usr_role.role = "Central Delivery" """, as_dict=1)
    central_delivery = [user.get("email") for user in central_delivery if user.get("email")]
    args = {"user_name":frappe.session.user, "file_name":doc.get("file_name")}
    cc = [ frappe.db.get_value("User", {"name":doc.get("owner") } ,"email") ] if frappe.session.user != doc.get("owner") else []
    frappe.sendmail(recipients=central_delivery, sender=None, subject=subject,
        message=frappe.get_template(template).render(args), cc=cc)
def get_permission_query_conditions(user):
    """List-view permission hook for IP File: users who are neither Central
    Delivery nor Administrator only see files they own or approve.

    Returns None (no restriction) for CD/Administrator; otherwise an SQL
    fragment restricting `tabIP File`.name. Names are escaped via
    frappe.db.escape before interpolation.
    """
    roles = frappe.get_roles()
    if "Central Delivery" not in roles and frappe.session.user != "Administrator":
        emp_name = frappe.db.get_value("Employee",{"user_id":frappe.session.user}, "name")
        ip_files = frappe.db.sql(""" select name from `tabIP File` where owner = '{0}' or file_approver = '{1}' """.format(frappe.session.user, emp_name),as_dict=1)
        ip_files = "', '".join([frappe.db.escape(ipf.get("name")) for ipf in ip_files if ipf])
        return """(`tabIP File`.name in ('{files}') )""".format(files = ip_files)
@frappe.whitelist()
def get_customer_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search query: Customers matched by name or customer_group
    against *txt* (parameterized; capped at 20 results)."""
    return frappe.db.sql(""" select name, customer_group
        from `tabCustomer`
        where (name like %(txt)s
        or customer_group like %(txt)s)
        limit 20
        """, {'txt': "%%%s%%" % txt}, as_list=1)
|
shrikant9867/mycfo | mycfo/mycfo/doctype/operational_matrix/operational_matrix.py | <reponame>shrikant9867/mycfo
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class OperationalMatrix(Document):
    """Template of roles/contacts (operation_details child table) that gets
    copied onto "Operation And Project Commercial" records per customer.

    Saving propagates child-table edits to every linked commercial record;
    deactivating the template deactivates all active links.
    """

    def validate(self):
        if self.operational_matrix_status == 'Deactive':
            self.delink_operational_matrix()
        self.validate_employee_child_table()
        self.validate_child_table_duplication()
        if self.get('operation_details'):
            self.validate_duplicate_entry()
        self.update_child_table_of_ompc()

    def delink_operational_matrix(self):
        """Deactivate every active commercial record linked to this template.

        NOTE(review): interpolates names straight into SQL; fine for
        system-generated names, but parameterization would be safer.
        """
        operational_data = frappe.db.sql("""select name from `tabOperation And Project Commercial` where operational_matrix_status='Active'
            and operational_id='%s'"""%self.name,as_list=1)
        if(len(operational_data))>0:
            for operationa_name in operational_data:
                frappe.db.sql("""update `tabOperation And Project Commercial` set operational_matrix_status='Deactive'
                    where name='%s'"""%operationa_name[0])
            frappe.db.commit()

    def validate_employee_child_table(self):
        """The operation_details child table must have at least one row."""
        if not self.get('operation_details'):
            frappe.msgprint("At least one entry is mandetory in Operation Matrix child table.",raise_exception=1)

    def validate_child_table_duplication(self):
        """Reject duplicate (role, user) pairs in operation_details."""
        record_list = []
        details_dict = {}   # unused, kept for parity with original
        details = {}
        if self.get('operation_details'):
            for d in self.get('operation_details'):
                # One-entry dict per row acts as the (role -> user) key.
                details = {}
                details[d.role] = d.user_name
                if details not in record_list:
                    record_list.append(details)
                else:
                    frappe.msgprint("No duplicate record is allowed to be enter in Operation Details child table",raise_exception=1)

    def update_child_table_of_ompc(self):
        """Mirror this template's child rows onto every linked "Operation And
        Project Commercial" record (full replace, then save)."""
        if self.operation_details:
            ompc = frappe.db.sql("""select t1.name from `tabOperation And Project Commercial`t1,
                `tabOperational Matrix`t2
                where t1.operational_id = t2.name
                and t2.name = '{0}'""".format(self.name),as_list=1)
            ompc_list = [e[0] for e in ompc]
            opm = ''   # unused, kept for parity with original
            for omp in ompc_list:
                om_pc = frappe.get_doc("Operation And Project Commercial",omp)
                om_pc.set("operation_details", [])
                for item in self.operation_details:
                    child = om_pc.append("operation_details", {})
                    child.role = item.role
                    child.email_id = item.email_id
                    child.user_name = item.user_name
                    child.contact = item.contact
                om_pc.save(ignore_permissions = True)

    def validate_duplicate_entry(self):
        # Intentional no-op placeholder (kept for the call in validate()).
        pass
shrikant9867/mycfo | mycfo/mycfo/report/customer_operational_matrix/customer_operational_matrix.py | # Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Query-report entry point: return (columns, data) for the Customer
    Operational Matrix report."""
    return get_columns(), get_data(filters)
def get_data(filters):
    """List each Operation And Project Commercial record with its child
    role/contact rows (left join, so link-less records still appear),
    optionally filtered by customer substring via get_conditions."""
    result = frappe.db.sql('''SELECT opc.customer,
        opc.operational_id,
        opc.operational_matrix_status,
        opd.role,
        opd.user_name,
        opd.email_id,
        opd.contact
        from `tabOperation And Project Commercial` opc
        left join `tabOperation And Project Details` opd
        on opd.parent = opc.name
        {0}
        '''.format(get_conditions(filters)),as_list=1)
    return result
def get_conditions(filters):
    """Build the WHERE clause for the customer-wise OM report.

    Fix: the original indexed filters['customer'] and raised KeyError for
    any filter dict missing that key; a missing or empty customer now simply
    means "no filter" (empty string returned).
    """
    customer = (filters or {}).get('customer')
    if customer:
        # %% escapes to a literal %, yielding SQL LIKE '%<customer>%'.
        return "where opc.customer like '%%%s%%'" % customer
    return ""
def get_columns():
    """Report column definitions in frappe "Label:Type/Options:Width" form."""
    specs = [
        ("Customer", ":Link/Customer:150"),
        ("Operational ID", ":Link/Operational Matrix:110"),
        ("OM Status", ":Data:110"),
        ("Role", ":Data:110"),
        ("Employee ID", ":Link/Employee:100"),
        ("Email ID", ":Data:200"),
        ("Contact", ":Data:110"),
    ]
    return [_(label) + layout for label, layout in specs]
shrikant9867/mycfo | mycfo/config/hr.py | <gh_stars>0
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Module/desk sidebar configuration for the HR (Skill Mapping) section."""
    skill_mapping_items = [
        {
            "type": "doctype",
            "name": "Skill Mapping",
            "description": _("Skill Mapping Details"),
        },
        {
            "type": "report",
            "is_query_report": True,
            "name": "Resource Pool",
            "description": _("Resource Pool"),
            "doctype": "Skill Mapping",
        },
        {
            "type": "page",
            "name": "kpi-report",
            "icon": "icon-sitemap",
            "label": _("KPI Graphical Reports"),
            "description": _("Graphical Report for KPI"),
        },
    ]
    report_items = [
        {
            "type": "report",
            "name": "Skill Mapping Report",
            "is_query_report": True,
            "doctype": "Skill Mapping",
            "description": _("Skill mapping report comprises total, sum & average of skill"),
        },
        {
            "type": "report",
            "name": "Employee Skill Mapping",
            "is_query_report": True,
            "doctype": "Skill Mapping",
            "description": _("Employee skill maaping details"),
        },
    ]
    return [
        {
            "label": _("Skill Mapping"),
            "icon": "icon-star",
            "items": skill_mapping_items,
        },
        {
            "label": _("Skill Mapping Reports"),
            "icon": "icon-star",
            "items": report_items,
        },
    ]
|
shrikant9867/mycfo | mycfo/mycfo/doctype/project_commercial/project_commercial.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import cstr, flt, getdate, comma_and, cint,add_months
import datetime
class ProjectCommercial(Document):
    """Controller for Project Commercial: validates the project value against
    the 'table_17' amount-details child table (fixed / variable split) and
    generates monthly due-date/amount schedules from the UI."""

    def validate(self):
        if self.p_type == 'Fixed + Variable':
            # Split-value projects get the extra consistency checks.
            self.validate_fixed_variable_type()
            self.validate_total_of_both_type()
        self.validate_project_value()
        # if self.project_status == 'Terminate' or self.project_status == 'Close':
        # 	self.delink_projectid()
        self.validate_due_date_in_childtable()

    def validate_project_value(self):
        """p_value must equal the sum of child-row amounts (when rows exist)."""
        total = 0.00
        if self.get('table_17'):
            for d in self.get('table_17'):
                if d.amount:
                    total+= d.amount
            if flt(self.p_value) != total:
                frappe.msgprint("Project value must be equal to the total of all amount specified in the amount details child table.",raise_exception=1)

    def delink_projectid(self):
        """Deactivate every active commercial link referencing this project.
        Currently unused — the call in validate() is commented out.

        NOTE(review): names are string-interpolated into SQL; parameterize
        before re-enabling."""
        commercial_data = frappe.db.sql("""select name from `tabOperation And Project Commercial` where operational_matrix_status='Active'
            and project_commercial='%s'"""%self.name,as_list=1)
        if(len(commercial_data))>0:
            for commercial_name in commercial_data:
                frappe.db.sql("""update `tabOperation And Project Commercial` set operational_matrix_status='Deactive'
                    where name='%s'"""%commercial_name[0])
            frappe.db.commit()

    def validate_fixed_variable_type(self):
        """For Fixed + Variable projects, fix_val + var_val must equal
        p_value; on mismatch both fields are cleared and an error raised."""
        if self.p_value:
            if self.fix_val and self.var_val:
                if flt(self.p_value) != (flt(self.fix_val) + flt(self.var_val)):
                    self.fix_val=''
                    self.var_val=''
                    frappe.msgprint("For project type Fixed + Variable,Total of Fixed and Variable Value must be equal to the Project Value",raise_exception=1)
            else:
                # Nothing to cross-check yet; return value is unused by callers.
                return {"status":True}

    def validate_due_date_in_childtable(self):
        """Due dates in table_17 must be unique."""
        date_list = []
        if self.get('table_17'):
            for d in self.get('table_17'):
                if d.due_date:
                    if d.due_date not in date_list:
                        date_list.append(d.due_date)
                    else:
                        frappe.msgprint("Duplicate Due Date is not allowed in Amount Details child table",raise_exception=1)
                        break

    def validate_total_of_both_type(self):
        """For Milestone billing, child rows of type 'Fixed' must sum to
        fix_val and the remaining rows to var_val."""
        fixed_total = 0.0
        variable_total = 0.0
        if self.fixed_type == 'Milestone':
            if self.get('table_17'):
                for d in self.get('table_17'):
                    if d.f_type == 'Fixed':
                        fixed_total+=d.amount
                    else:
                        variable_total+=d.amount
                if flt(fixed_total) != flt(self.fix_val):
                    frappe.msgprint("Total sum of amount for fixed type in child table must be equal to the Fixed Value specified",raise_exception=1)
                if flt(variable_total) != flt(self.var_val):
                    frappe.msgprint("Total sum of amount for variable type in child table must be equal to the Variable Value specified",raise_exception=1)

    # Add child records................................
    def get_child_details(self,months=None):
        """Client-callable entry point: (re)build the monthly schedule from
        p_value spread over *months*."""
        self.get_dates(months)

    def get_dates(self,months):
        """Seed the schedule: clear table_17, derive the first due date from
        pick_date (day-of-month) within start_date's month/year, and compute
        the flat monthly amount."""
        self.set('table_17', [])
        date_list = []
        start_date = getdate(self.start_date)
        start_day = start_date.day   # unused, kept for parity with original
        start_month = start_date.month
        start_year = start_date.year
        due_amount = flt(flt(self.p_value)/cint(months))
        new_date = self.pick_date +'-'+ cstr(start_month) + '-' + cstr(start_year)
        final_date = getdate(datetime.datetime.strptime(cstr(new_date),'%d-%m-%Y'))
        date_list.append(final_date)
        self.check_project_value(date_list,months,due_amount,final_date)

    def check_project_value(self,date_list,months,due_amount,final_date):
        """Spread p_value over *months* due dates.

        pro_per '30'/'31' drops one month from the spread. When p_value
        divides evenly each row gets due_amount; otherwise the remainder is
        folded into the last row via an explicit amount list."""
        if self.pro_per == '30' or self.pro_per == '31':
            months-=1
        if flt(self.p_value)%cint(months) == 0:
            due_amount = due_amount
            if months == 1:
                self.create_child_record(due_amount,date_list)
            else:
                # Extend the date list one month at a time.
                for i in range(1,months):
                    date=add_months(final_date,1)
                    date_list.append(date)
                    final_date=date
                self.create_child_record(due_amount,date_list)
        else:
            # Uneven split: last installment absorbs the remainder.
            modulo_value = flt(self.p_value)%cint(months)
            monthly_amount = flt(flt(self.p_value - modulo_value)/cint(months))
            amount_list = []
            for i in range(0,months):
                if i == months-1:
                    amount_list.append(flt(monthly_amount + modulo_value))
                else:
                    amount_list.append(monthly_amount)
            if months == 1:
                self.create_child_record(due_amount,date_list)
            else:
                for i in range(1,months):
                    date=add_months(final_date,1)
                    date_list.append(date)
                    final_date=date
                self.create_child1_record(amount_list,date_list)

    def get_child_details_for_fixed_variable(self,months=None):
        """Client-callable entry point for Fixed + Variable projects: spread
        fix_val over *months* rows, then append one 'Variable' row with
        var_val (no due date)."""
        self.set('table_17', [])
        date_list = []
        start_date = getdate(self.start_date)
        start_month = start_date.month
        start_year = start_date.year
        due_amount = flt(flt(self.fix_val)/cint(months))
        new_date = self.fixed_pick_date +'-'+ cstr(start_month) + '-' + cstr(start_year)
        final_date = getdate(datetime.datetime.strptime(cstr(new_date),'%d-%m-%Y'))
        date_list.append(final_date)
        self.check_project_value_for_fix_varialble(date_list,final_date,months,due_amount)

    def check_project_value_for_fix_varialble(self,date_list,final_date,months,due_amount):
        """Same spreading logic as check_project_value but over fix_val, with
        a trailing 'Variable' row appended after each branch."""
        if self.pro_per == '30' or self.pro_per == '31':
            months-=1
        if flt(self.fix_val)%cint(months) == 0:
            due_amount = due_amount
            if months == 1:
                self.create_child_record(due_amount,date_list)
                if self.var_val:
                    ch = self.append('table_17', {})
                    ch.f_type='Variable'
                    ch.amount = self.var_val
            else:
                for i in range(1,months):
                    date=add_months(final_date,1)
                    date_list.append(date)
                    final_date=date
                self.create_child_record(due_amount,date_list)
                if self.var_val:
                    ch = self.append('table_17', {})
                    ch.f_type='Variable'
                    ch.amount = self.var_val
        else:
            # Uneven split: last fixed installment absorbs the remainder.
            modulo_value = flt(self.fix_val)%cint(months)
            monthly_amount = flt(flt(self.fix_val - modulo_value)/cint(months))
            amount_list = []
            for i in range(0,months):
                if i == months-1:
                    amount_list.append(flt(monthly_amount + modulo_value))
                else:
                    amount_list.append(monthly_amount)
            if months == 1:
                self.create_child_record(due_amount,date_list)
                if self.var_val:
                    ch = self.append('table_17', {})
                    ch.f_type='Variable'
                    ch.amount = self.var_val
            else:
                for i in range(1,months):
                    date=add_months(final_date,1)
                    date_list.append(date)
                    final_date=date
                self.create_child1_record(amount_list,date_list)
                if self.var_val:
                    ch = self.append('table_17', {})
                    ch.f_type='Variable'
                    ch.amount = self.var_val

    def create_child_record(self,due_amount,date_list):
        """Append one 'Fixed' row per due date, all with the same amount."""
        if(len(date_list)>0):
            for i in date_list:
                ch = self.append('table_17', {})
                ch.f_type='Fixed'
                ch.due_date = i
                ch.amount = due_amount

    def create_child1_record(self,amount_list,date_list):
        """Append one 'Fixed' row per due date with its matching per-month
        amount (used for uneven splits)."""
        if(len(date_list)>0):
            for i, date in enumerate(date_list):
                ch = self.append('table_17', {})
                ch.f_type='Fixed'
                ch.due_date = date
                ch.amount = amount_list[i]

    def clear_child_table(self):
        """Client-callable: wipe the generated schedule."""
        self.set('table_17', [])
#if ("System Manager" not in frappe.get_roles(user)) and (user!="Administrator"):def get_permission_query_conditions(user):
def get_permission_query_conditions_for_customer(user):
    """Permission hook: restrict the Customer list for "Mycfo User"s (who are
    neither Administrator nor Central Delivery) to customers they appear
    under in an Operation And Project Details row. Returns None (no
    restriction) for everyone else."""
    if not user: user = frappe.session.user
    roles = frappe.get_roles(user)
    if "Mycfo User" in roles and not user == "Administrator" and "Central Delivery" not in roles:
        customer_list = frappe.db.sql("""SELECT DISTINCT(customer)
            from `tabOperation And Project Commercial`
            WHERE name in (SELECT parent from `tabOperation And Project Details` WHERE email_id ='{0}')""".format(user),as_list=1)
        name_list = "', '".join([customer[0] for customer in customer_list])
        return """(`tabCustomer`.name in ('{name_list}'))""".format(name_list=name_list)
def get_permission_query_conditions_for_project(user):
    """Permission hook: restrict Project Commercial records for "Mycfo User"s
    (neither Administrator nor Central Delivery) to the customers they are
    linked with. Returns None (no restriction) for everyone else."""
    if not user: user = frappe.session.user
    roles = frappe.get_roles(user)
    if "Mycfo User" in roles and not user == "Administrator" and "Central Delivery" not in roles:
        customer_list = frappe.db.sql("""SELECT DISTINCT(customer)
            from `tabOperation And Project Commercial`
            WHERE name in (SELECT parent from `tabOperation And Project Details` WHERE email_id ='{0}')""".format(user),as_list=1)
        name_list = "', '".join([customer[0] for customer in customer_list])
        return """(`tabProject Commercial`.customer in ('{name_list}'))""".format(name_list=name_list)
def get_permission_query_conditions_for_om(user):
    """Permission hook: restrict Operational Matrix records for "Mycfo User"s
    (neither Administrator nor Central Delivery) to the matrices they are
    linked with. Returns None (no restriction) for everyone else."""
    if not user: user = frappe.session.user
    roles = frappe.get_roles(user)
    if "Mycfo User" in roles and not user == "Administrator" and "Central Delivery" not in roles:
        customer_list = frappe.db.sql("""SELECT DISTINCT(operational_id)
            from `tabOperation And Project Commercial`
            WHERE name in (SELECT parent from `tabOperation And Project Details` WHERE email_id ='{0}')""".format(user),as_list=1)
        name_list = "', '".join([customer[0] for customer in customer_list])
        return """(`tabOperational Matrix`.name in ('{name_list}'))""".format(name_list=name_list)
def get_permission_query_conditions_for_kpi(user):
    """Permission hook for KPI records.

    - "Mycfo User" (not admin/CD): KPIs of customers they are actively
      linked with through Operation And Project Details.
    - "Customer" role: only Reviewed KPIs of the customers granted via
      User Permission defaults.
    - Everyone else: None (no restriction).
    """
    if not user: user = frappe.session.user
    roles = frappe.get_roles(user)
    if "Mycfo User" in roles and not user == "Administrator" and "Central Delivery" not in roles:
        customer_list = frappe.db.sql("""SELECT DISTINCT(customer)
            from `tabOperation And Project Commercial`
            WHERE name in (SELECT parent from `tabOperation And Project Details` WHERE email_id ='{0}') and operational_matrix_status = 'Active' """.format(user),as_list=1)
        name_list = "', '".join([customer[0] for customer in customer_list])
        return """(`tabKPI`.customer in ('{name_list}'))""".format(name_list=name_list)
    elif "Customer" in frappe.get_roles(user) and not user == "Administrator":
        customer_list = frappe.db.sql(""" select defvalue from `tabDefaultValue`
            where parenttype='User Permission' and defkey = 'Customer'
            and parent = '{0}' """.format(user), as_list=1)
        customer_list = "', '".join([customer[0] for customer in customer_list])
        return """(`tabKPI`.customer in ('{name_list}') and `tabKPI`.kpi_status = 'Reviewed')""".format(name_list=customer_list)
|
shrikant9867/mycfo | mycfo/checklist/report/task_wise_analysis/task_wise_analysis.py | # Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
def execute(filters=None):
    """Query-report entry point: return (columns, data) for the Task Wise
    Analysis report."""
    return get_colums(), get_data(filters)
def get_data(filters):
    """Fetch Checklist Task rows for the report, newest planned start first.

    Returns [] when no filters are given (original behaviour: no query at
    all). Fix: dropped the leftover debug=1 flag, which dumped the rendered
    SQL into the response on every report run.
    """
    if not filters:
        return []
    return frappe.db.sql("""select title,checklist_name,expected_start_date,expected_end_date,end_date,count,tat from `tabChecklist Task`
        {0} order by expected_start_date desc""".format(get_conditions(filters)), as_list=1)
def get_conditions(filters):
    """Build the WHERE clause of the task-wise report from *filters*.

    Only filters the user actually set are included, always in the order
    project, status, user (matching the original branch ladder).  Values are
    escaped by doubling single quotes so a quote inside a filter value cannot
    break out of the SQL string literal (the original interpolated raw values).
    """
    # (filter key in the report UI, column name in `tabChecklist Task`)
    field_map = (
        ("checklist_requisition", "project"),
        ("status", "status"),
        ("user", "user"),
    )
    clauses = []
    for filter_key, column in field_map:
        value = filters.get(filter_key)
        if value:
            clauses.append("{0} = '{1}'".format(column, value.replace("'", "''")))
    return ("where " + " and ".join(clauses)) if clauses else ""
def get_colums():
    """Column definitions for the task-wise analysis report."""
    specs = [
        ("Task Name", "Data", 140),
        ("Process/Checklist Name", "Data", 180),
        ("Planned Start Date", "Date", 140),
        ("Planned End Date", "Date", 140),
        ("Actual End Date", "Date", 140),
        ("Actual Time(In Hours)", "Int", 170),
        ("TAT(In Hours)", "Int", 110),
    ]
    return [_(label) + ":{0}:{1}".format(fieldtype, width) for label, fieldtype, width in specs]
|
shrikant9867/mycfo | mycfo/mycfo/doctype/financial_data/test_financial_data.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Financial Data')
class TestFinancialData(unittest.TestCase):
    """Placeholder test case for the Financial Data doctype; no tests yet."""
    pass
|
shrikant9867/mycfo | mycfo/trainings/doctype/answer_sheet/answer_sheet.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AnswerSheet(Document):
    """Answer sheet of a training assessment.

    Scores are recomputed on every save while the sheet is Open; submitting
    closes the sheet and expires the linked training subscription request.
    """

    def before_save(self):
        # Only recompute while the sheet is still being evaluated.
        if self.answer_sheet_status == "Open":
            self.calculate_score()

    def before_submit(self):
        self.validate_for_answer_sheet()
        self.answer_sheet_status = "Closed"
        self.update_training_subscription_form()

    def validate_for_answer_sheet(self):
        if self.answer_sheet_status != "Open":
            frappe.throw("Answer Sheet with Open status are only allowed to submit.")

    def calculate_score(self):
        """Validate per-question marks, then compute total and percentage."""
        for row in self.table_5:
            # Guard against unset marks: comparing None with a number is
            # False on Python 2 but a TypeError on Python 3.
            if row.marks_obtained and row.marks_obtained > row.total_marks:
                frappe.throw("Marks obtained for question {0} must be less than total marks.".format(row.idx))
            if row.question_type == 'Objective':
                # Objective questions are auto-scored: full marks or nothing.
                row.marks_obtained = row.total_marks if row.user_answer == row.objective_answer else 0
        self.marks_obtained = sum([row.marks_obtained for row in self.table_5 if row.marks_obtained])
        # Avoid ZeroDivisionError when the sheet has no total marks set.
        if self.total_marks:
            self.percentage_score = round(float(self.marks_obtained) / float(self.total_marks) * 100, 2)
        else:
            self.percentage_score = 0

    def update_training_subscription_form(self):
        # The subscription request is consumed once the sheet is submitted.
        frappe.db.sql("update `tabTraining Subscription Approval` set request_status = 'Expired' where name = %s", (self.training_subscription))
def get_permission_query_conditions(user):
    """Restrict Answer Sheet list views to sheets the user must evaluate.

    Central Delivery users and Administrator see everything; everyone else
    only sees sheets where they are the assessment evaluator.  The original
    ignored the *user* argument and always used the session user; this now
    mirrors get_permission_query_conditions_for_kpi.
    """
    if not user:
        user = frappe.session.user
    roles = frappe.get_roles(user)
    if "Central Delivery" not in roles and user != "Administrator":
        return """(`tabAnswer Sheet`.assessment_evaluator = "{user}" )""".format(user=user)
shrikant9867/mycfo | mycfo/trainings/doctype/training_questionnaire/training_questionnaire.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TrainingQuestionnaire(Document):
    """Questionnaire master for trainings; validates question rows on save."""

    def validate(self):
        self.validate_for_questions()
        self.validate_for_objective_questions()

    def validate_for_questions(self):
        # At least one question row is required.
        if not self.questionnaire:
            frappe.throw("Questions are mandatory to save.")

    def validate_for_objective_questions(self):
        """Ensure objective rows have options A/B and no row repeats an option."""
        option_fields = ("option_a", "option_b", "option_c", "option_d", "option_e")
        for row in self.questionnaire:
            filled_options = []
            for field in option_fields:
                value = row.get(field)
                if value:
                    filled_options.append(value.strip())
            if row.question_type == "Objective" and (not row.option_a or not row.option_b):
                frappe.throw("Option A and option B are mandatory for objective questions for row no {0}".format(row.idx))
            # Duplicate check applies to every row (subjective rows simply
            # have no filled options and pass trivially).
            if len(set(filled_options)) != len(filled_options):
                frappe.throw("Duplicate answer not allowed for row no {0}".format(row.idx))
|
shrikant9867/mycfo | mycfo/config/trainings.py | from __future__ import unicode_literals
from frappe import _
def get_data():
    """Module page layout for the Trainings section (labels, icons, links)."""
    def doctype_item(name, description):
        # Shorthand for the common doctype link entry.
        return {"type": "doctype", "name": name, "description": _(description)}

    return [
        {
            "label": _("Documents"),
            "icon": "icon-star",
            "items": [
                doctype_item("Training Approver", "Approve Trainings from here."),
                doctype_item("Answer Sheet", "Evaluate answer sheet."),
                doctype_item("Training Feedback", "Check Training Feedback of users."),
                doctype_item("Training Subscription Approval", "Approve training subscription requests."),
            ],
        },
        {
            "label": _("Masters"),
            "icon": "icon-star",
            "items": [
                doctype_item("Training", "Upload Training from here."),
                doctype_item("Assessment", "Make Assessment for trainings."),
                doctype_item("Training Questionnaire", "Set Training Feedback Questionnaire."),
            ],
        },
        {
            "label": _("Training DB"),
            "icon": "icon-star",
            "items": [
                {
                    "type": "page",
                    "name": "training-dashboard",
                    "icon": "icon-sitemap",
                    "label": _("Training Search Dashboard"),
                    "route": "training-dashboard",
                    "description": _("Search or Assign trainings from here"),
                }
            ],
        },
        {
            "label": _("Standard Reports"),
            "icon": "icon-star",
            "items": [
                {
                    "type": "page",
                    "name": "training-report",
                    "icon": "icon-sitemap",
                    "label": _("Graphical Reports"),
                    "description": _("Graphical Report for Trainings"),
                }
            ],
        },
    ]
shrikant9867/mycfo | mycfo/kpi/doctype/kpi/kpi.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class KPI(Document):
    """Customer KPI document.

    Validates that the four weightages total 100, mails the customer and its
    ELs on creation, restricts submission to ELs (or Central Delivery), and
    spawns a Customer Satisfaction Survey on submit.
    """

    def validate(self):
        if not self.get("__islocal"):
            # Treat unset weightages as 0 so a missing field cannot crash sum().
            total_weightage = sum([
                self.business_total_weightage or 0,
                self.finance_total_weightage or 0,
                self.people_total_weightage or 0,
                self.process_total_weightage or 0,
            ])
            if total_weightage != 100:
                frappe.throw("Total of Business weightage, Finance weightage, People weightage & Process weightage must be equal to 100. Currently, Total weightage equals to {0}.".format(total_weightage))

    def after_insert(self):
        email = frappe.db.get_value("Customer", self.customer, "email")
        if not email:
            frappe.msgprint("Please Set the Email ID for Customer {0}".format(self.customer))
        else:
            title = self.title_prefix + ' - ' + self.customer
            send_kpi_notification(self, email, title)
            send_kpi_notification_el(self, title)

    def before_submit(self):
        """Allow submission only by an EL of this customer or Central Delivery."""
        roles = frappe.get_roles()
        if "Central Delivery" in roles:
            return
        employee = frappe.db.get_value("Employee", {"user_id": frappe.session.user}, "name")
        # Parameterized query: employee/customer must not be interpolated
        # into the SQL string (injection risk in the original).
        response = frappe.db.sql(""" select distinct(opd.user_name), emp.employee_name
            from `tabOperation And Project Details` opd
            join `tabOperation And Project Commercial` opc
            on opd.parent = opc.name
            join `tabEmployee` emp
            on emp.name = opd.user_name
            where opd.role in ("EL")
            and opd.user_name = %s
            and opc.customer = %s """, (employee, self.customer), as_list=1)
        if not response:
            frappe.throw("Only EL Can submit KPI")

    def on_submit(self):
        # Every submitted KPI opens a satisfaction survey for the same period.
        css_doc = frappe.new_doc("Customer Satisfaction Survey")
        css_doc.customer = self.customer
        css_doc.start_date = self.start_date
        css_doc.end_date = self.end_date
        css_doc.kpi = self.name
        css_doc.save(ignore_permissions=True)
        frappe.msgprint("Customer Satisfaction Survey " + css_doc.name + " is created.")
@frappe.whitelist()
def get_kpi_resouce_assigned_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: employees assigned to the filter customer as EL/EM/TM.

    Standard frappe search signature; matches *txt* against the employee id
    and display name.  NOTE(review): the "user" key passed below is never
    referenced by the query -- presumably leftover; confirm before removing.
    """
    return frappe.db.sql(""" select distinct(opd.user_name), emp.employee_name
        from `tabOperation And Project Details` opd
        join `tabOperation And Project Commercial` opc
        on opd.parent = opc.name
        join `tabEmployee` emp
        on emp.name = opd.user_name
        where opd.role in ("EL","EM","TM")
        and opc.customer = %(customer)s
        and (emp.name like %(txt)s
        or emp.employee_name like %(txt)s)
        limit 20
        """, {
        'txt': "%%%s%%" % txt,
        'customer':filters.get("customer"),
        "user":frappe.session.user}, as_list=1)
@frappe.whitelist()
def get_el_list(customer):
    """Return the number of matching customers (0 or 1) for which the session
    user is an EL -- used client side as a boolean check.

    NOTE(review): `tabEmployee` is joined without any condition (cartesian
    product); DISTINCT keeps the result correct but the join looks vestigial.
    """
    # Parameterized query: avoids SQL injection via the customer argument
    # (the original interpolated it with .format()).
    customer_list = frappe.db.sql("""SELECT DISTINCT(customer)
        from `tabOperation And Project Commercial`,`tabOperation And Project Details`,`tabEmployee`
        WHERE `tabOperation And Project Commercial`.name in
        (SELECT parent from `tabOperation And Project Details` WHERE user_id = %s and role ="EL")
        and customer = %s""", (frappe.session.user, customer), as_list=1)
    return len(customer_list)
def send_kpi_notification(doc, email, title):
    """Mail the customer's contact (*email*) that a new KPI *doc* was added."""
    template = "/templates/ip_library_templates/kpi_mail_notification_EL.html"
    subject = "New KPI Added"
    # Context rendered into the notification template.
    args = {"user_name":frappe.session.user, "file_name":doc.get("file_name"),"customer":doc.get("customer"),\
        "title":title,"email":doc.get("email")}
    frappe.sendmail(recipients=email, sender=None, subject=subject,
        message=frappe.get_template(template).render(args))
def send_kpi_notification_el(doc, title):
    """Notify every EL of the KPI's customer that a new KPI was added."""
    template = "/templates/ip_library_templates/kpi_mail_notification_EL.html"
    subject = "New KPI Added"
    rows = frappe.db.sql("""select distinct usr.email from `tabOperation Details` od join
        `tabOperation And Project Commercial` topc
        on od.parent=topc.operational_id
        join `tabEmployee` emp
        on emp.name = od.user_name
        join `tabUser` usr
        on usr.email = emp.user_id
        where od.role='EL' and topc.customer=%s""", doc.get("customer"), as_dict=1)
    recipients = [row.get("email") for row in rows]
    if not recipients:
        # Nothing to send when the customer has no ELs.
        return
    context = {
        "user_name": frappe.session.user,
        "file_name": doc.get("file_name"),
        "customer": doc.get("customer"),
        "title": title,
        "email": doc.get("email"),
    }
    frappe.sendmail(recipients=recipients, sender=None, subject=subject,
        message=frappe.get_template(template).render(context))
@frappe.whitelist()
def get_el(customer):
    """Return the EL employee names for *customer*, one per line.

    The result keeps the original trailing newline after every name so
    existing client code that splits on "\\n" behaves unchanged.
    """
    # Parameterized query: avoids SQL injection via the customer argument
    # (the original interpolated it with .format()).
    list_of_el = frappe.db.sql("""select distinct(opd.user_name), emp.employee_name
        from `tabOperation And Project Details` opd
        join `tabOperation And Project Commercial` opc
        on opd.parent = opc.name join `tabEmployee` emp
        on emp.name = opd.user_name
        where opd.role in ("EL")
        and opc.customer = %s
        and opc.operational_matrix_status = "Active" """, (customer,), as_list=1)
    # join() instead of repeated += string concatenation.
    return "".join(row[1] + "\n" for row in list_of_el)
|
shrikant9867/mycfo | mycfo/mycfo/doctype/customer_skill_mapping/customer_skill_mapping.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from mycfo.kpi.doctype.skill_mapping.skill_mapping import get_sample_data
class CustomerSkillMapping(Document):
    """Per-customer copy of the skill mapping matrix."""

    def update_skill_mapping_details(self, args):
        """Replace the child table with rows from the client-side grid data."""
        self.set('skill_mapping_details', [])
        for entry in args.get('data'):
            # Skip padding rows that carry no sub-skill.
            if entry.get('industry') is None:
                continue
            row = self.append('skill_mapping_details', {})
            row.skill = entry.get('master_industry')
            row.sub_skill = entry.get('industry')
            row.beginner = entry.get('beginner')
            row.imtermediatory = entry.get('imtermediatory')
            row.expert = entry.get('expert')
            row.none_field = entry.get('none_field')
        self.save()

    def before_insert(self):
        # Seed the child table from the master skill list on first save.
        if not len(self.skill_mapping_details):
            for data in get_sample_data().get("get_sample_data"):
                row = self.append('skill_mapping_details', {})
                row.skill = data[0]
                row.sub_skill = data[1]
@frappe.whitelist()
def get_sample_data_from_table(doc_name):
    """Return the saved skill-mapping rows of *doc_name*, ordered by skill."""
    # Parameterized query: avoids SQL injection via doc_name (the original
    # interpolated it with the % operator).
    rows = frappe.db.sql("""select skill,sub_skill,none_field,beginner,imtermediatory,expert
        from `tabSkill Mapping Details` where parent=%s
        order by skill asc, sub_skill asc""", (doc_name,), as_list=1)
    return {"get_sample_data": rows}
@frappe.whitelist()
def get_customer_skill_mapping(customer, group, segment):
    """Fetch the customer's skill mapping, creating an unsaved one if absent."""
    exists = frappe.db.get_value("Customer Skill Mapping", customer, "name")
    if exists:
        doc = frappe.get_doc("Customer Skill Mapping", customer)
    else:
        # New, not-yet-saved document pre-filled from the arguments.
        doc = frappe.new_doc("Customer Skill Mapping")
        doc.customer = customer
        doc.customer_group = group
        doc.customer_segment = segment
    return doc.as_dict()
shrikant9867/mycfo | mycfo/discussion_forum/report/discussion_forum_report/discussion_forum_report.py | <reponame>shrikant9867/mycfo
# Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, data) for the chosen report type."""
    return get_columns(filters), get_data(filters)
def get_columns(filters):
    """Column definitions; the layout depends on the selected report type."""
    if filters.get("report_type") == "Discussion Topic Report":
        return [
            _("Topic Posted By (User)") + ":Link/User:200",
            _("Topic Name") + ":Data:250",
            _("Discussion Category") + ":Link/Discussion Category:180",
            _("Posted Datetime") + ":Datetime:200"
        ]
    else:
        return [
            # Fixed user-facing typo: "Commentar" -> "Commenter".
            _("Topic Commenter (User)") + ":Link/User:200",
            _("Topic Name") + ":Data:250",
            _("Comment Count") + ":Int:150"
        ]
def get_data(filters):
    """Report rows for the chosen report type within the filter time window."""
    if filters.get("report_type") == "Discussion Topic Report":
        # One row per topic created in the window.
        return frappe.db.sql(""" select owner,title,blog_category, creation
            from `tabDiscussion Topic`
            where creation between %(start_time)s and %(end_time)s """,
            {"start_time":filters.get("start_time"), "end_time":filters.get("end_time")}, as_list=1)
    else:
        # One row per (commenter, topic) pair with the comment count.
        return frappe.db.sql(""" select com.comment_by, topic.title , count(com.name) from
            `tabComment` com
            join `tabDiscussion Topic` topic
            on com.comment_docname = topic.name
            where com.comment_doctype = "Discussion Topic" and com.comment_type = "Comment"
            and com.creation between %(start_time)s and %(end_time)s
            group by com.comment_by, com.comment_docname """,
            {"start_time":filters.get("start_time"), "end_time":filters.get("end_time")}, as_list=1)
|
shrikant9867/mycfo | mycfo/mycfo/doctype/category/category.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.naming import make_autoname
from frappe.model.document import Document
class Category(Document):
    """Two-level category master: parent categories (is_child == 0, named via
    c_name) and child categories (is_child == 1, named via name1)."""

    def validate(self):
        if self.is_child==0:
            self.validate_parent_category_name()
        if self.is_child==1:
            self.validate_child_category_name()
            # Parent link is only meaningful for child records -- see the
            # message in validate_category_name.  NOTE(review): original
            # indentation was lost; confirm this call belongs inside the
            # is_child == 1 branch.
            self.validate_category_name()

    def on_update(self):
        pass

    def validate_parent_category_name(self):
        # c_name holds the name of a top-level (parent) category.
        if not self.c_name:
            frappe.msgprint("Category name is mandatory",raise_exception=1)

    def validate_child_category_name(self):
        # name1 holds the name of a child category.
        if not self.name1:
            frappe.msgprint("Category name is mandatory",raise_exception=1)

    def validate_category_name(self):
        if not self.parent_category:
            frappe.msgprint("If you are creating a child record in 'Category Master' then Parent Category is mandatory",raise_exception=1)

    def autoname(self):
        # Document name mirrors whichever name field applies to the level.
        if self.is_child==0:
            self.name = self.c_name
        else:
            self.name = self.name1
|
shrikant9867/mycfo | mycfo/checklist/doctype/checklist_time_log/checklist_time_log.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from datetime import datetime
import itertools
import json
from frappe.utils import cstr, flt, get_datetime, get_time, getdate
from datetime import datetime, timedelta
class NegativeHoursError(frappe.ValidationError): pass  # raised when to_time is not after from_time
from frappe.model.document import Document
class ChecklistTimeLog(Document):
    """Time log booked against a checklist task.

    NOTE(review): on_submit/on_cancel call update_task_and_project(), but
    that method is currently commented out below -- submitting or cancelling
    a time log will raise AttributeError.  Confirm whether the roll-up logic
    should be restored or the hooks removed.
    """

    def validate(self):
        self.set_status()
        self.validate_timings()
        self.validate_time_logs()

    def on_submit(self):
        self.update_task_and_project()

    def on_cancel(self):
        self.update_task_and_project()

    # def update_task_and_project(self):
    #   if self.task:
    #       task = frappe.get_doc("Checklist Task", self.task)
    #       task.update_time()
    #       task.save()
    #       if self.requisition_id:
    #           requisition = frappe.get_doc("Checklist Requisition",self.requisition_id)
    #           requisition.update_checklist_requisition()
    #           requisition.save()
    #   elif self.project:
    #       frappe.get_doc("Checklist Requisition", self.project).update_project()

    def before_update_after_submit(self):
        self.set_status()

    def before_cancel(self):
        self.set_status()

    def set_status(self):
        # Map docstatus (0/1/2) to a human-readable status string.
        self.status = {
            0: "Draft",
            1: "Submitted",
            2: "Cancelled"
        }[self.docstatus or 0]

    def validate_timings(self):
        if self.to_time and self.from_time and get_datetime(self.to_time) <= get_datetime(self.from_time):
            frappe.throw(_("To Time must be greater than From Time"), NegativeHoursError)

    def validate_time_logs(self):
        """Reject logs that start on/before the task's first recorded log."""
        # Parameterized query: the task name must not be interpolated into
        # the SQL string (injection risk in the original).
        existing_logs = frappe.db.sql("""select from_time from `tabChecklist Time Log`
            where task = %s""", (self.task,), as_list=1)
        if len(existing_logs) > 1 and get_datetime(self.from_time) <= get_datetime(existing_logs[0][0]):
            frappe.throw(_("From time For This Task Will Be Greater Than {0}".format(existing_logs[0][0])))
@frappe.whitelist()
def valid_dates(doc):
    """Check whether the log's from/to dates fall on a Mycfo holiday.

    Returns a message string when one of them does, otherwise None.
    """
    payload = json.loads(doc)
    rows = frappe.db.sql("""select holiday_date from `tabHoliday` h1,`tabHoliday List` h2
        where h1.parent = h2.name and h2.name = 'Mycfo' order by holiday_date asc""",as_list=1)
    # Flatten the single-column result into a plain list of dates.
    holidays = [row[0] for row in rows]
    if payload.get('from_time'):
        from_day = datetime.strptime(payload.get('from_time'), '%Y-%m-%d %H:%M:%S').date()
        if from_day in holidays:
            return "From Time In Holiday List"
    if payload.get('to_time'):
        to_day = datetime.strptime(payload.get('to_time'), '%Y-%m-%d %H:%M:%S').date()
        if to_day in holidays:
            return "To Time In Holiday List"
@frappe.whitelist()
def valid_hours(doc):
    """Return how many Mycfo holidays fall between the log's from/to dates
    (inclusive); None when either timestamp is missing."""
    current_doc = json.loads(doc)
    if current_doc.get('from_time') and current_doc.get('to_time'):
        from_date = datetime.strptime(current_doc.get('from_time'), '%Y-%m-%d %H:%M:%S')
        to_date = datetime.strptime(current_doc.get('to_time'), '%Y-%m-%d %H:%M:%S')
        holiday_count = frappe.db.sql("""select count(*) from `tabHoliday List` h1,`tabHoliday` h2
            where h2.parent = h1.name and h1.name = 'Mycfo' and h2.holiday_date >= %s and h2.holiday_date <= %s""",(from_date.date(),to_date.date()),as_list=1)
        return holiday_count[0][0]
|
shrikant9867/mycfo | mycfo/checklist/doctype/checklist_requisition/checklist_requisition.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import itertools
from frappe import _
import json
from frappe.model.naming import make_autoname
from frappe.utils import nowdate, getdate
from frappe.model.document import Document
from frappe.utils import now_datetime
from datetime import datetime, timedelta
class ChecklistRequisition(Document):
    """One run of a Checklist.

    A transient child table (cr_task) mirrors the individual Checklist Task
    documents: on validate the rows are synced out to Checklist Task docs and
    ToDos, then the table is cleared; on load it is rebuilt from the tasks.
    """

    def autoname(self):
        # Name pattern "<CHECKLIST NAME> - #####", assigned once the run opens.
        if self.checklist_status == "Open":
            self.name = make_autoname(self.checklist_name.upper()+ ' - ' +'.#####')

    def on_submit(self):
        if(self.checklist_status != "Closed"):
            frappe.throw(_("Checklist Requisition Status is Not Closed So Cannot Submit"))

    def onload(self):
        """Load project tasks for quick view"""
        if not self.get("cr_task"):
            for task in self.get_tasks():
                self.append("cr_task", {
                    "task_name": task.title,
                    "status": task.status,
                    "start_date": task.expected_start_date,
                    "end_date": task.expected_end_date,
                    "task_id": task.name,
                    "des": task.des,
                    "assignee": task.assignee,
                    "user":task.user,
                    "actual_end_date":task.end_date,
                    "count":task.count,
                    "depends_on_task":task.depends_on_task,
                    "tat":task.tat
                    # "actual_time":task.actual_time
                })

    # def on_update(self):
    #   self.get_closed_task()

    def __setup__(self):
        # Ensure the quick-view rows exist even outside a form load.
        self.onload()

    def get_tasks(self):
        """Top-level tasks of this run (rows with an empty checklist_task)."""
        # print frappe.db.sql("""select * from `tabChecklist Task` where project= %s and checklist_task <> "NULL" """,(self.name),as_dict=1)
        return frappe.get_all("Checklist Task", "*", {"project": self.name,"checklist_task":''}, order_by="expected_start_date asc")

    def validate(self):
        # Sync the snapshot out, then clear it so the stored doc stays lean.
        self.sync_tasks()
        self.make_todo()
        self.cr_task = []

    def sync_tasks(self):
        """Create/update a Checklist Task document for every cr_task row."""
        if self.flags.dont_sync_tasks: return
        task_names = []
        for t in self.cr_task:
            if t.task_id:
                task = frappe.get_doc("Checklist Task", t.task_id)
            else:
                task = frappe.new_doc("Checklist Task")
                task.project = self.name
            task.update({
                # ct:cr  (Checklist Task field <- cr_task row field)
                "title": t.task_name,
                "status": t.status,
                "expected_start_date": t.start_date,
                "expected_end_date": t.end_date,
                "des": t.des,
                "assignee":t.assignee,
                "user":t.assignee,
                "to_be_processed_for":self.to_be_processed_for,
                "process_description":self.process_description,
                "checklist_name":self.checklist_name,
                "depends_on_task":t.depends_on_task,
                "tat":t.tat
            })
            task.flags.ignore_links = True
            task.flags.from_project = True
            task.save(ignore_permissions = True)
            t.task_id = task.name

    # def update_checklist_requisition(self):
    #   print "update_checklist_requisition"
    #   if self.cr_task:
    #       for task in self.cr_task:
    #           tl = frappe.db.sql("""select name,end_date from `tabChecklist Task` where name = '{0}'""".format(task.task_id),as_list=1)
    #           print tl[0][0],tl[0][1]
    #           update = frappe.db.sql("""update `tabRequisition Task` set actual_end_date = '{0}' where task_id ='{1}' and parent = '{2}' """.format(tl[0][1],tl[0][0],self.name))
    #           print update
    #           return update

    def make_todo(self):
        """Ensure each synced task has a ToDo assigned to its user."""
        if self.flags.dont_make_todo: return
        todo_names = []
        for task in self.cr_task:
            if task.task_id:
                task_id = frappe.get_doc("Checklist Task", task.task_id)
                todo = frappe.db.get_value("ToDo",{'reference_name':task.task_id,'reference_type':task_id.doctype},'name')
                # todo = frappe.db.sql("""select name as name from `tabToDo` where reference_type = '{0}' and reference_name = '{1}'""".format(task_id.doctype,task.task_id),as_dict=1)
                if todo:
                    # Re-point the existing ToDo at the task.
                    todo1 = frappe.get_doc("ToDo",todo)
                    todo1.update({
                        # "role": task.assignee,
                        "reference_type": "Checklist Task",
                        "reference_name": task.task_id
                    })
                else:
                    self.init_make_todo(task)

    def init_make_todo(self,task):
        """Create a new ToDo for *task*, owned by the task's user."""
        todo = frappe.new_doc("ToDo")
        todo.description = self.name
        todo.update({
            # ct:cr
            # "role": task.assignee,
            "reference_type": "Checklist Task",
            "reference_name": task.task_id,
            "owner": task.user,
            "date":task.end_date
        })
        todo.flags.ignore_links = True
        todo.flags.from_project = True
        todo.save(ignore_permissions = True)

    def get_tasks_detail(self):
        """Template rows from the Checklist master: each task starts today and
        is due after its TAT (holiday-adjusted)."""
        checklist_doc = frappe.get_doc("Checklist",self.checklist_name)
        checklist_list = []
        for task in checklist_doc.get("task"):
            checklist_list.append({'task_name':task.task_name,
                'start_date':datetime.now().strftime("%Y-%m-%d"),
                'end_date':self.get_end_date(task.tat),
                'des':task.des,
                'assignee':task.assignee,
                'tat':task.tat,
                'depends_on_task':task.depends_on_task})
        return checklist_list

    def get_end_date(self,tat):
        """Due date = now + *tat* hours, extended by 24h per holiday in between."""
        due_date = datetime.now() + timedelta(hours=tat)
        holiday = self.get_holiday(due_date)
        tat_with_holiday = holiday*24 + tat
        due_date_with_holiday = datetime.now() + timedelta(hours=tat_with_holiday)
        return due_date_with_holiday.strftime("%Y-%m-%d")

    def get_holiday(self,due_date):
        # Count Mycfo holidays between today and the due date (inclusive).
        tot_hol = frappe.db.sql("""select count(*) from `tabHoliday List` h1,`tabHoliday` h2
            where h2.parent = h1.name and h1.name = 'Mycfo' and h2.holiday_date >= %s and h2.holiday_date <= %s""",(nowdate(),due_date.strftime("%Y-%m-%d")))
        return tot_hol[0][0]
@frappe.whitelist()
def reopen_task(task_id):
    """Set the given Checklist Task back to Open; returns "reopened" on success.

    On failure the error text is shown to the user and None is returned.
    """
    try:
        checklist_task = frappe.get_doc("Checklist Task", task_id)
        checklist_task.status = "Open"
        checklist_task.save()
        frappe.msgprint("Checklist Task Reopened")  # typo "Reopend" fixed
        return "reopened"
    except Exception as e:
        # `except Exception, e` is Python-2-only syntax; `as` works on 2.6+/3.
        # msgprint expects a string, not an exception object.
        frappe.msgprint(str(e))
# @frappe.whitelist()
# def filter_user(doctype, txt, searchfield, start, page_len, filters):
# """
# filter users according to Role
# """
# user_list = frappe.db.sql("""select t1.email from `tabUser` t1,`tabUserRole` t2
# where t2.parent = t1.name and t2.role = '{0}'""".format(filters['assignee']),as_list =1)
# return user_list
@frappe.whitelist()
def list_view(name):
    """Return a "closed / total Closed" summary for a requisition's tasks."""
    requisition = frappe.get_doc("Checklist Requisition", name)
    total = len(requisition.cr_task)
    # sum() over a generator works on both Python 2 and 3; the original
    # len(filter(...)) breaks on Python 3 where filter() returns an iterator.
    closed = sum(1 for task in requisition.cr_task if task.status == "Closed")
    return "{1} / {0} Closed".format(total, closed)
@frappe.whitelist()
def show_subtasks(task_id):
    """Return the sub-tasks (rows whose checklist_task points at *task_id*)."""
    subtasks = frappe.db.get_values("Checklist Task",{"checklist_task":task_id},["title","expected_start_date","expected_end_date","status","assignee","des"],as_dict=True)
    return subtasks
shrikant9867/mycfo | mycfo/mycfo/doctype/el_sign_off_details/el_sign_off_details.py | <reponame>shrikant9867/mycfo<filename>mycfo/mycfo/doctype/el_sign_off_details/el_sign_off_details.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt, cstr, today, now_datetime, now
import datetime
class ELSignOffDetails(Document):
    """Record of a quarterly EL sign-off for a customer; no custom logic."""
    pass
@frappe.whitelist()
def get_info(customer, user):
    """Create an EL Sign Off Details entry for *customer* signed by *user*.

    Returns a short status message for the client.
    """
    if not customer:
        return "Entry Not Created"
    details = frappe.db.get_values("User", {"name": user}, ['first_name', 'last_name'], as_dict=True)
    entry = frappe.new_doc("EL Sign Off Details")
    entry.customer = customer
    # Display name: "First Last" when both parts exist, else just the first name.
    if details and details[0]['last_name']:
        entry.el_user = details[0]['first_name'] + " " + details[0]['last_name']
    elif details and details[0]['first_name']:
        entry.el_user = details[0]['first_name']
    entry.user_id = user
    entry.sign_off_datetime = now()
    entry.flags.ignore_permissions = 1
    entry.save()
    return "Sign Off Entry Created"
@frappe.whitelist()
def send_notification_to_el_sign_off():
    """Quarterly reminder hook.

    On each quarter's reminder date (20 Mar/Jun/Sep/Dec) mail the ELs of
    every customer that has no EL Sign Off Details entry dated inside the
    current quarter.
    """
    from datetime import date
    from calendar import monthrange
    d = datetime.date.today()
    year = d.year
    # e.g. "3-20" for 20th March; compared against the reminder dates below.
    check_day = cstr(d.month) + '-' + cstr(d.day)
    quarter_dates = ["3-20","6-20","9-20","12-20"]
    cust_dict = {}
    cust_list = frappe.db.sql("""select name from `tabCustomer`""")
    for c in cust_list:
        # Collect the EL email ids per customer.
        # NOTE(review): the customer name is %-interpolated into the SQL
        # string -- injection-prone; should be a parameterized query.
        cust = frappe.db.sql("""select od.email_id, opc.name from
            `tabOperation And Project Commercial` opc, `tabOperation And Project Details` od
            where od.role = "EL" and operational_matrix_status = "Active" and opc.name = od.parent
            and opc.customer = '%s' """%(c),as_list=1)
        el_user = []
        for c1 in cust:
            el_user.append(c1[0])
        cust_dict[c[0]] = el_user
    for q in quarter_dates:
        if q == check_day :
            # Derive the first/last month of the quarter whose reminder date
            # fired today.
            if q == "3-20":
                quarter = 1
                first_month_of_quarter = 3 * quarter - 2
                last_month_of_quarter = 3 * quarter
            elif q == "6-20":
                quarter = 2
                first_month_of_quarter = 3 * quarter - 2
                last_month_of_quarter = 3 * quarter
            elif q == "9-20":
                quarter = 3
                first_month_of_quarter = 3 * quarter - 2
                last_month_of_quarter = 3 * quarter
            else:
                quarter = 4
                first_month_of_quarter = 3 * quarter - 2
                last_month_of_quarter = 3 * quarter
            date_of_first_day_of_quarter = date(year, first_month_of_quarter, 1)
            date_of_last_day_of_quarter = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1])
            for cust in cust_dict:
                # NOTE(review): same %-interpolation injection concern here.
                sign_off_details = frappe.db.sql(""" select name from `tabEL Sign Off Details`
                    where sign_off_datetime between '%s' and '%s' and customer = '%s' """
                    %(date_of_first_day_of_quarter, date_of_last_day_of_quarter, cust), as_list=1)
                if not sign_off_details:
                    msg = """Dear User, \n\n You have not Authenticated and Updated the Customer - '%s'
                        in current quarter. Please Sign Off respective customer before end of the
                        current quarter. \n\n Thank You. """%(cust)
                    frappe.sendmail(cust_dict[cust], subject="Sign Off Delay Notification",message=msg)
@frappe.whitelist()
def get_ps_checklist(customer):
    """Fetch the customer's Post Sales Handover Checklist, or an unsaved new one."""
    if not frappe.db.get_value("Post Sales Handover Checklist", customer, "name"):
        checklist = frappe.new_doc("Post Sales Handover Checklist")
        checklist.customer = customer
    else:
        checklist = frappe.get_doc("Post Sales Handover Checklist", customer)
    return checklist.as_dict()
@frappe.whitelist()
def get_closure_checklist(customer):
    """Fetch the customer's Closure Checklist, or an unsaved new one."""
    if not frappe.db.get_value("Closure Checklist", customer, "name"):
        checklist = frappe.new_doc("Closure Checklist")
        checklist.customer = customer
    else:
        checklist = frappe.get_doc("Closure Checklist", customer)
    return checklist.as_dict()
|
shrikant9867/mycfo | mycfo/mycfo/report/login_report/login_report.py | # Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, data) for the login report."""
    return get_columns(), get_data(filters)
def get_data(filters):
    """Per-user login stats within the filter window: login count, last login
    and average session duration formatted HH:MM."""
    # MySQL TIME_FORMAT pattern (shadows the builtin `format`; kept as-is).
    format = "%H:%i"
    return frappe.db.sql(""" select lo.user, count(lo.name), usr.last_login,
        TIME_FORMAT(SEC_TO_TIME(AVG(TIME_TO_SEC(timediff(lo.log_out_time, lo.login_time) ))), %(format)s ) as avg_duration
        from `tabLogin Log` lo
        left join `tabUser` usr
        on lo.user = usr.name
        where ( lo.login_time between %(start_time)s and %(end_time)s )
        or ( lo.log_out_time between %(start_time)s and %(end_time)s )
        group by lo.user """, {"start_time":filters.get("start_time"),
        "end_time":filters.get("end_time"), "format":format}, as_list = 1)
def get_columns():
    """Column definitions for the login report."""
    specs = [
        ("User", "Link/User", 200),
        ("Logged In Count", "Int", 150),
        ("Last Login", "Datetime", 180),
        ("Average Duration (HH:MM)", "Data", 200),
    ]
    return [_(label) + ":{0}:{1}".format(fieldtype, width) for label, fieldtype, width in specs]
shrikant9867/mycfo | mycfo/checklist/report/checklist_wise_analysis/checklist_wise_analysis.py | # Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, data) for the checklist-wise report."""
    return get_colums(), get_data(filters)
def get_data(filters):
    """Fetch checklist requisition rows matching *filters*, newest start first."""
    if not filters:
        return []
    query = """select checklist_name,expected_start_date,end_date,count from `tabChecklist Requisition`
        {0} order by expected_start_date desc""".format(get_conditions(filters))
    return frappe.db.sql(query, as_list=1)
def get_conditions(filters):
    """Build the WHERE clause of the checklist-wise report from *filters*.

    Only filters the user actually set are included.  Values are escaped by
    doubling single quotes so a quote inside a filter value cannot break out
    of the SQL string literal (the original interpolated raw values).
    """
    # (filter key in the report UI, column name in `tabChecklist Requisition`)
    field_map = (
        ("checklist", "name"),
        ("checklist_status", "checklist_status"),
    )
    clauses = []
    for filter_key, column in field_map:
        value = filters.get(filter_key)
        if value:
            clauses.append("{0} = '{1}'".format(column, value.replace("'", "''")))
    return ("where " + " and ".join(clauses)) if clauses else ""
def get_colums():
    """Column definitions for the checklist-wise analysis report."""
    specs = [
        ("Checklist", "Data", 250),
        ("Start Date", "Date", 250),
        ("End Date", "Date", 250),
        ("Actual Time(In Days)", "Int", 150),
    ]
    return [_(label) + ":{0}:{1}".format(fieldtype, width) for label, fieldtype, width in specs]
|
shrikant9867/mycfo | mycfo/mycfo/page/project_commercial/project_commercial.py | import frappe
from frappe.model.document import Document
from frappe.utils import nowdate, cstr, flt, now, getdate, add_months
import datetime
import json
@frappe.whitelist()
def get_project_commercial_data(customer=None):
    """Return all Project Commercial records for *customer*, newest first,
    each with its Billings child rows attached under "child_records".

    Returns {"final_data": [...]} or None when nothing is found (preserved
    for existing client code that checks for the missing key).
    """
    # Parameterized queries: names must not be interpolated into SQL
    # (injection risk in the original).
    pc_names = frappe.db.sql("""select name from `tabProject Commercial`
        where customer=%s order by creation desc""", (customer,), as_list=1)
    final_data = []
    for row in pc_names:
        name = row[0]
        pc_data = frappe.db.sql("""select * from `tabProject Commercial` where name=%s""", (name,), as_dict=1)
        billings = frappe.db.sql("""select amount,due_date,f_type from `tabBillings` where parent=%s""", (name,), as_dict=1)
        if pc_data:
            if billings:
                pc_data[0]['child_records'] = billings
            final_data.append(pc_data)
    if final_data:
        return {"final_data": final_data}
shrikant9867/mycfo | mycfo/ip_library/doctype/ip_archiver/ip_archiver.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class IPArchiver(Document):
    """Workflow document for archiving an IP File.

    Central Delivery approves or rejects the request; the linked IP File's
    status is updated accordingly and both the requester and the file owner
    are notified by mail.
    """

    def validate(self):
        # Flag the linked file as pending archive while this request is open.
        # NOTE(review): frappe.db.escape combined with manual quoting -- on
        # newer frappe versions escape() already returns a quoted literal;
        # verify against the installed version.
        frappe.db.sql(""" update `tabIP File` set request_type='Archive', file_status='Archive Pending' where name= '%s' """%frappe.db.escape(self.ip_file))

    def before_submit(self):
        self.validate_for_status()
        self.set_current_status()
        self.update_ip_file_status()
        self.send_archive_notification()

    def validate_for_status(self):
        if not self.central_delivery_status:
            frappe.throw("Central Delivery Status is mandatory to submit the docuemnt")

    def set_current_status(self):
        # Map the Central Delivery decision to this document's final status.
        status_mapper = {"Approved":"Archived", "Rejected":"Rejected"}
        self.current_status = status_mapper.get(self.central_delivery_status)
        self.file_path = ""

    def update_ip_file_status(self):
        # current_status -> [new IP File status, published_flag]
        file_status_mapper = {"Archived":["Archived", 0], "Rejected":["Rejected by CD (Archive)", 1]}
        query = " update `tabIP File` set file_status= '{0}' , published_flag = {1}, request_type = 'Archive' where name = '{2}' ".format(
            file_status_mapper.get(self.current_status)[0], file_status_mapper.get(self.current_status)[1], frappe.db.escape(self.ip_file))
        frappe.db.sql(query)
        self.prepare_for_add_comment(file_status_mapper.get(self.current_status)[0])

    def prepare_for_add_comment(self, file_status):
        # Leave an audit trail on the IP File itself.
        comment = "File status Changed to {0} for request type Archive.".format(file_status)
        frappe.get_doc("IP File", self.ip_file).add_comment(comment)

    def send_archive_notification(self):
        """Mail the archive requester and the file owner about the decision."""
        template = "/templates/ip_library_templates/archive_notification.html"
        subject = "IP Document Archive Notification"
        email_recipients = frappe.db.get_values("User", {"name":["in", [self.archive_requester, self.ip_file_owner] ]}, ["email"], as_dict=1)
        # De-duplicate and drop empty addresses.
        email = list(set([ recipient.get("email") for recipient in email_recipients if recipient.get("email") ] ))
        args = {"status":self.central_delivery_status, "comments":self.central_delivery_comments, "file_name":self.file_name}
        frappe.sendmail(recipients=email, sender=None, subject=subject,
            message=frappe.get_template(template).render(args))
|
shrikant9867/mycfo | mycfo/trainings/doctype/training_approver/training_approver.py | <filename>mycfo/trainings/doctype/training_approver/training_approver.py
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TrainingApprover(Document):
	"""Approval workflow document for a Training record.

	On submit, the Central Delivery decision either publishes the linked
	Training (copying the approved metadata back onto it and assigning the
	assessment evaluator) or rejects it; either way the training author is
	notified by e-mail and a comment is added to the Training document.
	"""

	def before_submit(self):
		self.validate_for_central_delivery_status()
		self.init_for_training_publishing()

	def validate_for_central_delivery_status(self):
		# A decision (Accepted/Rejected) must be recorded before submit.
		if not self.central_delivery_status:
			frappe.throw("Central delivery status is mandatory to submit the document.")

	def init_for_training_publishing(self):
		# Dispatch on the decision.  NOTE(review): an unexpected status makes
		# mapper.get(...) return None, so the call raises TypeError.
		mapper = {"Accepted":self.publish_training, "Rejected":self.reject_training}
		mapper.get(self.central_delivery_status)()

	def publish_training(self):
		# Copy approved metadata onto the Training, point the linked
		# Assessment at the chosen evaluator, then notify the author.
		self.training_status = "Published"
		training_data = self.get_training_data()
		self.update_training_data(training_data)
		self.update_assessment_evaluator()
		self.add_comment_to_training()
		template = "/templates/training_templates/training_published_notification.html"
		self.send_mail(template)

	def reject_training(self):
		# Only the status is written back on rejection.
		self.training_status = "Rejected"
		self.update_training_data({"training_status":self.training_status})
		self.add_comment_to_training()
		template = "/templates/training_templates/training_rejection_notification.html"
		self.send_mail(template)

	def get_training_data(self):
		# Approved field values to be mirrored onto the Training record.
		return {
			"description":self.description,
			"document_type":self.document_type,
			"validity_for_completion":self.validity_for_completion,
			"training_status":self.training_status,
			"industry":self.industry,
			"skill_matrix_120":self.skill_matrix_120,
			"skill_matrix_18":self.skill_matrix_18,
			"evaluator":self.evaluator,
			"evaluator_name":self.evaluator_name
		}

	def send_mail(self, template):
		# E-mail the training author about the decision, naming the acting
		# Central Delivery user and their comments.
		subject = "Training Document Notification"
		first_nm, last_nm, email = frappe.db.get_value("User", {"name":self.training_author}, ["first_name", "last_name", "email"])
		args = {"training_name":self.training_name, "cd":frappe.session.user, "status":self.training_status,
			"first_name":first_nm, "last_name":last_nm , "comments":self.central_delivery_comments }
		frappe.sendmail(recipients= email, sender=None, subject=subject,
			message=frappe.get_template(template).render(args))

	def update_training_data(self, training_data):
		# Write the mirrored fields back, bypassing permission checks.
		tr = frappe.get_doc("Training", {"name":self.training_name})
		tr.update(training_data)
		tr.save(ignore_permissions = True)

	def add_comment_to_training(self):
		# Audit-trail comment on the Training itself.
		comment = "Training Document {0} status changed to {1}".format(self.training_name, self.training_status)
		frappe.get_doc("Training", self.training_name).add_comment(comment)

	def update_assessment_evaluator(self):
		# Resolve the evaluator Employee to a User id and set it on the
		# linked Assessment.
		asmt = frappe.get_doc("Assessment", self.assessment)
		asmt.assessment_evaluator = frappe.db.get_value("Employee", {"name":self.evaluator}, 'user_id')
		asmt.save(ignore_permissions=True)
shrikant9867/mycfo | mycfo/patches/v1_0/update_user_link.py | <reponame>shrikant9867/mycfo
import frappe
def execute():
	"""Patch: copy the record owner into the custom ``user`` field on
	Training and IP File documents."""
	for query in (
		""" update `tabTraining` set user = owner """,
		""" update `tabIP File` set user = owner """,
	):
		frappe.db.sql(query)
|
shrikant9867/mycfo | mycfo/config/ip_library.py | from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the link sections (Documents / Masters / IP Library /
	Standard Reports) shown on the IP Library module landing page."""
	return [
		{
			"label": _("Documents"),
			"icon": "icon-star",
			"items": [
				{
					"type": "doctype",
					"name": "IP File",
					"description": _("Upload IP File Content from here.")
				},
				{
					"type": "doctype",
					"name": "IP Approver",
					"description": _("Publish/Republish IP File.")
				},
				{
					"type": "doctype",
					"name": "IP Download Approval",
					"description": _("Approve Download request of IP File.")
				},
				{
					"type": "doctype",
					"name": "IP Archiver",
					"description": _("Archive IP File from here.")
				},
				{
					"type": "doctype",
					"name": "IP File Feedback",
					"description": _("Check IP File Feedback of users.")
				}
			]
		},
		{
			"label": _("Masters"),
			"icon": "icon-star",
			"items": [
				{
					"type": "doctype",
					"name": "Document Type",
					"description": _("Document Type"),
				},
				{
					"type": "doctype",
					"name": "Skill Matrix 18",
					"description": _("Skill Matrix 18"),
				},
				{
					"type": "doctype",
					"name": "Skill Matrix 120",
					"description": _("Skill Matrix 120"),
				},
				{
					"type": "doctype",
					"name": "IP File Questionnaire",
					"description": _("Set IP File Feedback Questionnaire.")
				}
			]
		},
		{
			"label": _("IP Library"),
			"icon": "icon-star",
			"items": [
				{
					"type": "page",
					"name": "ip-file-dashboard",
					"icon": "icon-sitemap",
					"label": _("Global IP File Search Dashboard"),
					"route": "ip-file-dashboard",
					"description": _("Search Ip File from here")
				}
			]
		},
		{
			"label": _("Standard Reports"),
			"icon": "icon-star",
			"items": [
				{
					"type": "page",
					"name": "ip-library-reports",
					"icon": "icon-sitemap",
					"label": _("Graphical Reports"),
					"description": _("Graphical Report for Request and IP Library"),
				}
			]
		}
	]
shrikant9867/mycfo | mycfo/mycfo/report/customer_operational_matrix_details/customer_operational_matrix_details.py | # Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
	"""Script-report entry point: return the (columns, data) pair."""
	return get_colums(), get_data(filters)
def get_data(filters):
	"""Report rows: one per EL/EM member of each customer's operational
	matrix.  NOTE(review): *filters* is accepted but currently unused."""
	result = frappe.db.sql("""select op.operational_id,op.operational_matrix_title,op.operational_matrix_status,op.customer,cu.pan_number,
		cu.tan_number, cu.cin_number, od.role,emp.employee_name,od.email_id
		from `tabOperation And Project Commercial` op,`tabCustomer` cu, `tabOperation Details` od,
		`tabEmployee` emp
		where cu.name = op.customer
		and od.parent = op.operational_id
		and emp.name = od.user_name
		and od.role in ("EL","EM") order by op.operational_id """,as_list=1)
	return result
def get_colums():
	"""Return the report column definitions as ``Label:Fieldtype:Width`` strings."""
	specs = [
		("Operational Matrix", "Link/Operational Matrix:120"),
		("OM Title", "Data:200"),
		("OM Status", "Data:100"),
		("Customer", "Link/Customer:170"),
		("PAN NO", "Data:100"),
		("TAN NO", "Data:100"),
		("CIN NO", "Data:170"),
		("Role", "Data:80"),
		("Employee Name", "Data:150"),
		("Email Id", "Data:200"),
	]
	return [_(label) + ":" + rest for label, rest in specs]
|
shrikant9867/mycfo | mycfo/ip_library/doctype/ip_download_approval/ip_download_approval.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import now, add_days
from frappe.model.document import Document
class IPDownloadApproval(Document):
	"""Download request for a secured IP File.

	Level-1 documents are decided by the assigned approver; Level-2
	documents (and requests raised by EL users, which have no approver)
	are decided by Central Delivery.  The requester is e-mailed the
	outcome on submit.
	"""

	def validate(self):
		self.set_current_status_of_request()

	def set_current_status_of_request(self):
		# Interim status while the request is still a draft.
		status_dict = {"Rejected":"Rejected by Approver", "Approved":"Approved by Approver"}
		self.approval_status = status_dict.get(self.approver_status, "Pending")

	def before_submit(self):
		if self.approver: # this condition checks whether the download requester is an EL, because EL requests are submitted directly by CD
			self.validate_for_download_request_submission()
			self.prepare_for_user_notification()
			level_mapper = {"1-Level":self.set_approval_status_for_level1, "2-Level":self.set_approval_status_for_level2}
			level_mapper.get(self.level_of_approval)()
		else:
			# No approver: CD decides directly (EL-raised request).
			status_mapper = {"status":self.central_delivery_status, "comments":self.central_delivery_comments}
			self.set_approval_status_for_level2()
			self.init_for_send_mail(status_mapper)

	def set_approval_status_for_level1(self):
		# Final status derived from the approver's decision.
		status_dict = {"Rejected":"Rejected", "Approved":"Download Allowed"}
		self.approval_status = status_dict.get(self.approver_status)

	def set_approval_status_for_level2(self):
		# Final status derived from Central Delivery's decision.
		status_dict = {"Rejected":"Rejected", "Approved":"Download Allowed"}
		self.approval_status = status_dict.get(self.central_delivery_status)

	def prepare_for_user_notification(self):
		# NOTE(review): this get_requester_data() call is redundant —
		# init_for_send_mail() fetches the same data again.
		args, email = self.get_requester_data()
		level_mapper = {"1-Level":{"status":self.approver_status, "comments":self.approver_comments},
			"2-Level":{"status":self.central_delivery_status, "comments":self.central_delivery_comments}}
		status_mapper = level_mapper.get(self.level_of_approval)
		self.init_for_send_mail(status_mapper)

	def init_for_send_mail(self, status_mapper):
		# E-mail the requester with the decision and any comments.
		args, email = self.get_requester_data()
		args.update(status_mapper)
		self.send_notification("IP Document {0} {1}".format(self.file_name, args.get("status")), email,
			"templates/ip_library_templates/download_request_approval.html",args)

	def get_requester_data(self):
		# Template context plus the recipient address for the requester.
		email, first_name, last_name = frappe.db.get_value("User", {"name":self.ip_file_requester}, ["email", "first_name", "last_name"])
		args = {"file_name":self.file_name, "first_name":first_name, "last_name":last_name}
		return args, email

	def validate_for_download_request_submission(self):
		# Route to the level-specific submission rules.
		roles = frappe.get_roles()
		validate_mapper = {"1-Level":self.validate_for_level_1_document, "2-Level":self.validate_for_level_2_document}
		validate_mapper.get(self.level_of_approval)(roles)

	def validate_for_level_1_document(self, roles):
		# Level-1: only the approver (not CD) may submit, and must decide.
		if "Central Delivery" in roles:
			frappe.throw("Central Delivery is not allowed to submit download request of Level-1 document")
		if not self.approver_status:
			frappe.throw("Approver Status is mandatory to submit download request of Level-1 document")

	def validate_for_level_2_document(self, roles):
		# Level-2: only CD may submit, and both decisions must agree.
		if "Central Delivery" not in roles:
			frappe.throw("Only Central Delivery is allowed to submit download request of Level-2 document")
		self.validate_for_approval()

	def validate_for_approval(self):
		if self.central_delivery_status != self.approver_status:
			frappe.throw(" Approver Status & Central Delivery Status must be same to submit document ")

	def send_notification(self, subject, email, template, args):
		frappe.sendmail(recipients=email, sender=None, subject=subject,
			message=frappe.get_template(template).render(args))
def get_permission_query_conditions(user):
	"""List-view permission hook: restrict IP Download Approval records to
	those assigned to the session user's Employee as approver.  Central
	Delivery and Administrator get no extra condition (implicit None)."""
	user_roles = frappe.get_roles()
	if "Central Delivery" in user_roles or frappe.session.user == "Administrator":
		return
	emp_name = frappe.db.get_value("Employee", {"user_id": frappe.session.user}, "name")
	cond = " where approver = '{0}' ".format(emp_name)
	rows = frappe.db.sql(""" select name from `tabIP Download Approval` {0} """.format(cond), as_dict=1)
	allowed = "', '".join([row.get("name") for row in rows if row])
	return """(`tabIP Download Approval`.name in ('{files}') )""".format(files=allowed)
shrikant9867/mycfo | mycfo/config/discussion_forum.py | <filename>mycfo/config/discussion_forum.py
from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the link sections (Documents / Setup / Standard Reports)
	shown on the Discussion Forum module landing page."""
	return [
		{
			"label": _("Documents"),
			"icon": "icon-star",
			"items": [
				{
					"type": "page",
					"name": "discussion-forum",
					"label": _("Discussion Forum"),
					"description": _("Discussion Forum"),
				},
				{
					"type": "doctype",
					"name": "Discussion Topic",
				},
				{
					"type": "doctype",
					"name": "Topic Ratings",
				},
			]
		},
		{
			"label": _("Setup"),
			"icon": "icon-star",
			"items": [
				{
					"type": "doctype",
					"name": "Discussion Category",
					"description": _("Discussion Category Type"),
				},
			]
		},
		{
			"label": _("Standard Reports"),
			"icon": "icon-star",
			"items": [
				{
					"type": "report",
					"name": "Discussion Forum Report",
					"is_query_report": True,
					"doctype": "Discussion Topic"
				},
				{
					"type": "page",
					"name": "discussion-forum-rep",
					"icon": "icon-sitemap",
					"label": _("Graphical Reports"),
					"description": _("Graphical Report for Discussion Forum"),
				}
			]
		}
	]
|
shrikant9867/mycfo | mycfo/config/checklist.py | from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the link sections (Documents / Masters / Standard Reports)
	shown on the Checklist module landing page."""
	return [
		{
			"label": _("Documents"),
			"icon": "icon-star",
			"items": [
				{
					"type": "doctype",
					"name": "Checklist Requisition",
					"description": _("Run Checklist")
				},
				{
					"type": "doctype",
					"name": "Checklist Task",
					"description": _("Task Details")
				}
				# {
				# 	"type": "doctype",
				# 	"name": "Checklist Time Log",
				# 	"description": _("Time Log Details"),
				# }
			]
		},
		{
			"label": _("Masters"),
			"icon": "icon-star",
			"items": [
				{
					"type": "doctype",
					"name": "Checklist",
					"description": _("Checklist Details"),
				}
			]
		},
		{
			"label": _("Standard Reports"),
			"icon": "icon-star",
			"items": [
				{
					"type": "report",
					"is_query_report": True,
					"name": "Checklist Requisition Analysis",
					"description": _("Process/Checklist Report"),
					"doctype": "Checklist Requisition",
				},
				{
					"type": "report",
					"is_query_report": True,
					"name": "Task Wise Analysis",
					"description": _("Tasks Report"),
					"doctype": "Checklist Task",
				},
				{
					"type": "page",
					"name": "checklist-report",
					"icon": "icon-sitemap",
					"label": _("Graphical Reports"),
					"description": _("Graphical Report for Checklist"),
				},
			]
		},
	]
shrikant9867/mycfo | mycfo/config/mycfo.py | from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the link sections (Documents / Masters / Standard Reports)
	shown on the MyCFO module landing page."""
	return [
		{
			"label": _("Documents"),
			"icon": "icon-star",
			"items": [
				{
					"type": "doctype",
					"name": "Customer",
					"description": _("Customer Details")
				},
				{
					"type": "doctype",
					"name": "FFWW",
					"description": _("FFWW Details"),
					"label":_("FFWW"),
				},
				{
					"type": "doctype",
					"name": "Operational Matrix",
					"description": _("Operational Matrix Details"),
				},
				{
					"type": "doctype",
					"name": "Project Commercial",
					"description": _("Project Commercial Details"),
				},
				{
					"type": "doctype",
					"name": "Financial Data",
					"description": _("Financial Details"),
				},
				{
					"type": "doctype",
					"name": "Customer Skill Mapping",
					"description": _("Customer Skill Mapping Details"),
				},
				{
					"type": "doctype",
					"name": "KPI",
					"description": _("KPI Details"),
				},
				{
					"type": "doctype",
					"name": "Customer Satisfaction Survey",
					"description": _("Customer Satisfaction Survey Details"),
				},
				{
					"type": "doctype",
					"name": "Skill Mapping",
					"description": _("Skill Mapping Details"),
				}
			]
		},
		{
			"label": _("Masters"),
			"icon": "icon-star",
			"items": [
				{
					"type": "doctype",
					"name": "Sector",
					"description": _("Sector"),
				},
				{
					"type": "doctype",
					"name": "Service Type",
					"description": _("Service Type"),
				},
				{
					"type": "doctype",
					"name": "Designation",
					"description": _("Designation"),
				},
				{
					"type": "doctype",
					"name": "Industry",
					"description": _("Industry Name"),
				},
				{
					"type": "doctype",
					"name": "Industry Group",
					"description": _("Industry Group"),
				},
				{
					"type": "doctype",
					"name": "Sub Industry",
					"description": _("Sub Industry Name"),
				},
				{
					"type": "doctype",
					"name": "Customer Segment",
					"description": _("Customer Segments"),
				},
				{
					"type": "doctype",
					"name": "Category",
					"description": _("Category"),
				},
			]
		},
		{
			"label": _("Standard Reports"),
			"icon": "icon-star",
			"items": [
				{
					"type": "page",
					"name": "project-commercial-report",
					"icon": "icon-sitemap",
					"label": _("Graphical Reports"),
					"description": _("Graphical Report for Project Commercial")
				},
				{
					"type": "report",
					"name": "Login Report",
					"is_query_report": True,
					"doctype": "Login Log",
					"description": _("Login details report.")
				},
				{
					"type": "report",
					"name": "Customer Skill Mapping Report",
					"is_query_report": True,
					"doctype": "Customer Skill Mapping",
					"description": _("Customer Skill mapping report comprises total, sum & average of skill")
				},
				{
					"type": "report",
					"name": "Customer Skill Mapping",
					"label": "Customer Skill Mapping Analysis",
					"is_query_report": True,
					"doctype": "Customer Skill Mapping",
					"description": _("Customer skill maaping detailed analysis")
				},
				{
					"type": "report",
					"name": "Customer Operational Matrix",
					"label": "Customer Operational Matrix",
					"is_query_report": True,
					"doctype": "Operation And Project Commercial",
					"description": _("Customer Operational Matrix Details")
				},
				{
					"type": "report",
					"name": "EL Sign Off Report",
					"label": "EL Sign Off Report",
					"is_query_report": True,
					"doctype": "EL Sign Off Details",
					"description": _("EL Sign Off Details")
				},
				{
					"type": "report",
					"name": "Customer Operational Matrix Details",
					"label": "Customer Operational Matrix Details",
					"is_query_report": True,
					"doctype": "Operation And Project Commercial",
					"description": _("Customer Operational Matrix Details")
				},
				{
					"type": "report",
					"name": "Resource Pool",
					"label": "Resource Pool",
					"is_query_report": True,
					"doctype": "Skill Mapping",
					"description": _("Skill Mapping Report")
				}
			]
		}
	]
shrikant9867/mycfo | mycfo/ip_library/doctype/ip_approver/ip_approver.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
import shutil
import subprocess
from frappe.utils import today, getdate
import subprocess
import shlex
import os
import json
class IPApprover(Document):
	"""Approval/publication workflow for an IP File.

	Handles three request types: "New" (first publication), "Edit"
	(republication) and "Upgrade Validity".  On submit the Central Delivery
	decision either publishes the file (copying it into the published-file
	tree and scheduling a viewer-format conversion via IP File Converter)
	or rejects it; status changes are mirrored onto the IP File, commented,
	and notified by e-mail.

	NOTE(review): this module uses Python 2 syntax (``except Exception, e``)
	and must run under Python 2 / legacy frappe.
	"""

	def validate(self):
		# Approver-stage bookkeeping.  Skipped for validity upgrades and for
		# EL-raised requests, which have no approver (per the inline comment).
		if self.request_type != "Upgrade Validity" and self.approver: # This (self.approver) condition checks whether the request was raised by an EL or not.
			self.check_if_file_rejected()
			self.set_current_status_of_approval()
			self.update_ip_file_status()

	def check_if_file_rejected(self):
		# Notify the requester once (file_rejected guard) on approver rejection.
		if self.approver_status == "Rejected" and self.file_rejected != "Yes":
			approver = frappe.db.get_value("Employee", self.approver, ["user_id"])
			self.process_data_before_notification(approver, self.approver_comments)

	def set_current_status_of_approval(self):
		if self.approver_status == "Approved":
			self.current_status = "Approved by Approver"
		elif self.approver_status == "Rejected":
			self.current_status = "Rejected by Approver"

	def update_ip_file_status(self):
		# Mirror approver-stage status onto the IP File and comment once per
		# decision (comment_flag remembers the last commented status).
		file_status = self.get_file_status()
		cond = " file_status = '{0}' ".format(file_status)
		self.update_ip_file(cond)
		if not self.comment_flag and self.approver_status:
			self.init_for_add_comment(file_status)
			self.comment_flag = self.approver_status

	def process_data_before_notification(self, approver, comments):
		# Build the rejection e-mail context (who rejected, their comments)
		# and send it to the requester.
		self.file_rejected = "Yes"
		args, email = self.get_requester_data()
		appr_fnm, appr_lstnm = frappe.db.get_value("User", {"name":approver}, ["first_name", "last_name"])
		approver_full_name = appr_fnm + " " + appr_lstnm if appr_lstnm else ""
		args["approver"] = approver_full_name
		args["comments"] = comments
		self.send_notification("IP Document {0} Rejected".format(self.file_name), email,
			"templates/ip_library_templates/user_notification.html",args)

	def send_notification(self, subject, email, template, args):
		frappe.sendmail(recipients=email, sender=None, subject=subject,
			message=frappe.get_template(template).render(args))

	def before_submit(self):
		# Final (Central Delivery) stage: validate, then branch on request type.
		self.validate_validity_end_date()
		self.validate_for_central_delivery()
		if self.request_type != "Upgrade Validity":
			self.check_for_edit_and_new_request()
		else:
			self.check_for_validity_upgrade()

	def validate_validity_end_date(self):
		if getdate(self.validity_end_date) < getdate(today()):
			frappe.throw("Validity End Date must be greater than Current Date.")

	def validate_for_central_delivery(self):
		if not self.central_delivery or not self.central_delivery_status:
			frappe.throw("Central delivery & Central Delivery status is mandatory to submit document.")

	def check_for_edit_and_new_request(self):
		# Approved: copy the edited file into the published tree, schedule a
		# viewer conversion and notify.  Otherwise: reject, reset the IP File
		# and notify the requester.
		if self.central_delivery_status == "Approved":
			extension = "." + self.file_extension if self.file_extension else ""
			self.current_status = "Published"
			self.init_update_ip_file(extension)
			request_type = {"New":"Published", "Edit":"Republished"}
			self.init_for_add_comment(request_type.get(self.request_type))
			edited_file_path = frappe.get_site_path("public", self.file_path)
			self.validate_file_path_exists(edited_file_path)
			shutil.copy(edited_file_path, frappe.get_site_path("public", "files", "mycfo", "published_file", self.file_type, self.file_name + extension))
			self.create_compatible_odf_fromat()
			self.prepare_for_published_notification()
			frappe.msgprint("Document {0} {1} successfully.".format(self.file_name, request_type.get(self.request_type)))
			# if os.path.isfile(edited_file_path):
			# os.remove(edited_file_path)
		else:
			self.current_status = "Rejected by CD"
			request_type = {"New":["Rejected by CD", 0], "Edit":["Rejected by CD (Edit)", 1]}
			ip_file_cond = """ file_status = "{0}", published_flag = {1}, new_file_path = "", approver_link= "" """.format(request_type.get(self.request_type)[0], request_type.get(self.request_type)[1])
			self.update_ip_file(ip_file_cond)
			self.init_for_add_comment(request_type.get(self.request_type)[0])
			self.process_data_before_notification(self.central_delivery, self.central_delivery_comments)

	def validate_file_path_exists(self, edited_file_path):
		if not os.path.isfile(edited_file_path):
			frappe.throw("File path not found.IP File can not be published.")

	def check_for_validity_upgrade(self):
		# Validity-upgrade requests only touch status/validity fields.
		if self.central_delivery_status == "Approved":
			self.current_status = "Published"
			cond = " file_status = 'Validity Upgraded', validity_end_date= '{0}', request_type = 'Upgrade Validity' ".format(self.validity_end_date)
			self.init_for_add_comment("Validity Upgraded")
		else:
			self.current_status = "Rejected by CD"
			cond = " file_status = 'Rejected by CD (Validity)', request_type = 'Upgrade Validity' "
			self.init_for_add_comment("Rejected by CD (Validity)")
		self.update_ip_file(cond)
		self.init_for_validity_notification()

	def prepare_for_published_notification(self):
		if "Central Delivery" not in frappe.get_roles(self.ip_file_requester): # condition to check whether the requester is central delivery or not
			args, email = self.get_requester_data()
			self.send_notification("IP Document {0} Published".format(self.file_name), email,
				"templates/ip_library_templates/cd_upload_notification.html",args)

	def get_requester_data(self):
		# Template context plus recipient address for the requester.
		email, first_name, last_name = frappe.db.get_value("User", {"name":self.ip_file_requester}, ["email", "first_name", "last_name"])
		args = {"file_name":self.file_name, "first_name":first_name, "last_name":last_name}
		return args, email

	def init_update_ip_file(self, extension):
		# Point the IP File at its published location and copy the approved
		# metadata across.
		file_path = '/'.join(["files", "mycfo", "published_file", self.file_type, self.file_name + extension])
		request_type = {"New":"Published", "Edit":"Republished"}
		file_status = request_type.get(self.request_type)
		ip_file_cond = self.get_updated_ip_file_cond(file_path, file_status)
		self.update_ip_file(ip_file_cond)

	def get_file_status(self):
		# Approver-stage status string, distinct per request type.
		if self.request_type == "New":
			my_dict = {"Approved":"Approved by Approver", "Rejected":"Rejected by Approver"}
			return my_dict.get(self.approver_status, "New Upload Pending")
		elif self.request_type == "Edit":
			my_dict = {"Approved":"Approved by Approver (Edit)", "Rejected":"Rejected by Approver (Edit)"}
			return my_dict.get(self.approver_status, "Edit Pending")

	def get_updated_ip_file_cond(self, file_path, file_status):
		# Build the SET clause for publishing: values are escaped via
		# frappe.db.escape before being formatted into the SQL string.
		file_dict = {
			"approver_link":"",
			"new_file_path":"",
			"skill_matrix_120":frappe.db.escape(self.skill_matrix_120),
			"skill_matrix_18":frappe.db.escape(self.skill_matrix_18),
			"industry":frappe.db.escape(self.industry),
			"source":frappe.db.escape(self.source),
			"description":frappe.db.escape(self.file_description) if self.file_description else "",
			"validity_end_date":self.validity_end_date,
			"security_level":self.level_of_approval,
			"file_path":frappe.db.escape(file_path),
			"file_status":file_status,
			"uploaded_date":today(),
			"published_flag":1,
			"customer":frappe.db.escape(self.customer),
			"file_approver":self.approver or "",
			"employee_name":frappe.db.escape(self.employee_name)
		}
		cond = ""
		cond_list = [ "{0} = '{1}' ".format(key, value) for key, value in file_dict.items()]
		cond = ','.join(cond_list)
		return cond

	def update_ip_file(self, ip_file_cond):
		query = """ update `tabIP File` set {0} where name = '{1}' """.format(ip_file_cond, frappe.db.escape(self.ip_file))
		frappe.db.sql(query)

	def init_for_add_comment(self, file_status):
		# Audit-trail comment on the IP File.
		comment = "File status Changed to {0} for request type {1}.".format(file_status, self.request_type)
		frappe.get_doc("IP File", self.ip_file).add_comment(comment)

	def init_for_validity_notification(self):
		# Notify both the requester and the IP File owner about the
		# validity-upgrade decision.
		template = "/templates/ip_library_templates/upgrade_validity_notification.html"
		subject = "IP Document Upgrade Validity Notification"
		file_owner = frappe.db.get_value("IP File", {"name":self.ip_file}, 'owner')
		email_recipients = frappe.db.get_values("User", {"name":["in", [self.ip_file_requester, file_owner] ]}, ["email"], as_dict=1)
		email = list(set([ recipient.get("email") for recipient in email_recipients if recipient.get("email") ] ))
		args = {"status":self.central_delivery_status, "comments":self.central_delivery_comments, "file_name":self.file_name}
		self.send_notification(subject, email, template, args)

	def create_compatible_odf_fromat(self):
		# Work out the in-browser viewer path for the published file and queue
		# a unoconv conversion (via IP File Converter) where one is needed.
		# "zip" files and files without an extension are skipped.
		mapper = self.get_extension_mapper()
		xlsm_path = ""
		edited_file_path = frappe.get_site_path("public", self.file_path)
		if self.file_extension != "zip" and self.file_extension:
			try:
				extension = mapper.get(self.file_extension, "pdf")
				viewer_path = frappe.get_site_path('/'.join(["public", "files", "mycfo", "published_file", self.file_type, self.file_name + "_viewer." + extension]))
				file_path = '/'.join(["files", "mycfo", "published_file", self.file_type, self.file_name + "." + self.file_extension])
				dir_path = ""
				if self.file_extension == "pdf":
					# PDFs are viewable as-is through ViewerJS; no conversion.
					file_viewer_path = "assets/mycfo/ViewerJS/index.html#../../../../" + file_path
					self.update_ip_file(" file_viewer_path = '%s' "%frappe.db.escape(file_viewer_path))
				else:
					file_path = frappe.get_site_path("public", file_path)
					if extension != "html":
						file_viewer_path = "assets/mycfo/ViewerJS/index.html#../../../../" + '/'.join(["files", "mycfo", "published_file", self.file_type, self.file_name + "_viewer." + extension])
					else:
						# Spreadsheets convert to an HTML directory of their own.
						viewer_path = frappe.get_site_path('/'.join(["public", "files", "mycfo", "published_file", self.file_type, self.file_name, self.file_name + "_viewer." + extension]))
						file_viewer_path = '/'.join(["files", "mycfo", "published_file", self.file_type, self.file_name, self.file_name + "_viewer." + extension])
						dir_path = frappe.get_site_path("public", "files", "mycfo", "published_file", self.file_type, self.file_name)
					if self.file_extension == "xlsm":
						# Macro workbooks are converted from an xlsx copy.
						xlsm_path = file_path
						file_path = frappe.get_site_path("public", '/'.join(["files", "mycfo", "published_file", self.file_type, self.file_name + "." + "xlsx"]))
					args = ['unoconv', '-f', str(extension) , '-T', '9', '-o', str(viewer_path), str(file_path)]
					self.schedule_conversion_in_cron({"dir_path":dir_path, "args":args, "file_viewer_path":file_viewer_path,
						"file_extension":self.file_extension, "xlsm_path":xlsm_path,
						"file_path":file_path, "edited_file_path":edited_file_path} )
			except Exception, e:
				frappe.throw(e)

	def schedule_conversion_in_cron(self, data):
		# Persist the conversion job; a scheduled task picks it up later.
		ipc = frappe.new_doc("IP File Converter")
		ipc.ip_file = self.ip_file
		ipc.ip_approver = self.name
		ipc.dir_path = data.get("dir_path")
		ipc.command = json.dumps(data.get("args"))
		ipc.file_viewer_path = data.get("file_viewer_path")
		ipc.file_extension = data.get("file_extension")
		ipc.xlsm_path = data.get("xlsm_path")
		ipc.file_path = data.get("file_path")
		ipc.edited_file_path = data.get("edited_file_path")
		ipc.save(ignore_permissions=True)

	def get_extension_mapper(self):
		# Source extension -> viewer format handed to unoconv.
		return {"gif":"pdf", "jpg":"pdf", "jpeg":"pdf", "png":"pdf", "svg":"pdf",
			"doc":"pdf", "docx":"pdf", "xls":"html", "xlsx":"html", "xlsm":"html",
			"ppt":"pdf", "pptx":"pdf", "pdf":"pdf", "txt":"pdf", "csv":"ods"}
@frappe.whitelist()
def get_central_delivery_user(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query: Users holding the "Central Delivery" role whose name
	or first name matches *txt* (excluding Administrator).  The LIKE pattern
	is passed as a bound parameter."""
	return frappe.db.sql("""
		select usr.name, usr.first_name
		from `tabUserRole` rol
		join `tabUser` usr
		on rol.parent = usr.name
		where rol.role = "Central Delivery"
		and usr.name != 'Administrator'
		and (usr.name like %(txt)s
		or usr.first_name like %(txt)s)
		limit 20
		""",{'txt': "%%%s%%" % txt})
@frappe.whitelist()
def get_user_with_el_roles(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query: Employees with the "EL" role on the Operation And
	Project Commercial record whose project_commercial matches
	``filters["project_id"]``, filtered by *txt*.
	NOTE(review): all conditions live in the JOIN's ON clause (no WHERE);
	confirm this matches the intended row filtering."""
	return frappe.db.sql("""
		select
		distinct emp.name, emp.employee_name
		from `tabOperation And Project Commercial` opc
		join `tabOperation And Project Details` opd
		on opd.parent = opc.name
		join `tabEmployee` emp
		on emp.name = opd.user_name
		and opd.role = "EL"
		and project_commercial = %(project)s
		and (emp.name like %(txt)s
		or emp.employee_name like %(txt)s )
		limit 20 """, {"project":filters.get("project_id"), "txt": "%%%s%%" % txt})
def get_permission_query_conditions(user):
	"""List-view permission hook: restrict IP Approver records to those
	assigned to the session user's Employee as approver.  Central Delivery
	and Administrator get no extra condition (implicit None)."""
	user_roles = frappe.get_roles()
	if "Central Delivery" in user_roles or frappe.session.user == "Administrator":
		return
	emp_name = frappe.db.get_value("Employee", {"user_id": frappe.session.user}, "name")
	cond = " where approver = '{0}' ".format(emp_name)
	rows = frappe.db.sql(""" select name from `tabIP Approver` {0} """.format(cond), as_dict=1)
	allowed = "', '".join([row.get("name") for row in rows if row])
	return """(`tabIP Approver`.name in ('{files}') )""".format(files=allowed)
shrikant9867/mycfo | mycfo/ip_library/page/ip_file_dashboard/ip_file_dashboard.py | from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint, time_diff_in_hours, now, time_diff, add_days
from frappe import _
import json
import math
import re
from mycfo.mycfo_utils import get_central_delivery
@frappe.whitelist()
def get_global_search_suggestions(filters):
	"""Return type-ahead suggestions matching *filters*.

	Searches published/archived IP File names plus the Skill Matrix 18/120,
	Document Type, Customer, Industry and IP Tags masters, and appends any
	security level ("0-Level"/"1-Level"/"2-Level") whose label contains the
	text.  Returns a flat list of suggestion strings.
	"""
	# Bind the LIKE pattern as a parameter: the previous .format() of raw
	# user text into the query was an SQL-injection vector through this
	# whitelisted endpoint.
	pattern = "%{0}%".format(filters)
	query = """ select file_name from `tabIP File` where ( published_flag = 1 or file_status = 'Archived' )
				and file_name like %(txt)s
				union select name from `tabSkill Matrix 18` where name like %(txt)s
				union select name from `tabSkill Matrix 120` where name like %(txt)s
				union select name from `tabDocument Type` where name like %(txt)s
				union select name from `tabCustomer` where name like %(txt)s
				union select name from `tabIndustry` where name like %(txt)s
				union select name from `tabIP Tags` where name like %(txt)s
			"""
	suggestions = frappe.db.sql(query, {"txt": pattern}, as_list=1)
	# re.escape: the user text is plain text, not a regex — unescaped input
	# such as "(" used to raise re.error here.
	security_levels = [ [level] for level in ["0-Level", "1-Level", "2-Level"]
		if re.search(re.escape(filters), level, re.IGNORECASE)]
	suggestions.extend(security_levels)
	suggestions = [suggestion[0] for suggestion in suggestions]
	return suggestions
@frappe.whitelist()
def get_published_ip_file(search_filters):
	"""Paginated dashboard search over published/archived IP Files.

	*search_filters* is a JSON string with keys ``filters`` (list of selected
	suggestion strings, may be empty) and ``page_no`` (0-based page index,
	5 rows per page).  Returns ``(rows, total_pages)``; each row is annotated
	in place with the session user's download-approval status.
	NOTE(review): the selected filter strings are interpolated into the SQL
	with .format() — an injection risk if the client sends arbitrary values;
	consider parameterizing.
	"""
	search_filters = json.loads(search_filters)
	limit_query = "LIMIT 5 OFFSET {0}".format(search_filters.get("page_no") * 5 )
	if search_filters['filters'] :
		ip_file_filters = ','.join('"{0}"'.format(w) for w in search_filters['filters'] )
		my_query = """ select
					*
				from
				( select ipf.new_file_path, ipf.skill_matrix_120, ipf.file_name, ipf.file_extension, ipf.creation,
				ipf.modified, ipf.file_status, ipf.owner, ipf.document_type, ipf.modified_by, ipf.published_flag, ipf.source,
				ipf.security_level, ipf.docstatus, ipf.file_path, ipf.file_approver, ipf.description, ipf.skill_matrix_18,
				ipf.validity_end_date, ipf.request_type, ipf.user, ipf.employee_name, ipf.file_viewer_path,
				ipf.customer, ipf.name, ipf.industry, ipf.uploaded_date, ipf.approver_link
				from `tabIP File` ipf
				left join `tabIP File Tags` ipt
				on ipt.parent = ipf.name
				where ( ipf.published_flag = 1 or ipf.file_status = 'Archived' )
				and ( ipf.skill_matrix_18 in ({0}) or ipf.file_name in ({0})
				or ipf.security_level in ({0}) or ipf.customer in ({0})
				or ipf.industry in ({0}) or ipf.skill_matrix_120 in ({0})
				or ipf.document_type in ({0}) or ipt.ip_tags in ({0}) )
				) as new_tbl
				group by new_tbl.name order by new_tbl.uploaded_date desc
				""".format(ip_file_filters)
	else :
		my_query = ip_file_search_without_filters()
	total_records = get_total_records(my_query)
	response_data = frappe.db.sql(my_query + limit_query, as_dict=True)
	get_request_download_status(response_data)
	# 5.0 forces float division so the page count rounds up correctly.
	total_pages = math.ceil(len(total_records)/5.0)
	return response_data, total_pages
def get_total_records(my_query):
    """Re-run *my_query* with its first "*" replaced by count(*) as count.

    If the query has no "*" it runs unchanged and the caller counts rows.
    """
    counting_query = my_query.replace("*", "count(*) as count", 1)
    return frappe.db.sql(counting_query, as_dict=1)
def ip_file_search_without_filters():
    """Unfiltered IP File listing query (newest upload first).

    Columns are listed explicitly (no "*") on purpose: get_total_records()
    replaces the first "*" with count(*), which must not happen here.
    """
    return """ select new_file_path, skill_matrix_120, file_name, file_extension, creation,
        modified, file_status, owner, document_type, modified_by, published_flag, source,
        security_level, docstatus, file_path, file_approver, description, skill_matrix_18,
        validity_end_date, request_type, user, employee_name, file_viewer_path,
        customer, name, industry, uploaded_date, approver_link from `tabIP File`
        order by uploaded_date desc """
def get_sub_query_of_request_status(file_name):
    """Query for the session user's most recent non-expired download request
    for *file_name* (at most one row: status, validity end, form name)."""
    return """ select ipd.approval_status, ipd.validity_end_date , ipd.name
        from `tabIP Download Approval` ipd
        where ipd.file_name = '{0}'
        and ipd.ip_file_requester = '{1}'
        and ipd.approval_status != 'Expired'
        order by ipd.creation desc limit 1 """.format(frappe.db.escape(file_name), frappe.session.user)
def get_request_download_status(response_data):
    """Annotate each listed file with the session user's latest download
    request (status/validity/form) and its community stats, in place."""
    for row in response_data:
        rows = frappe.db.sql(get_sub_query_of_request_status(row.get("file_name")), as_dict=1)
        if rows:
            latest = rows[0]
            row["download_validity_end_date"] = latest.get("validity_end_date")
            row["approval_status"] = latest.get("approval_status")
            row["download_form"] = latest.get("name")
        else:
            row["download_validity_end_date"] = ""
            row["approval_status"] = ""
            row["download_form"] = ""
        get_comments_reviews(row)
@frappe.whitelist()
def get_latest_uploaded_documents(search_filters):
    """Paginated (5/page) list of files published in the last 15 days.

    search_filters is a JSON string with "page_no".
    Returns (rows, total_pages).
    """
    search_filters = json.loads(search_filters)
    limit_query = "LIMIT 5 OFFSET {0}".format(search_filters.get("page_no") * 5 )
    my_query = get_latest_query()
    # get_latest_query selects "*", so get_total_records returns a single
    # count(*) row here.
    total_records = get_total_records(my_query)
    response_data = frappe.db.sql(my_query + limit_query, as_dict=True)
    get_request_download_status(response_data)
    total_pages = math.ceil(total_records[0].get("count",0)/5.0)
    return response_data, total_pages
def get_latest_query():
    """Query for published IP files uploaded within the last 15 days,
    newest first. Selects "*" so get_total_records() can count it."""
    return """ select * from `tabIP File` ipf
        where ipf.published_flag = 1
        and DATEDIFF(CURDATE(), ipf.uploaded_date) < 15 order by uploaded_date desc """
@frappe.whitelist()
def get_latest_upload_count():
    """Badge counts for the dashboard: recent uploads, the session user's
    pending requests, and the session user's allowed downloads."""
    return {
        "latest_records": get_total_records(get_latest_query())[0]["count"],
        "pending_requests": len(frappe.db.sql(get_pending_request_query(), as_dict=1)),
        "total_downloads": len(frappe.db.sql(get_downloads_query(), as_dict=1)),
    }
def get_search_conditions(search_filters):
    """Build an SQL fragment matching the free-text filter against the
    searchable IP File columns.

    The original concatenated "... or" with no separating space ("oripf...")
    and left a dangling trailing "or", producing invalid SQL; the clauses are
    now joined with " or " and parenthesised.

    :param search_filters: dict with a "filters" text value
    :return: string of the form "and (col like '%txt%' or ...)"

    NOTE(review): the filter text is still embedded verbatim — callers should
    escape it (e.g. frappe.db.escape) before use; TODO confirm call sites.
    """
    search_mapper = {"project_id": "ipf.project", "skill_matrix_18": "ipf.skill_matrix_18",
                     "skill_matrix_120": "ipf.skill_matrix_120",
                     "security_level": "ipf.security_level", "file_name": "ipf.name"}
    term = search_filters.get("filters")
    clauses = ["{0} like '%{1}%'".format(column, term) for column in search_mapper.values()]
    return "and (" + " or ".join(clauses) + ")"
@frappe.whitelist()
def create_ip_file_feedback(request_data):
    """Create or update the session user's IP Review (rating + comment) for a file.

    request_data is a JSON string with file_name, ratings, comments. One
    review per (user, file): an existing review is updated in place.
    The original duplicated the field assignments in both branches; they are
    now set once after fetching-or-creating the document.
    """
    request_data = json.loads(request_data)
    review_key = {"user_id": frappe.session.user, "file_name": request_data.get("file_name")}
    if frappe.db.get_value("IP Review", review_key, "name"):
        ipr = frappe.get_doc("IP Review", review_key)
    else:
        ipr = frappe.new_doc("IP Review")
        ipr.user_id = frappe.session.user
        ipr.file_name = request_data.get("file_name")
    ipr.ratings = flt(request_data.get("ratings"))
    ipr.comments = request_data.get("comments")
    ipr.save(ignore_permissions=True)
@frappe.whitelist()
def create_ip_download_request(ip_file_name, customer, approver):
    """Raise an IP Download Approval request for the session user.

    Expires any previously allowed download of the same file first, then
    creates a new Pending request (unless one is already pending) and
    notifies the approver(s) via ToDo + email.
    Always returns "success".
    """
    file_data = frappe.db.get_value("IP File", {"name":ip_file_name}, '*', as_dict=True)
    check_for_existing_download_approval_form(file_data)
    # Only one Pending request per (file, user) at a time.
    if not frappe.db.get_value("IP Download Approval", {"file_name":file_data.get("file_name"),
            "ip_file_requester":frappe.session.user, "approval_status":"Pending"}, "name"):
        ipa = frappe.new_doc("IP Download Approval")
        ipa.file_name = file_data.get("file_name")
        ipa.file_description = file_data.get("file_description")
        ipa.file_type = file_data.get("file_type")
        ipa.customer = customer
        ipa.industry = file_data.get("industry")
        ipa.department = file_data.get("department")
        ipa.skill_matrix_18 = file_data.get("skill_matrix_18")
        ipa.skill_matrix_120 = file_data.get("skill_matrix_120")
        ipa.file_path = file_data.get("file_path")
        ipa.approver = approver or ""
        ipa.employee_name = frappe.db.get_value("Employee", {"name":approver}, 'employee_name') if approver else ""
        ipa.ip_file_requester = frappe.session.user
        # The file's security level drives how many approvals are needed.
        ipa.level_of_approval = file_data.get("security_level")
        ipa.approval_status = "Pending"
        ipa.save(ignore_permissions=True)
        prepare_for_todo_creation(file_data, approver,customer)
    return "success"
def check_for_existing_download_approval_form(file_data):
    """Expire the session user's currently allowed downloads of this file
    (clears their validity date) before a fresh request is raised."""
    idp_list = frappe.db.get_values("IP Download Approval", {"file_name":file_data.get("file_name"),
        "ip_file_requester":frappe.session.user, "approval_status":"Download Allowed"}, 'name', as_dict=1)
    idp_name = ','.join('"{0}"'.format(idp.get("name")) for idp in idp_list if idp)
    if idp_name:
        # Names come from the database, not from user input, so direct
        # interpolation is acceptable here.
        query = """ update `tabIP Download Approval`
            set validity_end_date = null , approval_status = 'Expired',
            modified = '{0}'
            where name in ({1}) """.format(now(), idp_name)
        frappe.db.sql(query)
def prepare_for_todo_creation(file_data, emp_id, customer):
    """Decide who must approve a download request and create their ToDos.

    The chosen approver's user account is notified; Central Delivery is
    added for 2-Level files or when no approver account could be resolved.
    """
    user_id = frappe.db.get_value("Employee", emp_id, 'user_id') if emp_id else ""
    recipients = [user_id] if user_id else []
    if file_data.get("security_level") == "2-Level" or not user_id:
        recipients.extend(get_central_delivery())
    make_todo(recipients, file_data, customer)
def get_user_with_el_roles(project):
    """Return the distinct user ids holding the "EL" role on *project*
    (via the operational/project matrix). Parameterized query."""
    result = frappe.db.sql(""" select distinct(emp.user_id)
        from `tabOperation And Project Details` opd
        join `tabOperation And Project Commercial` opc
        on opd.parent = opc.name
        join `tabEmployee` emp
        on emp.name = opd.user_name
        where opd.role in ("EL")
        and opc.project_commercial = %(project)s
        """, {'project':project}, as_list=1)
    result = [record[0] for record in result if record]
    return result
def make_todo(users, file_data,customer):
    """Create one ToDo per approver for a download request, then send a
    single notification email to all of them."""
    template = "/templates/ip_library_templates/download_request_notification.html"
    subject = "IP File Download Request"
    for usr in users:
        todo = frappe.new_doc("ToDo")
        todo.description = "Approve the download request of user {0} for file {1}".format(frappe.session.user, file_data.get("file_name"))
        todo.reference_type = "Customer"
        todo.reference_name = file_data.get("customer")
        todo.role = "EL"
        todo.owner = usr
        todo.status = "Open"
        todo.priority = "High"
        todo.save(ignore_permissions=True)
    # One email to everyone (not per-ToDo).
    # NOTE(review): the customer argument is unused; the template receives
    # file_data's customer instead — confirm that is intended.
    args = {"user_name":frappe.session.user, "file_name":file_data.get("file_name"),"customer":file_data.get("customer")}
    frappe.sendmail(recipients=users, sender=None, subject=subject,
        message=frappe.get_template(template).render(args))
@frappe.whitelist()
def create_ip_download_log(file_name, download_form, validity):
    """Record that the session user downloaded *file_name*, starting the
    download-validity countdown on the approval form if not already set."""
    start_download_validity_count_down(download_form, validity)
    log_entry = frappe.new_doc("IP Download Log")
    log_entry.user_id = frappe.session.user
    log_entry.full_name = get_full_name_of_user()
    log_entry.file_name = file_name
    log_entry.downloaded_datetime = now()
    log_entry.save(ignore_permissions=True)
def get_full_name_of_user():
    """Return "First Last" for the session user, or just the first name
    when no last name is stored."""
    first_name, last_name = frappe.db.get_value(
        "User", {"name": frappe.session.user}, ["first_name", "last_name"])
    if last_name:
        return first_name + " " + last_name
    return first_name
@frappe.whitelist()
def get_my_download(search_filters):
    """Paginated list of the session user's currently allowed downloads.

    search_filters is a JSON string with "page_no".
    Returns (rows, total_pages).
    """
    return prepare_response_data(json.loads(search_filters), get_downloads_query())
def get_downloads_query():
    """Query joining the session user's "Download Allowed" approvals with
    their IP File rows, newest request first."""
    return """ select ipf.*, ipd.name as download_form, ipd.file_name, ipd.approval_status, ipd.validity_end_date as download_validity from `tabIP Download Approval` ipd
        join `tabIP File` ipf
        on ipf.name = ipd.file_name
        where ipd.approval_status="Download Allowed" and ipd.ip_file_requester='{0}'
        order by ipd.creation desc """.format(frappe.session.user)
def get_request_status(response_data):
    """Copy the approval fields already present in each joined row into the
    names the dashboard UI expects, then attach community stats, in place."""
    field_map = (
        ("download_validity_end_date", "download_validity"),
        ("approval_status", "approval_status"),
        ("download_form", "download_form"),
    )
    for row in response_data:
        for target, source in field_map:
            row[target] = row.get(source, "")
        get_comments_reviews(row)
@frappe.whitelist()
def get_my_pending_requests(search_filters):
    """Paginated list of the session user's download requests still awaiting
    a final decision. search_filters is a JSON string with "page_no".
    Returns (rows, total_pages)."""
    return prepare_response_data(json.loads(search_filters), get_pending_request_query())
def prepare_response_data(search_filters, query):
    """Run *query* paginated (5/page per search_filters["page_no"]),
    annotate the rows, and return (rows, total_pages)."""
    response_data, total_pages = [], 0
    limit_query = "LIMIT 5 OFFSET {0}".format(search_filters.get("page_no") * 5 )
    # Runs the query twice: once unpaginated just to count.
    total_records = len(frappe.db.sql(query, as_dict=1))
    response_data = frappe.db.sql(query + limit_query, as_dict=True)
    get_request_status(response_data)
    total_pages = math.ceil(total_records/5.0)
    return response_data, total_pages
def get_pending_request_query():
    """Query joining the session user's in-flight approvals (pending or
    decided by the first approver) with their IP File rows, newest first."""
    return """ select ipf.*, ipd.name as download_form, ipd.file_name, ipd.approval_status, ipd.validity_end_date as download_validity from `tabIP Download Approval` ipd
        join `tabIP File` ipf
        on ipf.name = ipd.file_name
        where ipd.approval_status in ("Pending", "Approved by Approver", "Rejected by Approver") and ipd.ip_file_requester='{0}'
        order by ipd.creation desc """.format(frappe.session.user)
@frappe.whitelist()
def get_customers_of_user(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: projects on which the session user is staffed,
    narrowed by the typed text.

    The typed text was previously interpolated into the SQL (injection risk);
    it is now bound as a parameter, matching get_customer_list in this module.
    Signature follows frappe's standard link-query contract.
    """
    return frappe.db.sql(""" select distinct(opc.project_commercial) from
        `tabOperation And Project Commercial` opc left join
        `tabOperation And Project Details` opd
        on opc.name = opd.parent
        where opd.email_id = %(user)s
        and opc.customer like %(txt)s limit 20""",
        {"user": frappe.session.user, "txt": "%%%s%%" % txt}, as_list=1)
def start_download_validity_count_down(ip_download_approver, validity_end_date):
    """On first download, stamp the approval form with a validity end date
    two days out; no-op if a date is already set or no form is given."""
    if not validity_end_date and ip_download_approver:
        validity_end_time = add_days(now(), 2)
        frappe.db.sql("update `tabIP Download Approval` set validity_end_date=%s where name = %s",
            (validity_end_time, ip_download_approver))
def get_comments_reviews(response):
    """Attach community stats (download count, average rating, comments),
    per-user download flag, panel CSS class, and feedback-form state to a
    single file row, in place."""
    response["download_count"] = frappe.get_list("IP Download Log", fields=["count(*)"], filters={ "file_name":response.get("file_name") }, as_list=True)[0][0]
    response["avg_ratings"] = frappe.get_list("IP Review", fields=["ifnull(avg(ratings),0.0)"], filters={ "file_name":response.get("file_name") }, as_list=True)[0][0]
    response["comments"] = frappe.db.sql(""" select ipr.user_id, ipr.comments, ipr.ratings, concat(usr.first_name , ' ',usr.last_name) as full_name
        from `tabIP Review` ipr left join `tabUser` usr
        on usr.name = ipr.user_id
        where file_name = %s""",(response.get("file_name")),as_dict=1)
    # Truthy when the session user has already downloaded this file.
    response["download_flag"] = frappe.db.get_value("IP Download Log", {"file_name":response.get("file_name"), "user_id":frappe.session.user}, "name")
    # Archived files get the alternate panel styling.
    response["panel_class"] = "panel panel-primary ip-file-panel" if response.get("published_flag") else "panel panel-archive ip-file-panel"
    get_feed_back_questionnaire_form(response)
def get_feed_back_questionnaire_form(response):
    """Attach the session user's latest completed/expired download request
    and any existing feedback form name to a file row, in place.

    Central Delivery users skip the download-request lookup entirely.
    """
    cond_dict = {"user":frappe.session.user, "ip_file":response.get("file_name")}
    if "Central Delivery" not in frappe.get_roles():
        feedback = """ select ipd.name
            from `tabIP Download Approval` ipd
            where ipd.file_name = '{0}'
            and ipd.ip_file_requester = '{1}'
            and ipd.approval_status in ('Download Allowed', 'Expired')
            order by ipd.creation desc limit 1 """.format(frappe.db.escape(response.get("file_name")), frappe.session.user)
        fdbk_response = frappe.db.sql(feedback, as_dict=1)
        if fdbk_response:
            response["download_feedback_form"] = fdbk_response[0].get("name", "")
            # Narrow the feedback lookup to this specific download request.
            cond_dict.update({"ip_download_request":fdbk_response[0].get("name", "")})
    response["feedback_form"] = frappe.db.get_value("IP File Feedback", cond_dict, "name")
def get_customer_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: up to 10 customers whose name contains the typed
    text. Parameterized; signature follows frappe's link-query contract."""
    return frappe.db.sql(""" select name from `tabCustomer`
        where name like %(txt)s limit 10 """, {"txt":"%%%s%%" % txt }, as_list=1)
@frappe.whitelist()
def validate_user_is_el(customer):
    """Check whether the session user holds the "EL" role on an active
    operational matrix for *customer*.

    The employee and customer values were previously %-formatted into the
    SQL (injection risk via customer); they are now bound as parameters,
    matching get_user_with_el_roles in this module.

    :return: {"is_el": 1} or {"is_el": 0}
    """
    employee = frappe.db.get_value("Employee", {"user_id": frappe.session.user}, "name")
    response = frappe.db.sql(""" select distinct(opd.user_name), emp.employee_name
        from `tabOperation And Project Details` opd
        join `tabOperation And Project Commercial` opc
        on opd.parent = opc.name
        join `tabEmployee` emp
        on emp.name = opd.user_name
        where opd.role in ("EL")
        and opd.user_name = %(employee)s
        and opc.operational_matrix_status = "Active"
        and opc.customer = %(customer)s """,
        {"employee": employee, "customer": customer}, as_list=1)
    return {"is_el": 1} if len(response) else {"is_el": 0}
@frappe.whitelist()
def get_feedback_questionnaire():
    """Return the active questions attached to the "IP File Questionnaire"
    parent document."""
    return frappe.get_all(
        "IP Questionnaire",
        filters={"parent": "IP File Questionnaire", "status": 1},
        fields=["*"],
    )
@frappe.whitelist()
def create_feedback_questionnaire_form(answer_dict, download_request, ip_file):
    """Persist the session user's questionnaire answers for a file.

    answer_dict is a JSON string of question->answer child rows.
    Always returns "success".
    """
    answer_dict = json.loads(answer_dict)
    fdbk = frappe.get_doc({
        "doctype": "IP File Feedback",
        "user":frappe.session.user,
        "user_answers":answer_dict,
        "ip_file":ip_file,
        "ip_download_request":download_request or ""
    })
    fdbk.flags.ignore_permissions = True
    fdbk.insert()
    return "success"
shrikant9867/mycfo | mycfo/discussion_forum/doctype/discussion_topic/discussion_topic.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from datetime import datetime, timedelta
from frappe.model.document import Document
from mycfo.mycfo_utils import get_mycfo_users
from frappe.utils import get_url
class DiscussionTopic(Document):
    """Forum topic controller; emails all MyCFO users when a topic is posted."""

    def after_insert(self):
        # Notify every MyCFO user about the newly created topic.
        email_ids = get_mycfo_users()
        template = "/templates/discussion_forum_templates/topic_creation_notification.html"
        # Poster's display name ("First Last"; last name may be empty).
        owner = frappe.db.get_value("User", self.owner, ["concat(first_name, ' ', ifnull(last_name,'') )"])
        args = {"owner" :owner, "subject":self.title, "category":self.blog_category, "host_url":get_url()}
        frappe.sendmail(recipients=email_ids, sender=None, subject="New Discussion Topic Posted",
            message=frappe.get_template(template).render(args))
def mail_topic_list():
    """Scheduler entry point: mail the discussion-topic digest."""
    send_mail("/templates/discussion_topics.html")
def send_mail(template):
    """Email all users a digest of the last day's topics and of topics with
    no comments yet, rendered from *template*."""
    subject = "Discussion Topic Notification"
    now = datetime.now()
    past = now - timedelta(days=1)
    # Topics created in the last 24h: .format(now, past) binds {0}=now
    # (upper bound) and {1}=past (lower bound).
    previous_day_topic = frappe.db.sql("""select t1.title,
        (select employee_name from `tabEmployee` where name = t1.post_by)
        as post_by
        from `tabDiscussion Topic`t1
        where t1.creation > '{1}'
        and t1.creation < '{0}' """
        .format(now,past),as_dict=1)
    # Topics with zero comments, regardless of age.
    unanswer_topic = frappe.db.sql("""select t1.title,
        (select employee_name from `tabEmployee` where name = t1.post_by)
        as post_by
        from `tabDiscussion Topic` t1
        left join `tabComment` com
        on t1.name = com.comment_docname
        and com.comment_type = "Comment"
        and com.comment_doctype = "Discussion Topic"
        group by t1.title
        having count(com.name) < 1
        """,as_dict=1)
    # NOTE(review): this selects every tabUser email, including disabled
    # accounts and system users — confirm that is intended.
    email = frappe.db.sql("""select email from `tabUser`""",as_list=1)
    list_email = [e[0] for e in email]
    args = {"p_day_topic":previous_day_topic if previous_day_topic else "","unans":unanswer_topic if unanswer_topic else ""}
    frappe.sendmail(recipients= list_email, sender=None, subject=subject,
        message=frappe.get_template(template).render(args))
shrikant9867/mycfo | mycfo/kpi/doctype/skill_mapping/skill_mapping.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015, indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt, getdate, nowdate, now_datetime
from frappe import msgprint, _
from frappe.utils import flt, getdate, nowdate
from datetime import date
import json
class SkillMapping(Document):
    """Per-document skill self-assessment grid."""

    def update_skill_mapping_details(self, args):
        """Replace the skill_mapping_details child table from args['data'].

        Rows without an 'industry' (sub-skill) value are skipped. Saves the
        document and confirms to the user.
        """
        self.set('skill_mapping_details', [])
        for data in args.get('data'):
            if data.get('industry')!=None:
                nl = self.append('skill_mapping_details',{})
                nl.skill = data.get('master_industry')
                nl.sub_skill = data.get('industry')
                nl.beginner = data.get('beginner')
                # "imtermediatory" is the (misspelt) fieldname in the doctype.
                nl.imtermediatory = data.get('imtermediatory')
                nl.expert = data.get('expert')
                nl.none_field = data.get('none_field')
        self.save()
        frappe.msgprint("Skill Mapping Details Saved")

    def validate(self):
        pass
@frappe.whitelist()
def get_sample_data():
    """Return all Skill Matrix 120 rows as (skill_matrix_18, sub_skill, 1)
    tuples, sorted, for seeding the mapping grid."""
    rows = frappe.db.sql(
        """select skill_matrix_18,sub_skill,1 from `tabSkill Matrix 120` order by skill_matrix_18 asc, sub_skill asc""",
        as_list=1)
    return {"get_sample_data": rows}
@frappe.whitelist()
def get_sample_data_from_table(doc_name):
    """Return the saved grid rows of one Skill Mapping document.

    doc_name was previously %-formatted straight into the SQL (injection
    risk from a whitelisted endpoint); it is now bound as a parameter.
    """
    return {
        "get_sample_data": frappe.db.sql(
            """select skill,sub_skill,none_field,beginner,imtermediatory,expert
               from `tabSkill Mapping Details` where parent=%(parent)s
               order by skill asc, sub_skill asc""",
            {"parent": doc_name}, as_list=1)
    }
|
shrikant9867/mycfo | mycfo/config/desktop.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
from frappe import _
def get_data():
    """Desk icon definitions, chosen by the logged-in user's roles.

    Prospects see only Skill Mapping and the Resource Pool; everyone else
    gets the full module set.
    """
    user_roles = frappe.get_roles(frappe.session.user)
    if 'Prospect' in user_roles:
        return {
            "Skill Mapping": {
                "color": "grey",
                "icon": "icon-th",
                "type": "doctype",
                "label": "Skill Mapping",
                "link": "List/Skill Mapping",
                "description": _("Skill Mapping Details"),
            },
            "Resource Pool": {
                "color": "blue",
                "icon": "icon-list",
                "type": "page",
                "label": _("Resource Pool"),
                "link": "resourcepool"
            }
        }
    return {
        "mycfo": {
            "color": "grey",
            "icon": "icon-th",
            "type": "module",
            "label": _("Customer Details")
        },
        "Checklist": {
            "color": "blue",
            "icon": "icon-list",
            "type": "module",
            "label": _("Checklist")
        },
        "IP Library": {
            "color": "#8e44ad",
            "icon": "octicon octicon-database",
            "type": "page",
            "label": _("IP Library"),
            "link": "ip-file-dashboard"
        },
        "Trainings": {
            "color": "#4aa3df",
            "icon": "octicon octicon-device-camera-video",
            "type": "page",
            "label": _("Trainings"),
            "link": "training-dashboard"
        },
        "Discussion Forum": {
            "color": "#8e44ad",
            "icon": "octicon octicon-organization",
            "type": "page",
            "label": _("Discussion Forum"),
            "link": "discussion-forum"
        }
    }
|
shrikant9867/mycfo | mycfo/kpi/report/employee_skill_mapping/employee_skill_mapping.py | # Copyright (c) 2013, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Script-report entry point: return (columns, data)."""
    return get_columns(), get_data(filters)
def get_data(filters):
    """Aggregate Skill Mapping answers per Skill Matrix 120 entry.

    Returns rows of (skill_matrix_18, skill_matrix_120, none, beginner,
    intermediatory, expert) summed over all employees' mappings.
    NOTE(review): *filters* is currently unused — confirm the report has no
    filter controls.
    """
    result = []
    result = frappe.db.sql(""" select skmt.skill_matrix_18, skmt.name as skill_matrix_120,
        sum(smd.none_field) as none_field, sum(smd.beginner) as beginner, sum(smd.imtermediatory) as intermediatory, sum(smd.expert) as expert
        from `tabSkill Matrix 120` skmt
        join `tabSkill Mapping Details` smd
        on skmt.name = smd.sub_skill
        and smd.parenttype = "Skill Mapping"
        group by skmt.name order by skmt.skill_matrix_18 """, as_list=1)
    return result
def get_columns():
    """Report column definitions in frappe's "Label:Type/Options:Width"
    string format."""
    return [
        _("Skill Matrix 18") + ":Link/:200",
        _("Skill Matrix 120") + ":Link/:300",
        _("None") + ":Int:100",
        _("Beginner") + ":Int:100",
        _("Intermediatory") + ":Int:100",
        _("Expert") + ":Int:100"
    ]
shrikant9867/mycfo | mycfo/mycfo/doctype/ffww/ffww.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
#from erpnext.utilities.address_and_contact import load_address_and_contact
import json
class FFWW(Document):
    """Friends-Family-Well-wishers record linking a Customer to a Contact,
    with designations and extra contact channels."""

    def validate(self):
        self.validate_designation()
        self.validate_ffww()
        self.validate_duplication_emailid()
        # self.validate_dupicate_designation()
        self.set_fww_name()
        if self.contact:
            self.update_contact_status()

    def on_update(self):
        # if any extra row is added in contact details child table same data will be reflected in contact doctype against same contact.
        if self.get('more_contact_details'):
            for d in self.get('more_contact_details'):
                # 'New FFWW 1' is the temporary name of an unsaved document.
                if d.ffww == 'New FFWW 1' or self.name:
                    if d.contact_name:
                        # Row already synced: just re-point its FFWW link.
                        contact = frappe.get_doc("Contact Details", d.contact_name)
                        contact.ffww = self.name
                        contact.save()
                    else:
                        main_contact = frappe.get_doc('Contact',self.contact)
                        # Only append if no existing child row shares this
                        # email. NOTE(review): relies on Python 2 filter()
                        # returning a list — under Python 3 this truth test
                        # would always be False; confirm runtime version.
                        if not filter(lambda co:co.email_id == d.email_id, main_contact.contacts):
                            ch = main_contact.append('contacts', {})
                            ch.contact_type = d.contact_type
                            ch.country_name = d.country_name
                            ch.country_code = d.country_code
                            ch.mobile_no = d.mobile_no
                            ch.email_id = d.email_id
                            ch.landline = d.landline
                            ch.ffww = self.name
                            main_contact.save()
                            # Back-link the new Contact child row name onto
                            # the FFWW child row.
                            if ch.name:
                                ffww_contact = frappe.get_doc("FFWW Contact Details", d.name)
                                ffww_contact.contact_name = ch.name
                                ffww_contact.save()
                    # Fix up rows created before this document got its name.
                    if d.name and d.ffww == 'New FFWW 1':
                        ffww_contact = frappe.get_doc("FFWW Contact Details", d.name)
                        ffww_contact.ffww = self.name
                        ffww_contact.save()

    def validate_designation(self):
        # At least one designation row is mandatory.
        if not self.get('designation'):
            frappe.msgprint("At least one designation must be specified in designation child table",raise_exception=1)

    def validate_ffww(self):
        # A (customer, contact) pair may only exist in one FFWW record.
        if frappe.db.sql("""select name from `tabFFWW` where customer='%s' and contact='%s' and name!='%s'"""%(self.customer,self.contact,self.name)):
            name = frappe.db.sql("""select name from `tabFFWW` where customer='%s' and contact='%s'
                and name!='%s'"""%(self.customer,self.contact,self.name),as_list=1)
            frappe.msgprint("Customer %s already linked with contact %s in record %s"%(self.customer,self.contact,name[0][0]),raise_exception=1)

    def validate_dupicate_designation(self):
        # Currently disabled (see validate); rejects repeated designations.
        designation_list = []
        if self.get('designation'):
            for d in self.get('designation'):
                if d.designation not in designation_list:
                    designation_list.append(d.designation)
                else:
                    frappe.msgprint("Duplicate designation name is not allowed",raise_exception=1)
                    break

    def validate_duplication_emailid(self):
        # Each extra contact row must carry a distinct email id.
        email_list = []
        if self.get('more_contact_details'):
            for d in self.get('more_contact_details'):
                if d.email_id not in email_list:
                    email_list.append(d.email_id)
                else:
                    frappe.msgprint("Duplicate Email ID is not allowed",raise_exception=1)
                    break

    def update_contact_status(self):
        # Linking a contact to an FFWW marks that contact Active.
        contact = frappe.get_doc('Contact',self.contact)
        contact.status = 'Active'
        contact.save()

    def set_fww_name(self):
        # Mirror the document name into the ffww_record field.
        self.ffww_record = self.name

    def clear_child_table(self):
        self.set('more_contact_details', [])
# Create address............................................
@frappe.whitelist()
def make_address(source_name, target_doc=None):
    """Whitelisted wrapper: map an FFWW document to a new Address."""
    return _make_address(source_name, target_doc)
def _make_address(source_name, target_doc=None, ignore_permissions=False):
    """Map FFWW *source_name* to an Address document via get_mapped_doc,
    carrying over only the contact link."""
    def set_missing_values(source, target):
        # No extra defaults needed beyond the field map.
        pass
    doclist = get_mapped_doc("FFWW", source_name,
        {"FFWW": {
            "doctype": "Address",
            "field_map": {
                "contact": "contact"
                # "company_name": "customer_name",
                # "contact_no": "phone_1",
                # "fax": "fax_1"
            }
        }}, target_doc, set_missing_values, ignore_permissions=ignore_permissions)
    return doclist
@frappe.whitelist()
def make_contact(contact=None):
    """Return the Contact Details child rows of *contact* as tuples of
    (contact_type, email_id, mobile_no, country_code, ffww, name,
    country_name).

    The original branched on len() but returned the same value on both
    branches, so the dead conditional has been removed.
    """
    return frappe.db.get_values(
        'Contact Details', {'parent': contact},
        ['contact_type', 'email_id', 'mobile_no', 'country_code', 'ffww', 'name', 'country_name'])
def get_active_customers(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: customers whose name or id contains the typed text.
    Parameterized; signature follows frappe's link-query contract."""
    return frappe.db.sql("""select name, customer_name
        from `tabCustomer` where customer_name like %(txt)s
        or name like %(txt)s""",{"txt":"%%%s%%" % txt}, as_list=1)
def get_contact_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field query: contacts matching the typed text by name, email or
    mobile. Parameterized; signature follows frappe's link-query contract."""
    return frappe.db.sql(""" select name, email, mobile
        from `tabContact`
        where name like %(txt)s or email like %(txt)s
        or mobile like %(txt)s """,{"txt": "%%%s%%" % txt}, as_list=1)
shrikant9867/mycfo | mycfo/trainings/doctype/training/training.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe.model.document import Document
import zipfile
import json
import base64
import os
from mycfo.mycfo_utils import get_central_delivery
class Training(Document):
    """Training upload: zips the submitted files, creates an approval form
    for Central Delivery, and links the chosen assessment.

    NOTE: this module uses Python 2 syntax (``except Exception,e``).
    """

    def validate(self):
        self.validate_for_negative_completion_value()
        self.validity_for_cd_users()
        self.validate_for_training_data()
        self.store_training_data()

    def validate_for_negative_completion_value(self):
        if self.validity_for_completion <= 0:
            frappe.throw("Validity of training completion must be grater than 0 hours.")

    def validity_for_cd_users(self):
        # Approval is routed to Central Delivery; refuse uploads when nobody
        # could approve them.
        if not len(get_central_delivery()):
            frappe.throw("There are no Central Delivery users in system.Please upload training later.")

    def validate_for_training_data(self):
        # New documents must carry file payloads.
        if cint(self.get("__islocal")) and not self.training_file_data:
            frappe.throw("Please Upload the Training documents.")

    def store_training_data(self):
        """Decode the uploaded base64 payloads into a single zip under
        public/files/trainings and kick off the approval workflow."""
        if self.training_file_data:
            self.create_training_directory()
            training_files = json.loads(self.training_file_data)
            try:
                zip_path = frappe.get_site_path("public", "files", "trainings", self.training_name + '.zip')
                tr_zip = zipfile.ZipFile(zip_path, 'w')
                for training in training_files:
                    # Payload looks like a data URI: "data:...;base64,<data>".
                    # NOTE(review): assumes exactly one comma prefix — confirm
                    # the client always sends data URIs.
                    base64_data = training.get("file_data").encode("utf8")
                    base64_data = base64_data.split(',')[1]
                    base64_data = base64.b64decode(base64_data)
                    tr_zip.writestr(training.get("file_name"), base64_data)
                # Drop the (large) raw payload once archived.
                self.training_file_data = ""
                self.training_path = '/'.join(["files", "trainings", self.training_name + '.zip'])
                self.init_for_approver_form()
            except Exception,e:
                frappe.throw("Error Occured while storing training files")
            finally:
                # NOTE(review): if ZipFile() itself failed, tr_zip is unbound
                # here and this raises NameError — confirm/guard upstream.
                tr_zip.close()

    def create_training_directory(self):
        # Ensure public/files/trainings exists before writing the zip.
        if not os.path.exists(frappe.get_site_path("public", "files", "trainings")):
            os.mkdir(frappe.get_site_path("public", "files", "trainings"))

    def init_for_approver_form(self):
        self.make_training_approver_form()
        self.send_cd_notification()
        self.add_comment("Training Document {0} status changed to pending".format(self.training_name))

    def make_training_approver_form(self):
        # Mirror this training into a Training Approver doc and mark this
        # document Pending.
        tr_appr = frappe.new_doc("Training Approver")
        tr_appr.update(self.get_training_approver_data())
        tr_appr.save(ignore_permissions=True)
        self.training_status = "Pending"

    def get_training_approver_data(self):
        """Field values copied onto the new Training Approver document."""
        return {
            "training_name":self.training_name,
            "training_documents":self.training_documents,
            "training_author":frappe.session.user,
            "description":self.description,
            "document_type":self.document_type,
            "validity_for_completion":self.validity_for_completion,
            "training_status":"Open",
            "industry":self.industry,
            "skill_matrix_120":self.skill_matrix_120,
            "skill_matrix_18":self.skill_matrix_18,
            "evaluator":self.evaluator,
            "evaluator_name":self.evaluator_name,
            "training_path":self.training_path,
            "assessment":self.assessment
        }

    def send_cd_notification(self):
        # Email Central Delivery that a training awaits approval.
        subject = "Training Document Notification"
        template = "/templates/training_templates/cd_training_notification.html"
        args = {"training_name":self.training_name, "user_name":frappe.session.user }
        central_delivery = get_central_delivery()
        frappe.sendmail(recipients=central_delivery, sender=None, subject=subject,
            message=frappe.get_template(template).render(args))

    def after_insert(self):
        # Back-link the chosen Assessment to this training.
        ass_mnt = frappe.get_doc("Assessment", self.assessment)
        ass_mnt.training_name = self.name
        ass_mnt.save(ignore_permissions=1)
def get_permission_query_conditions(user):
    """List-view permission hook: non-Central-Delivery, non-Administrator
    users only see Training documents they own."""
    if not user:
        user = frappe.session.user
    if "Central Delivery" in frappe.get_roles() or frappe.session.user == "Administrator":
        return
    return """(`tabTraining`.owner = '{user}' )""".format(user=frappe.session.user)
shrikant9867/mycfo | mycfo/mycfo/doctype/ffww_details/ffww_details.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class FFWWDetails(Document):
    """Child-table controller; no custom behaviour."""
    pass
# Module-level accumulators shared across get_children() calls.
# NOTE(review): these persist (and grow) for the life of the worker process
# and are shared between requests — confirm this statefulness is intended.
modules = []
doctypes = []      # child category names discovered while expanding the tree
folders = []
single_type = []   # top-level (non-child) category names
parent_list = []
docnames = []
response = []
@frappe.whitelist()
def load_address_and_contact(record, key, key1, customer):
    """Collect the contact channels and addresses shown on the FFWW view.

    :param record: Contact name
    :param key: Contact filter fieldname
    :param key1: Address filter fieldname
    :param customer: Customer name used to resolve the FFWW record
    :return: dict with contact_list, addr_list and ffww (each '' when empty)

    Fix: addr_list was only bound inside ``if ffww:``, so a falsy ffww made
    the later ``len(addr_list)`` raise NameError; it now defaults to [].
    """
    ffww = frappe.db.get_value('FFWW', {'contact': record, 'customer': customer}, 'name')
    # NOTE(review): record/ffww are %-interpolated (injection risk if record
    # is attacker-controlled); parameterize when touching this next.
    contact_details = frappe.db.sql("""select contact_type,email_id,mobile_no,country_code from `tabContact Details` where
        parent='%s' and ffww='%s' and preffered=0""" % (record, ffww), as_dict=1)
    personal_emailid = []
    personal_mobileno = []
    personal_code = []
    official_emailid = []
    official_mobileno = []
    official_code = []
    # Split the channels into personal vs official buckets.
    if len(contact_details) > 0:
        for i in contact_details:
            if i['contact_type'] == 'Personal':
                personal_emailid.append(i['email_id'])
                personal_mobileno.append(i['mobile_no'])
                personal_code.append(i['country_code'])
            else:
                official_emailid.append(i['email_id'])
                official_mobileno.append(i['mobile_no'])
                official_code.append(i['country_code'])
    contact_list = frappe.get_all("Contact",
        fields="*", filters={key: record})
    from erpnext.utilities.doctype.address.address import get_address_display
    addr_list = []  # FIX: default so the check below cannot hit an unbound name
    if ffww:
        addr_list = [a.update({"display": get_address_display(a)}) \
            for a in frappe.get_all("Address",
                fields="*", filters={key1: record, 'ffww_record': ffww},
                order_by="is_primary_address desc, modified desc")]
    if len(contact_list) > 0:
        contact_list[0].update({'personal_emailid': personal_emailid})
        contact_list[0].update({'personal_mobileno': personal_mobileno})
        contact_list[0].update({'personal_code': personal_code})
        contact_list[0].update({'official_emailid': official_emailid})
        contact_list[0].update({'official_mobileno': official_mobileno})
        contact_list[0].update({'official_code': official_code})
        args = {'contact_list': contact_list}
    else:
        args = {'contact_list': ''}
    if len(addr_list) > 0:
        args['addr_list'] = addr_list
    else:
        args['addr_list'] = ''
    if ffww:
        args['ffww'] = ffww
    else:
        args['ffww'] = ''
    if args:
        return args
@frappe.whitelist()
def load_operational_data(doc, key):
    """Return the Operational Matrix records for a customer when invoked
    from the Operational Matrix Details view; returns None otherwise.

    Fix: this module never imported ``json``, so the original raised
    NameError at runtime; import it locally here.
    """
    import json  # not imported at this file's module level
    doc = json.loads(doc)
    if doc.get('doctype') == "Operational Matrix Details":
        operational_matrix_list = frappe.get_all("Operational Matrix",
            fields="*", filters={key: doc.get('customer')})
        args = {'operational_matrix_list': operational_matrix_list}
        return args
@frappe.whitelist()
def get_children():
    """Tree-view expansion handler for the Category/contact hierarchy.

    Reads parent + customer from the request form dict and returns the next
    tree level: top-level categories, then per-category contacts and child
    categories. Expandable nodes also get a contact count.
    NOTE(review): request values are %-interpolated into SQL (injection
    risk), and the module-level single_type/doctypes lists persist across
    requests — both worth revisiting.
    """
    args = frappe.local.form_dict
    response = []
    final_response = []
    docn = {}
    if args.get('parent') == 'Category':
        # Root expansion: list the top-level (non-child) categories.
        single_types = frappe.db.sql("""Select distinct name from `tabCategory` where is_child=0""",as_dict=1)
        [response.append({"value":d["name"],"expandable":1,"type":"Parent"}) for d in single_types]
        [single_type.append(d["name"]) for d in single_types]
    elif args.get('parent') in single_type:
        # Top-level category: list its contacts and its child categories.
        child_list = frappe.db.sql("""select distinct contact from `tabFFWW` where customer = '%s' and name in (select parent from `tabFFWW Designation` where designation='%s')"""%(args['customer'],args.get('parent')),as_dict=1)
        child_name = frappe.db.sql("""select name from `tabCategory` where is_child=1 and parent_category='%s'"""%args.get('parent'),as_dict=1)
        [response.append({"value":d["contact"],"expandable":0,"type":"contact"}) for d in child_list]
        [response.append({"value":i["name"],"expandable":1,"type":"child"}) for i in child_name]
        [doctypes.append(d["name"]) for d in child_name]
    elif args.get('parent') in doctypes:
        # Child category: list its contacts only.
        doctypes_list = frappe.db.sql("""select distinct contact from `tabFFWW` where customer = '%s' and name in (select parent from `tabFFWW Designation` where designation='%s')"""%(args['customer'],args.get('parent')),as_dict=1)
        [response.append({"value":d["contact"],"expandable":0,"type":"contact"}) for d in doctypes_list]
    calculate_count_against_category(response, args)
    return response
def calculate_count_against_category(response, args):
    """Annotate each expandable tree node in *response* with its contact count.

    Mutates the node dicts in place; leaf (non-expandable) nodes are skipped.
    """
    dispatch = {
        "Parent": calculate_contact_against_parent,
        "child": calculate_contact_against_child,
    }
    for record in response:
        if not record.get("expandable"):
            continue
        rows = dispatch[record.get("type")](record, args)
        record["count"] = rows[0].get("contact_count", 0) if rows else 0
def calculate_contact_against_parent(record, args):
    """Count distinct contacts under a top-level category node.

    Includes contacts whose designation is the node itself or any of its
    child categories. Returns the frappe.db.sql result (list of dicts with
    'contact_count').
    """
    # SECURITY FIX: parameterised query instead of %-interpolating the
    # user-controlled customer/value into the SQL string.
    return frappe.db.sql(
        """select count(distinct contact) as contact_count from `tabFFWW`
           where customer = %s and name in
           (select parent from `tabFFWW Designation` where designation in
            ( (select name from `tabCategory`
               where is_child=1 and parent_category = %s) , %s)
           )""",
        (args['customer'], record.get('value'), record.get('value')),
        as_dict=1)
def calculate_contact_against_child(record, args):
    """Count distinct contacts whose designation equals this child node.

    Returns the frappe.db.sql result (list of dicts with 'contact_count').
    """
    # SECURITY FIX: parameterised query instead of %-interpolating
    # user-controlled values into the SQL string.
    return frappe.db.sql(
        """select count(distinct contact) as contact_count from `tabFFWW`
           where customer = %s and
           name in (select parent from `tabFFWW Designation`
                    where designation = %s)""",
        (args['customer'], record.get('value')),
        as_dict=1)
MajorArkwolf/POTMBot | main.py | <reponame>MajorArkwolf/POTMBot
import discord
import asyncio
import os
from os.path import join, dirname
from dotenv import load_dotenv
# Discord snowflake IDs — placeholders (0000) until the bot is configured.
guildID = 0000 # Not set, the guild ID of the server
execRoom = 0000 # Not set, room to post for admin selection
AnnRoom = 0000 # Not set, room to post for public
class MyClient(discord.Client):
    """Play-Of-The-Month (POTM) Discord bot client.

    NOTE(review): the `command` module used below is never imported in this
    file — confirm where it is meant to come from. Original source arrived
    with indentation stripped; loop nesting in TaskChecker was reconstructed
    from statement order — confirm against upstream history.
    """

    # Load environment variables from the .env file next to this script
    # (evaluated once at class-definition time).
    dotenv_path = join(dirname(__file__), '.env')
    load_dotenv(dotenv_path)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # create the background task and run it in the background
        self.bg_task = self.loop.create_task(self.TaskChecker())

    async def TaskChecker(self):
        """Background loop: every 10s, auto-end pending image submissions."""
        await self.wait_until_ready()
        i = 0
        while not self.is_closed():
            # BUG FIX: the original assigned the local name `client` *after*
            # using it in the loop, raising UnboundLocalError on the first
            # iteration; resolve the guild before the loop instead.
            guild = self.get_guild(guildID)
            for file in os.listdir("./images/"):
                await command.AutoEnd(file, guild)
                i = i + 1
                if i == 5:  # process at most 5 files (original: `break;`)
                    break
            await asyncio.sleep(10)

    async def SubmitPlay(self, content):
        """Record a user's play submission (placeholder).

        TODO: check submission count; override an existing submission.
        """
        print("submitting play")

    async def RemovePlay(self, content):
        """Remove a user's submission if present and report it (placeholder)."""
        print("remove play")

    async def MoveStateOn(self):
        """Advance the POTM workflow to its next state (placeholder)."""
        print("advancing state")

    async def PostSelection(self):
        """Post the current plays to the exec channel (placeholder)."""
        print("post to exec")

    async def PostToPublic(self):
        """Post the current POTM to a public channel to vote (placeholder).

        BUG FIX: the original method body contained only a comment, which is
        a SyntaxError in Python.
        """
        print("post to public")

    async def on_message(self, message):
        """Dispatch `!!`-prefixed chat commands."""
        prefix = "!!"
        defaultRoom = "agenda"
        # we do not want the bot to reply to itself
        # (use self.user rather than the module-level `client` global)
        if message.author == self.user:
            return
        if message.content.startswith(prefix + "ping"):
            if command.VerifyRole(message.author.id, message.guild.id, 0):
                await message.author.send("Pong")
            return
        if message.content.startswith(prefix + "submit"):
            print("SUBMIT")
        if message.content.startswith(prefix + "help"):
            print("HELP MENU")
        if message.content.startswith(prefix + "rules"):
            print("rules")
        if message.content.startswith(prefix + "step"):
            print("MOVE STEP ON")

    async def on_ready(self):
        """Log the bot's identity once the gateway connection is ready."""
        print('Logged in as')
        print(self.user.name)
        print(self.user.id)
        print('------')
# Read the bot token from the environment (populated from .env above) and
# start the client; run() blocks until the bot shuts down.
TOKEN = os.environ.get("DISCORD_BOT_SECRET")
client = MyClient()
client.run(TOKEN)
|
pptemprdbms/Project | Project1/urls.py | <reponame>pptemprdbms/Project
"""Project1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from article.views import RSSFeed
# URL routes (Django 1.8 style).
# NOTE(review): `patterns()` and string view references are deprecated in
# Django 1.8 and removed in 1.10 — migrate to a plain list of url() entries
# with imported view callables when upgrading.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'my_blog.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Blog pages.
    url(r'^$', 'article.views.home', name = 'home'),
    url(r'^(?P<id>\d+)/$', 'article.views.detail', name='detail'),
    url(r'^archives/$', 'article.views.archives', name = 'archives'),
    url(r'^aboutme/$', 'article.views.about_me', name = 'about_me'),
    # NOTE(review): `article.views.search_tag` is not defined in the visible
    # views module — confirm it exists before relying on this route.
    url(r'^tag(?P<tag>\w+)/$', 'article.views.search_tag', name = 'search_tag'),
    url(r'^search/$','article.views.blog_search', name = 'search'),
    url(r'^feed/$', RSSFeed(), name = "RSS"), # RSS feed endpoint (class-based Feed)
)
pptemprdbms/Project | article/templatetags/custom_markdown.py | <gh_stars>0
import markdown
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
# Template-tag registry for this module (picked up via {% load %}).
register = template.Library()


@register.filter(is_safe=True) # register template filter; output is marked safe HTML
@stringfilter # coerce the filter input to str first
def custom_markdown(value):
    """Render *value* (Markdown source) to HTML with fenced code blocks and
    syntax highlighting, returning a string marked safe for templates.

    NOTE(review): `safe_mode` is deprecated since Python-Markdown 2.6 and
    removed in 3.0 — confirm the pinned markdown version before upgrading.
    """
    #extensions = ["nl2br", ]
    return mark_safe(markdown.markdown(value,
                                       extensions = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite'],
                                       safe_mode=True,
                                       enable_attributes=False))
# return mark_safe(markdown2.markdown(force_text(value),
# extras=["fenced-code-blocks", "cuddled-lists", "metadata", "tables", "spoiler"]))
|
pptemprdbms/Project | article/views.py | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from article.models import Article
from datetime import datetime
from django.http import Http404
from django.contrib.syndication.views import Feed
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def home(request):
    """Render the paginated blog index (2 posts per page).

    Falls back to page 1 on a non-integer page number and clamps to the
    last page when the requested page is out of range.
    """
    posts = Article.objects.all()  # all blog posts (lazy queryset)
    paginator = Paginator(posts, 2)  # show 2 blogs per page
    page = request.GET.get('page')
    try:
        post_list = paginator.page(page)
    except PageNotAnInteger:
        post_list = paginator.page(1)
    except EmptyPage:
        # BUG FIX: the original called paginator.paginator(...), which is an
        # AttributeError; clamp to the last valid page instead.
        post_list = paginator.page(paginator.num_pages)
    return render(request, 'home.html', {'post_list': post_list})
def detail(request, id):
    """Render a single article; raise Http404 when the id does not exist."""
    try:
        article = Article.objects.get(id=str(id))
    except Article.DoesNotExist:
        raise Http404
    return render(request, 'post.html', {'post': article})
def archives(request):
    """Render the archives page listing every article."""
    # IMPROVEMENT: Article.objects.all() is a lazy queryset and never raises
    # DoesNotExist, so the original try/except Http404 was dead code.
    post_list = Article.objects.all()
    return render(request, 'archives.html', {'post_list': post_list,
                                             'error': False})
def about_me(request) :
    """Render the static "about me" page."""
    return render(request, 'aboutme.html')
def blog_search(request):
    """Search article titles (case-insensitive) via the 's' GET parameter.

    Empty query renders the home page; otherwise renders the archives page
    with an 'error' flag when nothing matched. A request without 's' is
    redirected to the index.
    """
    # BUG FIX: `redirect` was used but never imported at module level,
    # raising NameError; import it locally here.
    from django.shortcuts import redirect

    if 's' in request.GET:
        s = request.GET['s']
        if not s:
            return render(request, 'home.html')
        post_list = Article.objects.filter(title__icontains=s)
        # Single render call; 'error' is True exactly when nothing matched.
        return render(request, 'archives.html',
                      {'post_list': post_list,
                       'error': len(post_list) == 0})
    return redirect('/')
class RSSFeed(Feed) :
    """RSS feed of blog posts, newest first (wired to /feed/ in urls.py)."""
    title = "RSS feed - article"
    link = "feeds/posts/"
    description = "RSS feed - blog posts"

    def items(self):
        # Newest posts first.
        return Article.objects.order_by('-date_time')

    def item_title(self, item):
        return item.title

    def item_pubdate(self, item):
        # NOTE(review): items are ordered by `date_time` above but the
        # pubdate reads `add_date` — confirm the Article model defines both.
        return item.add_date

    def item_description(self, item):
        return item.content
smk762/dex_stats_pymongo | dex_stats/utils/enable_coins.py | from batch_params import enable_calls, electrum_calls
from adex_calls import batch_request
# Fire batched RPC calls against the locally running AtomicDEX API node:
# first `enable` the configured coins, then open `electrum` connections.
batch_request("http://127.0.0.1:7783", "testuser", enable_calls)
batch_request("http://127.0.0.1:7783", "testuser", electrum_calls)
|
smk762/dex_stats_pymongo | dex_stats/Observer.py | import os
import sys
import time
import pickle
import logging
from Parser import Parser
from datetime import datetime, timedelta
from watchdog.events import (FileCreatedEvent,
FileModifiedEvent,
FileSystemEventHandler)
from watchdog.observers import Observer as Watchdog
from watchdog.events import PatternMatchingEventHandler
from watchdog.utils.dirsnapshot import (DirectorySnapshot,
DirectorySnapshotDiff,
EmptyDirectorySnapshot)
class Observer(Watchdog):
    """watchdog Observer that, on start, replays creation events for files
    matching *mask* found by a directory snapshot diff, and pickles a final
    snapshot to *snap_path* on stop.
    """

    def __init__(self,
                 mask=".json",
                 snap_path="/Users/dathbezumniy/kmd-qa/dex_stats-data/STATS/MAKER/",
                 *args, **kwargs):
        logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
        # BUG FIX: the original assigned the (module-global) `path` instead of
        # the `snap_path` parameter.
        self.snap_path = snap_path
        self.mask = mask
        self.path = ""
        Watchdog.__init__(self, *args, **kwargs)

    def start(self):
        """Replay missed file-creation events, then start the watchdog loop."""
        # Snapshot restore from a previous run is disabled (was commented
        # out upstream); always diff against an empty snapshot.
        pre_snap = EmptyDirectorySnapshot()
        for watcher, handler in self._handlers.items():  # dict {watcher: handler}
            self.path = watcher.path  # single watcher:handler pair expected
            # BUG FIX: snapshot the watcher's own path; the original used the
            # module-global `path`.
            curr_snap = DirectorySnapshot(self.path)
            diff = DirectorySnapshotDiff(pre_snap, curr_snap)
            for h in handler:
                for new_path in diff.files_created:
                    if self.mask in new_path:
                        h.dispatch(FileCreatedEvent(new_path))
        # BUG FIX: the original called Observer.start(self) here, recursing
        # into this very method forever; delegate to the parent class.
        Watchdog.start(self)

    def stop(self):
        """Pickle a final snapshot of the watched path, then stop watching."""
        snapshot = DirectorySnapshot(self.path)
        with open(self.snap_path, 'wb') as f:
            pickle.dump(snapshot, f, -1)
        Watchdog.stop(self)
class MyHandler(FileSystemEventHandler):
    """Feeds newly created swap JSON files to the module-level `parser`."""

    def __init__(self):
        # Start time; only used by the commented-out on_modified throttle.
        self.started_at = datetime.now()

    def on_created(self, event):
        logging.debug(f'Event type: {event.event_type} path : {event.src_path}')
        # `parser` is the module-level Parser instance created in __main__.
        parser.insert_into_swap_collection(event.src_path)

    '''
    def on_modified(self, event):
        if datetime.now() - self.started_at < timedelta(hours=1):
            return
        logging.debug(f'Event type: {event.event_type} path : {event.src_path}')
        parser.insert_into_swap_collection(event.src_path)
    '''
if __name__ == "__main__":
    # Watched swaps directory and snapshot backup location.
    path = "/Users/dathbezumniy/kmd-qa/dex_stats-data/STATS/MAKER/"
    snap_path = path + "backup.pickle"
    parser = Parser(swaps_folder_path=path)
    pattern = "*.json"

    observer = Observer(snap_path=snap_path,
                        mask=".json")
    observer.schedule(MyHandler(),
                      path,
                      recursive=False)

    logging.debug("Starting observer")
    observer.start()
    logging.debug("Observer started")
    try:
        # IMPROVEMENT: sleep instead of the original busy `while True: pass`
        # spin, which pinned a CPU core at 100% while idle.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    finally:
        parser.insert_into_parsed_files_collection()
        parser.insert_into_unique_pairs_collection()
        observer.join()
|
smk762/dex_stats_pymongo | dex_stats/utils/adex_calls.py | import json
import requests
def batch_request(node_ip, user_pass, requests_list):
    """POST a JSON-RPC batch to the node and return the raw Response.

    Note: `user_pass` is accepted for signature parity with the other call
    helpers but is not sent here (each request in the batch carries its own).
    """
    return requests.post(node_ip, json=requests_list)
def get_orderbook(node_ip, user_pass, base, rel):
    """Fetch the orderbook for the base/rel pair from the node.

    Returns:
        The parsed JSON response as a dict.
    """
    params = {
        # BUG FIX: the source contained the literal token `<PASSWORD>`
        # (a credentials-redaction artifact), which is a SyntaxError; send
        # the caller-supplied credential instead.
        'userpass': user_pass,
        'method': 'orderbook',
        'base': base,
        'rel': rel
    }
    r = requests.post(node_ip, json=params)
    return json.loads(r.text)
|
smk762/dex_stats_pymongo | qa/pytest_utils/utils.py | <reponame>smk762/dex_stats_pymongo
import jsonschema
def validate_template(json: dict, schema: dict) -> bool:
    """Return True when *json* conforms to *schema*; print and return False otherwise."""
    try:
        jsonschema.validate(instance=json, schema=schema)
        return True
    except jsonschema.exceptions.ValidationError as e:
        print("Validation failed :", e)
        return False
|
import itertools

# BUG FIX: the sibling module defines `adex_tickers`, not `tickers`, so the
# original `from adex_tickers import tickers` raised ImportError — and the
# loop below then referenced the never-imported name `adex_tickers` anyway.
from adex_tickers import adex_tickers
from adex_calls import get_orderbook

# 43 tickers -> 43 * 42 = 1806 ordered pairs.
possible_pairs = list(itertools.permutations(adex_tickers, 2))

for pair in possible_pairs:
    # BUG FIX: get_orderbook() already returns a parsed dict (json.loads),
    # so the original `.text` attribute access raised AttributeError.
    print(get_orderbook("http://127.0.0.1:7783", "testuser", pair[0], pair[1]))
|
# Coin tickers tracked on the AtomicDEX marketplace.
# 43 entries, i.e. 43 * 42 = 1806 ordered trading pairs.
adex_tickers = [
    "AWC", "AXE", "BAT", "BCH",
    "BET", "BOTS", "BTC", "BUSD",
    "CCL", "CHIPS", "CRYPTO", "DAI",
    "DASH", "DEX", "DGB", "DOGE",
    "ECA", "EMC2", "ETH", "FTC",
    "HUSH", "ILN", "JUMBLR", "KMD",
    "LABS", "LTC", "MCL", "MGW",
    "NAV", "OOT", "PANGEA", "PAX",
    "QTUM", "REVS", "RFOX", "RVN",
    "SUPERNET", "TUSD", "USDC", "VRSC",
    "XZC", "ZEC", "ZER",
]
smk762/dex_stats_pymongo | dex_stats/utils/swap_events.py | <gh_stars>1-10
# Event sequences emitted by the AtomicDEX swap state machine; used to
# classify a recorded swap as fully successful or failed.

# Happy-path event order for the maker side of a swap.
maker_swap_success_events = [
    "Started", "Negotiated", "TakerFeeValidated",
    "MakerPaymentSent", "TakerPaymentReceived",
    "TakerPaymentWaitConfirmStarted",
    "TakerPaymentValidatedAndConfirmed",
    "TakerPaymentSpent", "Finished",
]

# Happy-path event order for the taker side of a swap.
taker_swap_success_events = [
    "Started", "Negotiated", "TakerFeeSent",
    "MakerPaymentReceived",
    "MakerPaymentWaitConfirmStarted",
    "MakerPaymentValidatedAndConfirmed",
    "TakerPaymentSent", "TakerPaymentSpent",
    "MakerPaymentSpent", "Finished",
]

# Failure events a maker-side swap may record.
maker_swap_error_events = [
    "StartFailed", "NegotiateFailed",
    "TakerFeeValidateFailed",
    "MakerPaymentTransactionFailed",
    "MakerPaymentDataSendFailed",
    "TakerPaymentValidateFailed",
    "TakerPaymentSpendFailed",
    "MakerPaymentRefunded",
    "MakerPaymentRefundFailed",
]

# Failure events a taker-side swap may record.
taker_swap_error_events = [
    "StartFailed", "NegotiateFailed",
    "TakerFeeSendFailed",
    "MakerPaymentValidateFailed",
    "MakerPaymentWaitConfirmFailed",
    "TakerPaymentTransactionFailed",
    "TakerPaymentWaitConfirmFailed",
    "TakerPaymentDataSendFailed",
    "TakerPaymentWaitForSpendFailed",
    "MakerPaymentSpendFailed",
    "TakerPaymentWaitRefundStarted",
    "TakerPaymentRefunded",
    "TakerPaymentRefundFailed",
]
|
smk762/dex_stats_pymongo | app/main.py | import json
import logging
from fastapi import FastAPI
app = FastAPI()
@app.get('/api/v1/summary')
async def summary():
    """Serve the pre-generated market summary JSON."""
    with open('data/summary.json') as fh:
        payload = json.load(fh)
    return payload
@app.get('/api/v1/ticker')
async def ticker():
    """Serve the pre-generated ticker JSON."""
    with open('data/ticker.json') as fh:
        payload = json.load(fh)
    return payload
@app.get('/api/v1/orderbook/{market_pair}')
async def orderbook(market_pair: str = "ALL"):
    """Serve the pre-generated orderbook for one market pair."""
    with open('data/orderbook.json') as fh:
        books = json.load(fh)
    if market_pair not in books:
        return {'error': 'no such pair'}
    return books[market_pair]
@app.get('/api/v1/trades/{market_pair}')
async def trades(market_pair: str = "ALL"):
    """Serve the pre-generated trade history for one market pair."""
    with open('data/trades.json') as fh:
        history = json.load(fh)
    if market_pair not in history:
        return {'error': 'no such pair'}
    return history[market_pair]
|
class Parser_Error(Exception):
    """Base class for all parser-related errors."""
    pass


class ArgumentInputParserError(Parser_Error):
    """Raised when the parser receives invalid input arguments.

    Attributes:
        expression: the offending input expression.
        message: human-readable explanation.
    """

    def __init__(self, expression, message):
        # IMPROVEMENT: forward the message to Exception.__init__ so that
        # str(exc) and tracebacks show it (the original left str(exc) empty).
        super().__init__(message)
        self.expression = expression
        self.message = message
smk762/dex_stats_pymongo | dex_stats/MongoAPI.py | <gh_stars>1-10
import os
import json
from pymongo import MongoClient
from datetime import datetime, timedelta
class MongoAPI:
    """Read-only query helpers over the local MongoDB `swaps` database."""

    def __init__(self):
        self.client = MongoClient("mongodb://localhost:27017/")
        self.db = self.client["swaps"]
        # Successful-swap documents and pre-computed trading-pair summaries.
        self.swaps_collection = self.db.successful
        self.trading_pairs = self.db.trading_pairs

    def find_swap_by_uuid(self, uuid):
        """Return the swap document with this uuid, or None if absent.

        BUG FIX: the original wrapped a cursor in dict() — `dict(find(...))`
        raises TypeError; find_one() returns the document dict directly.
        """
        return self.swaps_collection.find_one({"uuid": uuid})

    def find_swaps_since_timestamp(self, timestamp):
        """Return all swaps whose first event started after *timestamp*."""
        query = {"events.0.event.data.started_at": {"$gt": timestamp}}
        return list(self.swaps_collection.find(query))

    def find_swaps_for_market(self, maker_coin, taker_coin):
        """Return all swaps for the given maker/taker coin pair."""
        query = {"$and": [
            {"events.0.event.data.maker_coin": maker_coin},
            {"events.0.event.data.taker_coin": taker_coin},
        ]}
        return list(self.swaps_collection.find(query))

    def find_swaps_for_market_since_timestamp(self,
                                              maker_coin,
                                              taker_coin,
                                              timestamp):
        """Return swaps for the pair whose first event started after *timestamp*."""
        query = {"$and": [
            {"events.0.event.data.maker_coin": maker_coin},
            {"events.0.event.data.taker_coin": taker_coin},
            {"events.0.event.data.started_at": {"$gt": timestamp}},
        ]}
        return list(self.swaps_collection.find(query))

    def get_trading_pairs(self):
        """Return the 'data' mapping of the trading-pairs summary document,
        or {} when no such document exists."""
        query = {'data': {'$exists': 'true', '$ne': {}}}
        projection = {'data': 1, '_id': 0}
        # find_one already yields a dict (or None); the original's dict(result)
        # wrapper was redundant and crashed with TypeError when nothing matched.
        result = self.trading_pairs.find_one(query, projection=projection)
        return result['data'] if result else {}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.