max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
"""Admin-site registrations for the tweetable app."""
from django.contrib import admin

from .models import *


class TrainAdmin(admin.ModelAdmin):
    """Placeholder ModelAdmin; no customisations yet."""
    # NOTE(review): this class is never passed to admin.site.register() --
    # presumably it should accompany a Train model; confirm before relying
    # on it.


# Register the models with the default ModelAdmin, same as the original
# pair of register() calls (order preserved).
for _model in (User, Tweet):
    admin.site.register(_model)
| 1.523438 | 2 |
language_apps/expr1/gen/testParser.py | SadraGoudarzdashti/IUSTCompiler | 3 | 12764552 | # Generated from D:/AnacondaProjects/iust_start/grammars\expr3.g4 by ANTLR 4.8
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    # Serialized ATN produced by the ANTLR 4.8 tool from expr3.g4.
    # Auto-generated data: do not edit by hand; regenerate from the grammar.
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\r")
        buf.write("\64\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\3\2\3\2\3\2\3\2\3")
        buf.write("\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3\31\n\3\f\3")
        buf.write("\16\3\34\13\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4")
        buf.write("\'\n\4\f\4\16\4*\13\4\3\5\3\5\3\5\3\5\3\5\3\5\5\5\62\n")
        buf.write("\5\3\5\2\4\4\6\6\2\4\6\b\2\2\2\65\2\n\3\2\2\2\4\17\3\2")
        buf.write("\2\2\6\35\3\2\2\2\b\61\3\2\2\2\n\13\7\n\2\2\13\f\7\t\2")
        buf.write("\2\f\r\5\4\3\2\r\16\7\2\2\3\16\3\3\2\2\2\17\20\b\3\1\2")
        buf.write("\20\21\5\6\4\2\21\32\3\2\2\2\22\23\f\5\2\2\23\24\7\5\2")
        buf.write("\2\24\31\5\6\4\2\25\26\f\4\2\2\26\27\7\6\2\2\27\31\5\6")
        buf.write("\4\2\30\22\3\2\2\2\30\25\3\2\2\2\31\34\3\2\2\2\32\30\3")
        buf.write("\2\2\2\32\33\3\2\2\2\33\5\3\2\2\2\34\32\3\2\2\2\35\36")
        buf.write("\b\4\1\2\36\37\5\b\5\2\37(\3\2\2\2 !\f\5\2\2!\"\7\7\2")
        buf.write("\2\"\'\5\b\5\2#$\f\4\2\2$%\7\b\2\2%\'\5\b\5\2& \3\2\2")
        buf.write("\2&#\3\2\2\2\'*\3\2\2\2(&\3\2\2\2()\3\2\2\2)\7\3\2\2\2")
        buf.write("*(\3\2\2\2+\62\7\n\2\2,\62\7\13\2\2-.\7\3\2\2./\5\4\3")
        buf.write("\2/\60\7\4\2\2\60\62\3\2\2\2\61+\3\2\2\2\61,\3\2\2\2\61")
        buf.write("-\3\2\2\2\62\t\3\2\2\2\7\30\32&(\61")
        return buf.getvalue()
class testParser ( Parser ):
    # Auto-generated parser for expr3.g4 (ANTLR 4.8).  Do not edit by hand;
    # regenerate from the grammar instead.

    grammarFileName = "expr3.g4"

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    # Display names of literal tokens, indexed by token type.
    literalNames = [ "<INVALID>", "'('", "')'", "'+'", "'-'", "'*'", "'/'",
                     "'='", "<INVALID>", "<INVALID>", "<INVALID>", "'\n'" ]

    # Symbolic names of tokens, indexed by token type.
    symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "Plus", "MINUS",
                      "MUL", "DIVIDE", "ASSIGN", "Id", "Number", "Whitespace",
                      "Newline" ]

    # Rule indices.
    RULE_start = 0
    RULE_expr = 1
    RULE_term = 2
    RULE_fact = 3

    ruleNames = [ "start", "expr", "term", "fact" ]

    # Token type constants.
    EOF = Token.EOF
    T__0=1
    T__1=2
    Plus=3
    MINUS=4
    MUL=5
    DIVIDE=6
    ASSIGN=7
    Id=8
    Number=9
    Whitespace=10
    Newline=11

    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.8")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None

    class StartContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def Id(self):
            return self.getToken(testParser.Id, 0)

        def ASSIGN(self):
            return self.getToken(testParser.ASSIGN, 0)

        def expr(self):
            return self.getTypedRuleContext(testParser.ExprContext,0)

        def EOF(self):
            return self.getToken(testParser.EOF, 0)

        def getRuleIndex(self):
            return testParser.RULE_start

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterStart" ):
                listener.enterStart(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitStart" ):
                listener.exitStart(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitStart" ):
                return visitor.visitStart(self)
            else:
                return visitor.visitChildren(self)

    def start(self):
        # Parse rule 'start' : Id ASSIGN expr EOF (see matches below).
        localctx = testParser.StartContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_start)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 8
            self.match(testParser.Id)
            self.state = 9
            self.match(testParser.ASSIGN)
            self.state = 10
            self.expr(0)
            self.state = 11
            self.match(testParser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    class ExprContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return testParser.RULE_expr

        def copyFrom(self, ctx:ParserRuleContext):
            super().copyFrom(ctx)

    class Rule_minusContext(ExprContext):
        # Labeled alternative of 'expr': expr MINUS term.

        def __init__(self, parser, ctx:ParserRuleContext): # actually a testParser.ExprContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def expr(self):
            return self.getTypedRuleContext(testParser.ExprContext,0)

        def MINUS(self):
            return self.getToken(testParser.MINUS, 0)

        def term(self):
            return self.getTypedRuleContext(testParser.TermContext,0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRule_minus" ):
                listener.enterRule_minus(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRule_minus" ):
                listener.exitRule_minus(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRule_minus" ):
                return visitor.visitRule_minus(self)
            else:
                return visitor.visitChildren(self)

    class Rule_plusContext(ExprContext):
        # Labeled alternative of 'expr': expr Plus term.

        def __init__(self, parser, ctx:ParserRuleContext): # actually a testParser.ExprContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def expr(self):
            return self.getTypedRuleContext(testParser.ExprContext,0)

        def Plus(self):
            return self.getToken(testParser.Plus, 0)

        def term(self):
            return self.getTypedRuleContext(testParser.TermContext,0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRule_plus" ):
                listener.enterRule_plus(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRule_plus" ):
                listener.exitRule_plus(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRule_plus" ):
                return visitor.visitRule_plus(self)
            else:
                return visitor.visitChildren(self)

    class Rule3Context(ExprContext):
        # Labeled alternative of 'expr': a bare term.

        def __init__(self, parser, ctx:ParserRuleContext): # actually a testParser.ExprContext
            super().__init__(parser)
            self.copyFrom(ctx)

        def term(self):
            return self.getTypedRuleContext(testParser.TermContext,0)

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRule3" ):
                listener.enterRule3(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRule3" ):
                listener.exitRule3(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRule3" ):
                return visitor.visitRule3(self)
            else:
                return visitor.visitChildren(self)

    def expr(self, _p:int=0):
        # Parse left-recursive rule 'expr' (alternatives: Rule_plus,
        # Rule_minus, Rule3) using ANTLR's precedence-climbing scheme.
        _parentctx = self._ctx
        _parentState = self.state
        localctx = testParser.ExprContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 2
        self.enterRecursionRule(localctx, 2, self.RULE_expr, _p)
        try:
            self.enterOuterAlt(localctx, 1)
            localctx = testParser.Rule3Context(self, localctx)
            self._ctx = localctx
            _prevctx = localctx

            self.state = 14
            self.term(0)
            self._ctx.stop = self._input.LT(-1)
            self.state = 24
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,1,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    self.state = 22
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
                    if la_ == 1:
                        localctx = testParser.Rule_plusContext(self, testParser.ExprContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
                        self.state = 16
                        if not self.precpred(self._ctx, 3):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
                        self.state = 17
                        self.match(testParser.Plus)
                        self.state = 18
                        self.term(0)
                        pass

                    elif la_ == 2:
                        localctx = testParser.Rule_minusContext(self, testParser.ExprContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
                        self.state = 19
                        if not self.precpred(self._ctx, 2):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                        self.state = 20
                        self.match(testParser.MINUS)
                        self.state = 21
                        self.term(0)
                        pass

                self.state = 26
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,1,self._ctx)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx

    class TermContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def fact(self):
            return self.getTypedRuleContext(testParser.FactContext,0)

        def term(self):
            return self.getTypedRuleContext(testParser.TermContext,0)

        def MUL(self):
            return self.getToken(testParser.MUL, 0)

        def DIVIDE(self):
            return self.getToken(testParser.DIVIDE, 0)

        def getRuleIndex(self):
            return testParser.RULE_term

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterTerm" ):
                listener.enterTerm(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitTerm" ):
                listener.exitTerm(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitTerm" ):
                return visitor.visitTerm(self)
            else:
                return visitor.visitChildren(self)

    def term(self, _p:int=0):
        # Parse left-recursive rule 'term' (fact, term MUL fact,
        # term DIVIDE fact) using ANTLR's precedence-climbing scheme.
        _parentctx = self._ctx
        _parentState = self.state
        localctx = testParser.TermContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 4
        self.enterRecursionRule(localctx, 4, self.RULE_term, _p)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 28
            self.fact()
            self._ctx.stop = self._input.LT(-1)
            self.state = 38
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,3,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    self.state = 36
                    self._errHandler.sync(self)
                    la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
                    if la_ == 1:
                        localctx = testParser.TermContext(self, _parentctx, _parentState)
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_term)
                        self.state = 30
                        if not self.precpred(self._ctx, 3):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
                        self.state = 31
                        self.match(testParser.MUL)
                        self.state = 32
                        self.fact()
                        pass

                    elif la_ == 2:
                        localctx = testParser.TermContext(self, _parentctx, _parentState)
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_term)
                        self.state = 33
                        if not self.precpred(self._ctx, 2):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
                        self.state = 34
                        self.match(testParser.DIVIDE)
                        self.state = 35
                        self.fact()
                        pass

                self.state = 40
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,3,self._ctx)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx

    class FactContext(ParserRuleContext):

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def Id(self):
            return self.getToken(testParser.Id, 0)

        def Number(self):
            return self.getToken(testParser.Number, 0)

        def expr(self):
            return self.getTypedRuleContext(testParser.ExprContext,0)

        def getRuleIndex(self):
            return testParser.RULE_fact

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterFact" ):
                listener.enterFact(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitFact" ):
                listener.exitFact(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitFact" ):
                return visitor.visitFact(self)
            else:
                return visitor.visitChildren(self)

    def fact(self):
        # Parse rule 'fact': Id | Number | '(' expr ')'.
        localctx = testParser.FactContext(self, self._ctx, self.state)
        self.enterRule(localctx, 6, self.RULE_fact)
        try:
            self.state = 47
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [testParser.Id]:
                self.enterOuterAlt(localctx, 1)
                self.state = 41
                self.match(testParser.Id)
                pass
            elif token in [testParser.Number]:
                self.enterOuterAlt(localctx, 2)
                self.state = 42
                self.match(testParser.Number)
                pass
            elif token in [testParser.T__0]:
                self.enterOuterAlt(localctx, 3)
                self.state = 43
                self.match(testParser.T__0)
                self.state = 44
                self.expr(0)
                self.state = 45
                self.match(testParser.T__1)
                pass
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx

    def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
        # Dispatch semantic predicates (precedence checks) by rule index.
        if self._predicates == None:
            self._predicates = dict()
            self._predicates[1] = self.expr_sempred
            self._predicates[2] = self.term_sempred
        pred = self._predicates.get(ruleIndex, None)
        if pred is None:
            raise Exception("No predicate with index:" + str(ruleIndex))
        else:
            return pred(localctx, predIndex)

    def expr_sempred(self, localctx:ExprContext, predIndex:int):
        if predIndex == 0:
            return self.precpred(self._ctx, 3)

        if predIndex == 1:
            return self.precpred(self._ctx, 2)

    def term_sempred(self, localctx:TermContext, predIndex:int):
        if predIndex == 2:
            return self.precpred(self._ctx, 3)

        if predIndex == 3:
            return self.precpred(self._ctx, 2)
| 1.539063 | 2 |
scripts/stats_test.py | mtcrawshaw/meta-world | 4 | 12764553 | """
Script to analyze distribution of squared Euclidean distance between gradients.
"""
from math import sqrt
import numpy as np
from scipy import stats
# Set constants.
k_vals = [35, 30, 36]   # per-group vector dimensions
n_vals = [1, 18, 1]     # number of vector pairs in each group
total_n = sum(n_vals)
sigma = 0.01            # std dev of each Gaussian coordinate
start_t = 200           # first time step included in the running test
t = 250                 # total number of time steps
num_trials = 100
alpha = 0.05            # significance level for the KS test
load = "vecs.np"        # if not None, read vectors from this file instead of sampling

reject_probs = []
outlier_count = 0
for m in range(num_trials):
    max_k = max(k_vals)
    vecs = np.zeros((t, total_n, max_k, 2))
    if load is None:
        # Sample a pair of Gaussian vectors per group member per time step;
        # dimensions beyond k stay zero and contribute nothing to the norms.
        start = 0
        for k, n in zip(k_vals, n_vals):
            vecs[:, start : start + n, :k, :] = np.random.normal(
                scale=sigma, size=(t, n, k, 2)
            )
            start = start + n
    else:
        # NOTE(review): when loading from a file, every trial processes the
        # same data, so all num_trials iterations are identical -- confirm
        # that this is intended.
        with open(load, "rb") as f:
            vecs = np.load(f)

    count = 0
    for current_t in range(start_t, t):
        z = []
        start = 0
        for k, n in zip(k_vals, n_vals):

            # Compute expected distribution of sample means.
            length_mu = 2 * k * (sigma ** 2)
            length_sigma = 2 * sqrt(2 * k) * (sigma ** 2)

            # Compute sample means and z-scores.
            diffs = (
                vecs[: current_t + 1, start : start + n, :, 0]
                - vecs[: current_t + 1, start : start + n, :, 1]
            )
            lengths = np.linalg.norm(diffs, ord=2, axis=2) ** 2
            sample_mean = np.mean(lengths, axis=0)
            current_z = (sample_mean - length_mu) / (length_sigma / sqrt(current_t + 1))
            z.append(current_z)
            start = start + n
        z = np.concatenate(z)

        # Check sizes.
        assert z.shape == (total_n,)

        """
        # Compute QQ plot correlation coefficient
        baseline = np.random.normal(size=z_sample_size)
        sorted_z = np.sort(z)
        sorted_baseline = np.sort(baseline)
        _, _, r, p, _ = stats.linregress(sorted_z, sorted_baseline)
        print("Correlation coefficient: %f" % r)
        print("p-value: %f" % p)
        print("")
        """

        # Compare z-score distribution against standard normal.
        s, p = stats.kstest(z, "norm")
        if p < alpha:
            count += 1

    # Fraction of tested time steps on which normality was rejected.
    reject_prob = count / (t - start_t)
    reject_probs.append(reject_prob)
    if count > 0:
        outlier_count += 1

"""
for outlier in outliers:
    print("Total outliers: %d/%d" % (outlier, (t - start_t)))
"""

avg_reject_prob = sum(reject_probs) / len(reject_probs)
print("reject_probs: %s" % str(reject_probs))
print("avg reject_prob: %f" % avg_reject_prob)
print("num rejects: %d/%d" % (outlier_count, num_trials))
| 2.9375 | 3 |
OOPS/oops-intro.py | tverma332/python3 | 3 | 12764554 | import os
class Tomcat:
    """Holds the config-file path and install home of one Tomcat instance."""

    def get_details_for_each_tomcat(self, server_xml):
        """Record *server_xml* as the config file and derive the Tomcat home.

        The home is taken to be two directory levels above server.xml
        (i.e. <home>/conf/server.xml).
        """
        self.tcf = server_xml
        # Strip the file name, then the conf/ directory, to get the root.
        conf_dir = os.path.dirname(server_xml)
        self.th = os.path.dirname(conf_dir)
        return None

    def display_details(self):
        """Print the recorded config-file path and home directory."""
        print(f'The tomcat config file is : {self.tcf} \nThe tomcat home is : {self.th}')
        return None
def main():
    """Demonstrate that each Tomcat instance keeps its own state."""
    # Two independent instances, each recording a different installation.
    seven = Tomcat()
    nine = Tomcat()
    seven.get_details_for_each_tomcat("/home/Automation/tomcat7/conf/server.xml")
    nine.get_details_for_each_tomcat("/home/Automation/tomcat9/conf/server.xml")

    # Instance attributes are readable directly...
    print(nine.tcf)
    # ...and each object reports its own details.
    nine.display_details()
    seven.display_details()
    return None


if __name__ == "__main__":
    main()
ephypype/compute_inv_problem.py | annapasca/ephypype | 18 | 12764555 | <filename>ephypype/compute_inv_problem.py
"""Inverse problem functions."""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import mne
import glob
import os.path as op
import numpy as np
from mne.io import read_raw_fif, read_raw_ctf
from mne import read_epochs
from mne.evoked import write_evokeds, read_evokeds
from mne.minimum_norm import make_inverse_operator, apply_inverse_raw
from mne.minimum_norm import apply_inverse_epochs, apply_inverse
from mne.beamformer import apply_lcmv_raw, make_lcmv
from mne import compute_raw_covariance, pick_types, write_cov
from nipype.utils.filemanip import split_filename as split_f
from .preproc import _create_reject_dict
from .source_estimate import _process_stc
def compute_noise_cov(fname_template, raw_filename):
    """
    Compute noise covariance data from a continuous segment of raw data.

    Employ empty room data (collected without the subject) to calculate
    the full noise covariance matrix.

    This is recommended for analyzing ongoing spontaneous activity.

    Inputs
        fname_template : str
            noise covariance file name template (also used to locate
            empty-room recordings)
        raw_filename : str
            raw filename (used only for the identity-covariance fallback)
    Output
        cov_fname : str
            noise covariance file name in which is saved the noise covariance
            matrix
    """
    # Check if cov matrix exists
    cov_fname = _get_cov_fname(fname_template)
    if not op.isfile(cov_fname):
        # No saved covariance: look for empty-room data matching the template.
        er_raw, cov_fname = _get_er_data(fname_template)
        if not op.isfile(cov_fname) and er_raw:
            # Estimate the covariance from the empty-room recording
            # over the good MEG channels only.
            reject = _create_reject_dict(er_raw.info)
            picks = pick_types(er_raw.info, meg=True,
                               ref_meg=False, exclude='bads')

            noise_cov = compute_raw_covariance(er_raw, picks=picks,
                                               reject=reject)

            write_cov(cov_fname, noise_cov)
        elif op.isfile(cov_fname):
            print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))
        elif not er_raw:
            # No empty-room data available: fall back to an identity matrix.
            cov_fname = compute_cov_identity(raw_filename)
    else:
        print(('*** NOISE cov file {} exists!!!'.format(cov_fname)))

    return cov_fname
def _get_cov_fname(cov_fname_template):
    """Return the first file matching *cov_fname_template* whose path
    contains 'cov.fif', or '' when no saved covariance matrix exists."""
    matches = (path for path in glob.glob(cov_fname_template)
               if 'cov.fif' in path)
    return next(matches, '')
def _get_er_data(er_fname_template):
    "Check if empty room data exists in order to compute noise cov matrix."
    # if empty room data exists returns both the raw instance of the empty room
    # data and the cov filename where we'll save the cov matrix.
    for er_fname in glob.glob(er_fname_template):
        print(('*** {} \n'.format(er_fname)))
        if er_fname.rfind('.fif') > -1:
            # Neuromag FIF empty-room recording.
            er_raw = read_raw_fif(er_fname)
            cov_fname = er_fname.replace('.fif', '-raw-cov.fif')
        elif er_fname.rfind('.ds') > -1:
            # CTF empty-room dataset.
            cov_fname = er_fname.replace('.ds', '-raw-cov.fif')
            er_raw = read_raw_ctf(er_fname)
        # NOTE(review): only the first glob match is considered; if that
        # match is neither .fif nor .ds, er_raw/cov_fname are unbound and
        # this return raises NameError -- confirm the template guarantees
        # the extension.
        return er_raw, cov_fname

    return None, ''
def compute_cov_identity(raw_filename):
    """Compute an identity noise-covariance matrix over the good MEG
    channels of *raw_filename*, save it next to the data (if not already
    present) and return its file name."""
    raw = read_raw_fif(raw_filename)
    data_path, basename, ext = split_f(raw_filename)

    cov_fname = op.join(data_path, 'identity_noise-cov.fif')
    if not op.isfile(cov_fname):
        # Restrict to good MEG channels; the covariance is the identity
        # (nfree=0, i.e. no degrees of freedom from data).
        picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')
        ch_names = [raw.info['ch_names'][k] for k in picks]
        bads = [b for b in raw.info['bads'] if b in ch_names]

        noise_cov = mne.Covariance(np.identity(len(picks)), ch_names, bads,
                                   raw.info['projs'], nfree=0)
        write_cov(cov_fname, noise_cov)

    return cov_fname
'''
+---------------------+-----------+-----------+-----------+-----------------+--------------+
| Inverse desired | Forward parameters allowed | # noqa
+=====================+===========+===========+===========+=================+==============+
| | **loose** | **depth** | **fixed** | **force_fixed** | **surf_ori** |
+---------------------+-----------+-----------+-----------+-----------------+--------------+
| | Loose constraint, | 0.2 | 0.8 | False | False | True |
| | Depth weighted | | | | | |
+---------------------+-----------+-----------+-----------+-----------------+--------------+
| | Loose constraint | 0.2 | None | False | False | True |
+---------------------+-----------+-----------+-----------+-----------------+--------------+
| | Free orientation, | None | 0.8 | False | False | True |
| | Depth weighted | | | | | |
+---------------------+-----------+-----------+-----------+-----------------+--------------+
| | Free orientation | None | None | False | False | True | False |
+---------------------+-----------+-----------+-----------+-----------------+--------------+
| | Fixed constraint, | None | 0.8 | True | False | True |
| | Depth weighted | | | | | |
+---------------------+-----------+-----------+-----------+-----------------+--------------+
| | Fixed constraint | None | None | True | True | True |
+---------------------+-----------+-----------+-----------+-----------------+--------------+
'''
def _compute_inverse_solution(raw_filename, sbj_id, subjects_dir, fwd_filename,
                              cov_fname, is_epoched=False, events_id=None,
                              condition=None, is_ave=False,
                              t_min=None, t_max=None, is_evoked=False,
                              snr=1.0, inv_method='MNE',
                              parc='aparc', aseg=False, aseg_labels=[],
                              all_src_space=False, ROIs_mean=True,
                              is_fixed=False):
    # NOTE(review): aseg_labels=[] is a mutable default; it is not mutated
    # in this function, but a None default would be safer.  t_min/t_max and
    # aseg_labels are currently unused here -- confirm they are consumed by
    # the calling pipeline.
    """
    Compute the inverse solution on raw/epoched data and return the average
    time series computed in the N_r regions of the source space defined by
    the specified cortical parcellation

    Inputs
        raw_filename : str
            filename of the raw/epoched data
        sbj_id : str
            subject name
        subjects_dir : str
            Freesurfer directory
        fwd_filename : str
            filename of the forward operator
        cov_fname : str
            filename of the noise covariance matrix
        is_epoched : bool
            if True and events_id = None the input data are epoch data
            in the format -epo.fif
            if True and events_id is not None, the raw data are epoched
            according to events_id and t_min and t_max values
        events_id: dict
            the dict of events
        t_min, t_max: int
            define the time interval in which to epoch the raw data
        is_evoked: bool
            if True the raw data will be averaged according to the events
            contained in the dict events_id
        inv_method : str
            the inverse method to use; possible choices: MNE, dSPM, sLORETA
        snr : float
            the SNR value used to define the regularization parameter
        parc: str
            the parcellation defining the ROIs atlas in the source space
        aseg: bool
            if True a mixed source space will be created and the sub cortical
            regions defined in aseg_labels will be added to the source space
        aseg_labels: list
            list of substructures we want to include in the mixed source space
        all_src_space: bool
            if True we compute the inverse for all points of the source space
        ROIs_mean: bool
            if True we compute the mean of estimated time series on ROIs

    Outputs
        ts_file : str
            filename of the file where are saved the estimated time series
        labels_file : str
            filename of the file where are saved the ROIs of the parcellation
        label_names_file : str
            filename of the file where are saved the name of the ROIs of the
            parcellation
        label_coords_file : str
            filename of the file where are saved the coordinates of the
            centroid of the ROIs of the parcellation
    """
    print(('\n*** READ raw filename %s ***\n' % raw_filename))
    # Load the data in whichever form it was provided.
    if is_epoched:
        epochs = read_epochs(raw_filename)
        info = epochs.info
    elif is_ave:
        evokeds = read_evokeds(raw_filename)
        info = evokeds[0].info
    else:
        raw = read_raw_fif(raw_filename, preload=True)
        info = raw.info

    subj_path, basename, ext = split_f(raw_filename)

    print(('\n*** READ noise covariance %s ***\n' % cov_fname))
    noise_cov = mne.read_cov(cov_fname)

    print(('\n*** READ FWD SOL %s ***\n' % fwd_filename))
    forward = mne.read_forward_solution(fwd_filename)

    # TODO check use_cps for force_fixed=True
    if not aseg:
        print(('\n*** fixed orientation {} ***\n'.format(is_fixed)))
        # is_fixed=True => to convert the free-orientation fwd solution to
        # (surface-oriented) fixed orientation.
        forward = mne.convert_forward_solution(forward, surf_ori=True,
                                               force_fixed=is_fixed,
                                               use_cps=False)

    # Regularization parameter from the SNR.
    lambda2 = 1.0 / snr ** 2

    # compute inverse operator; loose/depth/pick_ori follow the table in the
    # module-level comment above.
    print('\n*** COMPUTE INV OP ***\n')
    if is_fixed:
        loose = 0
        depth = None
        pick_ori = None
    elif aseg:
        loose = 1
        depth = None
        pick_ori = None
    else:
        loose = 0.2
        depth = 0.8
        pick_ori = 'normal'

    print(('\n *** loose {} depth {} ***\n'.format(loose, depth)))
    inverse_operator = make_inverse_operator(info, forward, noise_cov,
                                             loose=loose, depth=depth,
                                             fixed=is_fixed)

    # apply inverse operator to the time windows [t_start, t_stop]s
    print('\n*** APPLY INV OP ***\n')
    stc_files = list()
    if is_epoched and events_id != {}:
        if is_evoked:
            # Average the epochs per condition, save the evoked data, then
            # apply the inverse to each evoked response.
            stc = list()
            if events_id != condition and condition:
                events_name = condition
            else:
                events_name = events_id
            evoked = [epochs[k].average() for k in events_name]
            if 'epo' in basename:
                basename = basename.replace('-epo', '')
            fname_evo = op.abspath(basename + '-ave.fif')
            write_evokeds(fname_evo, evoked)
            for k in range(len(events_name)):
                print(evoked[k])
                stc_evo = apply_inverse(evoked[k], inverse_operator, lambda2,
                                        inv_method, pick_ori=pick_ori)
                print(('\n*** STC for event %s ***\n' % k))
                print('***')
                print(('stc dim ' + str(stc_evo.shape)))
                print('***')
                stc_evo_file = op.abspath(basename + '-%d' % k)
                stc_evo.save(stc_evo_file)
                stc.append(stc_evo)
                stc_files.append(stc_evo_file)
        else:
            stc = apply_inverse_epochs(epochs, inverse_operator, lambda2,
                                       inv_method, pick_ori=pick_ori)
    elif is_epoched and events_id == {}:
        stc = apply_inverse_epochs(epochs, inverse_operator, lambda2,
                                   inv_method, pick_ori=pick_ori)
    elif is_ave:
        # Apply the inverse to each already-averaged (evoked) dataset.
        if events_id != condition and condition:
            events_name = condition
        else:
            events_name = events_id
        stc = list()
        for evo, cond_name in zip(evokeds, events_name):
            print(evo.comment, cond_name)
            stc_evo = apply_inverse(evo, inverse_operator, lambda2,
                                    inv_method, pick_ori=pick_ori)
            print(('\n*** STC for event %s ***\n' % cond_name))
            print('***')
            print(('stc dim ' + str(stc_evo.shape)))
            print('***')
            stc_evo_file = op.join(subj_path, basename + '-%s' % cond_name)
            print(stc_evo_file)
            stc_evo.save(stc_evo_file)
            stc.append(stc_evo)
            stc_files.append(stc_evo_file)
    else:
        # Continuous raw data: estimate sources over the whole recording.
        stc = apply_inverse_raw(raw, inverse_operator, lambda2, inv_method,
                                label=None,
                                start=None, stop=None,
                                buffer_size=1000,
                                pick_ori=pick_ori)  # None 'normal'

    ts_file, labels_file, label_names_file, label_coords_file = \
        _process_stc(stc, basename, sbj_id, subjects_dir, parc, forward,
                     aseg, is_fixed, all_src_space=all_src_space,
                     ROIs_mean=ROIs_mean)

    return ts_file, labels_file, label_names_file, \
        label_coords_file, stc_files
def _compute_LCMV_inverse_solution(raw_filename, sbj_id, subjects_dir,
                                   fwd_filename, cov_fname, parc='aparc',
                                   all_src_space=False, ROIs_mean=True,
                                   is_fixed=False):
    """
    Compute the inverse solution on raw data by LCMV and return the average
    time series computed in the N_r regions of the source space defined by
    the specified cortical parcellation

    Inputs
        raw_filename : str
            filename of the raw data
        sbj_id : str
            subject name
        subjects_dir : str
            Freesurfer directory
        fwd_filename : str
            filename of the forward operator
        cov_fname : str
            filename of the noise covariance matrix
        parc: str
            the parcellation defining the ROIs atlas in the source space
        all_src_space: bool
            if True we compute the inverse for all points of the source space
        ROIs_mean: bool
            if True we compute the mean of estimated time series on ROIs
        is_fixed: bool
            forwarded to the post-processing of the source estimate

    Outputs
        ts_file : str
            filename of the file where are saved the estimated time series
        labels_file : str
            filename of the file where are saved the ROIs of the parcellation
        label_names_file : str
            filename of the file where are saved the name of the ROIs of the
            parcellation
        label_coords_file : str
            filename of the file where are saved the coordinates of the
            centroid of the ROIs of the parcellation
    """
    print(('\n*** READ raw filename %s ***\n' % raw_filename))
    raw = read_raw_fif(raw_filename, preload=True)

    subj_path, basename, ext = split_f(raw_filename)

    print(('\n*** READ noise covariance %s ***\n' % cov_fname))
    noise_cov = mne.read_cov(cov_fname)

    print(('\n*** READ FWD SOL %s ***\n' % fwd_filename))
    forward = mne.read_forward_solution(fwd_filename)
    forward = mne.convert_forward_solution(forward, surf_ori=True)

    # compute data covariance matrix on the good MEG channels
    # reject = _create_reject_dict(raw.info)
    picks = pick_types(raw.info, meg=True, ref_meg=False, exclude='bads')
    data_cov = mne.compute_raw_covariance(raw, picks=picks)

    # compute LCMV filters
    filters = make_lcmv(raw.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori='normal',
                        weight_norm='nai', depth=0.8)

    # apply spatial filter
    stc = apply_lcmv_raw(raw, filters, max_ori_out='signed')

    # BUG FIX: all_src_space and ROIs_mean used to be hardcoded to
    # False/True here, silently ignoring the caller's arguments; pass the
    # actual parameters through (mixed/aseg source space is not supported
    # for LCMV, hence the literal False), consistent with
    # _compute_inverse_solution above.
    ts_file, labels_file, label_names_file, label_coords_file = \
        _process_stc(stc, basename, sbj_id, subjects_dir, parc, forward,
                     False, is_fixed, all_src_space=all_src_space,
                     ROIs_mean=ROIs_mean)

    return ts_file, labels_file, label_names_file, \
        label_coords_file
| 2.203125 | 2 |
tests/h/indexer/reindexer_test.py | ssin122/test-h | 0 | 12764556 | <filename>tests/h/indexer/reindexer_test.py
# -*- coding: utf-8 -*-
import mock
import pytest
from memex.search import client
from h.indexer.reindexer import reindex, SETTING_NEW_INDEX
@pytest.mark.usefixtures('BatchIndexer',
'configure_index',
'get_aliased_index',
'update_aliased_index',
'settings_service')
class TestReindex(object):
def test_sets_op_type_to_create(self, pyramid_request, es, BatchIndexer):
reindex(mock.sentinel.session, es, pyramid_request)
_, kwargs = BatchIndexer.call_args
assert kwargs['op_type'] == 'create'
def test_indexes_annotations(self, pyramid_request, es, batchindexer):
"""Should call .index() on the batch indexer instance."""
reindex(mock.sentinel.session, es, pyramid_request)
batchindexer.index.assert_called_once_with()
def test_retries_failed_annotations(self, pyramid_request, es, batchindexer):
"""Should call .index() a second time with any failed annotation IDs."""
batchindexer.index.return_value = ['abc123', 'def456']
reindex(mock.sentinel.session, es, pyramid_request)
assert batchindexer.index.mock_calls == [
mock.call(),
mock.call(['abc123', 'def456']),
]
def test_creates_new_index(self, pyramid_request, es, configure_index, matchers):
"""Creates a new target index."""
reindex(mock.sentinel.session, es, pyramid_request)
configure_index.assert_called_once_with(es)
def test_passes_new_index_to_indexer(self, pyramid_request, es, configure_index, BatchIndexer):
"""Pass the name of the new index as target_index to indexer."""
configure_index.return_value = 'hypothesis-abcd1234'
reindex(mock.sentinel.session, es, pyramid_request)
_, kwargs = BatchIndexer.call_args
assert kwargs['target_index'] == 'hypothesis-abcd1234'
def test_updates_alias_when_reindexed(self, pyramid_request, es, configure_index, update_aliased_index):
"""Call update_aliased_index on the client with the new index name."""
configure_index.return_value = 'hypothesis-abcd1234'
reindex(mock.sentinel.session, es, pyramid_request)
update_aliased_index.assert_called_once_with(es, 'hypothesis-abcd1234')
def test_does_not_update_alias_if_indexing_fails(self, pyramid_request, es, batchindexer, update_aliased_index):
"""Don't call update_aliased_index if index() fails..."""
batchindexer.index.side_effect = RuntimeError('fail')
try:
reindex(mock.sentinel.session, es, pyramid_request)
except RuntimeError:
pass
assert not update_aliased_index.called
    def test_raises_if_index_not_aliased(self, es, get_aliased_index):
        """Reindexing must fail fast when the current index has no alias."""
        get_aliased_index.return_value = None
        with pytest.raises(RuntimeError):
            reindex(mock.sentinel.session, es, mock.sentinel.request)
def test_stores_new_index_name_in_settings(self, pyramid_request, es, settings_service, configure_index):
configure_index.return_value = 'hypothesis-abcd1234'
reindex(mock.sentinel.session, es, pyramid_request)
settings_service.put.assert_called_once_with(SETTING_NEW_INDEX, 'hypothesis-abcd1234')
def test_deletes_index_name_setting(self, pyramid_request, es, settings_service):
reindex(mock.sentinel.session, es, pyramid_request)
settings_service.delete.assert_called_once_with(SETTING_NEW_INDEX)
def test_deletes_index_name_setting_when_exception_raised(self, pyramid_request, es, settings_service, batchindexer):
batchindexer.index.side_effect = RuntimeError('boom!')
with pytest.raises(RuntimeError):
reindex(mock.sentinel.session, es, pyramid_request)
settings_service.delete.assert_called_once_with(SETTING_NEW_INDEX)
    @pytest.fixture
    def BatchIndexer(self, patch):
        """Patch the BatchIndexer class where h.indexer.reindexer imports it."""
        return patch('h.indexer.reindexer.BatchIndexer')
    @pytest.fixture
    def configure_index(self, patch):
        """Patch configure_index where h.indexer.reindexer imports it."""
        return patch('h.indexer.reindexer.configure_index')
@pytest.fixture
def get_aliased_index(self, patch):
func = patch('h.indexer.reindexer.get_aliased_index')
func.return_value = 'foobar'
return func
    @pytest.fixture
    def update_aliased_index(self, patch):
        """Patch update_aliased_index where h.indexer.reindexer imports it."""
        return patch('h.indexer.reindexer.update_aliased_index')
@pytest.fixture
def batchindexer(self, BatchIndexer):
indexer = BatchIndexer.return_value
indexer.index.return_value = []
return indexer
@pytest.fixture
def es(self):
mock_es = mock.Mock(spec=client.Client('localhost', 'hypothesis'))
mock_es.index = 'hypothesis'
mock_es.t.annotation = 'annotation'
return mock_es
@pytest.fixture
def settings_service(self, pyramid_config):
service = mock.Mock()
pyramid_config.register_service(service, name='settings')
return service
    @pytest.fixture
    def pyramid_request(self, pyramid_request):
        """Extend the stock pyramid_request with a mock transaction manager."""
        pyramid_request.tm = mock.Mock()
        return pyramid_request
| 2.21875 | 2 |
fibonacci/test_fibonacci.py | codenameyau/python-recursion | 0 | 12764557 | import unittest
import fibonacci
class TestFibonacci(unittest.TestCase):
    """Checks the three fibonacci implementations against the same known values.

    The original file repeated the same eight assertions three times; the
    expected sequence is now shared and each test delegates to one helper.
    """

    # First eight Fibonacci numbers, indexed from n = 1.
    EXPECTED = [1, 1, 2, 3, 5, 8, 13, 21]

    def _check_impl(self, fib_impl):
        """Assert fib_impl(n) matches the expected sequence for n = 1..8."""
        for n, expected in enumerate(self.EXPECTED, start=1):
            self.assertEqual(fib_impl(n), expected)

    def test_fib(self):
        self._check_impl(fibonacci.fib)

    def test_fib_rec(self):
        self._check_impl(fibonacci.fib_rec)

    def test_fib_binet(self):
        self._check_impl(fibonacci.fib_binet)
if __name__ == '__main__':
    # Allow running this test module directly: python test_fibonacci.py
    unittest.main()
| 3.453125 | 3 |
academic_helper/migrations/0001_initial.py | asaf-kali/coursist | 6 | 12764558 | # Generated by Django 3.0.6 on 2020-05-25 13:25
from decimal import Decimal
import django.contrib.auth.models
import django.contrib.auth.validators
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 3.0.6). Applied migrations must
    # stay byte-stable; schema changes belong in a NEW migration file.

    initial = True

    dependencies = [
        ("auth", "0011_update_proxy_permissions"),
        ("contenttypes", "0002_remove_content_type_name"),
    ]

    operations = [
        # Custom user model mirroring django.contrib.auth's AbstractUser fields.
        migrations.CreateModel(
            name="CoursistUser",
            fields=[
                ("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
                ("password", models.CharField(max_length=128, verbose_name="password")),
                (
                    "last_login",
                    models.DateTimeField(blank=True, null=True, verbose_name="last login"),
                ),
                (
                    "is_superuser",
                    models.BooleanField(
                        default=False,
                        help_text="Designates that this user has all permissions without explicitly assigning them.",
                        verbose_name="superuser status",
                    ),
                ),
                (
                    "username",
                    models.CharField(
                        error_messages={"unique": "A user with that username already exists."},
                        help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
                        max_length=150,
                        unique=True,
                        validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
                        verbose_name="username",
                    ),
                ),
                (
                    "first_name",
                    models.CharField(blank=True, max_length=30, verbose_name="first name"),
                ),
                (
                    "last_name",
                    models.CharField(blank=True, max_length=150, verbose_name="last name"),
                ),
                (
                    "email",
                    models.EmailField(blank=True, max_length=254, verbose_name="email address"),
                ),
                (
                    "is_staff",
                    models.BooleanField(
                        default=False,
                        help_text="Designates whether the user can log into this admin site.",
                        verbose_name="staff status",
                    ),
                ),
                (
                    "is_active",
                    models.BooleanField(
                        default=True,
                        help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
                        verbose_name="active",
                    ),
                ),
                (
                    "date_joined",
                    models.DateTimeField(default=django.utils.timezone.now, verbose_name="date joined"),
                ),
                (
                    "groups",
                    models.ManyToManyField(
                        blank=True,
                        help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Group",
                        verbose_name="groups",
                    ),
                ),
                (
                    "user_permissions",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Specific permissions for this user.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Permission",
                        verbose_name="user permissions",
                    ),
                ),
            ],
            options={
                "verbose_name": "user",
                "verbose_name_plural": "users",
                "abstract": False,
            },
            managers=[
                ("objects", django.contrib.auth.models.UserManager()),
            ],
        ),
        # A university course, identified by its unique course number.
        migrations.CreateModel(
            name="Course",
            fields=[
                ("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
                ("course_number", models.IntegerField(unique=True)),
                ("name", models.CharField(max_length=100, unique=True)),
                ("credits", models.IntegerField(default=0)),
            ],
            options={
                "ordering": ["course_number"],
            },
        ),
        # A named group of courses with a minimum-credits requirement.
        migrations.CreateModel(
            name="StudyBlock",
            fields=[
                ("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
                ("name", models.CharField(max_length=50)),
                ("min_credits", models.IntegerField()),
                ("courses", models.ManyToManyField(to="academic_helper.Course")),
            ],
            options={
                "abstract": False,
            },
        ),
        # A (possibly public) plan composed of study blocks.
        migrations.CreateModel(
            name="StudyPlan",
            fields=[
                ("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
                ("name", models.CharField(max_length=50)),
                ("credits", models.IntegerField()),
                ("is_public", models.BooleanField(default=True)),
                ("blocks", models.ManyToManyField(to="academic_helper.StudyBlock")),
            ],
            options={
                "abstract": False,
            },
        ),
        # Generic-relation rating aggregate (count/total/average) per rated object.
        migrations.CreateModel(
            name="ExtendedRating",
            fields=[
                (
                    "id",
                    models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID"),
                ),
                ("count", models.PositiveIntegerField(default=0)),
                ("total", models.PositiveIntegerField(default=0)),
                (
                    "average",
                    models.DecimalField(decimal_places=3, default=Decimal("0"), max_digits=6),
                ),
                ("object_id", models.PositiveIntegerField(blank=True, null=True)),
                (
                    "content_type",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contenttypes.ContentType",
                    ),
                ),
            ],
            options={
                "abstract": False,
                "unique_together": {("content_type", "object_id")},
            },
        ),
        # A course a user completed within a block, with an optional grade;
        # each user may complete a given course at most once.
        migrations.CreateModel(
            name="CompletedCourse",
            fields=[
                ("id", models.AutoField(editable=False, primary_key=True, serialize=False)),
                ("grade", models.IntegerField(blank=True, null=True)),
                (
                    "block",
                    models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="academic_helper.StudyBlock"),
                ),
                (
                    "course",
                    models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="academic_helper.Course"),
                ),
                (
                    "user",
                    models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
                ),
            ],
            options={
                "unique_together": {("user", "course")},
            },
        ),
    ]
| 1.96875 | 2 |
src/integrated_models/tim_veh/solve.py | railtoolkit/OpenLinTim | 0 | 12764559 | <filename>src/integrated_models/tim_veh/solve.py
from core.io.config import ConfigReader
from read_csv import *
import sys
from tim_veh_generic import TimVehGenericModel
from tim_veh_gurobi import TimVehGurobiModel
from tim_veh_helper import TimVehParameters
from vs_helper import TurnaroundData
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    logger.info("Begin reading configuration")
    if len(sys.argv) < 2:
        logger.fatal("No config file name given to read!")
        # sys.exit raises SystemExit reliably; the builtin exit() is a
        # site-module convenience that is absent under `python -S` or in
        # frozen/embedded interpreters.
        sys.exit(1)
    # Config
    config = ConfigReader.read(sys.argv[1])
    parameters = TimVehParameters(config)
    logger.info("Finished reading configuration")
    logger.info("Begin reading input data")
    # Read PTN
    ptn = Ptn()
    read_ptn(ptn, parameters)
    # Read line pool (full pool, filtered to the chosen line concept)
    line_pool = LinePool()
    read_line_pool(parameters.line_concept_file_name, line_pool, ptn, parameters.directed,
                   restrict_to_frequency_1=False, read_line_concept=True)
    # Read EAN (event-activity network), optionally seeded with a timetable
    ean = Ean()
    read_ean(parameters, ptn, line_pool, ean, parameters.set_starting_timetable)
    logger.info("Finished reading input data")
    logger.info("Begin execution of integrated timetabling vehicle scheduling model")
    turnaround_data = TurnaroundData(ptn, parameters.vs_depot_index, parameters.vs_turn_over_time)
    model = TimVehGenericModel(ean, line_pool, turnaround_data, parameters)
    model.create_model()
    model.solve()
    logger.info("Finished execution of integrated timetabling vehicle scheduling model")
    logger.info("Begin writing output data")
    # Only a feasible solve produces a solution worth writing.
    if model.is_feasible:
        model.write_output()
    logger.info("Finished writing output data")
tf_unet/scripts/rfi_launcher.py | abhineet123/river_ice_segmentation | 10 | 12764560 | # tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 28, 2016
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import glob
import click
from tf_unet import unet
from tf_unet import util
from scripts.radio_util import DataProvider
def create_training_path(output_path):
    """Return the first non-existing run_XXX directory path under output_path.

    Candidates are probed in order: run_000, run_001, ... and the first one
    that does not exist on disk is returned (it is NOT created here).
    """
    run_id = 0
    while True:
        candidate = os.path.join(output_path, "run_{:03d}".format(run_id))
        if not os.path.exists(candidate):
            return candidate
        run_id += 1
@click.command()
@click.option('--data_root', default="./bleien_data")
@click.option('--output_path', default="./daint_unet_trained_rfi_bleien")
@click.option('--training_iters', default=32)
@click.option('--epochs', default=100)
@click.option('--restore', default=False)
@click.option('--layers', default=5)
@click.option('--features_root', default=64)
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    """Train a U-Net on RFI data from data_root and report the test error.

    When --restore is set, training resumes from output_path instead of
    creating a fresh run_XXX directory.
    """
    print("Using data from: %s"%data_root)
    data_provider = DataProvider(600, glob.glob(data_root+"/*"))
    net = unet.Unet(channels=data_provider.channels,
                    n_class=data_provider.n_class,
                    layers=layers,
                    features_root=features_root,
                    cost_kwargs=dict(regularizer=0.001),
                    )
    # Reuse the existing run directory when restoring; otherwise start a new one.
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    # Evaluate on a single held-out sample and print the error rate.
    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
if __name__ == '__main__':
    # Click parses sys.argv and invokes launch() with the parsed options.
    launch()
HackerRank/10 Days of Statistics/Poisson Distribution I.py | will-data/Self-Study-WIL | 1 | 12764561 | <gh_stars>1-10
import math
# math.factorial handles 0! == 1; the previous hand-rolled recursive lambda
# (`1 if n==1 else n*factorial(n-1)`) never terminated for n == 0 because it
# recursed through negative numbers.
factorial = math.factorial


def poisson(p, n):
    """Poisson pmf P(X = n) for rate p: p**n * e**(-p) / n!."""
    return p ** n * math.exp(-p) / factorial(n)


if __name__ == '__main__':
    # Read stdin only when run as a script so the module is importable.
    p = float(input())
    n = int(input())
    print(poisson(p, n))
menu.py | yanzi113/python | 0 | 12764562 | import pygame,sys
import random
import math
from pygame.locals import *
from pygame.sprite import Group
import gF
import Bullet
import DADcharacter
import Slave
import global_var
import Effect
import Item
import gameRule
class Menu():
    """Title-screen menu: draws the logo/options and drives the menu state machine.

    State is held in self.menuStair (which screen is shown) and
    self.selectNum (the highlighted entry per screen).
    """
    def __init__(self):
        super(Menu,self).__init__()
        # All images are preloaded elsewhere and fetched from the global registry.
        self.image=pygame.image.load('resource/title/menu.png').convert()
        self.sign=global_var.get_value('menuSign')
        self.shadow=global_var.get_value('menuShadow')
        self.playerTitleImg=global_var.get_value('playerTitleImg')
        self.kanjiLogo=global_var.get_value('kanjiLogo')
        self.engLogo=global_var.get_value('engLogo')
        self.lightLogo=global_var.get_value('lightLogo')
        self.tachie=global_var.get_value('reimuLogo')
        self.selectImg=global_var.get_value('menuSelectImg')
        self.levelImg=global_var.get_value('levelImg')
        self.font=pygame.font.SysFont('arial', 20)
        # Highlighted entry per menu screen (indexed by menuStair).
        self.selectNum=[0,0,0,0]
        # Highest selectable index per menu screen (selection wraps around).
        self.stairMax=[7,0,1,1]
        self.menuStair=0 #0:main menu, 1 stage selection, 2 player selection, 3 practice menu
        self.playerReset=False
        # Pulsing-logo animation state.
        self.lightStrength=0.0
        self.logoPosAdj=[0,0]
        self.lastFrame=0
        # Practice-mode ("test") settings: which (non-)spell to start from.
        self.testSpellNum=1
        self.ifSpell=False
        self.substract=False
        self.plus=False
    def update(self,screen,pressed_keys,pressed_keys_last,player):
        """Per-frame entry point: advance animation, handle input, draw, act."""
        self.lastFrame+=1
        if self.lastFrame>360:
            self.lastFrame=self.lastFrame%360
        screen.blit(self.image,(0,0))
        self.alterSelect(pressed_keys,pressed_keys_last)
        self.drawSign(screen)
        self.doSelection(pressed_keys,pressed_keys_last,player)
    def alterSelect(self,pressed_keys,pressed_keys_last):
        """Move the highlighted entry from key edges (pressed now, not last frame)."""
        if self.menuStair!=2 and self.menuStair!=3:
            # Vertical navigation for main menu / stage selection.
            if not (pressed_keys[K_UP] and pressed_keys_last[K_UP]):
                if pressed_keys[K_UP]:
                    self.selectNum[self.menuStair]-=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_DOWN] and pressed_keys_last[K_DOWN]):
                if pressed_keys[K_DOWN]:
                    self.selectNum[self.menuStair]+=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
        elif self.menuStair==2:
            # Player selection uses horizontal navigation.
            if not (pressed_keys[K_LEFT] and pressed_keys_last[K_LEFT]):
                if pressed_keys[K_LEFT]:
                    self.selectNum[self.menuStair]-=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_RIGHT] and pressed_keys_last[K_RIGHT]):
                if pressed_keys[K_RIGHT]:
                    self.selectNum[self.menuStair]+=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
        elif self.menuStair==3:
            # Practice menu: left/right pick the (non-)spell number,
            # up/down toggle between spell and non-spell practice.
            if not (pressed_keys[K_LEFT] and pressed_keys_last[K_LEFT]):
                if pressed_keys[K_LEFT]:
                    self.testSpellNum-=1
                    self.substract=True
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_RIGHT] and pressed_keys_last[K_RIGHT]):
                if pressed_keys[K_RIGHT]:
                    self.testSpellNum+=1
                    self.plus=True
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if self.testSpellNum>10:
                self.testSpellNum=1
            elif self.testSpellNum<1:
                self.testSpellNum=10
            if not (pressed_keys[K_DOWN] and pressed_keys_last[K_DOWN]):
                if pressed_keys[K_DOWN]:
                    self.ifSpell=False
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_UP] and pressed_keys_last[K_UP]):
                if pressed_keys[K_UP]:
                    self.ifSpell=True
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            # NOTE(review): non-spell mode appears to only have entries 1-9;
            # landing on 10 skips to the neighbour or flips back to spell mode.
            if not self.ifSpell and self.testSpellNum==10:
                if self.substract:
                    self.testSpellNum=9
                elif self.plus:
                    self.testSpellNum=1
                else:
                    self.ifSpell=True
            self.substract=False
            self.plus=False
        # Escape / X: back out one menu level, or move to "quit", or exit.
        if (pressed_keys[K_ESCAPE]!=pressed_keys_last[K_ESCAPE] and pressed_keys[K_ESCAPE]) or (pressed_keys[K_x]!=pressed_keys_last[K_x] and pressed_keys[K_x]):
            if self.menuStair>0:
                self.menuStair-=1
                global_var.get_value('cancel_sound').play()
            else:
                if self.selectNum[0]!=7:
                    self.selectNum[0]=7
                    global_var.get_value('cancel_sound').play()
                else:
                    global_var.get_value('cancel_sound').play()
                    sys.exit()
        # Wrap the highlight around the current screen's range.
        if self.selectNum[self.menuStair]>self.stairMax[self.menuStair]:
            self.selectNum[self.menuStair]=0
        elif self.selectNum[self.menuStair]<0:
            self.selectNum[self.menuStair]=self.stairMax[self.menuStair]
    def drawSign(self,screen):
        """Draw the widgets of the current menu screen onto the frame."""
        if self.menuStair==0:
            # Main menu: floating logo, pulsing light, portrait and option list.
            self.logoPosAdj=[math.sin(self.lastFrame*math.pi/180)*20,math.sin(self.lastFrame*0.5*math.pi/180)*5]
            screen.blit(self.kanjiLogo,(100+self.logoPosAdj[0],30+self.logoPosAdj[1]))
            self.lightStrength=0.5*math.sin(self.lastFrame*2*math.pi/180)+0.5
            alpha=round(self.lightStrength*256)
            self.lightLogo.set_alpha(alpha)
            screen.blit(self.lightLogo,(100-5,164))
            screen.blit(self.engLogo,(100,164))
            screen.blit(self.tachie,(600,90))
            # Selected entry uses the bright sprite; all others are shadowed.
            for i in range(0,8):
                if i!=self.selectNum[self.menuStair]:
                    screen.blit(self.shadow[i],(100,250+i*48))
                else:
                    screen.blit(self.sign[i],(100,250+i*48))
        elif self.menuStair==1:
            screen.blit(self.selectImg[0],(40,10))
            screen.blit(self.levelImg[0],(288,264))
        elif self.menuStair==2:
            if self.selectNum[0]==0 or self.selectNum[0]==2:
                screen.blit(self.selectImg[1],(40,10))
                # Dim the non-selected character portrait.
                for i in range(0,2):
                    self.playerTitleImg[i].set_alpha(256)
                if self.selectNum[2]==0:
                    self.playerTitleImg[1].set_alpha(100)
                elif self.selectNum[2]==1:
                    self.playerTitleImg[0].set_alpha(100)
                for i in range(0,2):
                    screen.blit(self.playerTitleImg[i],(450*i,120))
        elif self.menuStair==3:
            if self.selectNum[0]==2:
                if self.ifSpell:
                    pracText=self.font.render('Test: Start From Spell No.'+str(self.testSpellNum),True,(255,255,255))
                else:
                    pracText=self.font.render('Test: Start From non-Spell No.'+str(self.testSpellNum),True,(255,255,255))
                screen.blit(pracText,(200,300))
    def doSelection(self,pressed_keys,pressed_keys_last,player):
        """On confirm (Z edge): act on the highlighted entry of the current screen."""
        if pressed_keys[K_z]!=pressed_keys_last[K_z] and pressed_keys[K_z]:
            if self.menuStair==0:
                # Only Start (0), Practice (2) and Quit (7) are implemented.
                if self.selectNum[self.menuStair]==0:
                    global_var.get_value('ok_sound').play()
                    self.menuStair+=1
                elif self.selectNum[self.menuStair]==2:
                    global_var.get_value('ok_sound').play()
                    self.menuStair+=1
                elif self.selectNum[self.menuStair]==7:
                    global_var.get_value('ok_sound').play()
                    sys.exit()
                else:
                    global_var.get_value('invalid_sound').stop()
                    global_var.get_value('invalid_sound').play()
            elif self.menuStair==1:
                if self.selectNum[0]==0 or self.selectNum[0]==2:
                    if self.selectNum[self.menuStair]==0:
                        global_var.get_value('ok_sound').play()
                        self.menuStair+=1
            elif self.menuStair==2:
                if self.selectNum[0]==0:
                    # Normal game: store the chosen player and start playing.
                    if self.selectNum[self.menuStair]==0:
                        global_var.set_value('playerNum',0)
                    elif self.selectNum[self.menuStair]==1:
                        global_var.set_value('playerNum',1)
                    global_var.get_value('ok_sound').play()
                    global_var.get_value('ok_sound').play()
                    global_var.set_value('ifTest',False)
                    pygame.mixer.music.stop()
                    pygame.mixer.music.load('resource/bgm/lightnessOnTheWay.mp3') # load the background music file
                    #pygame.mixer.music.load('resource/bgm/上海アリス幻樂団 - 死体旅行~ Be of good cheer!.mp3')
                    pygame.mixer.music.set_volume(0.6) # set the background music volume
                    pygame.mixer.music.play(loops=-1)
                    self.menuStair=0
                    global_var.set_value('menu',False)
                    self.playerReset=True
                if self.selectNum[0]==2:
                    # Practice flow: pick a player first, then the practice menu.
                    if self.selectNum[self.menuStair]==0:
                        global_var.set_value('playerNum',0)
                    elif self.selectNum[self.menuStair]==1:
                        global_var.set_value('playerNum',1)
                    global_var.get_value('ok_sound').play()
                    self.menuStair+=1
            elif self.menuStair==3:
                if self.selectNum[0]==2:
                    # Start practice ("test") mode from the configured (non-)spell.
                    global_var.get_value('ok_sound').play()
                    global_var.set_value('ifTest',True)
                    global_var.set_value('ifSpellTest',self.ifSpell)
                    global_var.set_value('spellNum',self.testSpellNum)
                    pygame.mixer.music.stop()
                    pygame.mixer.music.load('resource/bgm/lightnessOnTheWay.mp3') # load the background music file
                    #pygame.mixer.music.load('resource/bgm/上海アリス幻樂団 - 死体旅行~ Be of good cheer!.mp3')
                    pygame.mixer.music.set_volume(0.6) # set the background music volume
                    pygame.mixer.music.play(loops=-1)
                    self.menuStair=0
                    global_var.set_value('menu',False)
                    self.playerReset=True
dsi/expansions.py | mongodb/dsi | 9 | 12764563 | <gh_stars>1-10
"""
Glue between expansions.yml (written by convention in evergreen configs)
and DSI's internal config yamls.
"""
import os
import re
import typing as typ
from collections import namedtuple
import yaml
import structlog
_StrList = typ.List[str]
def _bootstrap_contents(exps: dict, ers: _StrList) -> dict:
    """
    Contents of bootstrap.yml
    :param exps: expansions loaded from expansions.yml
    :param ers: list of errors (e.g. missing values)
    :return: entries to write to bootstrap.yml
    """
    out = dict()
    # Map bootstrap keys (left) to the evergreen expansion names (right).
    _copy_keys(
        exps,
        out,
        {
            "infrastructure_provisioning": "cluster",
            "platform": "platform",
            "mongodb_setup": "setup",
            "storageEngine": "storageEngine",
            "test_control": "test",
            "test_name": "test",
            "authentication": "authentication",
        },
        ers,
    )
    out["production"] = True
    out["auto_genny_workload"] = _get(exps, "auto_workload_path", ers, None)
    # NOTE(review): redundant with the "infrastructure_provisioning" entry in
    # the _copy_keys mapping above — both read the "cluster" expansion.
    out["infrastructure_provisioning"] = _get(exps, "cluster", ers)
    out["overrides"] = {
        "infrastructure_provisioning": {
            # This is currently only used by initialsync-logkeeper.
            # It is empty and not used for other tests.
            "tfvars": {"mongod_seeded_ebs_snapshot_id": _get(exps, "snapshotId", ers, None)}
        },
        "workload_setup": {
            "local_repos": {
                "workloads": "../src/workloads/workloads",
                "ycsb": "../src/YCSB/YCSB",
                "linkbench": "../src/linkbench/linkbench",
                "tpcc": "../src/tpcc/tpcc",
                "genny": "../src/genny/genny",
            },
        },
        "mongodb_setup": {
            # # This is currently only used by initialsync-logkeeper-short.
            # It is empty and not used for other tests.
            "mongodb_dataset": _get(exps, "mongodb_dataset", ers, None),
            "mongodb_binary_archive": _mongodb_binary_archive(exps, ers),
        },
    }

    # Initially used by sb_large_scale to override
    # expire-on-delta to allow longer runtime on otherwise
    # standard variants
    if "additional_tfvars" in exps:
        vals = yaml.safe_load(exps["additional_tfvars"])
        out["overrides"]["infrastructure_provisioning"]["tfvars"].update(vals)

    # Do not run canary tests in patches
    if _get(exps, "is_patch", ers, "false") == "true":
        out["canaries"] = "none"
    return out
def _runtime_contents(exps: dict, ers: _StrList) -> dict:
    """
    Contents of runtime.yml
    :param exps: expansions loaded from expansions.yml
    :param ers: list of errors (e.g. missing values)
    :return: entries to write to runtime.yml
    """
    out = dict()

    # evergreen default expansions
    _copy_keys(
        exps,
        out,
        {
            "branch_name": "branch_name",
            "build_id": "build_id",
            "build_variant": "build_variant",
            "project": "project",
            "project_dir": "project_dir",
            "revision": "revision",
            "task_id": "task_id",
            "task_name": "task_name",
            "version_id": "version_id",
            "workdir": "workdir",
            "order": "revision_order_id",
        },
        ers,
    )
    # Normalize types: expansions arrive as strings.
    out["is_patch"] = bool(_get(exps, "is_patch", ers, False))
    out["execution"] = int(_get(exps, "execution", ers))

    # sys-perf expansions
    # Record module revisions ("*_rev") for reproducibility.
    _copy_revision_keys(exps, out)
    out["ext"] = _get(exps, "ext", ers, None)
    out["script_flags"] = _get(exps, "script_flags", ers, None)
    return out
def _runtime_secret_contents(exps: dict, ers: _StrList) -> dict:
    """
    Contents of runtime_secret.yml

    :param exps: expansions loaded from expansions.yml
    :param ers: list of errors (e.g. missing values)
    :return: entries to write to runtime_secret.yml
    """
    # runtime_secret key (left) -> expansion name (right)
    secret_key_map = {
        "aws_access_key": "terraform_key",
        "aws_secret_key": "terraform_secret",
        "perf_jira_user": "perf_jira_user",
        "perf_jira_pw": "perf_jira_pw",
        "dsi_analysis_atlas_user": "dsi_analysis_atlas_user",
        "dsi_analysis_atlas_pw": "dsi_analysis_atlas_pw",
    }
    secrets = dict()
    _copy_keys(exps, secrets, secret_key_map, ers)
    return secrets
# Module-level structured logger.
_SLOG = structlog.get_logger(__name__)

_SECRET_VARS_KEYS_MATCHERS = [
    re.compile(line)
    for line in [
        # Known "legacy" expansions set in project settings:
        r"^aws_secret$",
        r"^dsi_analysis_atlas_pw$",
        r"^evergreen_api_key$",
        r"^perf_jira_pw$",
        r"^ec2_pem$",
        r"^terraform_secret$",
        r"^atlas_api_private_key$",
        r"^atlas_database_password$",
        # From DSI's self-tests
        r"^evergreen_token$",
        r"^github_token$",
        r"^global_github_oauth_token$",
        # Conventions:
        r"^SECRET_.*$",
    ]
]
"""
When displaying expansions, entries with keys matching these regexes are considered "secret",
and their values need to be redacted.
New conventions ask that secret values all have a SECRET_ prefix, but this is a new convention.
"""

# Unique instance compared by identity (``is``) in _get().
_INVALID_VALUE_SENTINEL = namedtuple("InvalidValue", [])()
"""
Used to indicate that a default value isn't specified (since None is a valid default in some cases).
"""

FileToWrite = namedtuple("FileToWrite", ["file_name", "text", "perms"])
"""
Represents a file that should be written as a result of processing expansions.yml.
NOTE: You probably want to write perms in octal e.g. 0o755 to use traditional unix-style permission
numbers.
"""
class InvalidConfigurationException(Exception):
    """
    Raised when an expansion value is missing or improper.
    """
def _get(vals: dict, key: str, ers: _StrList, default=_INVALID_VALUE_SENTINEL):
    """
    Look up *key* in *vals*, falling back to *default* when given.

    :param vals:
        source dictionary
    :param key:
        key to get
    :param ers:
        "out" parameter indicating an error; will add an error if not present
        and default is not given
    :param default:
        default to use if key not in vals
    :return:
        the value (or default); records an error and returns the sentinel when
        the key is absent and no default was supplied
    """
    try:
        return vals[key]
    except KeyError:
        pass
    if default is not _INVALID_VALUE_SENTINEL:
        return default
    ers.append(f"Missing key {key} in dict that has {vals.keys()}.")
    return _INVALID_VALUE_SENTINEL
def _copy_keys(source: dict, dest: dict, keys: typ.Mapping[str, str], ers: _StrList):
    """Copy source[src_key] into dest[dst_key] for every dst_key -> src_key pair in keys."""
    for dst_key in keys:
        dest[dst_key] = _get(source, keys[dst_key], ers)
def _redact(env: dict, placeholder: str) -> dict:
    """
    Return a copy of *env* with the values of secret-looking keys redacted.

    :param env: environment variables or expansions.yml (serialized into a dict).
    :param placeholder: what to replace redacted content with
    :return: env with any sensitive values redacted.
    """

    def _is_secret(candidate_key: str) -> bool:
        # A key is "secret" if any configured regex matches it.
        return any(rex.match(candidate_key) for rex in _SECRET_VARS_KEYS_MATCHERS)

    def _scrub(secret_value: str) -> str:
        if not secret_value or not isinstance(secret_value, str):
            return secret_value
        stripped = re.sub(r"\s", "", str(secret_value))
        # Values of 10+ chars keep their first and last two characters so it
        # is still possible to tell where a value came from while debugging.
        if len(stripped) >= 10:
            # first two chars + redacted + last two chars
            return "".join([*stripped[:2], placeholder, *[stripped[-2:]]])
        return placeholder

    return {
        key: _scrub(value) if _is_secret(key) else value for key, value in env.items()
    }
def _redact_file(fpath, placeholder="[REDACTED]") -> str:
    """
    Redact secret values in the YAML file at *fpath* and write the result out.

    NOTE(review): ``os.path.join(dirname, basename)`` recombines to the
    ORIGINAL path, so this overwrites *fpath* in place even though the local
    is named ``redacted_name`` — a separate redacted copy may have been
    intended; confirm before relying on the original file surviving this call.

    :return: the path that was written (currently identical to *fpath*).
    """
    with open(fpath) as exps_file:
        exps = yaml.safe_load(exps_file)
    exps = _redact(exps, placeholder)
    dirname = os.path.dirname(fpath)
    basename = os.path.basename(fpath)
    redacted_name = os.path.join(dirname, basename)  # == fpath; see NOTE above.
    with open(redacted_name, "w", encoding="utf-8") as out:
        yaml.dump(exps, out)
    return redacted_name
# Matches evergreen module-revision expansions such as "enterprise_rev".
_REV_REX = re.compile(r"^.*_rev$")
"""
Matches _rev expansions that evergreen sets. This is used for modules where, for example,
if you have a module "foo", evergreen will set "foo_rev" to the revision of the foo repo.
We copy all _rev expansions into runtime.yml so we know the revisions of all modules for
reproducibility.
"""
def _copy_revision_keys(exps: dict, out: dict) -> None:
    """Copy 'revision' and every evergreen module '*_rev' expansion into *out*."""
    for key in exps:
        if key == "revision" or _REV_REX.match(key):
            out[key] = exps[key]
def _mongodb_binary_archive(exps: dict, ers: _StrList) -> str:
    """Return the URL of the mongodb binary archive to test against.

    Either taken verbatim from the "mongodb_binary_archive" expansion, or
    constructed from the compile task's S3 upload location.
    """
    # In DSI's evergreen-dsitest.yml we skip compile and put the
    # archive url in the project's configs (on the UI)
    if "mongodb_binary_archive" in exps:
        return _get(exps, "mongodb_binary_archive", ers)
    # Mirrors the S3 path used by the evergreen compile task's upload step.
    return (
        f"https://s3.amazonaws.com/mciuploads/"
        f"{_get(exps, 'project_dir', ers)}/"
        f"{_get(exps, 'version_id', ers)}/"
        f"{_get(exps, 'revision', ers)}/"
        f"{_get(exps, 'platform', ers)}/"
        f"mongodb{_get(exps, 'compile-variant', ers, '')}"
        f"-{_get(exps, 'version_id', ers)}.tar.gz"
    )
def _pem_contents(exps: dict, ers: _StrList) -> FileToWrite:
    """Build the file entry for the AWS SSH secret key (written with mode 0400)."""
    pem_text = _get(exps, "ec2_pem", ers)
    return FileToWrite("aws_ssh_key.pem", pem_text, 0o400)
def _files_to_write(exps: dict, ers: _StrList) -> typ.Mapping[str, FileToWrite]:
    """Assemble every DSI environment file derived from the expansions.

    Config files are world-readable (0755); secrets are owner-read-only (0400).
    """
    return {
        "bootstrap": FileToWrite("bootstrap.yml", _bootstrap_contents(exps, ers), 0o755),
        "runtime": FileToWrite("runtime.yml", _runtime_contents(exps, ers), 0o755),
        "runtime_secret": FileToWrite(
            "runtime_secret.yml", _runtime_secret_contents(exps, ers), 0o400
        ),
        "pem": _pem_contents(exps, ers),
    }
def write_files(exps_path: str, into_dir: str, placeholder: str) -> None:
    """
    Unconditionally writes files to the current dir - you probably want `write_if_necessary`.

    Raises InvalidConfigurationException if any required expansion is missing.

    :param exps_path:
        path to expansions.yml (usually just cwd)
    :param into_dir:
        where to write the "runtime_secret.yml" etc (usually this is a 'work' directory)
    :param placeholder:
        text to use in secret values when printing expansions in log message.
    :return None
    """
    with open(exps_path) as exps_file:
        exps = yaml.safe_load(exps_file)
    # Log the expansions with secret values redacted.
    _SLOG.info(
        "Loaded expansions",
        expansions_file=exps_path,
        expansions=yaml.dump(_redact(exps, placeholder)),
    )
    ers = []
    to_write = _files_to_write(exps, ers)  # Mapping[str, FileToWrite]
    # Fail fast before writing anything if any expansion was missing.
    if ers:
        _SLOG.fatal("Errors when reading expansions.yml", errors="\n".join(ers))
        msg = "\n".join(ers)
        raise InvalidConfigurationException(f"Errors: {msg}")

    for (name, file) in to_write.items():
        # FileToWrite.text may already be a string (e.g. the pem) or a dict
        # that needs YAML serialization.
        contents = file.text if isinstance(file.text, str) else yaml.dump(file.text)
        path = os.path.join(into_dir, file.file_name)
        with open(path, "w", encoding="utf-8") as output:
            output.writelines(contents)
        # Apply the per-file permissions (secrets are 0400).
        os.chmod(path, file.perms)
        _SLOG.info(
            "Wrote dsi environment file",
            dirname=into_dir,
            basename=file.file_name,
            contents_size=len(contents),
        )
def write_if_necessary(write_to_dir: str) -> None:
    """
    Main entry-point. Write bootstrap.yml, runtime.yml etc given expansions.yml in the cwd.
    Will nop if bootstrap.yml already exists.
    (Wrapper atop write_files that only does its work if necessary.)
    :param write_to_dir: directory into which to write bootstrap.yml etc
    """
    # An existing bootstrap.yml means a local run that was configured by hand.
    bootstrap_path = os.path.join(write_to_dir, "bootstrap.yml")
    if os.path.exists(bootstrap_path):
        _SLOG.info(
            "Existing bootstrap file (running locally) so not writing based on expansions.yml",
            work_dir=write_to_dir,
        )
        return
    if not os.path.exists("expansions.yml"):
        _SLOG.fatal("Missing expansions.yml file", cwd=os.getcwd())
        raise FileNotFoundError("Missing expansions file")
    write_files("expansions.yml", write_to_dir, "[REDACTED]")
| 1.875 | 2 |
maistra/vendor/com_googlesource_chromium_v8/wee8/build/fuchsia/boot_data.py | knm3000/proxy | 643 | 12764564 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions used to provision Fuchsia boot images."""
import common
import logging
import os
import subprocess
import tempfile
import time
import uuid
_SSH_CONFIG_TEMPLATE = """
Host *
CheckHostIP no
StrictHostKeyChecking no
ForwardAgent no
ForwardX11 no
UserKnownHostsFile {known_hosts}
User fuchsia
IdentitiesOnly yes
IdentityFile {identity}
ServerAliveInterval 2
ServerAliveCountMax 5
ControlMaster auto
ControlPersist 1m
ControlPath /tmp/ssh-%r@%h:%p
ConnectTimeout 5
"""
FVM_TYPE_QCOW = 'qcow'
FVM_TYPE_SPARSE = 'sparse'
# Specifies boot files intended for use by an emulator.
TARGET_TYPE_QEMU = 'qemu'
# Specifies boot files intended for use by anything (incl. physical devices).
TARGET_TYPE_GENERIC = 'generic'
def _GetPubKeyPath(output_dir):
  """Path of the generated SSH public key inside |output_dir|."""

  pubkey_name = 'id_ed25519.pub'
  return os.path.join(output_dir, pubkey_name)
def ProvisionSSH(output_dir):
  """Generates a keypair and config file for SSH.

  Creates, if not already present, an ed25519 host keypair ('ssh_key') and a
  client identity keypair ('id_ed25519') under |output_dir|, writes an
  ssh_config pointing at them, and removes any stale known_hosts file so a
  re-provisioned target does not trigger host-key mismatches.
  """
  host_key_path = os.path.join(output_dir, 'ssh_key')
  id_key_path = os.path.join(output_dir, 'id_ed25519')
  known_hosts_path = os.path.join(output_dir, 'known_hosts')
  ssh_config_path = os.path.join(output_dir, 'ssh_config')
  logging.debug('Generating SSH credentials.')
  # Fix: the original passed stdout=open(os.devnull), which leaked a file
  # object per call (and opened devnull read-only); subprocess.DEVNULL is
  # the correct way to discard child output.
  if not os.path.isfile(host_key_path):
    # -h marks the generated key as a host key.
    subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-h', '-f',
                           host_key_path, '-P', '', '-N', ''],
                          stdout=subprocess.DEVNULL)
  if not os.path.isfile(id_key_path):
    subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-f', id_key_path,
                           '-P', '', '-N', ''], stdout=subprocess.DEVNULL)
  with open(ssh_config_path, "w") as ssh_config:
    ssh_config.write(
        _SSH_CONFIG_TEMPLATE.format(identity=id_key_path,
                                    known_hosts=known_hosts_path))
  if os.path.exists(known_hosts_path):
    os.remove(known_hosts_path)
def GetTargetFile(filename, target_arch, target_type):
  """Returns the path of |filename| inside the Fuchsia boot image directory
  for the given |target_arch| / |target_type| combination."""
  assert target_type in (TARGET_TYPE_QEMU, TARGET_TYPE_GENERIC)
  image_dir = os.path.join(common.IMAGES_ROOT, target_arch, target_type)
  return os.path.join(image_dir, filename)
def GetSSHConfigPath(output_dir):
  """Returns the path of the ssh_config file written by ProvisionSSH()."""
  # Use os.path.join for consistency with the rest of this module
  # (the original hard-coded a '/' separator via string concatenation).
  return os.path.join(output_dir, 'ssh_config')
def GetBootImage(output_dir, target_arch, target_type):
  """Returns the path to a Zircon boot image into which the SSH client
  public key has been embedded as data/ssh/authorized_keys."""
  ProvisionSSH(output_dir)
  source_image = GetTargetFile('zircon-a.zbi', target_arch, target_type)
  patched_image = os.path.join(output_dir, 'gen', 'fuchsia-with-keys.zbi')
  # Use the 'zbi' host tool to append the authorized_keys entry.
  subprocess.check_call([
      common.GetHostToolPathFromPlatform('zbi'),
      '-o', patched_image,
      source_image,
      '-e', 'data/ssh/authorized_keys=' + _GetPubKeyPath(output_dir),
  ])
  return patched_image
def GetKernelArgs(output_dir):
  """Returns extra kernel command-line arguments (currently just the boot
  epoch, taken from the current wall-clock time). |output_dir| is unused."""
  epoch = int(time.time())
  return ['devmgr.epoch=%d' % epoch]
def AssertBootImagesExist(arch, platform):
  """Asserts that the Fuchsia boot image for |platform|.|arch| has been
  checked out; the assertion message explains how to fetch it via gclient."""
  assert os.path.exists(GetTargetFile('zircon-a.zbi', arch, platform)), \
      'This checkout is missing the files necessary for\n' \
      'booting this configuration of Fuchsia.\n' \
      'To check out the files, add this entry to the "custom_vars"\n' \
      'section of your .gclient file:\n\n' \
      ' "checkout_fuchsia_boot_images": "%s.%s"\n\n' % \
      (platform, arch)
| 1.953125 | 2 |
lib/datasets/SearchDatasetWrap.py | Mirofil/AutoDL-Projects | 3 | 12764565 | ##################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2019 #
##################################################
import torch, copy, random
import torch.utils.data as data
class SearchDataset(data.Dataset):
    """Dataset wrapper used during architecture search.

    Each __getitem__ returns a 4-tuple
    (train_image, train_label, valid_image, valid_label): the indexed
    training sample paired with one validation sample drawn uniformly at
    random from the validation split.

    Two data layouts are supported:
      * V1: a single dataset plus index lists train_split / valid_split.
      * V2: ``data`` is a (train_data, valid_data) pair with separate splits.
    """

    def __init__(self, name, data, train_split, valid_split, direct_index=False, check=True, true_length=None, merge_train_val=False):
        # name            : dataset identifier, e.g. "cifar100" or "ImageNet16-120"
        # data            : one dataset (V1) or a (train, valid) pair (V2)
        # train_split     : sample indices used for training
        # valid_split     : sample indices used for validation
        # direct_index    : if True, __getitem__'s index is used verbatim
        #                   instead of being looked up in train_split
        # check           : when True (V1 only), verify the splits do not overlap
        # true_length     : overrides len(self) when not None
        # merge_train_val : relaxes the no-overlap assertions for merged splits
        self.datasetname = name
        self.direct_index = direct_index
        self.merge_train_val = merge_train_val
        if isinstance(data, (list, tuple)): # new type of SearchDataset
            assert len(data) == 2, 'invalid length: {:}'.format( len(data) )
            print("V2 SearchDataset")
            self.train_data = data[0]
            self.valid_data = data[1]
            # Copy the splits so later mutation by the caller cannot leak in.
            self.train_split = train_split.copy()
            self.valid_split = valid_split.copy()
            self.mode_str = 'V2' # new mode
        else:
            print("V1 Search Dataset")
            self.mode_str = 'V1' # old mode
            self.data = data
            self.train_split = train_split.copy()
            self.valid_split = valid_split.copy()
            if check:
                # NOTE(review): equal-length splits or >= 48000 training
                # entries skip the overlap check — presumably intentionally
                # merged train+val splits; confirm with callers.
                if len(train_split) != len(valid_split) and len(train_split) < 48000 and not merge_train_val:
                    intersection = set(train_split).intersection(set(valid_split))
                    assert len(intersection) == 0, 'the splitted train and validation sets should have no intersection'
                else:
                    print(f"Skipping checking intersection because since len(train_split)={len(train_split)}, len(valid_split)={len(valid_split)}")
        # Reported length defaults to the number of training indices.
        self.length = len(self.train_split) if true_length is None else true_length

    def __repr__(self):
        return ('{name}(name={datasetname}, train={tr_L}, valid={val_L}, version={ver})'.format(name=self.__class__.__name__, datasetname=self.datasetname, tr_L=len(self.train_split), val_L=len(self.valid_split), ver=self.mode_str))

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        # Resolve the training index either directly or via train_split.
        if self.direct_index:
            assert index in self.train_split and index not in self.valid_split
            train_index = index
        else:
            assert index >= 0 and index < self.length, 'invalid index = {:}'.format(index)
            train_index = self.train_split[index]
        # Pair the training sample with a random validation sample.
        valid_index = random.choice( self.valid_split )
        if not self.merge_train_val:
            assert valid_index not in self.train_split or (self.datasetname in ["cifar100", "ImageNet16-120"] and not self.merge_train_val)
        if self.mode_str == 'V1':
            train_image, train_label = self.data[train_index]
            valid_image, valid_label = self.data[valid_index]
        elif self.mode_str == 'V2':
            train_image, train_label = self.train_data[train_index]
            valid_image, valid_label = self.valid_data[valid_index]
        else: raise ValueError('invalid mode : {:}'.format(self.mode_str))
        return train_image, train_label, valid_image, valid_label
| 2.484375 | 2 |
lib/utils/utils.py | spaenigs/ensemble-performance | 1 | 12764566 | from functools import reduce
from typing import List
from snakemake.io import glob_wildcards
import pandas as pd
import numpy as np
import os
# Tuning constants: worker count, iteration cap, and a size limit.
# NOTE(review): their consumers are outside this file — confirm before changing.
N_JOBS, MAX_ITER, MAX_NR = 28, 100, 20
# Names of the base classifiers; must match keys of get_model() below.
MODEL_NAMES = [
    # "lda", "bayes",
    # "log_reg",
    "rf"
]
# Names of the ensemble models; must match keys of get_meta_model() below.
META_MODEL_NAMES = [
    "stacking",
    "voting_hard",
    "voting_soft"
]
def get_csv_names(dataset):
    """Returns the names of the CSV files available for *dataset*."""
    matches = glob_wildcards(f"data/{dataset}/csv/all/{{csv_names}}")[0]
    return [name for name in matches if "csv" in name]
def get_model():
    """Returns the base classifiers, keyed by their short name.

    Imports are local so that sklearn is only required when models are
    actually instantiated.
    """
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.naive_bayes import GaussianNB
    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import RandomForestClassifier
    models = {}
    models["lda"] = LinearDiscriminantAnalysis()
    models["bayes"] = GaussianNB()
    models["log_reg"] = LogisticRegression(max_iter=2000)
    models["rf"] = RandomForestClassifier(n_jobs=-1)
    return models
def get_meta_model():
    """Returns the ensemble (meta) classifiers, keyed by their short name.

    Estimators are left as None; callers supply them before fitting.
    """
    from optimizer.ensemble import StackingClassifier
    from optimizer.ensemble import VotingClassifier
    meta_models = {}
    meta_models["stacking"] = StackingClassifier(estimators=None, n_jobs=-1)
    meta_models["voting_hard"] = VotingClassifier(estimators=None, voting="hard", n_jobs=-1)
    meta_models["voting_soft"] = VotingClassifier(estimators=None, voting="soft", n_jobs=-1)
    return meta_models
def concat_datasets(paths_list):
    """Reads the encoded datasets at *paths_list* and joins their feature
    columns side by side, restricted to the sequences present in every
    dataset. Returns (X, y) as numpy arrays.
    """
    frames = [pd.read_csv(path, index_col=0) for path in paths_list]
    common = get_all_present_indices_df(frames)
    # Drop the trailing column of each frame (presumably the 'y' label,
    # which get_all_present_indices_df carries separately).
    features = [frame.loc[common.index, :].iloc[:, :-1].sort_index()
                for frame in frames]
    merged = pd.concat(features, axis=1)
    return merged.values, common.y.values
def get_all_present_indices_df(df_list: List[pd.DataFrame]):
    """Returns a dummy DataFrame indexed by the sequence ids that occur in
    every DataFrame of *df_list*, with the class labels (taken from the
    first frame) in column 'y'.
    """
    # Ids present in all encoded datasets: {1,2,3,4,5}, {1,3,5}, {1,2,3,4} -> {1,3}
    common = reduce(set.intersection, (set(frame.index) for frame in df_list))
    # Sort numerically on the suffix of ids shaped like "<prefix>_<number>".
    shared_ids = sorted(common, key=lambda name: int(name.split("_")[1]))
    dummy = pd.DataFrame(np.zeros((len(shared_ids), 1)), index=shared_ids)
    dummy["y"] = df_list[0].loc[shared_ids, "y"]
    return dummy
| 2.3125 | 2 |
programming-laboratory-I/nhwk/pesquisa.py | MisaelAugusto/computer-science | 0 | 12764567 | <filename>programming-laboratory-I/nhwk/pesquisa.py
# coding: utf-8
# Student: <NAME>
# Student ID: 117110525
# Problem: Hotel Search
# (Python 2 script: uses raw_input and the print statement.)
# Parallel lists: entry i of each list describes hotel i
# (price, size, comfort rating, name — all kept as strings).
valores = []
tamanhos = []
confortos = []
hoteis = []
# Read "price,size,comfort,name" records until the "---" sentinel.
while True:
    entrada = raw_input().split(",")
    if entrada[0] == "---":
        break
    valores.append(entrada[0])
    tamanhos.append(entrada[1])
    confortos.append(entrada[2])
    hoteis.append(entrada[3])
# Answer queries until "fim": print the best hotel for each criterion.
while True:
    criterio = raw_input()
    if criterio == "fim":
        break
    hotel = hoteis[0]
    if criterio == "valor":
        # Cheapest hotel wins.
        menor_valor = float(valores[0])
        for i in range(1, len(valores)):
            if float(valores[i]) < menor_valor:
                menor_valor = float(valores[i])
                hotel = hoteis[i]
    elif criterio == "tamanho":
        # Largest hotel wins.
        # NOTE(review): initialized via float() but compared/updated via
        # int() below — works for integer inputs, but looks accidental.
        maior_tamanho = float(tamanhos[0])
        for i in range(1, len(tamanhos)):
            if int(tamanhos[i]) > maior_tamanho:
                maior_tamanho = int(tamanhos[i])
                hotel = hoteis[i]
    else:
        # Any other criterion: most comfortable hotel wins.
        maior_conforto = int(confortos[0])
        for i in range(1, len(confortos)):
            if int(confortos[i]) > maior_conforto:
                maior_conforto = int(confortos[i])
                hotel = hoteis[i]
    print hotel
| 3.625 | 4 |
userid_country.py | PittayutSothanakul/TinyTourMiner | 0 | 12764568 | userid_country = {
60: ("US", "United States", "アメリカ合衆国"),
71: ("CL", "Chile", "チリ"),
95: ("CL", "Chile", "チリ"),
95: ("BR", "Brazil", "ブラジル"),
95: ("PE", "Peru", "ペルー"),
95: ("PT", "Portugal", "ポルトガル"),
170: ("US", "United States", "アメリカ合衆国"),
219: ("CN", "China", "中華人民共和国"),
219: ("PH", "Philippines", "フィリピン"),
238: ("BR", "Brazil", "ブラジル"),
266: ("TR", "Turkey", "トルコ"),
331: ("TH", "Thailand", "タイ"),
367: ("MY", "Malaysia", "マレーシア"),
376: ("ID", "Indonesia", "インドネシア"),
419: ("GB", "United Kingdom", "イギリス"),
419: ("MY", "Malaysia", "マレーシア"),
422: ("US", "United States", "アメリカ合衆国"),
527: ("PT", "Portugal", "ポルトガル"),
539: ("ID", "Indonesia", "インドネシア"),
565: ("TH", "Thailand", "タイ"),
626: ("CN", "China", "中華人民共和国"),
653: ("US", "United States", "アメリカ合衆国"),
653: ("GB", "United Kingdom", "イギリス"),
779: ("KR", "Korea", "韓国"),
793: ("MY", "Malaysia", "マレーシア"),
1107: ("MY", "Malaysia", "マレーシア"),
1109: ("TH", "Thailand", "タイ"),
1224: ("TH", "Thailand", "タイ"),
1247: ("MY", "Malaysia", "マレーシア"),
1478: ("DE", "Germany", "ドイツ"),
1523: ("CN", "China", "中華人民共和国"),
1523: ("VE", "Venezuela","ベネズエラ"),
1546: ("KR", "Korea", "韓国"),
1658: ("TH", "Thailand", "タイ"),
1659: ("TH", "Thailand", "タイ"),
1887: ("US", "United States", "アメリカ合衆国"),
1887: ("ID", "Indonesia", "インドネシア"),
2019: ("TR", "Turkey", "トルコ"),
2070: ("MY", "Malaysia", "マレーシア"),
2189: ("US", "United States", "アメリカ合衆国"),
2189: ("CN", "China", "中華人民共和国"),
2283: ("MY", "Malaysia", "マレーシア"),
2288: ("TH", "Thailand", "タイ"),
2426: ("MY", "Malaysia", "マレーシア"),
2440: ("CA", "Canada", "カナダ"),
2440: ("TH", "Thailand", "タイ"),
2550: ("MY", "Malaysia", "マレーシア"),
2584: ("TH", "Thailand", "タイ"),
2650: ("US", "United States", "アメリカ合衆国"),
2729: ("BR", "Brazil", "ブラジル"),
2769: ("TH", "Thailand", "タイ"),
2769: ("JP", "Japan", "日本"),
2877: ("SG", "Singapore", "シンガポール"),
2877: ("MY", "Malaysia", "マレーシア"),
2932: ("ES", "Spain", "スペイン"),
2942: ("TH", "Thailand", "タイ"),
3042: ("TH", "Thailand", "タイ"),
3203: ("KR", "Korea", "韓国"),
3242: ("MY", "Malaysia", "マレーシア"),
3250: ("IN", "India", "インド"),
3372: ("TH", "Thailand", "タイ"),
3378: ("BR", "Brazil", "ブラジル"),
3520: ("MY", "Malaysia", "マレーシア"),
3524: ("US", "United States", "アメリカ合衆国"),
3525: ("ID", "Indonesia", "インドネシア"),
3559: ("SG", "Singapore", "シンガポール"),
3559: ("MY", "Malaysia", "マレーシア"),
3579: ("MY", "Malaysia", "マレーシア"),
3594: ("ES", "Spain", "スペイン"),
3659: ("ID", "Indonesia", "インドネシア"),
3670: ("US", "United States", "アメリカ合衆国"),
3670: ("ID", "Indonesia", "インドネシア"),
3680: ("ID", "Indonesia", "インドネシア"),
3703: ("BR", "Brazil", "ブラジル"),
3830: ("US", "United States", "アメリカ合衆国"),
4033: ("US", "United States", "アメリカ合衆国"),
4037: ("US", "United States", "アメリカ合衆国"),
4037: ("ID", "Indonesia", "インドネシア"),
4037: ("CA", "Canada", "カナダ"),
4037: ("TR", "Turkey", "トルコ"),
4037: ("HU", "Hungary", "ハンガリー"),
4037: ("RU", "Russian Federation", "ロシア連邦"),
4140: ("US", "United States", "アメリカ合衆国"),
4243: ("AU", "Australia", "オーストラリア"),
4414: ("ID", "Indonesia", "インドネシア"),
4599: ("TH", "Thailand", "タイ"),
4678: ("SG", "Singapore", "シンガポール"),
4757: ("PH", "Philippines", "フィリピン"),
4862: ("ID", "Indonesia", "インドネシア"),
4988: ("US", "United States", "アメリカ合衆国"),
4988: ("CN", "China", "中華人民共和国"),
5033: ("ID", "Indonesia", "インドネシア"),
5041: ("TH", "Thailand", "タイ"),
5042: ("KR", "Korea", "韓国"),
5163: ("MY", "Malaysia", "マレーシア"),
5231: ("DE", "Germany", "ドイツ"),
5249: ("TH", "Thailand", "タイ"),
5294: ("DE", "Germany", "ドイツ"),
5408: ("MY", "Malaysia", "マレーシア"),
5499: ("TH", "Thailand", "タイ"),
5499: ("BE", "Belgium", "ベルギー"),
5545: ("TH", "Thailand", "タイ"),
5564: ("US", "United States", "アメリカ合衆国"),
5575: ("MY", "Malaysia", "マレーシア"),
5694: ("TH", "Thailand", "タイ"),
5710: ("US", "United States", "アメリカ合衆国"),
5839: ("KR", "Korea", "韓国"),
5848: ("BR", "Brazil", "ブラジル"),
6002: ("TH", "Thailand", "タイ"),
6112: ("CN", "China", "中華人民共和国"),
6277: ("MY", "Malaysia", "マレーシア"),
6351: ("ID", "Indonesia", "インドネシア"),
6369: ("EC", "Ecuador", "エクアドル"),
6369: ("BR", "Brazil", "ブラジル"),
6384: ("ID", "Indonesia", "インドネシア"),
6672: ("ID", "Indonesia", "インドネシア"),
6672: ("BR", "Brazil", "ブラジル"),
6798: ("TH", "Thailand", "タイ"),
6833: ("US", "United States", "アメリカ合衆国"),
6848: ("TH", "Thailand", "タイ"),
7207: ("SA", "Saudi Arabia", "サウジアラビア"),
7357: ("KR", "Korea", "韓国"),
7599: ("JP", "Japan", "日本"),
7599: ("RU", "Russian Federation", "ロシア連邦"),
7705: ("RU", "Russian Federation", "ロシア連邦"),
7782: ("KR", "Korea", "韓国"),
7877: ("KR", "Korea", "韓国"),
7886: ("ID", "Indonesia", "インドネシア"),
7888: ("SG", "Singapore", "シンガポール"),
8106: ("MY", "Malaysia", "マレーシア"),
8139: ("SG", "Singapore", "シンガポール"),
8165: ("ID", "Indonesia", "インドネシア"),
8234: ("TH", "Thailand", "タイ"),
8330: ("TH", "Thailand", "タイ"),
8434: ("TH", "Thailand", "タイ"),
8435: ("ID", "Indonesia", "インドネシア"),
8468: ("PT", "Portugal", "ポルトガル"),
8478: ("TH", "Thailand", "タイ"),
8544: ("PH", "Philippines", "フィリピン"),
8568: ("AU", "Australia", "オーストラリア"),
8568: ("SG", "Singapore", "シンガポール"),
8784: ("TR", "Turkey", "トルコ"),
8853: ("US", "United States", "アメリカ合衆国"),
8859: ("TH", "Thailand", "タイ"),
8859: ("PH", "Philippines", "フィリピン"),
9222: ("TH", "Thailand", "タイ"),
9425: ("TH", "Thailand", "タイ"),
9566: ("ID", "Indonesia", "インドネシア"),
9566: ("JP", "Japan", "日本"),
9605: ("BE", "Belgium", "ベルギー"),
9606: ("ID", "Indonesia", "インドネシア"),
9613: ("TH", "Thailand", "タイ"),
9618: ("KR", "Korea", "韓国"),
9661: ("US", "United States", "アメリカ合衆国"),
9886: ("ID", "Indonesia", "インドネシア"),
9887: ("US", "United States", "アメリカ合衆国"),
9887: ("ID", "Indonesia", "インドネシア"),
10046: ("ID", "Indonesia", "インドネシア"),
10256: ("US", "United States", "アメリカ合衆国"),
10267: ("TH", "Thailand", "タイ"),
10333: ("CN", "China", "中華人民共和国"),
10386: ("TH", "Thailand", "タイ"),
10475: ("US", "United States", "アメリカ合衆国"),
10475: ("JP", "Japan", "日本"),
10553: ("BR", "Brazil", "ブラジル"),
10667: ("TH", "Thailand", "タイ"),
10781: ("US", "United States", "アメリカ合衆国"),
10868: ("US", "United States", "アメリカ合衆国"),
10868: ("GR", "Greece", "ギリシャ"),
10868: ("CO", "Colombia", "コロンビア"),
10868: ("SE", "Sweden", "スウェーデン"),
10868: ("TR", "Turkey", "トルコ"),
10915: ("US", "United States", "アメリカ合衆国"),
10995: ("BR", "Brazil", "ブラジル"),
11002: ("CN", "China", "中華人民共和国"),
11069: ("KR", "Korea", "韓国"),
11130: ("TH", "Thailand", "タイ"),
11130: ("JP", "Japan", "日本"),
11226: ("ID", "Indonesia", "インドネシア"),
11289: ("TH", "Thailand", "タイ"),
11358: ("SG", "Singapore", "シンガポール"),
11546: ("SG", "Singapore", "シンガポール"),
11593: ("TH", "Thailand", "タイ"),
11870: ("KR", "Korea", "韓国"),
12103: ("NL", "Netherlands", "オランダ"),
12309: ("US", "United States", "アメリカ合衆国"),
12319: ("PH", "Philippines", "フィリピン"),
12426: ("TH", "Thailand", "タイ"),
12438: ("US", "United States", "アメリカ合衆国"),
12515: ("MY", "Malaysia", "マレーシア"),
12600: ("UA", "Ukraine", "ウクライナ"),
12646: ("US", "United States", "アメリカ合衆国"),
12913: ("KR", "Korea", "韓国"),
13046: ("KR", "Korea", "韓国"),
13432: ("TH", "Thailand", "タイ"),
13681: ("ID", "Indonesia", "インドネシア"),
13681: ("MY", "Malaysia", "マレーシア"),
13795: ("TH", "Thailand", "タイ"),
13827: ("ID", "Indonesia", "インドネシア"),
13890: ("US", "United States", "アメリカ合衆国"),
13890: ("IT", "Italy", "イタリア"),
14142: ("KR", "Korea", "韓国"),
14155: ("ID", "Indonesia", "インドネシア"),
14307: ("MY", "Malaysia", "マレーシア"),
14314: ("US", "United States", "アメリカ合衆国"),
14449: ("US", "United States", "アメリカ合衆国"),
14664: ("BR", "Brazil", "ブラジル"),
14885: ("KR", "Korea", "韓国"),
14930: ("KR", "Korea", "韓国"),
15059: ("BR", "Brazil", "ブラジル"),
15068: ("BR", "Brazil", "ブラジル"),
15161: ("US", "United States", "アメリカ合衆国"),
15186: ("CN", "China", "中華人民共和国"),
15293: ("US", "United States", "アメリカ合衆国"),
15293: ("AE", "United Arab Emirates", "アラブ首長国連邦"),
15293: ("RU", "Russian Federation", "ロシア連邦"),
15356: ("TH", "Thailand", "タイ"),
15413: ("CN", "China", "中華人民共和国"),
15497: ("US", "United States", "アメリカ合衆国"),
15642: ("MY", "Malaysia", "マレーシア"),
15722: ("MY", "Malaysia", "マレーシア"),
15880: ("TH", "Thailand", "タイ"),
16207: ("TH", "Thailand", "タイ"),
16257: ("KR", "Korea", "韓国"),
16262: ("CO", "Colombia", "コロンビア"),
16376: ("TH", "Thailand", "タイ"),
16382: ("MY", "Malaysia", "マレーシア"),
16546: ("KR", "Korea", "韓国"),
16673: ("KR", "Korea", "韓国"),
16673: ("JP", "Japan", "日本"),
16780: ("TH", "Thailand", "タイ"),
16866: ("US", "United States", "アメリカ合衆国"),
17079: ("ID", "Indonesia", "インドネシア"),
17146: ("TH", "Thailand", "タイ"),
17148: ("IT", "Italy", "イタリア"),
17168: ("DE", "Germany", "ドイツ"),
17168: ("HU", "Hungary", "ハンガリー"),
17257: ("US", "United States", "アメリカ合衆国"),
17348: ("MY", "Malaysia", "マレーシア"),
17398: ("MX", "Mexico", "メキシコ"),
17594: ("ID", "Indonesia", "インドネシア"),
17603: ("PH", "Philippines", "フィリピン"),
18301: ("MY", "Malaysia", "マレーシア"),
18335: ("PH", "Philippines", "フィリピン"),
18617: ("MX", "Mexico", "メキシコ"),
18695: ("TH", "Thailand", "タイ"),
18704: ("ID", "Indonesia", "インドネシア"),
18742: ("KR", "Korea", "韓国"),
18810: ("US", "United States", "アメリカ合衆国"),
18810: ("GB", "United Kingdom", "イギリス"),
18968: ("TH", "Thailand", "タイ"),
19143: ("ID", "Indonesia", "インドネシア"),
19149: ("MY", "Malaysia", "マレーシア"),
19276: ("TH", "Thailand", "タイ"),
19284: ("TH", "Thailand", "タイ"),
19565: ("US", "United States", "アメリカ合衆国"),
19738: ("TH", "Thailand", "タイ"),
19784: ("MY", "Malaysia", "マレーシア"),
19821: ("FR", "France", "フランス"),
19931: ("CN", "China", "中華人民共和国"),
19931: ("JP", "Japan", "日本"),
19975: ("TH", "Thailand", "タイ"),
19981: ("TH", "Thailand", "タイ"),
20031: ("QA", "Qatar", "カタール"),
20031: ("HU", "Hungary", "ハンガリー"),
20149: ("TH", "Thailand", "タイ"),
20181: ("TH", "Thailand", "タイ"),
20258: ("IT", "Italy", "イタリア"),
20416: ("TH", "Thailand", "タイ"),
20501: ("US", "United States", "アメリカ合衆国"),
20516: ("KR", "Korea", "韓国"),
20766: ("BR", "Brazil", "ブラジル"),
20787: ("TH", "Thailand", "タイ"),
21200: ("TH", "Thailand", "タイ"),
21200: ("JP", "Japan", "日本"),
21609: ("TH", "Thailand", "タイ"),
21743: ("AE", "United Arab Emirates", "アラブ首長国連邦"),
21942: ("ID", "Indonesia", "インドネシア"),
22136: ("RU", "Russian Federation", "ロシア連邦"),
22338: ("MY", "Malaysia", "マレーシア"),
22439: ("TH", "Thailand", "タイ"),
22657: ("TH", "Thailand", "タイ"),
22668: ("MY", "Malaysia", "マレーシア"),
22982: ("TR", "Turkey", "トルコ"),
23624: ("KR", "Korea", "韓国"),
23681: ("US", "United States", "アメリカ合衆国"),
23681: ("GB", "United Kingdom", "イギリス"),
23681: ("BR", "Brazil", "ブラジル"),
23864: ("TH", "Thailand", "タイ"),
24120: ("KR", "Korea", "韓国"),
24515: ("KR", "Korea", "韓国"),
24543: ("ID", "Indonesia", "インドネシア"),
24643: ("TH", "Thailand", "タイ"),
24806: ("CN", "China", "中華人民共和国"),
24936: ("TH", "Thailand", "タイ"),
24974: ("AU", "Australia", "オーストラリア"),
24974: ("MY", "Malaysia", "マレーシア"),
25252: ("US", "United States", "アメリカ合衆国"),
25452: ("MY", "Malaysia", "マレーシア"),
25539: ("TH", "Thailand", "タイ"),
26283: ("CL", "Chile", "チリ"),
26486: ("TH", "Thailand", "タイ"),
26520: ("ID", "Indonesia", "インドネシア"),
26595: ("US", "United States", "アメリカ合衆国"),
26595: ("TH", "Thailand", "タイ"),
26595: ("BR", "Brazil", "ブラジル"),
26595: ("VN", "Viet Nam", "ベトナム"),
26595: ("PE", "Peru", "ペルー"),
26977: ("US", "United States", "アメリカ合衆国"),
27349: ("KR", "Korea", "韓国"),
27390: ("IL", "Israel", "イスラエル"),
27646: ("MY", "Malaysia", "マレーシア"),
27693: ("SG", "Singapore", "シンガポール"),
27868: ("MY", "Malaysia", "マレーシア"),
27974: ("MY", "Malaysia", "マレーシア"),
28356: ("KR", "Korea", "韓国"),
29051: ("TR", "Turkey", "トルコ"),
29093: ("KR", "Korea", "韓国"),
29099: ("TR", "Turkey", "トルコ"),
29462: ("SG", "Singapore", "シンガポール"),
29462: ("MY", "Malaysia", "マレーシア"),
29474: ("TH", "Thailand", "タイ"),
29567: ("VN", "Viet Nam", "ベトナム"),
29633: ("TH", "Thailand", "タイ"),
29672: ("KR", "Korea", "韓国"),
29694: ("PH", "Philippines", "フィリピン"),
30208: ("SE", "Sweden", "スウェーデン"),
30303: ("ID", "Indonesia", "インドネシア"),
30394: ("BR", "Brazil", "ブラジル"),
30543: ("MY", "Malaysia", "マレーシア"),
30713: ("TH", "Thailand", "タイ"),
30734: ("MY", "Malaysia", "マレーシア"),
30917: ("MY", "Malaysia", "マレーシア"),
30931: ("MY", "Malaysia", "マレーシア"),
31188: ("ID", "Indonesia", "インドネシア"),
31387: ("GB", "United Kingdom", "イギリス"),
31840: ("KR", "Korea", "韓国"),
31972: ("MX", "Mexico", "メキシコ"),
32512: ("JP", "Japan", "日本"),
32512: ("MY", "Malaysia", "マレーシア"),
32542: ("TH", "Thailand", "タイ"),
32588: ("US", "United States", "アメリカ合衆国"),
32712: ("TH", "Thailand", "タイ"),
33169: ("CL", "Chile", "チリ"),
33429: ("RU", "Russian Federation", "ロシア連邦"),
33529: ("TH", "Thailand", "タイ"),
33636: ("BR", "Brazil", "ブラジル"),
33778: ("ID", "Indonesia", "インドネシア"),
34011: ("GR", "Greece", "ギリシャ"),
34305: ("MY", "Malaysia", "マレーシア"),
34320: ("MY", "Malaysia", "マレーシア"),
34596: ("BR", "Brazil", "ブラジル"),
34755: ("ID", "Indonesia", "インドネシア"),
34862: ("MY", "Malaysia", "マレーシア"),
34997: ("ID", "Indonesia", "インドネシア"),
35067: ("BR", "Brazil", "ブラジル"),
35078: ("TH", "Thailand", "タイ"),
35203: ("MY", "Malaysia", "マレーシア"),
35834: ("KR", "Korea", "韓国"),
35853: ("TH", "Thailand", "タイ"),
35888: ("TH", "Thailand", "タイ"),
36162: ("ID", "Indonesia", "インドネシア"),
36306: ("ID", "Indonesia", "インドネシア"),
36478: ("CN", "China", "中華人民共和国"),
36630: ("JM", "Jamaica", "ジャマイカ"),
36715: ("MX", "Mexico", "メキシコ"),
36806: ("CA", "Canada", "カナダ"),
36989: ("MY", "Malaysia", "マレーシア"),
37136: ("MY", "Malaysia", "マレーシア"),
37352: ("DE", "Germany", "ドイツ"),
37636: ("SG", "Singapore", "シンガポール"),
37636: ("MY", "Malaysia", "マレーシア"),
37646: ("ID", "Indonesia", "インドネシア"),
37646: ("CN", "China", "中華人民共和国"),
37683: ("SG", "Singapore", "シンガポール"),
37683: ("MY", "Malaysia", "マレーシア"),
37712: ("PH", "Philippines", "フィリピン"),
37802: ("SG", "Singapore", "シンガポール"),
37848: ("MY", "Malaysia", "マレーシア"),
37976: ("CN", "China", "中華人民共和国"),
38155: ("MY", "Malaysia", "マレーシア"),
38839: ("US", "United States", "アメリカ合衆国"),
38839: ("ID", "Indonesia", "インドネシア"),
39071: ("KR", "Korea", "韓国"),
39132: ("TH", "Thailand", "タイ"),
39484: ("MY", "Malaysia", "マレーシア"),
39497: ("TH", "Thailand", "タイ"),
39715: ("ID", "Indonesia", "インドネシア"),
39752: ("MY", "Malaysia", "マレーシア"),
39760: ("KR", "Korea", "韓国"),
40047: ("TH", "Thailand", "タイ"),
40134: ("TH", "Thailand", "タイ"),
40183: ("MY", "Malaysia", "マレーシア"),
40569: ("TH", "Thailand", "タイ"),
40725: ("KR", "Korea", "韓国"),
40744: ("US", "United States", "アメリカ合衆国"),
40831: ("TH", "Thailand", "タイ"),
40991: ("SG", "Singapore", "シンガポール"),
40991: ("MY", "Malaysia", "マレーシア"),
41069: ("MY", "Malaysia", "マレーシア"),
41086: ("TH", "Thailand", "タイ"),
41330: ("TH", "Thailand", "タイ"),
41368: ("CN", "China", "中華人民共和国"),
41427: ("US", "United States", "アメリカ合衆国"),
41456: ("ID", "Indonesia", "インドネシア"),
41666: ("KR", "Korea", "韓国"),
42299: ("TH", "Thailand", "タイ"),
42358: ("FR", "France", "フランス"),
42399: ("BR", "Brazil", "ブラジル"),
42667: ("TH", "Thailand", "タイ"),
42902: ("TH", "Thailand", "タイ"),
42936: ("KR", "Korea", "韓国"),
43499: ("US", "United States", "アメリカ合衆国"),
43499: ("BR", "Brazil", "ブラジル"),
43835: ("US", "United States", "アメリカ合衆国"),
44270: ("MY", "Malaysia", "マレーシア"),
44986: ("MY", "Malaysia", "マレーシア"),
45043: ("MY", "Malaysia", "マレーシア"),
45167: ("KR", "Korea", "韓国"),
45449: ("TH", "Thailand", "タイ"),
45662: ("GB", "United Kingdom", "イギリス"),
45662: ("CN", "China", "中華人民共和国"),
45684: ("TH", "Thailand", "タイ"),
45774: ("TH", "Thailand", "タイ"),
46555: ("KR", "Korea", "韓国"),
46599: ("JP", "Japan", "日本"),
46599: ("PH", "Philippines", "フィリピン"),
46633: ("TR", "Turkey", "トルコ"),
46887: ("ID", "Indonesia", "インドネシア"),
47297: ("US", "United States", "アメリカ合衆国"),
47535: ("BR", "Brazil", "ブラジル"),
47686: ("TH", "Thailand", "タイ"),
48006: ("TH", "Thailand", "タイ"),
48163: ("ID", "Indonesia", "インドネシア"),
48456: ("US", "United States", "アメリカ合衆国"),
48846: ("TH", "Thailand", "タイ"),
49199: ("TH", "Thailand", "タイ"),
49270: ("GB", "United Kingdom", "イギリス"),
49520: ("IL", "Israel", "イスラエル"),
49570: ("KR", "Korea", "韓国"),
49607: ("TH", "Thailand", "タイ"),
49639: ("IN", "India", "インド"),
49875: ("MY", "Malaysia", "マレーシア"),
50118: ("TH", "Thailand", "タイ"),
50153: ("ID", "Indonesia", "インドネシア"),
50167: ("US", "United States", "アメリカ合衆国"),
50605: ("US", "United States", "アメリカ合衆国"),
50643: ("SG", "Singapore", "シンガポール"),
50816: ("US", "United States", "アメリカ合衆国"),
50828: ("MY", "Malaysia", "マレーシア"),
50931: ("TH", "Thailand", "タイ"),
51095: ("TR", "Turkey", "トルコ"),
51300: ("TH", "Thailand", "タイ"),
51305: ("SG", "Singapore", "シンガポール"),
51305: ("MY", "Malaysia", "マレーシア"),
51644: ("MY", "Malaysia", "マレーシア"),
51808: ("CN", "China", "中華人民共和国"),
51879: ("TH", "Thailand", "タイ"),
52026: ("RU", "Russian Federation", "ロシア連邦"),
52084: ("BR", "Brazil", "ブラジル"),
52432: ("MY", "Malaysia", "マレーシア"),
52477: ("SG", "Singapore", "シンガポール"),
52782: ("TH", "Thailand", "タイ"),
53152: ("ID", "Indonesia", "インドネシア"),
53232: ("ID", "Indonesia", "インドネシア"),
53232: ("DE", "Germany", "ドイツ"),
53625: ("ID", "Indonesia", "インドネシア"),
53673: ("ID", "Indonesia", "インドネシア"),
53743: ("ID", "Indonesia", "インドネシア"),
53913: ("KR", "Korea", "韓国"),
53989: ("FI", "Finland", "フィンランド"),
54137: ("TH", "Thailand", "タイ"),
54169: ("SG", "Singapore", "シンガポール"),
54169: ("MY", "Malaysia", "マレーシア"),
54762: ("MY", "Malaysia", "マレーシア"),
54875: ("TH", "Thailand", "タイ"),
54930: ("US", "United States", "アメリカ合衆国"),
55093: ("ID", "Indonesia", "インドネシア"),
55338: ("MY", "Malaysia", "マレーシア"),
55964: ("CL", "Chile", "チリ"),
55995: ("TH", "Thailand", "タイ"),
56128: ("KR", "Korea", "韓国"),
56357: ("ID", "Indonesia", "インドネシア"),
56458: ("TH", "Thailand", "タイ"),
56460: ("RU", "Russian Federation", "ロシア連邦"),
56829: ("ID", "Indonesia", "インドネシア"),
56897: ("US", "United States", "アメリカ合衆国"),
56935: ("ID", "Indonesia", "インドネシア"),
56947: ("CN", "China", "中華人民共和国"),
56948: ("ID", "Indonesia", "インドネシア"),
57114: ("TH", "Thailand", "タイ"),
57388: ("MX", "Mexico", "メキシコ"),
57484: ("ID", "Indonesia", "インドネシア"),
57723: ("BR", "Brazil", "ブラジル"),
57982: ("MY", "Malaysia", "マレーシア"),
58013: ("US", "United States", "アメリカ合衆国"),
58336: ("PH", "Philippines", "フィリピン"),
58485: ("CN", "China", "中華人民共和国"),
59037: ("ID", "Indonesia", "インドネシア"),
59178: ("ID", "Indonesia", "インドネシア"),
59315: ("US", "United States", "アメリカ合衆国"),
59618: ("MX", "Mexico", "メキシコ"),
59775: ("ID", "Indonesia", "インドネシア"),
59790: ("MY", "Malaysia", "マレーシア"),
60092: ("MX", "Mexico", "メキシコ"),
60100: ("ID", "Indonesia", "インドネシア"),
60296: ("US", "United States", "アメリカ合衆国"),
60939: ("US", "United States", "アメリカ合衆国"),
61467: ("TH", "Thailand", "タイ"),
61526: ("TH", "Thailand", "タイ"),
61859: ("US", "United States", "アメリカ合衆国"),
62410: ("ID", "Indonesia", "インドネシア"),
62423: ("CA", "Canada", "カナダ"),
62521: ("CL", "Chile", "チリ"),
62613: ("ID", "Indonesia", "インドネシア"),
62825: ("CN", "China", "中華人民共和国"),
62862: ("TH", "Thailand", "タイ"),
63009: ("BE", "Belgium", "ベルギー"),
63224: ("TH", "Thailand", "タイ"),
63225: ("MY", "Malaysia", "マレーシア"),
63254: ("TH", "Thailand", "タイ"),
63804: ("TH", "Thailand", "タイ"),
63804: ("JP", "Japan", "日本"),
64150: ("SG", "Singapore", "シンガポール"),
64287: ("PT", "Portugal", "ポルトガル"),
64392: ("CN", "China", "中華人民共和国"),
64452: ("US", "United States", "アメリカ合衆国"),
64520: ("TH", "Thailand", "タイ"),
64694: ("AR", "Argentina", "アルゼンチン"),
64842: ("ID", "Indonesia", "インドネシア"),
65339: ("ID", "Indonesia", "インドネシア"),
65343: ("CN", "China", "中華人民共和国"),
65547: ("VN", "Viet Nam", "ベトナム"),
65821: ("US", "United States", "アメリカ合衆国"),
66468: ("KR", "Korea", "韓国"),
66578: ("MY", "Malaysia", "マレーシア"),
67081: ("SG", "Singapore", "シンガポール"),
67081: ("MY", "Malaysia", "マレーシア"),
67717: ("US", "United States", "アメリカ合衆国"),
67717: ("CN", "China", "中華人民共和国"),
67776: ("GB", "United Kingdom", "イギリス"),
68352: ("TH", "Thailand", "タイ"),
68789: ("ID", "Indonesia", "インドネシア"),
68908: ("CO", "Colombia", "コロンビア"),
68956: ("MY", "Malaysia", "マレーシア"),
68981: ("ES", "Spain", "スペイン"),
69006: ("KR", "Korea", "韓国"),
69299: ("ID", "Indonesia", "インドネシア"),
69368: ("ID", "Indonesia", "インドネシア"),
69542: ("TH", "Thailand", "タイ"),
69659: ("MY", "Malaysia", "マレーシア"),
70219: ("CN", "China", "中華人民共和国"),
70835: ("CN", "China", "中華人民共和国"),
71213: ("TH", "Thailand", "タイ"),
71213: ("JP", "Japan", "日本"),
71213: ("RU", "Russian Federation", "ロシア連邦"),
71232: ("TH", "Thailand", "タイ"),
71441: ("CN", "China", "中華人民共和国"),
71476: ("SG", "Singapore", "シンガポール"),
72728: ("PH", "Philippines", "フィリピン"),
72915: ("MY", "Malaysia", "マレーシア"),
73264: ("US", "United States", "アメリカ合衆国"),
73293: ("TH", "Thailand", "タイ"),
73436: ("GB", "United Kingdom", "イギリス"),
73670: ("ID", "Indonesia", "インドネシア"),
73847: ("AU", "Australia", "オーストラリア"),
74458: ("US", "United States", "アメリカ合衆国"),
75710: ("CN", "China", "中華人民共和国"),
75832: ("TH", "Thailand", "タイ"),
75863: ("TH", "Thailand", "タイ"),
75877: ("TH", "Thailand", "タイ"),
76004: ("MY", "Malaysia", "マレーシア"),
76173: ("KR", "Korea", "韓国"),
76450: ("BR", "Brazil", "ブラジル"),
76729: ("US", "United States", "アメリカ合衆国"),
77080: ("KR", "Korea", "韓国"),
77344: ("ID", "Indonesia", "インドネシア"),
77344: ("MY", "Malaysia", "マレーシア"),
77624: ("GB", "United Kingdom", "イギリス"),
77827: ("BR", "Brazil", "ブラジル"),
78139: ("PY", "Paraguay", "パラグアイ"),
78639: ("MY", "Malaysia", "マレーシア"),
78734: ("CA", "Canada", "カナダ"),
78778: ("KR", "Korea", "韓国"),
78825: ("TH", "Thailand", "タイ"),
79551: ("TH", "Thailand", "タイ"),
79583: ("MY", "Malaysia", "マレーシア"),
79797: ("MY", "Malaysia", "マレーシア"),
81031: ("TH", "Thailand", "タイ"),
81579: ("ID", "Indonesia", "インドネシア"),
82814: ("US", "United States", "アメリカ合衆国"),
82814: ("CA", "Canada", "カナダ"),
82814: ("BE", "Belgium", "ベルギー"),
82890: ("MY", "Malaysia", "マレーシア"),
83138: ("CL", "Chile", "チリ"),
83337: ("MY", "Malaysia", "マレーシア"),
83452: ("TH", "Thailand", "タイ"),
83615: ("TH", "Thailand", "タイ"),
83717: ("TH", "Thailand", "タイ"),
83780: ("MY", "Malaysia", "マレーシア"),
83986: ("GR", "Greece", "ギリシャ"),
83986: ("JP", "Japan", "日本"),
84098: ("US", "United States", "アメリカ合衆国"),
84098: ("PH", "Philippines", "フィリピン"),
85014: ("CN", "China", "中華人民共和国"),
85195: ("MY", "Malaysia", "マレーシア"),
85326: ("MY", "Malaysia", "マレーシア"),
85501: ("PH", "Philippines", "フィリピン"),
85701: ("MY", "Malaysia", "マレーシア"),
85732: ("TH", "Thailand", "タイ"),
85752: ("TH", "Thailand", "タイ"),
86024: ("MY", "Malaysia", "マレーシア"),
86573: ("MY", "Malaysia", "マレーシア"),
86644: ("ID", "Indonesia", "インドネシア"),
86730: ("SG", "Singapore", "シンガポール"),
86730: ("MY", "Malaysia", "マレーシア"),
86736: ("ES", "Spain", "スペイン"),
86846: ("TH", "Thailand", "タイ"),
87463: ("ID", "Indonesia", "インドネシア"),
88187: ("ID", "Indonesia", "インドネシア"),
88871: ("ID", "Indonesia", "インドネシア"),
88963: ("TH", "Thailand", "タイ"),
89493: ("KR", "Korea", "韓国"),
89527: ("ID", "Indonesia", "インドネシア"),
90114: ("TH", "Thailand", "タイ"),
90172: ("CA", "Canada", "カナダ"),
90300: ("CA", "Canada", "カナダ"),
90427: ("TH", "Thailand", "タイ"),
90712: ("ID", "Indonesia", "インドネシア"),
90892: ("KR", "Korea", "韓国"),
91089: ("TN", "Tunisia", "チュニジア"),
91089: ("FR", "France", "フランス"),
91324: ("PH", "Philippines", "フィリピン"),
91385: ("TH", "Thailand", "タイ"),
91388: ("PH", "Philippines", "フィリピン"),
91857: ("MY", "Malaysia", "マレーシア"),
92152: ("KR", "Korea", "韓国"),
92413: ("MY", "Malaysia", "マレーシア"),
92607: ("TH", "Thailand", "タイ"),
92708: ("ID", "Indonesia", "インドネシア"),
93546: ("ID", "Indonesia", "インドネシア"),
94200: ("TH", "Thailand", "タイ"),
94237: ("TR", "Turkey", "トルコ"),
94637: ("TH", "Thailand", "タイ"),
95120: ("TH", "Thailand", "タイ"),
95307: ("US", "United States", "アメリカ合衆国"),
95445: ("TR", "Turkey", "トルコ"),
95779: ("ES", "Spain", "スペイン"),
96069: ("TR", "Turkey", "トルコ"),
97118: ("TH", "Thailand", "タイ"),
97952: ("TH", "Thailand", "タイ"),
98150: ("TH", "Thailand", "タイ"),
98180: ("TH", "Thailand", "タイ"),
98281: ("CL", "Chile", "チリ"),
98555: ("TH", "Thailand", "タイ"),
98595: ("TH", "Thailand", "タイ"),
98704: ("MY", "Malaysia", "マレーシア"),
98776: ("SG", "Singapore", "シンガポール"),
98855: ("TH", "Thailand", "タイ"),
99901: ("SE", "Sweden", "スウェーデン"),
100053: ("TH", "Thailand", "タイ"),
100194: ("TH", "Thailand", "タイ"),
100362: ("ID", "Indonesia", "インドネシア"),
100788: ("SG", "Singapore", "シンガポール"),
100880: ("TH", "Thailand", "タイ"),
100893: ("US", "United States", "アメリカ合衆国"),
100893: ("CN", "China", "中華人民共和国"),
103091: ("ID", "Indonesia", "インドネシア"),
103152: ("TH", "Thailand", "タイ"),
103476: ("ID", "Indonesia", "インドネシア"),
103918: ("US", "United States", "アメリカ合衆国"),
104086: ("ES", "Spain", "スペイン"),
104251: ("US", "United States", "アメリカ合衆国"),
104393: ("RU", "Russian Federation", "ロシア連邦"),
104605: ("CN", "China", "中華人民共和国"),
105360: ("UY", "Uruguay", "ウルグアイ"),
105595: ("TH", "Thailand", "タイ"),
106430: ("AR", "Argentina", "アルゼンチン"),
107720: ("ID", "Indonesia", "インドネシア"),
107736: ("TH", "Thailand", "タイ"),
108034: ("TH", "Thailand", "タイ"),
108114: ("AE", "United Arab Emirates", "アラブ首長国連邦"),
108114: ("PH", "Philippines", "フィリピン"),
108494: ("TH", "Thailand", "タイ"),
108653: ("TH", "Thailand", "タイ"),
108952: ("TH", "Thailand", "タイ"),
108972: ("GB", "United Kingdom", "イギリス"),
109185: ("TH", "Thailand", "タイ"),
109281: ("TH", "Thailand", "タイ"),
109321: ("MY", "Malaysia", "マレーシア"),
109949: ("ID", "Indonesia", "インドネシア"),
110605: ("TH", "Thailand", "タイ"),
110665: ("ID", "Indonesia", "インドネシア"),
110966: ("TH", "Thailand", "タイ"),
111181: ("PH", "Philippines", "フィリピン"),
111241: ("SG", "Singapore", "シンガポール"),
111241: ("MY", "Malaysia", "マレーシア"),
111575: ("TH", "Thailand", "タイ"),
111777: ("TH", "Thailand", "タイ"),
112124: ("ID", "Indonesia", "インドネシア"),
112133: ("TH", "Thailand", "タイ"),
112542: ("TH", "Thailand", "タイ"),
112834: ("DE", "Germany", "ドイツ"),
113104: ("RU", "Russian Federation", "ロシア連邦"),
114349: ("ID", "Indonesia", "インドネシア"),
114592: ("ID", "Indonesia", "インドネシア"),
114787: ("MY", "Malaysia", "マレーシア"),
114864: ("KR", "Korea", "韓国"),
114907: ("MY", "Malaysia", "マレーシア"),
115185: ("CA", "Canada", "カナダ"),
116268: ("PH", "Philippines", "フィリピン"),
117015: ("US", "United States", "アメリカ合衆国"),
117408: ("MY", "Malaysia", "マレーシア"),
117557: ("ID", "Indonesia", "インドネシア"),
117731: ("ID", "Indonesia", "インドネシア"),
118292: ("PH", "Philippines", "フィリピン"),
118306: ("TH", "Thailand", "タイ"),
118306: ("JP", "Japan", "日本"),
118435: ("SG", "Singapore", "シンガポール"),
118551: ("TH", "Thailand", "タイ"),
119167: ("TH", "Thailand", "タイ"),
119683: ("TH", "Thailand", "タイ"),
119795: ("ID", "Indonesia", "インドネシア"),
121031: ("US", "United States", "アメリカ合衆国"),
121108: ("ID", "Indonesia", "インドネシア"),
121455: ("KR", "Korea", "韓国"),
122107: ("KR", "Korea", "韓国"),
122403: ("MY", "Malaysia", "マレーシア"),
122452: ("BR", "Brazil", "ブラジル"),
122456: ("ID", "Indonesia", "インドネシア"),
122543: ("IT", "Italy", "イタリア"),
122599: ("ID", "Indonesia", "インドネシア"),
122616: ("MY", "Malaysia", "マレーシア"),
123243: ("MY", "Malaysia", "マレーシア"),
123615: ("US", "United States", "アメリカ合衆国"),
123753: ("MY", "Malaysia", "マレーシア"),
123898: ("TH", "Thailand", "タイ"),
123991: ("KR", "Korea", "韓国"),
124023: ("ID", "Indonesia", "インドネシア"),
124376: ("TH", "Thailand", "タイ"),
124431: ("MY", "Malaysia", "マレーシア"),
125256: ("TH", "Thailand", "タイ"),
125627: ("TH", "Thailand", "タイ"),
126155: ("ID", "Indonesia", "インドネシア"),
126372: ("TH", "Thailand", "タイ"),
126836: ("ID", "Indonesia", "インドネシア"),
127350: ("RU", "Russian Federation", "ロシア連邦"),
127489: ("ID", "Indonesia", "インドネシア"),
128187: ("TH", "Thailand", "タイ"),
128351: ("US", "United States", "アメリカ合衆国"),
128541: ("IN", "India", "インド"),
128968: ("SG", "Singapore", "シンガポール"),
130018: ("CN", "China", "中華人民共和国"),
130098: ("TH", "Thailand", "タイ"),
130363: ("CN", "China", "中華人民共和国"),
130369: ("KR", "Korea", "韓国"),
130905: ("TH", "Thailand", "タイ"),
131534: ("TH", "Thailand", "タイ"),
132500: ("GB", "United Kingdom", "イギリス"),
132549: ("PY", "Paraguay", "パラグアイ"),
132638: ("CN", "China", "中華人民共和国"),
132659: ("CN", "China", "中華人民共和国"),
132773: ("TR", "Turkey", "トルコ"),
132852: ("ID", "Indonesia", "インドネシア"),
132852: ("SG", "Singapore", "シンガポール"),
132852: ("MY", "Malaysia", "マレーシア"),
132902: ("MX", "Mexico", "メキシコ"),
132929: ("MY", "Malaysia", "マレーシア"),
133119: ("TH", "Thailand", "タイ"),
133232: ("RU", "Russian Federation", "ロシア連邦"),
133239: ("US", "United States", "アメリカ合衆国"),
133845: ("ID", "Indonesia", "インドネシア"),
133845: ("SG", "Singapore", "シンガポール"),
133888: ("NL", "Netherlands", "オランダ"),
134415: ("TH", "Thailand", "タイ"),
135470: ("TH", "Thailand", "タイ"),
135478: ("ID", "Indonesia", "インドネシア"),
135646: ("CN", "China", "中華人民共和国"),
136308: ("US", "United States", "アメリカ合衆国"),
136766: ("KR", "Korea", "韓国"),
137406: ("TH", "Thailand", "タイ"),
137425: ("ID", "Indonesia", "インドネシア"),
137653: ("KR", "Korea", "韓国"),
137853: ("PH", "Philippines", "フィリピン"),
138239: ("TH", "Thailand", "タイ"),
138703: ("MY", "Malaysia", "マレーシア"),
138836: ("TH", "Thailand", "タイ"),
138901: ("BR", "Brazil", "ブラジル"),
139727: ("MY", "Malaysia", "マレーシア"),
140414: ("TH", "Thailand", "タイ"),
140689: ("ID", "Indonesia", "インドネシア"),
140811: ("TH", "Thailand", "タイ"),
141230: ("MY", "Malaysia", "マレーシア"),
141302: ("US", "United States", "アメリカ合衆国"),
141302: ("DE", "Germany", "ドイツ"),
141419: ("US", "United States", "アメリカ合衆国"),
141586: ("KR", "Korea", "韓国"),
142155: ("GR", "Greece", "ギリシャ"),
142155: ("DE", "Germany", "ドイツ"),
143279: ("US", "United States", "アメリカ合衆国"),
143485: ("ID", "Indonesia", "インドネシア"),
143508: ("PH", "Philippines", "フィリピン"),
143993: ("TH", "Thailand", "タイ"),
144601: ("NL", "Netherlands", "オランダ"),
144601: ("BE", "Belgium", "ベルギー"),
144753: ("BR", "Brazil", "ブラジル"),
145930: ("TR", "Turkey", "トルコ"),
146352: ("MY", "Malaysia", "マレーシア"),
146775: ("ID", "Indonesia", "インドネシア"),
147131: ("MY", "Malaysia", "マレーシア"),
147283: ("TH", "Thailand", "タイ"),
147371: ("ID", "Indonesia", "インドネシア"),
147491: ("RU", "Russian Federation", "ロシア連邦"),
147536: ("UA", "Ukraine", "ウクライナ"),
147950: ("PH", "Philippines", "フィリピン"),
147981: ("BR", "Brazil", "ブラジル"),
148755: ("ID", "Indonesia", "インドネシア"),
148841: ("ID", "Indonesia", "インドネシア"),
148854: ("KR", "Korea", "韓国"),
149024: ("TH", "Thailand", "タイ"),
149031: ("MY", "Malaysia", "マレーシア"),
149151: ("PH", "Philippines", "フィリピン"),
150541: ("SG", "Singapore", "シンガポール"),
151365: ("SG", "Singapore", "シンガポール"),
151830: ("US", "United States", "アメリカ合衆国"),
152094: ("FR", "France", "フランス"),
152209: ("ID", "Indonesia", "インドネシア"),
152267: ("KR", "Korea", "韓国"),
152353: ("ID", "Indonesia", "インドネシア"),
152353: ("MY", "Malaysia", "マレーシア"),
152393: ("MY", "Malaysia", "マレーシア"),
152735: ("TH", "Thailand", "タイ"),
152894: ("MY", "Malaysia", "マレーシア"),
153107: ("ID", "Indonesia", "インドネシア"),
153393: ("TH", "Thailand", "タイ"),
153700: ("MY", "Malaysia", "マレーシア"),
154516: ("IT", "Italy", "イタリア"),
154569: ("TH", "Thailand", "タイ"),
154686: ("TH", "Thailand", "タイ"),
154774: ("ID", "Indonesia", "インドネシア"),
155802: ("SE", "Sweden", "スウェーデン"),
156087: ("ID", "Indonesia", "インドネシア"),
156352: ("SG", "Singapore", "シンガポール"),
156650: ("KR", "Korea", "韓国"),
156802: ("ID", "Indonesia", "インドネシア"),
157048: ("MY", "Malaysia", "マレーシア"),
157606: ("TH", "Thailand", "タイ"),
157780: ("ID", "Indonesia", "インドネシア"),
158367: ("US", "United States", "アメリカ合衆国"),
159762: ("TH", "Thailand", "タイ"),
160450: ("TH", "Thailand", "タイ"),
160601: ("MY", "Malaysia", "マレーシア"),
161429: ("TH", "Thailand", "タイ"),
162014: ("KR", "Korea", "韓国"),
162932: ("ID", "Indonesia", "インドネシア"),
163294: ("KR", "Korea", "韓国"),
163294: ("MY", "Malaysia", "マレーシア"),
163692: ("US", "United States", "アメリカ合衆国"),
163692: ("NL", "Netherlands", "オランダ"),
163742: ("ID", "Indonesia", "インドネシア"),
163961: ("US", "United States", "アメリカ合衆国"),
164039: ("TH", "Thailand", "タイ"),
164063: ("TH", "Thailand", "タイ"),
164275: ("ID", "Indonesia", "インドネシア"),
164777: ("ID", "Indonesia", "インドネシア"),
165084: ("ID", "Indonesia", "インドネシア"),
165860: ("TH", "Thailand", "タイ"),
165908: ("TH", "Thailand", "タイ"),
166115: ("MY", "Malaysia", "マレーシア"),
166542: ("TH", "Thailand", "タイ"),
166585: ("ID", "Indonesia", "インドネシア"),
166797: ("RU", "Russian Federation", "ロシア連邦"),
167307: ("ID", "Indonesia", "インドネシア"),
167823: ("US", "United States", "アメリカ合衆国"),
168377: ("ID", "Indonesia", "インドネシア"),
168518: ("MY", "Malaysia", "マレーシア"),
169079: ("MY", "Malaysia", "マレーシア"),
169083: ("KR", "Korea", "韓国"),
169091: ("MY", "Malaysia", "マレーシア"),
169716: ("TH", "Thailand", "タイ"),
170510: ("AU", "Australia", "オーストラリア"),
171577: ("SG", "Singapore", "シンガポール"),
171940: ("ID", "Indonesia", "インドネシア"),
172148: ("TH", "Thailand", "タイ"),
172222: ("ID", "Indonesia", "インドネシア"),
172891: ("ID", "Indonesia", "インドネシア"),
173137: ("MY", "Malaysia", "マレーシア"),
174407: ("SG", "Singapore", "シンガポール"),
174662: ("TH", "Thailand", "タイ"),
174894: ("US", "United States", "アメリカ合衆国"),
175196: ("ID", "Indonesia", "インドネシア"),
175928: ("MY", "Malaysia", "マレーシア"),
179331: ("US", "United States", "アメリカ合衆国"),
179453: ("TH", "Thailand", "タイ"),
180811: ("BR", "Brazil", "ブラジル"),
180929: ("TH", "Thailand", "タイ"),
181027: ("BR", "Brazil", "ブラジル"),
181256: ("KR", "Korea", "韓国"),
181629: ("TH", "Thailand", "タイ"),
181814: ("US", "United States", "アメリカ合衆国"),
181844: ("GB", "United Kingdom", "イギリス"),
182324: ("TR", "Turkey", "トルコ"),
183289: ("TH", "Thailand", "タイ"),
183557: ("PH", "Philippines", "フィリピン"),
184455: ("TH", "Thailand", "タイ"),
184651: ("MY", "Malaysia", "マレーシア"),
184678: ("US", "United States", "アメリカ合衆国"),
185855: ("TH", "Thailand", "タイ"),
185880: ("MY", "Malaysia", "マレーシア"),
186987: ("FR", "France", "フランス"),
187032: ("MY", "Malaysia", "マレーシア"),
187777: ("ID", "Indonesia", "インドネシア"),
188046: ("ID", "Indonesia", "インドネシア"),
188274: ("MY", "Malaysia", "マレーシア"),
188281: ("TH", "Thailand", "タイ"),
188396: ("TH", "Thailand", "タイ"),
189455: ("KR", "Korea", "韓国"),
190761: ("TH", "Thailand", "タイ"),
191437: ("CL", "Chile", "チリ"),
191778: ("ID", "Indonesia", "インドネシア"),
192447: ("KR", "Korea", "韓国"),
193111: ("MY", "Malaysia", "マレーシア"),
193229: ("US", "United States", "アメリカ合衆国"),
193353: ("MY", "Malaysia", "マレーシア"),
193581: ("GB", "United Kingdom", "イギリス"),
194052: ("CN", "China", "中華人民共和国"),
194136: ("TR", "Turkey", "トルコ"),
194393: ("TH", "Thailand", "タイ"),
194879: ("MY", "Malaysia", "マレーシア"),
195416: ("TH", "Thailand", "タイ"),
195780: ("BE", "Belgium", "ベルギー"),
197002: ("US", "United States", "アメリカ合衆国"),
197243: ("ID", "Indonesia", "インドネシア"),
198279: ("MY", "Malaysia", "マレーシア"),
198535: ("ID", "Indonesia", "インドネシア"),
198752: ("TH", "Thailand", "タイ"),
199126: ("KR", "Korea", "韓国"),
199564: ("TH", "Thailand", "タイ"),
199790: ("MX", "Mexico", "メキシコ"),
200005: ("PY", "Paraguay", "パラグアイ"),
200451: ("KW", "Kuwait", "クウェート"),
201397: ("MY", "Malaysia", "マレーシア"),
201618: ("CN", "China", "中華人民共和国"),
201786: ("KR", "Korea", "韓国"),
201902: ("US", "United States", "アメリカ合衆国"),
202296: ("KR", "Korea", "韓国"),
202607: ("DO", "Dominican Republic", "ドミニカ共和国"),
203001: ("TH", "Thailand", "タイ"),
203227: ("TH", "Thailand", "タイ"),
203298: ("TH", "Thailand", "タイ"),
204068: ("TH", "Thailand", "タイ"),
204376: ("TH", "Thailand", "タイ"),
204376: ("JP", "Japan", "日本"),
205345: ("ID", "Indonesia", "インドネシア"),
205385: ("TH", "Thailand", "タイ"),
205803: ("RU", "Russian Federation", "ロシア連邦"),
206615: ("ID", "Indonesia", "インドネシア"),
206755: ("IT", "Italy", "イタリア"),
206943: ("NL", "Netherlands", "オランダ"),
206976: ("SG", "Singapore", "シンガポール"),
206976: ("MY", "Malaysia", "マレーシア"),
207337: ("KR", "Korea", "韓国"),
207380: ("TH", "Thailand", "タイ"),
207933: ("TH", "Thailand", "タイ"),
208037: ("SG", "Singapore", "シンガポール"),
209019: ("TH", "Thailand", "タイ"),
209305: ("TH", "Thailand", "タイ"),
209747: ("NL", "Netherlands", "オランダ"),
209775: ("IT", "Italy", "イタリア"),
209874: ("TH", "Thailand", "タイ"),
210099: ("ID", "Indonesia", "インドネシア"),
210978: ("PH", "Philippines", "フィリピン"),
212227: ("US", "United States", "アメリカ合衆国"),
212758: ("ID", "Indonesia", "インドネシア"),
213001: ("TH", "Thailand", "タイ"),
213512: ("ID", "Indonesia", "インドネシア"),
215541: ("US", "United States", "アメリカ合衆国"),
216078: ("TR", "Turkey", "トルコ"),
216108: ("TH", "Thailand", "タイ"),
216458: ("TH", "Thailand", "タイ"),
216658: ("US", "United States", "アメリカ合衆国"),
217107: ("CA", "Canada", "カナダ"),
217910: ("TH", "Thailand", "タイ"),
218421: ("MY", "Malaysia", "マレーシア"),
218744: ("SG", "Singapore", "シンガポール"),
219663: ("MY", "Malaysia", "マレーシア"),
219804: ("TH", "Thailand", "タイ"),
220111: ("MY", "Malaysia", "マレーシア"),
220992: ("KR", "Korea", "韓国"),
221577: ("TH", "Thailand", "タイ"),
222112: ("KR", "Korea", "韓国"),
222401: ("MY", "Malaysia", "マレーシア"),
222638: ("PH", "Philippines", "フィリピン"),
224441: ("SG", "Singapore", "シンガポール"),
224441: ("MY", "Malaysia", "マレーシア"),
224554: ("ID", "Indonesia", "インドネシア"),
225326: ("TH", "Thailand", "タイ"),
225745: ("PH", "Philippines", "フィリピン"),
226178: ("CN", "China", "中華人民共和国"),
228287: ("ID", "Indonesia", "インドネシア"),
229783: ("TH", "Thailand", "タイ"),
230429: ("IT", "Italy", "イタリア"),
230985: ("KR", "Korea", "韓国"),
231034: ("MY", "Malaysia", "マレーシア"),
231602: ("SA", "Saudi Arabia", "サウジアラビア"),
232544: ("TH", "Thailand", "タイ"),
232551: ("ID", "Indonesia", "インドネシア"),
234153: ("AU", "Australia", "オーストラリア"),
234484: ("KR", "Korea", "韓国"),
235078: ("SG", "Singapore", "シンガポール"),
235546: ("TH", "Thailand", "タイ"),
236967: ("GB", "United Kingdom", "イギリス"),
237582: ("TH", "Thailand", "タイ"),
237662: ("KR", "Korea", "韓国"),
238122: ("ID", "Indonesia", "インドネシア"),
238326: ("KR", "Korea", "韓国"),
240750: ("ID", "Indonesia", "インドネシア"),
240865: ("TH", "Thailand", "タイ"),
241264: ("PH", "Philippines", "フィリピン"),
241650: ("KR", "Korea", "韓国"),
241719: ("TH", "Thailand", "タイ"),
241817: ("MY", "Malaysia", "マレーシア"),
242278: ("ID", "Indonesia", "インドネシア"),
242361: ("LK", "Sri Lanka", "スリランカ"),
243628: ("TH", "Thailand", "タイ"),
244599: ("SG", "Singapore", "シンガポール"),
244599: ("DE", "Germany", "ドイツ"),
244851: ("ID", "Indonesia", "インドネシア"),
246485: ("MY", "Malaysia", "マレーシア"),
246737: ("TR", "Turkey", "トルコ"),
247464: ("TH", "Thailand", "タイ"),
248754: ("CN", "China", "中華人民共和国"),
249891: ("US", "United States", "アメリカ合衆国"),
250146: ("US", "United States", "アメリカ合衆国"),
250768: ("ID", "Indonesia", "インドネシア"),
251103: ("AE", "United Arab Emirates", "アラブ首長国連邦"),
251784: ("AU", "Australia", "オーストラリア"),
251814: ("ID", "Indonesia", "インドネシア"),
253036: ("ID", "Indonesia", "インドネシア"),
254403: ("ID", "Indonesia", "インドネシア"),
254715: ("SG", "Singapore", "シンガポール"),
254715: ("MY", "Malaysia", "マレーシア"),
256374: ("MY", "Malaysia", "マレーシア"),
259280: ("ID", "Indonesia", "インドネシア"),
259304: ("BR", "Brazil", "ブラジル"),
259326: ("TH", "Thailand", "タイ"),
259351: ("TH", "Thailand", "タイ"),
259785: ("GB", "United Kingdom", "イギリス"),
259804: ("MY", "Malaysia", "マレーシア"),
259925: ("TH", "Thailand", "タイ"),
259971: ("PH", "Philippines", "フィリピン"),
260285: ("TH", "Thailand", "タイ"),
261060: ("ID", "Indonesia", "インドネシア"),
261072: ("CN", "China", "中華人民共和国"),
261296: ("TR", "Turkey", "トルコ"),
262882: ("ID", "Indonesia", "インドネシア"),
263182: ("MY", "Malaysia", "マレーシア"),
263876: ("PH", "Philippines", "フィリピン"),
264241: ("MY", "Malaysia", "マレーシア"),
264332: ("MY", "Malaysia", "マレーシア"),
264932: ("US", "United States", "アメリカ合衆国"),
265291: ("CL", "Chile", "チリ"),
266460: ("TR", "Turkey", "トルコ")
}
# print(userid_country[263876][2])
| 1.210938 | 1 |
main.py | sms77io/active-workflow-voice | 0 | 12764569 | <gh_stars>0
import os
from flask import Flask
from flask import jsonify
from flask import request
from sms77api.Sms77api import Sms77api
app = Flask(__name__)
@app.route('/', methods=['POST'])
def handle():
    """Single POST endpoint: delegate every ActiveWorkflow call to the agent."""
    return handle_agent(Sms77VoiceAgent, request)
def handle_agent(cls, req):
    """Helper that routes 'method calls' to a real agent object."""
    body = req.json
    method_name = body['method']
    params = body.get('params')

    # 'register' is the only call that needs no agent instance.
    if method_name == 'register':
        result = cls.register()
    elif method_name == 'check':
        result = cls(params).check()
    elif method_name == 'receive':
        result = cls(params).receive(params['message'])
    else:
        result = {}
    return jsonify(result)
class Sms77VoiceAgent:
    """ActiveWorkflow remote agent that issues Text2Speech calls via Sms77.io.

    A fresh instance is created for every 'method call' routed through
    handle_agent(); the only state that survives between invocations is the
    ``memory`` dict that ActiveWorkflow passes back and forth.
    """

    def __init__(self, params):
        """Set some convenience variables.

        Args:
            params: dict with 'credentials', 'options' and 'memory' keys,
                as supplied by ActiveWorkflow.
        """
        self.credentials = params['credentials']
        self.options = params['options']
        # ActiveWorkflow may send None for an empty memory; normalize to {}.
        self.memory = params['memory'] or {}

    @staticmethod
    def register():
        """Register our metadata.

        Declared as a @staticmethod (it takes no instance state) so it is
        valid whether called on the class or on an instance; previously it was
        a bare no-argument def that only worked via ``cls.register()``.
        """
        return {
            'result': {
                'default_options': {
                    'apiKey': '{% credential sms77_api_key %}',
                    'from': None,
                    'text': None,
                    'to': None,
                    'xml': False,
                },
                'description': 'Agent to issue Text2Speech calls via Sms77.io.',
                'display_name': 'Sms77 Voice Agent',
                'name': 'Sms77VoiceAgent',
            }
        }

    def check(self):
        """This is run on schedule.

        Emits the last received message (if any) and removes it from the
        returned memory so it is only re-emitted once.
        """
        messages = []
        if self.memory.get('last_message'):
            messages.append(self.memory['last_message'])

        # Work on a copy so self.memory itself is left untouched.
        memory = self.memory.copy()
        memory.pop('last_message', None)
        return {
            'result': {
                'errors': [],
                'logs': ['Check done'],
                'memory': memory,
                'messages': messages,
            }
        }

    def receive(self, message):
        """Validate the incoming payload and place the Text2Speech call.

        Required payload keys: 'text' and 'to'; 'apiKey' may instead come
        from the SMS77_API_KEY environment variable. Optional: 'xml', 'from'.
        Missing values are reported in 'errors' and no call is made.
        """
        errors = []
        messages = []
        payload = message['payload']
        # Remember the raw payload so check() can emit it later.
        self.memory['last_message'] = payload

        api_key = payload.pop('apiKey', os.getenv('SMS77_API_KEY'))
        if api_key is None:
            errors.append('Missing API key')
        text = payload.pop('text', None)
        if text is None:
            errors.append('Missing text')
        to = payload.pop('to', None)
        if to is None:
            errors.append('Missing to')

        if not errors:
            messages.append(Sms77api(api_key, 'active-workflow').voice(
                to, text,
                payload.pop('xml', False), payload.pop('from', None)))

        return {
            'result': {
                'errors': errors,
                'logs': ['New message received'],
                'memory': self.memory,
                'messages': messages,
            }
        }
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| 2.78125 | 3 |
autoload/leaderf/python/filer/commands/input.py | tsuyoshicho/LeaderF-filer | 43 | 12764570 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from leaderf.utils import lfCmd
_switch_normal_mode_key = ""
_context = {}
# search_func
# additional_prompt_string
# cli_key_dict
# cli_cmdline
# cli_cursor_pos
# cli_pattern
# cursor_pos
# results
# input_prompt_prompts
# input_prompt_command
def command___input_cancel(manager):
    # <Esc>/<C-C> handler while an input prompt is active: restore the cli
    # state (key map, pattern, cursor) saved by save_context(), then switch
    # back to normal mode. Order matters: restore before the mode switch.
    _restore_context(manager)
    _switch_normal_mode(manager)
def command___input_prompt(manager):
    """<CR> handler that advances a chained input prompt.

    Stashes the value typed for the prompt that just finished, displays the
    next prompt, and rebinds <CR> to the final command on the last step.
    """
    global _context
    cli = manager._instance._cli
    remaining = _context["input_prompt_prompts"]
    command = _context["input_prompt_command"]

    # Save the previous input value before moving to the next prompt.
    _context.setdefault("results", []).append(cli.pattern)
    _context["input_prompt_prompts"] = remaining[1:]

    # Show the next prompt with its pre-filled text.
    current = remaining[0]
    cli._additional_prompt_string = current.get("prompt")
    cli.setPattern(current.get("text", ""))

    # On the last prompt <CR> triggers the real command, otherwise it chains
    # back into this handler.
    key_dict = cli._key_dict
    key_dict["<CR>"] = "_do_" + command if len(remaining) == 1 else "_input_prompt"
    cli._key_dict = key_dict
def input_prompt(manager, command, prompts=None):
    """
    params:
        command:
            "delete" or "edit" or ("add")
            When <CR> is pressed, manager.command___do_{command}() is executed.
        prompts:
            input prompts
            [
                {"prompt": prompt1, "text": text1},
                ...
            ]
    """
    global _context
    # Bug fix: the default used to be a shared mutable list (prompts=[]);
    # use None as the sentinel and create a fresh list per call.
    if prompts is None:
        prompts = []
    _context["input_prompt_prompts"] = prompts[1:]
    _context["input_prompt_command"] = command

    # set pattern
    prompt = prompts[0].get("prompt", "")
    text = prompts[0].get("text", "")
    manager._instance._cli._additional_prompt_string = prompt
    manager._instance._cli.setPattern(text)

    # update key_dict: keep only the keys needed for plain line editing
    key_dict = {
        lhs: rhs
        for [lhs, rhs] in manager._instance._cli._key_dict.items()
        if rhs.lower()
        in {
            "<esc>",
            "<c-c>",
            "<bs>",
            "<c-h>",
            "<c-u>",
            "<c-w>",
            "<del>",
            "<c-v>",
            "<s-insert>",
            "<home>",
            "<c-b>",
            "<end>",
            "<c-e>",
            "<left>",
            "<right>",
            "<up>",
            "<down>",
            "open_parent_or_backspace",
            "open_parent_or_clear_line",
        }
    }
    # To be able to do general input.
    # Iterate over a snapshot: values are reassigned while looping.
    for lhs, rhs in list(key_dict.items()):
        rhs_low = rhs.lower()
        if rhs_low in {"open_parent_or_backspace", "open_parent_or_clear_line"}:
            key_dict[lhs] = "<BS>"
        elif rhs_low in {"<esc>", "<c-c>"}:
            key_dict[lhs] = "_input_cancel"

    # add command
    if len(prompts) == 1:
        key_dict["<CR>"] = "_do_" + command
    else:
        # chain through command___input_prompt for multi-step prompts
        key_dict["<CR>"] = "_input_prompt"

    manager._instance._cli._key_dict = key_dict
    manager.input()
def do_command(func):
    """
    Decorator feeding collected prompt results to *func*.

    example:
        @do_command
        def command___do_xxx(manager, context, results):
            ...
    """

    def wrapper(manager):
        # Dispatch signature: the sole positional argument is the manager.
        global _context
        collected = _context.get("results", [])
        # The text currently in the prompt is the final input value.
        collected.append(manager._instance._cli.pattern)
        try:
            func(manager, _context, collected)
        finally:
            # Whatever func did, leave the cli in a clean normal-mode state,
            # without restoring the pre-prompt pattern or cursor position.
            _restore_context(
                manager, restore_input_pattern=False, restore_cursor_pos=False
            )
            _switch_normal_mode(manager)
            manager._instance._cli.setPattern("")

    return wrapper
def save_context(manager, **kwargs):
    """ For input_prompt

    Snapshot every piece of cli/window state that the prompt will clobber so
    it can be restored on cancel or completion, then stub out searching while
    the prompt is active. Extra kwargs are stored into the context as-is.
    """
    global _context
    cli = manager._instance._cli
    _context = {
        "search_func": manager._search,
        "additional_prompt_string": cli._additional_prompt_string,
        "cli_key_dict": dict(cli._key_dict),
        "cli_cmdline": list(cli._cmdline),
        "cli_cursor_pos": cli._cursor_pos,
        "cli_pattern": cli._pattern,
        "cursor_pos": manager._getInstance()._window_object.cursor,
    }
    _context.update(**kwargs)
    # Searching is meaningless while prompting; replace it with a no-op.
    manager._search = lambda content, is_continue=False, step=0: ""
def _restore_context(manager, restore_input_pattern=True, restore_cursor_pos=True):
    """ For input_prompt

    Restore the state snapshotted by save_context() and clear the snapshot.

    params:
        restore_input_pattern:
            When True, also restore the cli's `_cmdline`, `_cursor_pos`
            and `_pattern` (used on cancel; a completed command leaves the
            typed pattern alone so the caller can clear it itself).
        restore_cursor_pos:
            When True, also move the popup cursor back to the saved row
            (cancel case only).
    """
    global _context
    # Always restore the search function, prompt string and key map.
    manager._search = _context["search_func"]
    manager._instance._cli._additional_prompt_string = _context[
        "additional_prompt_string"
    ]
    manager._instance._cli._key_dict = _context["cli_key_dict"]

    if restore_cursor_pos:
        # To restore only in case of cancel
        [row, col] = _context["cursor_pos"]
        # Jump the popup window back to the saved row via Vim's win_execute.
        lfCmd(
            """call win_execute({}, 'exec "norm! {}G"')""".format(
                manager._instance._popup_winid, row
            )
        )
        manager._getInstance().refreshPopupStatusline()

    if restore_input_pattern:
        # To restore only in case of cancel
        manager._instance._cli._cmdline = _context["cli_cmdline"]
        manager._instance._cli._cursor_pos = _context["cli_cursor_pos"]
        manager._instance._cli._pattern = _context["cli_pattern"]
    # Snapshot is single-use; drop it.
    _context = {}
def _switch_normal_mode(manager):
    # Feed the key mapped to <Tab> (the input/normal mode toggle) so LeaderF
    # leaves input mode; "n" means do not remap the fed keys.
    lfCmd(r'call feedkeys("{}", "n")'.format(_get_switch_normal_mode_key(manager)))
def _get_switch_normal_mode_key(manager):
    """Return (and cache) the feedkeys-escaped key that toggles normal mode."""
    global _switch_normal_mode_key
    if _switch_normal_mode_key:
        return _switch_normal_mode_key

    # Find the first user mapping whose action is <Tab> (the mode toggle).
    mapped = next(
        (
            lhs
            for lhs, rhs in manager._instance._cli._key_dict.items()
            if rhs.lower() == "<tab>"
        ),
        None,
    )
    if mapped is None:
        _switch_normal_mode_key = r"\<Tab>"
    else:
        # <Tab> => \<Tab> so feedkeys() treats it as a special key
        _switch_normal_mode_key = mapped.replace("<", r"\<")
    return _switch_normal_mode_key
| 2.34375 | 2 |
venvs/sitio_web/restaurantes/forms.py | mmaguero/MII-SSBW16-17 | 1 | 12764571 | # -*- coding: utf-8 -*-
from django import forms
from .models import restaurants
# For individually declared fields (plain form, not a ModelForm):
class RestaurantesForm(forms.Form):
    """Plain (non-model) form for creating/editing a restaurant entry.

    Declaration order determines the rendering order of the fields.
    """
    nombre = forms.CharField(required=True, label='Name', max_length=80)
    cocina = forms.CharField(required=True, label='Cuisine', widget=forms.TextInput(attrs={'placeholder': 'Granaina'}))
    direccion = forms.CharField(required=True, label='Street')
    barrio = forms.CharField(required=True, label='Borough', widget=forms.TextInput())
    imagen = forms.ImageField(required=False, label='Photo')
'''
#for mongoengine
class RestaurantesForm(ModelForm):
class Meta:
model = restaurants
fields = ['name', 'cuisine', 'address.street', 'borough', 'image']
'''
| 2.5625 | 3 |
bin/bacWGS_readQC.py | pkmitchell/bacWGS | 0 | 12764572 | <filename>bin/bacWGS_readQC.py
#!/workdir/miniconda3/envs/bacWGS/bin/python
import sys
from subprocess import call
import argparse
import re
import multiprocessing as mp

'''
Run fastqc on input samples, then parse its output to produce a tab-separated
table of per-file read quality metrics on stdout.
'''
# Argument Parser
parser = argparse.ArgumentParser(description="Run FastQC and parse output to produce table of read quality metrics")
parser.add_argument("-p", "--threads", help="Maximum number of processors to use (Default = 1)", type=int, default=1)
parser.add_argument("Fastqs", help="Input fastqs to be processed", nargs='+')
args = parser.parse_args()

# Determining the number of jobs and threads per job
ncpus = args.threads  # The total number of CPUs to be used simultaneously
totjobs = len(args.Fastqs)  # One FastQC job per input file

# Clamp the thread count to the machine's core count and the number of jobs.
tot_cpus = mp.cpu_count()
if ncpus > tot_cpus:
    ncpus = tot_cpus
if ncpus > totjobs:
    ncpus = totjobs

# Call FastQC; 'with' guarantees the log is flushed/closed even on failure
# (the file handle previously leaked).
fqc_cmd = ["fastqc", "--extract", "--threads", str(ncpus)] + args.Fastqs
with open("fastqc_progress.log", 'w') as fqc_prog:
    call(fqc_cmd, stdout=fqc_prog, stderr=fqc_prog)

# Process FastQC output and write QC table.
# NOTE(review): the header lists "Est. Coverage" but no coverage value is ever
# written (it would require a genome size) — confirm the intended columns.
sys.stdout.write("Isolate\tDirection\tReads\tMean Length\tMean Q\tQ30%\t Est. Coverage\n")
fqc_files = [f.split(sep=".")[0] + "_fastqc/fastqc_data.txt" for f in args.Fastqs]
dirdict = {"1": "Forward", "2": "Reverse"}  # read-number digit -> direction label
for file in fqc_files:
    section = ""       # current ">>"-delimited FastQC module name
    qreadsum = 0.0     # reads counted in the per-sequence quality histogram
    qscoresum = 0.0    # sum of (quality * reads), for the mean Q
    q30readsum = 0.0   # reads with mean quality >= Q30
    lreadsum = 0.0     # reads counted in the length histogram
    lensum = 0.0       # sum of (length-bin midpoint * reads), for mean length
    stem = ""
    direction = ""
    with open(file, 'r') as f:
        for line in f:
            # fastqc_data.txt modules are tab-delimited; split on "\t" so
            # multi-word fields like "Total Sequences" stay intact.
            if line[0:2] == ">>":
                section = re.split("\t", line[2:])[0]
            elif section not in ["", "Per sequence quality scores", "Sequence Length Distribution", "Basic Statistics"]:
                pass
            elif section == "Per sequence quality scores" and line[0] != "#":
                comps = re.split("\t", line)
                qreadsum += float(comps[1])
                qscoresum += float(comps[0]) * float(comps[1])
                if float(comps[0]) >= 30:
                    q30readsum += float(comps[1])
            elif section == "Sequence Length Distribution" and line[0] != "#":
                comps = re.split("\t", line)
                lreadsum += float(comps[1])
                # Length bins may be ranges ("150-151"); use the midpoint.
                rng = list(map(float, comps[0].split(sep="-")))
                midpoint = sum(rng) / len(rng)  # renamed: previously shadowed the 'mp' module alias
                lensum += midpoint * float(comps[1])
            elif section == "Basic Statistics" and line[0] != "#":
                comps = re.split("\t", line)
                if comps[0] == "Filename":
                    fname = comps[1].split(sep=".")[0]
                    # Illumina-style names: <stem>_S1_L001_R1_001 or <stem>_1
                    mtch = re.search(r'_R[12]_001$', fname)
                    if mtch:
                        name_comps = fname.split(sep="_")
                        stem = name_comps[0]
                        direction = dirdict[name_comps[3][1]]
                    else:
                        mtch2 = re.search(r'_[12]$', fname)
                        if mtch2:
                            name_comps = fname.split(sep="_")
                            stem = name_comps[0]
                            direction = dirdict[name_comps[-1][0]]
                elif comps[0] == "Total Sequences":
                    totseq = int(comps[1])
    # Sanity check: the read counts from all three modules must agree.
    if len(set([qreadsum, lreadsum, totseq])) == 1:
        nreads = totseq
    else:
        nreads = "?"  # bug fix: was 'nreads == "?"' (comparison left nreads unset)
    alen = "%.2f" % (lensum / lreadsum)
    avgQ = "%.2f" % (qscoresum / qreadsum)
    q30 = "%.2f" % ((q30readsum / qreadsum) * 100)
    sys.stdout.write(stem + "\t" + direction + "\t" + str(nreads) + "\t" + str(alen) + "\t" + str(avgQ) + "\t" + str(q30) + "\n")

sys.stdout.write("\n")
call(["fastqc", "--version"], stdout=sys.stdout)

# Clean up: collect FastQC artifacts into FastQC_output/ via a throwaway
# shell script (keeps the original external-command behavior).
c = open("tmp_cleanup", 'w')
c.write("mkdir FastQC_output\n")
for i in args.Fastqs:
    s = i.split(sep=".")[0]
    c.write("rm -r " + s + "_fastqc && mv " + s + "_fastqc.html FastQC_output/ && mv " + s + "_fastqc.zip FastQC_output/\n")
c.write("mv fastqc_progress.log FastQC_output/")
c.close()
call(["chmod", "u+x", "tmp_cleanup"])
call(["sh", "tmp_cleanup"])
call(["rm", "tmp_cleanup"])
| 2.625 | 3 |
imagepy/menus/Image/Lookup table/Others/lookuptables_plg.py | Pad0y/imagepy | 0 | 12764573 | <filename>imagepy/menus/Image/Lookup table/Others/lookuptables_plg.py
from ..lookuptables_plg import LUT
from imagepy.app import ColorManager

# One LUT plugin per ColorManager entry tagged "adv"; entries are unpacked as
# 3-tuples with the third element unused here. NOTE(review): the first two
# elements appear to be (name, lut-data) fed to LUT — confirm against LUT's
# constructor.
plgs = [LUT(i, j) for i, j, _ in ColorManager.gets(tag="adv")]
| 1.609375 | 2 |
tests/test_read_rpc.py | OptimalAnalytics/call_center_reports | 0 | 12764574 | import pytest
from process_reports import read_rpc, check_excel, check_extension, read_info
import os
@pytest.fixture
def good_fn():
    """Path to the known-good scrubbed RPC report used by most tests."""
    report_path = os.path.join('Sample_Reports', 'ALL_RPC-2-1-2018_Scrubbed.xlsx')
    return report_path
@pytest.fixture
def updated_fn():
    """Path to the newer-layout scrubbed RPC report."""
    report_path = os.path.join('Sample_Reports', 'ALL_RPC-7-3_2018_Scrubbed.xlsx')
    return report_path
@pytest.mark.parametrize("fn,acceptable,case,expected", [
    ('cool.txt', ['txt', 'xls'], True, True),
    ('cool.ini', ['txt', 'xls'], True, False),
    ('C:\monster\cool.ini', ['txt', 'xls'], True, False),
    ('C:\monster\cool.txt', ['txt', 'xls'], True, True),
    ('C:\monster\cool.xls', ['txt', 'xls'], True, True),
    ('C:\monster\cool.XLS', ['txt', 'xls'], True, True),
    ('C:\monster\cool.XLS', ['txt', 'xls'], False, False),
    ('C:\monster\cool.xls', ['txt', 'XLS'], False, False),
    ('C:\monster\cool.xls', ['txt', 'XLS'], True, True),
    ('C:\monster\cool.ini', ['txt', 'XLS'], True, False),
    ('C:\monster\cool.ini', ['txt', 'XLS'], False, False),
])
def test_check_extensions(fn, acceptable, case, expected):
    """check_extension: whitelist matching, with and without case folding."""
    assert check_extension(fn, acceptable, case_insensitive=case) == expected
@pytest.mark.parametrize("fn,expected", [
    ('cool.xls', True),
    ('cool.xlsx', True),
    ('cool.XLSX', True),
    ('cool.xlsm', True),
    ('cool.xlsb', True),
    ('cool.csv', False),
    ('cool.ini', False),
])
def test_check_excel(fn, expected):
    """check_excel: every Excel extension (any case) is accepted, others not."""
    assert check_excel(fn) == expected
@pytest.mark.parametrize("f_type", ['csv', 'ini'])
def test_bad_f_types(good_fn, caplog, f_type):
    """Unsupported f_type values raise and log an ERROR naming the type."""
    with pytest.raises(NotImplementedError):
        read_info(good_fn, f_type=f_type)
    assert all(rec.levelname == 'ERROR' for rec in caplog.records)
    for needle in ('f_type', 'supported', f_type):
        assert needle in caplog.text
def test_bad_f_type_file(caplog):
    """A file whose extension implies an unsupported reader raises ValueError."""
    with pytest.raises(ValueError):
        read_info('cool.csv')
    assert all(rec.levelname == 'ERROR' for rec in caplog.records)
def test_good_file(good_fn):
    """Reading the sample RPC report yields the expected scrubbed columns."""
    df = read_rpc(good_fn)
    expected = {'Created By Qcc', 'Acct Id Acc',
                'Call Action Type Qcc', 'Call Result Type Qcc'}
    assert expected.issubset(set(df.columns.values))
def test_good_file2(updated_fn):
    """The newer report layout still exposes the same scrubbed columns."""
    df = read_rpc(updated_fn)
    columns = set(df.columns.values)
    for name in ('Created By Qcc', 'Acct Id Acc',
                 'Call Action Type Qcc', 'Call Result Type Qcc'):
        assert name in columns
def test_kwargs(good_fn):
    """Extra keyword args (usecols) are forwarded to the underlying reader."""
    df = read_rpc(good_fn, usecols=2)
    columns = list(df.columns.values)
    assert len(columns) == 3
    assert 'Call Result Type Qcc' not in columns
| 1.992188 | 2 |
marbaloo_mako/__init__.py | marbaloo/marbalo_mako | 1 | 12764575 | import cherrypy
from mako.lookup import TemplateLookup
class Tool(cherrypy.Tool):
    """CherryPy tool that renders handler results through Mako templates."""

    # Cache of TemplateLookup objects keyed by (directories, module_directory);
    # shared across all Tool instances.
    _lookups = {}

    def __init__(self):
        # Attach at 'before_handler' so the real handler can be wrapped.
        cherrypy.Tool.__init__(self, 'before_handler',
                               self.callable,
                               priority=40)

    def callable(self,
                 filename=None,
                 directories=None,
                 module_directory=None,
                 collection_size=-1):
        """Bind a Mako lookup/template to the request and wrap the handler so
        its returned context dict is rendered through the template."""
        if filename is None or directories is None:
            return

        # Reuse a cached lookup for this configuration when available.
        key = (tuple(directories), module_directory)
        lookup = self._lookups.get(key)
        if lookup is None:
            lookup = TemplateLookup(directories=directories,
                                    module_directory=module_directory,
                                    collection_size=collection_size,
                                    input_encoding='utf8')
            self._lookups[key] = lookup

        cherrypy.request.lookup = lookup
        cherrypy.request.template = lookup.get_template(filename)

        # Replace the current handler with a rendering wrapper.
        original_handler = cherrypy.serving.request.handler

        def rendering_handler(*args, **kwargs):
            context = original_handler(*args, **kwargs)
            return cherrypy.request.template.render(**context)

        cherrypy.serving.request.handler = rendering_handler
| 2.328125 | 2 |
cgs_vmc/run_training.py | ClarkResearchGroup/cgs-vmc | 18 | 12764576 | <filename>cgs_vmc/run_training.py
"""Runs supervised or unsupervised neural network training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
import evaluation
import operators
import training
import wavefunctions
import utils
# System parameters
flags.DEFINE_string(
    'checkpoint_dir', '',
    'Full path to the checkpoint directory.')
flags.DEFINE_integer(
    'num_sites', 24,
    'Number of sites in the system.')
flags.DEFINE_float(
    'heisenberg_jx', 1.0,
    'Jx value in Heisenberg Hamiltonian.')

# Training parameters
flags.DEFINE_integer(
    'num_epochs', 1000,
    'Total of number of epochs to train on.')
flags.DEFINE_integer(
    'checkpoint_frequency', 1,
    'Number of epochs between checkpoints.')
# NOTE(review): "resotre" typo below lives in the user-visible help string;
# fixing it changes runtime output, so it is left as-is here.
flags.DEFINE_boolean(
    'resume_training', False,
    'Indicator to resotre variables from the latest checkpoint')
flags.DEFINE_string(
    'wavefunction_type', '',
    'Network architecture to train. Available architectures are listed in '
    'wavefunctions.WAVEFUNCTION_TYPES dict. and '
    'wavefunctions.build_wavefunction() function.')
flags.DEFINE_string(
    'optimizer', 'ITSWO',
    'Ground state optimizer to use. Available options listed in '
    'training.GROUND_STATE_OPTIMIZERS dict.')
flags.DEFINE_boolean(
    'generate_vectors', False,
    'Indicator generate full wavefunction vectors as a part of evaluation.')
flags.DEFINE_string(
    'basis_file_path', '',
    'Path to the basis file for full wavefunction evaluation.')
flags.DEFINE_string(
    'hparams', '',
    'Override values of hyper-parameters in the form of a '
    'comma-separated list of name=value pairs, e.g., '
    '"num_layers=3,filter_size=64".')
flags.DEFINE_boolean(
    'override', True,
    'Whether to automatically override existing Hparams.')

# Parsed when absl's app.run() processes argv in main().
FLAGS = flags.FLAGS
def main(argv):
  """Runs wavefunction optimization.

  This pipeline optimizes the wavefunction specified in flags on a Marshal
  sign included Heisenberg model. Bonds should be specified in the file J.txt
  in the checkpoint directory, otherwise the system defaults to a 1D chain
  with periodic boundary conditions. For other tunable parameters see the
  flags description.

  Args:
    argv: Unused command-line arguments forwarded by absl.app.run.
  """
  del argv  # Not used.
  n_sites = FLAGS.num_sites
  # Assemble the run configuration and persist it next to the checkpoints so
  # that evaluation jobs can reconstruct the same model.
  hparams = utils.create_hparams()
  hparams.set_hparam('checkpoint_dir', FLAGS.checkpoint_dir)
  hparams.set_hparam('basis_file_path', FLAGS.basis_file_path)
  hparams.set_hparam('num_sites', FLAGS.num_sites)
  hparams.set_hparam('num_epochs', FLAGS.num_epochs)
  hparams.set_hparam('wavefunction_type', FLAGS.wavefunction_type)
  hparams.set_hparam('wavefunction_optimizer_type', FLAGS.optimizer)
  hparams.parse(FLAGS.hparams)
  hparams_path = os.path.join(hparams.checkpoint_dir, 'hparams.pbtxt')
  if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)
  if os.path.exists(hparams_path) and not FLAGS.override:
    print('Hparams file already exists')
    exit()
  with tf.gfile.GFile(hparams_path, 'w') as hparams_file:
    hparams_file.write(str(hparams.to_proto()))
  # Read Heisenberg bonds from J.txt if present; fall back to a 1D chain with
  # periodic boundary conditions otherwise.
  bonds_file_path = os.path.join(FLAGS.checkpoint_dir, 'J.txt')
  heisenberg_jx = FLAGS.heisenberg_jx
  if os.path.exists(bonds_file_path):
    # NOTE(review): np.genfromtxt returns a 1-D array when J.txt holds a
    # single row, which would break the per-bond indexing below -- confirm
    # that bond files always contain at least two rows.
    heisenberg_data = np.genfromtxt(bonds_file_path, dtype=int)
    heisenberg_bonds = [[bond[0], bond[1]] for bond in heisenberg_data]
  else:
    heisenberg_bonds = [(i, (i + 1) % n_sites) for i in range(0, n_sites)]
  wavefunction = wavefunctions.build_wavefunction(hparams)
  hamiltonian = operators.HeisenbergHamiltonian(heisenberg_bonds,
                                                heisenberg_jx, 1.)
  wavefunction_optimizer = training.GROUND_STATE_OPTIMIZERS[FLAGS.optimizer]()
  # TODO(dkochkov) change the pipeline to avoid adding elements to dictionary
  shared_resources = {}
  graph_building_args = {
      'wavefunction': wavefunction,
      'hamiltonian': hamiltonian,
      'hparams': hparams,
      'shared_resources': shared_resources
  }
  train_ops = wavefunction_optimizer.build_opt_ops(**graph_building_args)
  session = tf.Session()
  init = tf.global_variables_initializer()
  init_l = tf.local_variables_initializer()
  session.run([init, init_l])
  checkpoint_saver = tf.train.Saver(
      wavefunction.get_trainable_variables(), max_to_keep=5)
  if FLAGS.resume_training:
    latest_checkpoint = tf.train.latest_checkpoint(hparams.checkpoint_dir)
    checkpoint_saver.restore(session, latest_checkpoint)
  # TODO(kochkov92) use custom output file.
  training_metrics_file = os.path.join(hparams.checkpoint_dir, 'metrics.txt')
  for epoch_number in range(FLAGS.num_epochs):
    # Checkpoint *before* the epoch so the saved state matches the
    # "prior_N_epochs" checkpoint name.
    checkpoint_name = 'model_prior_{}_epochs'.format(epoch_number)
    save_path = os.path.join(hparams.checkpoint_dir, checkpoint_name)
    checkpoint_saver.save(session, save_path)
    metrics_record = wavefunction_optimizer.run_optimization_epoch(
        train_ops, session, hparams)
    # Append metrics via a context manager so the handle is closed even if a
    # later epoch raises.
    with open(training_metrics_file, 'a') as metrics_file_output:
      metrics_file_output.write('{}\n'.format(metrics_record))
  if FLAGS.generate_vectors:
    vector_generator = evaluation.VectorWavefunctionEvaluator()
    eval_ops = vector_generator.build_eval_ops(
        wavefunction, None, hparams, shared_resources)
    vector_generator.run_evaluation(
        eval_ops, session, hparams, FLAGS.num_epochs)
# Parse absl flags and dispatch to main() when executed as a script.
if __name__ == '__main__':
  app.run(main)
| 2.03125 | 2 |
miRNASNP3/miRNASNP3/ajax.py | chunjie-sam-liu/miRNASNP-v3 | 1 | 12764577 | import flask_restful
import re
from miRNASNP3 import app, api
from miRNASNP3.core import mongo
from flask_restful import Resource, fields, marshal_with, reqparse, marshal
from flask import send_file
# TCGA cancer-type abbreviations used as per-cancer columns in every
# expression data-frame template in this module (order preserved from the
# original hand-written literal).
TCGA_CANCER_TYPES = (
    "ACC", "DLBC", "READ", "GBM", "LGG", "THCA", "STAD", "UCEC", "PCPG",
    "CESC", "UCS", "TGCT", "LIHC", "CHOL", "HNSC", "UVM", "SKCM", "COAD",
    "PAAD", "THYM", "LUSC", "MESO", "OV", "ESCA", "SARC", "KIRP", "BLCA",
    "PRAD", "LUAD", "BRCA", "KIRC", "KICH",
)

# Marshal template: one expression value (string) per TCGA cancer type.
# Replaces 32 duplicated `"<CANCER>": fields.String` lines.
mirna_exp_df = {cancer: fields.String for cancer in TCGA_CANCER_TYPES}

# Marshal template: a single miRNA expression document.
mirna_expression = {
    "exp_df": fields.Nested(mirna_exp_df),
    "exp_mean": fields.String,
    "mir_id": fields.String,
}

# Marshal template: response envelope for /api/mirna_expression.
mirna_expression_list = {
    "mirna_expression_list": fields.Nested(mirna_expression),
    "mirna_expression_count": fields.Integer,
}
class MirExpression(Resource):
    """Returns TCGA expression records for a single miRNA."""

    @marshal_with(mirna_expression_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("mirna_id", type=str)
        args = parser.parse_args()
        mirna_id = args["mirna_id"]
        if mirna_id:
            # Materialize the cursor once and count locally instead of running
            # the same query twice (the original issued find() and
            # find().count() separately).
            records = list(mongo.db.mirna_expression.find({"mir_id": mirna_id}))
        else:
            # No miRNA requested: empty result set.
            records = []
        return {
            "mirna_expression_list": records,
            "mirna_expression_count": len(records),
        }


api.add_resource(MirExpression, "/api/mirna_expression")
# Marshal template: alignment and thermodynamic details of a predicted
# miRNA target site (seed region).
site_info = {
    "align_1": fields.String,
    "align_2": fields.String,
    "align_3": fields.String,
    "align_4": fields.String,
    "align_5": fields.String,
    "align6": fields.String,
    "align7": fields.String,
    "align8": fields.String,
    "mm_start": fields.String,
    "mm_end": fields.String,
    "tgs_start": fields.String,
    "tgs_end": fields.String,
    "tgs_score": fields.String,
    "dg_duplex": fields.String,
    "dg_binding": fields.String,
    "dg_open": fields.String,
    "tgs_au": fields.String,
    # `attribute="prob_exac"` was redundant: it matched the key name, which
    # is already the default lookup attribute.
    "prob_exac": fields.String,
    "chrome": fields.String,
}

# Marshal template: SNP/variant identification fields.
snp_info = {
    "distance": fields.String,
    "chr": fields.String,
    "position": fields.String,
    "snp_id": fields.String,
    "alt": fields.String,
    "ref": fields.String,
    "curalt": fields.String,
}

# TCGA cancer-type abbreviations shared by the expression data-frame
# templates (order preserved from the original literal).
TCGA_CANCER_TYPES = (
    "ACC", "DLBC", "READ", "GBM", "LGG", "THCA", "STAD", "UCEC", "PCPG",
    "CESC", "UCS", "TGCT", "LIHC", "CHOL", "HNSC", "UVM", "SKCM", "COAD",
    "PAAD", "THYM", "LUSC", "MESO", "OV", "ESCA", "SARC", "KIRP", "BLCA",
    "PRAD", "LUAD", "BRCA", "KIRC", "KICH",
)

# Marshal template: one gene-expression value per TCGA cancer type.
# Replaces 32 duplicated `"<CANCER>": fields.String` lines.
gene_exp_df = {cancer: fields.String for cancer in TCGA_CANCER_TYPES}

# Marshal template: a single gene expression document.
gene_expression = {
    "exp_df": fields.Nested(gene_exp_df),
    "exp_mean": fields.String,
    "symbol": fields.String,
}

# Marshal template: 3'UTR transcript annotation.
utr_info = {
    "acc": fields.List(fields.String),
    "position": fields.String,
    "enst_id": fields.String,
    "gene_symbol": fields.String,
}

# Marshal template: one seed-region target-gain event with its joined
# expression documents.
gainsite_info = {
    "snp_id": fields.String,
    "mir_seedstart": fields.String,
    "strand": fields.String,
    "mir_seedchr": fields.String,
    "mir_seedend": fields.String,
    "mirna_id": fields.String,
    "gene_symbol": fields.String,
    "snp_info": fields.Nested(snp_info),
    "site_info": fields.Nested(site_info),
    "utr_info": fields.Nested(utr_info),
    "gene_expression": fields.Nested(gene_expression),
    "mirna_expression": fields.Nested(mirna_expression),
    "cor_key": fields.String,
}

# Marshal template: response envelope for the seed-gain endpoints.
snp_seed_gain = {
    "snp_seed_gain_list": fields.Nested(gainsite_info),
    "snp_seed_gain_count": fields.Integer,
}
class SnpSeedGainFull(Resource):
    """Unpaginated list of miRNA-seed target-gain events for a query.

    Combines SNV-derived and indel-derived gain records and joins each one
    with its gene and miRNA expression documents.
    """

    @marshal_with(snp_seed_gain)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("mirna_id")
        parser.add_argument("gene")
        # Accepted for API compatibility; this endpoint is not paginated.
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        condition = {}
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        if args["mirna_id"]:
            condition["mirna_id"] = args["mirna_id"]
        if args["gene"]:
            # Case-insensitive substring match on the gene symbol.
            condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        pipline = [{"$match": condition}, lookup_gene, lookup_mirna]
        snv_count = mongo.db.seed_gain_4666_redundancy.find(condition).count()
        indel_count = mongo.db.seed_gain_addindel_redundancy.find(
            condition
        ).count()
        snp_seed_gain_list = list(
            mongo.db.seed_gain_4666_redundancy.aggregate(pipline)
        ) + list(mongo.db.seed_gain_addindel_redundancy.aggregate(pipline))
        return {
            "snp_seed_gain_list": snp_seed_gain_list,
            "snp_seed_gain_count": snv_count + indel_count,
        }


api.add_resource(SnpSeedGainFull, "/api/snp_seed_gain_full")
class SnpSeedGain(Resource):
    """Paginated list of miRNA-seed target-gain events.

    SNV-derived and indel-derived gain records are exposed to the client as
    one continuous, 15-per-page list: all SNV records first, indel records
    after them.
    """

    @marshal_with(snp_seed_gain)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("mirna_id")
        parser.add_argument("gene")
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        per_page = 15
        record_skip = (int(args["page"]) - 1) * per_page
        condition = {}
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        if args["mirna_id"]:
            condition["mirna_id"] = args["mirna_id"]
        if args["gene"]:
            # Case-insensitive substring match on the gene symbol.
            condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        match = {"$match": condition}
        limit = {"$limit": per_page}
        pipline = [match, {"$skip": record_skip}, limit, lookup_gene, lookup_mirna]
        snv_count = mongo.db.seed_gain_4666_redundancy.find(condition).count()
        indel_count = mongo.db.seed_gain_addindel_redundancy.find(
            condition
        ).count()
        snp_seed_gain_count = snv_count + indel_count
        # Number of SNV records left after skipping to the requested page.
        snv_remaining = snv_count - record_skip
        if args["snp_id"]:
            # Single-SNP queries are small; return one page from each
            # collection, as before.
            snp_seed_gain_list = list(
                mongo.db.seed_gain_4666_redundancy.aggregate(pipline)
            ) + list(mongo.db.seed_gain_addindel_redundancy.aggregate(pipline))
        elif snv_remaining <= 0:
            # The page lies entirely inside the indel collection.  (The
            # original used a strict `>` comparison, which returned an empty
            # page when record_skip == snv_count.)
            pipline_indel = [
                match,
                {"$skip": record_skip - snv_count},
                limit,
                lookup_gene,
                lookup_mirna,
            ]
            snp_seed_gain_list = list(
                mongo.db.seed_gain_addindel_redundancy.aggregate(pipline_indel)
            )
        elif snv_remaining < per_page:
            # The page straddles the SNV/indel boundary: remaining SNV records
            # plus enough indel records to fill the page.  (The original
            # compared against the combined count and limited the indel part
            # to `snv_remaining`, producing wrongly sized pages.)
            snv_part = list(
                mongo.db.seed_gain_4666_redundancy.aggregate(pipline)
            )
            pipline_indel = [
                match,
                {"$limit": per_page - snv_remaining},
                lookup_gene,
                lookup_mirna,
            ]
            snp_seed_gain_list = snv_part + list(
                mongo.db.seed_gain_addindel_redundancy.aggregate(pipline_indel)
            )
        else:
            # The whole page fits inside the SNV collection.
            snp_seed_gain_list = list(
                mongo.db.seed_gain_4666_redundancy.aggregate(pipline)
            )
        return {
            "snp_seed_gain_list": list(snp_seed_gain_list),
            "snp_seed_gain_count": snp_seed_gain_count,
        }


api.add_resource(SnpSeedGain, "/api/snp_seed_gain")
# TCGA cancer-type abbreviations shared by the expression/correlation
# data-frame templates.
TCGA_CANCER_TYPES = (
    "ACC", "DLBC", "READ", "GBM", "LGG", "THCA", "STAD", "UCEC", "PCPG",
    "CESC", "UCS", "TGCT", "LIHC", "CHOL", "HNSC", "UVM", "SKCM", "COAD",
    "PAAD", "THYM", "LUSC", "MESO", "OV", "ESCA", "SARC", "KIRP", "BLCA",
    "PRAD", "LUAD", "BRCA", "KIRC", "KICH",
)

# Marshal template: per-cancer miRNA-gene expression correlation values.
# The original literal listed the same 32 cancers in alphabetical order,
# which sorted() reproduces exactly.
cor_df = {cancer: fields.String for cancer in sorted(TCGA_CANCER_TYPES)}

# Marshal template: one correlation document (keyed by "<mirna>_<gene>").
corelation_detail = {"cor_df": fields.Nested(cor_df), "mir_gene": fields.String}

# Marshal template: one seed-region target-loss event with joined expression
# and correlation documents.
losssite_info = {
    "snp_id": fields.String,
    "mir_seedstart": fields.String,
    "strand": fields.String,
    "mir_seedchr": fields.String,
    "mir_seedend": fields.String,
    "mirna_id": fields.String,
    "gene_symbol": fields.String,
    "cor_key": fields.String,
    "expr_corelation": fields.String,
    "experiment_valid": fields.Integer,
    "snp_info": fields.Nested(snp_info),
    "site_info": fields.Nested(site_info),
    "utr_info": fields.Nested(utr_info),
    "gene_expression": fields.Nested(gene_expression),
    "mirna_expression": fields.Nested(mirna_expression),
    "corelation_detail": fields.Nested(corelation_detail),
}

# Marshal template: response envelope for the seed-loss endpoints.
snp_seed_loss_list = {
    "snp_seed_loss_list": fields.Nested(losssite_info),
    "snp_seed_loss_count": fields.Integer,
}
class SnpSeedLossFull(Resource):
    """Unpaginated list of miRNA-seed target-loss events for a query.

    Combines SNV-derived and indel-derived loss records, joining each record
    with gene expression, miRNA expression and correlation documents.
    """

    @marshal_with(snp_seed_loss_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("mirna_id")
        parser.add_argument("gene")
        # Accepted for API compatibility; this endpoint is not paginated.
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        condition = {}
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        if args["mirna_id"]:
            condition["mirna_id"] = args["mirna_id"]
        if args["gene"]:
            # Case-insensitive substring match on the gene symbol.
            condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        lookup_corelation = {
            "$lookup": {
                "from": "corelation_cancer_detail",
                "localField": "cor_key",
                "foreignField": "mir_gene",
                "as": "corelation_detail",
            }
        }
        pipline = [
            {"$match": condition},
            lookup_gene,
            lookup_mirna,
            lookup_corelation,
        ]
        snv_count = mongo.db.seed_loss_4666_redundancy.find(condition).count()
        indel_count = mongo.db.seed_loss_addindel_redundancy.find(
            condition
        ).count()
        snp_seed_loss_list = list(
            mongo.db.seed_loss_4666_redundancy.aggregate(pipline)
        ) + list(mongo.db.seed_loss_addindel_redundancy.aggregate(pipline))
        return {
            "snp_seed_loss_list": snp_seed_loss_list,
            "snp_seed_loss_count": snv_count + indel_count,
        }


api.add_resource(SnpSeedLossFull, "/api/snp_seed_loss_full")
class SnpSeedLoss(Resource):
    """Paginated list of miRNA-seed target-loss events.

    SNV-derived and indel-derived loss records are exposed as one continuous
    15-per-page list: SNV records first, indel records after them.
    """

    @marshal_with(snp_seed_loss_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("mirna_id")
        parser.add_argument("gene")
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        per_page = 15
        record_skip = (int(args["page"]) - 1) * per_page
        condition = {}
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        if args["mirna_id"]:
            condition["mirna_id"] = args["mirna_id"]
        if args["gene"]:
            # Case-insensitive substring match on the gene symbol.
            condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        lookup_corelation = {
            "$lookup": {
                "from": "corelation_cancer_detail",
                "localField": "cor_key",
                "foreignField": "mir_gene",
                "as": "corelation_detail",
            }
        }
        match = {"$match": condition}
        limit = {"$limit": per_page}
        pipline = [
            match,
            {"$skip": record_skip},
            limit,
            lookup_gene,
            lookup_mirna,
            lookup_corelation,
        ]
        snv_count = mongo.db.seed_loss_4666_redundancy.find(condition).count()
        indel_count = mongo.db.seed_loss_addindel_redundancy.find(
            condition
        ).count()
        snp_seed_loss_count = snv_count + indel_count
        # Number of SNV records left after skipping to the requested page.
        snv_remaining = snv_count - record_skip
        if args["snp_id"]:
            # Single-SNP queries are small; return one page from each
            # collection, as before.
            snp_seed_loss_list = list(
                mongo.db.seed_loss_4666_redundancy.aggregate(pipline)
            ) + list(mongo.db.seed_loss_addindel_redundancy.aggregate(pipline))
        elif snv_remaining <= 0:
            # The page lies entirely inside the indel collection.  (The
            # original used a strict `>` comparison, which returned an empty
            # page when record_skip == snv_count, and omitted the correlation
            # lookup from the indel pipeline.)
            pipline_indel = [
                match,
                {"$skip": record_skip - snv_count},
                limit,
                lookup_gene,
                lookup_mirna,
                lookup_corelation,
            ]
            snp_seed_loss_list = list(
                mongo.db.seed_loss_addindel_redundancy.aggregate(pipline_indel)
            )
        elif snv_remaining < per_page:
            # The page straddles the SNV/indel boundary: remaining SNV records
            # plus enough indel records to fill the page.  (The original
            # limited the indel part to `snv_remaining` instead of the
            # leftover page space, and omitted the correlation lookup.)
            snv_part = list(
                mongo.db.seed_loss_4666_redundancy.aggregate(pipline)
            )
            pipline_indel = [
                match,
                {"$limit": per_page - snv_remaining},
                lookup_gene,
                lookup_mirna,
                lookup_corelation,
            ]
            snp_seed_loss_list = snv_part + list(
                mongo.db.seed_loss_addindel_redundancy.aggregate(pipline_indel)
            )
        else:
            # The whole page fits inside the SNV collection.
            snp_seed_loss_list = list(
                mongo.db.seed_loss_4666_redundancy.aggregate(pipline)
            )
        return {
            "snp_seed_loss_list": list(snp_seed_loss_list),
            "snp_seed_loss_count": snp_seed_loss_count,
        }


api.add_resource(SnpSeedLoss, "/api/snp_seed_loss")
# Marshal layout for a COSMIC/ClinVar mutation record.
mut_info = dict(
    distance=fields.String,
    chr=fields.String,
    position=fields.String,
    mut_id=fields.String,
    alt=fields.String,
    ref=fields.String,
    curalt=fields.String,
    distance_align=fields.String,
)

# Marshal layout for a seed-region target-gain site created by a mutation.
mut_gainsite_info = dict(
    mut_id=fields.String,
    mir_seedstart=fields.String,
    strand=fields.String,
    mir_seedchr=fields.String,
    mir_seedend=fields.String,
    mirna_id=fields.String,
    gene_symbol=fields.String,
    mut_info=fields.Nested(mut_info),
    site_info=fields.Nested(site_info),
    utr_info=fields.Nested(utr_info),
    gene_expression=fields.Nested(gene_expression),
    mirna_expression=fields.Nested(mirna_expression),
)

# Response envelope for /api/mut_seed_gain.
mut_seed_gain_list = dict(
    mut_seed_gain_list=fields.Nested(mut_gainsite_info),
    mut_seed_gain_count=fields.Integer,
)
class MutSeedGain(Resource):
    """Paginated seed-region target-gain events caused by disease mutations.

    Each page merges SNV- and indel-derived records from the COSMIC
    collections, joined with gene and miRNA expression documents.
    """

    @marshal_with(mut_seed_gain_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("mirna_id", type=str)
        parser.add_argument("mut_id")
        parser.add_argument("gene")
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        per_page = 15
        record_skip = (int(args["page"]) - 1) * per_page
        condition = {}
        if args["mirna_id"]:
            condition["mirna_id"] = args["mirna_id"]
        if args["mut_id"]:
            condition["mut_id"] = args["mut_id"]
        if args["gene"]:
            # Case-insensitive substring match on the gene symbol.
            condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        pipline = [
            {"$match": condition},
            {"$skip": record_skip},
            {"$limit": per_page},
            lookup_gene,
            lookup_mirna,
        ]
        snv_list = list(mongo.db.seed_cosmic_gain_redundancy.aggregate(pipline))
        snv_count = mongo.db.seed_cosmic_gain_redundancy.find(condition).count()
        indel_list = list(
            mongo.db.indel_seed_mutation_gain_redundancy.aggregate(pipline)
        )
        indel_count = mongo.db.indel_seed_mutation_gain_redundancy.find(
            condition
        ).count()
        return {
            "mut_seed_gain_list": snv_list + indel_list,
            "mut_seed_gain_count": snv_count + indel_count,
        }


api.add_resource(MutSeedGain, "/api/mut_seed_gain")
# Marshal layout for a seed-region target-loss site destroyed by a mutation,
# including joined expression and correlation documents.
mut_losssite_info = dict(
    mut_id=fields.String,
    mir_seedstart=fields.String,
    strand=fields.String,
    mir_seedchr=fields.String,
    mir_seedend=fields.String,
    mirna_id=fields.String,
    gene_symbol=fields.String,
    cor_key=fields.String,
    expr_corelation=fields.String,
    experiment_valid=fields.Integer,
    mut_info=fields.Nested(mut_info),
    site_info=fields.Nested(site_info),
    utr_info=fields.Nested(utr_info),
    gene_expression=fields.Nested(gene_expression),
    mirna_expression=fields.Nested(mirna_expression),
    corelation_detail=fields.Nested(corelation_detail),
)

# Response envelope for /api/mut_seed_loss.
mut_seed_loss_list = dict(
    mut_seed_loss_list=fields.Nested(mut_losssite_info),
    mut_seed_loss_count=fields.Integer,
)
class MutSeedLoss(Resource):
    """Paginated seed-region target-loss events caused by disease mutations.

    Each page merges SNV- and indel-derived records from the COSMIC
    collections, joined with expression and correlation documents.
    """

    @marshal_with(mut_seed_loss_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("mirna_id", type=str)
        parser.add_argument("mut_id")
        parser.add_argument("gene")
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        per_page = 15
        record_skip = (int(args["page"]) - 1) * per_page
        condition = {}
        if args["mirna_id"]:
            condition["mirna_id"] = args["mirna_id"]
        if args["mut_id"]:
            condition["mut_id"] = args["mut_id"]
        if args["gene"]:
            # Case-insensitive substring match on the gene symbol.
            condition["gene_symbol"] = {"$regex": args["gene"], "$options": "$i"}
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        lookup_corelation = {
            "$lookup": {
                "from": "corelation_cancer_detail",
                "localField": "cor_key",
                "foreignField": "mir_gene",
                "as": "corelation_detail",
            }
        }
        pipline = [
            {"$match": condition},
            {"$skip": record_skip},
            {"$limit": per_page},
            lookup_mirna,
            lookup_gene,
            lookup_corelation,
        ]
        snv_list = list(mongo.db.seed_cosmic_loss_redundancy.aggregate(pipline))
        snv_count = mongo.db.seed_cosmic_loss_redundancy.find(condition).count()
        indel_list = list(
            mongo.db.indel_seed_mutation_loss_redundancy.aggregate(pipline)
        )
        indel_count = mongo.db.indel_seed_mutation_loss_redundancy.find(
            condition
        ).count()
        return {
            "mut_seed_loss_list": snv_list + indel_list,
            "mut_seed_loss_count": snv_count + indel_count,
        }


api.add_resource(MutSeedLoss, "/api/mut_seed_loss")
# Marshal layout for a 3'UTR target-site prediction (alignment rows,
# thermodynamic scores and display coordinates).
utr_site_info = dict(
    chrome=fields.String,
    mm_start=fields.String,
    mm_end=fields.String,
    tgs_start=fields.String,
    tgs_end=fields.String,
    dg_duplex=fields.String,
    dg_binding=fields.String,
    dg_open=fields.String,
    tgs_au=fields.String,
    tgs_score=fields.String,
    prob_exac=fields.String,
    align_1=fields.String,
    align_2=fields.String,
    align_3=fields.String,
    align_4=fields.String,
    align_5=fields.String,
    align6=fields.String,
    align7=fields.String,
    align8=fields.String,
    truncate_start=fields.String,
    truncate_end=fields.String,
    distance=fields.Integer,
    alt_start=fields.Integer,
    alt_end=fields.Integer,
    alt_color=fields.String,
    alt_display=fields.Integer,
)

# Marshal layout for a SNP record attached to a UTR site.
snp_info_line = dict(
    distance=fields.String,
    distance_align=fields.String,
    chr=fields.String,
    position=fields.String,
    snp_id=fields.String,
    ref=fields.String,
    alt=fields.String,
    curalt=fields.String,
)

# Marshal layout for a 3'UTR transcript annotation with coordinates.
utr_info_line = dict(
    gene_symbol=fields.String,
    enst_id=fields.String,
    acc=fields.List(fields.String),
    chr=fields.String,
    end=fields.String,
    start=fields.String,
    strand=fields.String,
    position=fields.String,
)

# Marshal layout for an experimentally validated miRNA-gene interaction.
experiment_valid = dict(
    pubmedid=fields.String,
    evidence=fields.String,
    source=fields.String,
    mirna=fields.String,
    experiment_valid_key=fields.String,
    gene=fields.String,
)

# Marshal layout for a UTR target-loss event with all joined documents.
snv_utr_loss = dict(
    snv=fields.Integer,
    indel=fields.Integer,
    snp_id=fields.String,
    mirna_id=fields.String,
    gene_symbol=fields.String,
    experiment_valid=fields.Nested(experiment_valid),
    expr_corelation=fields.String,
    snp_info=fields.Nested(snp_info_line),
    utr_info=fields.Nested(utr_info_line),
    site_info=fields.Nested(utr_site_info),
    gene_expression=fields.Nested(gene_expression),
    mirna_expression=fields.Nested(mirna_expression),
    corelation_detail=fields.Nested(corelation_detail),
)

# Response envelope for /api/snv_utr_loss.
utr_loss_list = dict(
    utr_loss_list=fields.Nested(snv_utr_loss),
    utr_loss_count=fields.Integer,
)
class SnvUtrLoss(Resource):
    """Paginated 3'UTR target-loss events for a SNP.

    Each page merges SNV- and indel-derived records, joined with expression,
    correlation and experimental-validation documents.
    """

    @marshal_with(utr_loss_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        per_page = 15
        record_skip = (int(args["page"]) - 1) * per_page
        condition = {}
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        lookup_corelation = {
            "$lookup": {
                "from": "corelation_cancer_detail",
                "localField": "cor_key",
                "foreignField": "mir_gene",
                "as": "corelation_detail",
            }
        }
        lookup_experiment_valid = {
            "$lookup": {
                "from": "gene_mirna_experiment_validation",
                "localField": "cor_key",
                "foreignField": "experiment_valid_key",
                "as": "experiment_valid",
            }
        }
        pipline = [
            {"$match": condition},
            {"$skip": record_skip},
            {"$limit": per_page},
            lookup_gene,
            lookup_mirna,
            lookup_corelation,
            lookup_experiment_valid,
        ]
        snv_list = list(mongo.db.snv_utr_loss_v2_redundancy.aggregate(pipline))
        snv_count = mongo.db.snv_utr_loss_v2_redundancy.find(condition).count()
        indel_list = list(mongo.db.indel_utr_loss_v2_redundancy.aggregate(pipline))
        indel_count = mongo.db.indel_utr_loss_v2_redundancy.find(
            condition
        ).count()
        return {
            "utr_loss_list": snv_list + indel_list,
            "utr_loss_count": snv_count + indel_count,
        }


api.add_resource(SnvUtrLoss, "/api/snv_utr_loss")
# Marshal layout for a 3'UTR target-gain event with joined expression
# documents.
snv_utr_gain = dict(
    snp_id=fields.String,
    mirna_id=fields.String,
    gene_symbol=fields.String,
    snp_info=fields.Nested(snp_info_line),
    utr_info=fields.Nested(utr_info_line),
    site_info=fields.Nested(utr_site_info),
    gene_expression=fields.Nested(gene_expression),
    mirna_expression=fields.Nested(mirna_expression),
)

# Response envelope for /api/snv_utr_gain.
utr_gain_list = dict(
    utr_gain_list=fields.Nested(snv_utr_gain),
    utr_gain_count=fields.Integer,
)
class SnvUtrGain(Resource):
    """Paginated 3'UTR target-gain events for a SNP.

    Each page merges SNV- and indel-derived records, joined with gene and
    miRNA expression documents.
    """

    @marshal_with(utr_gain_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        per_page = 15
        record_skip = (int(args["page"]) - 1) * per_page
        condition = {}
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        pipline = [
            {"$match": condition},
            {"$skip": record_skip},
            {"$limit": per_page},
            lookup_gene,
            lookup_mirna,
        ]
        snv_list = list(mongo.db.snv_utr_gain_v2_redundancy.aggregate(pipline))
        snv_count = mongo.db.snv_utr_gain_v2_redundancy.find(condition).count()
        indel_list = list(mongo.db.indel_utr_gain_v2_redundancy.aggregate(pipline))
        indel_count = mongo.db.indel_utr_gain_v2_redundancy.find(
            condition
        ).count()
        return {
            "utr_gain_list": snv_list + indel_list,
            "utr_gain_count": snv_count + indel_count,
        }


api.add_resource(SnvUtrGain, "/api/snv_utr_gain")
# Marshal layout for a 3'UTR target-gain site created by a disease mutation.
mut_gain_utr_site = dict(
    mut_id=fields.String,
    mirna_id=fields.String,
    gene_symbol=fields.String,
    mut_info=fields.Nested(mut_info),
    site_info=fields.Nested(utr_site_info),
    utr_info=fields.Nested(utr_info_line),
    gene_expression=fields.Nested(gene_expression),
    mirna_expression=fields.Nested(mirna_expression),
)

# Response envelope for /api/mut_utr_gain.
mut_utr_gain = dict(
    mut_utr_gain_list=fields.Nested(mut_gain_utr_site),
    mut_utr_gain_count=fields.Integer,
)
class MutUtrGain(Resource):
    """Paginated 3'UTR target-gain events caused by disease mutations.

    Mutation IDs starting with "COSN" are served from the COSMIC collections;
    everything else comes from the ClinVar collections.
    """

    @marshal_with(mut_utr_gain)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("mut_id")
        parser.add_argument("page", type=int, default=1)
        args = parser.parse_args()
        per_page = 15
        record_skip = (args["page"] - 1) * per_page
        mut_id = args["mut_id"]
        condition = {}
        if mut_id:
            condition["mut_id"] = mut_id
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        pipline = [
            {"$match": condition},
            {"$skip": record_skip},
            {"$limit": per_page},
            lookup_gene,
            lookup_mirna,
        ]
        # Guarding on mut_id prevents an AttributeError (None.lower()) when
        # the parameter is omitted; such requests fall through to the ClinVar
        # branch.  NOTE(review): this endpoint tests the prefix "cosn" while
        # MutUtrLoss tests "cos" -- confirm which prefix is intended.
        if mut_id and mut_id.lower().startswith("cosn"):
            snv_list = list(
                mongo.db.utr_cosmic_gain_redundancy.aggregate(pipline)
            )
            snv_count = mongo.db.utr_cosmic_gain_redundancy.find(
                condition
            ).count()
            indel_list = list(
                mongo.db.utr_cosmic_gain_indel_redundancy.aggregate(pipline)
            )
            indel_count = mongo.db.utr_cosmic_gain_indel_redundancy.find(
                condition
            ).count()
        else:
            snv_list = list(
                mongo.db.utr_clinvar_gain_redundancy.aggregate(pipline)
            )
            snv_count = mongo.db.utr_clinvar_gain_redundancy.find(
                condition
            ).count()
            indel_list = list(
                mongo.db.utr_clinvar_gain_indel_redundancy.aggregate(pipline)
            )
            indel_count = mongo.db.utr_clinvar_gain_indel_redundancy.find(
                condition
            ).count()
        return {
            "mut_utr_gain_list": snv_list + indel_list,
            "mut_utr_gain_count": snv_count + indel_count,
        }


api.add_resource(MutUtrGain, "/api/mut_utr_gain")
# Marshal layout for a 3'UTR target-loss site destroyed by a disease
# mutation, including joined expression and correlation documents.
mut_loss_utr_site = dict(
    mut_id=fields.String,
    mirna_id=fields.String,
    gene_symbol=fields.String,
    experiment_valid=fields.Integer,
    expr_corelation=fields.String,
    mut_info=fields.Nested(mut_info),
    utr_info=fields.Nested(utr_info_line),
    site_info=fields.Nested(utr_site_info),
    gene_expression=fields.Nested(gene_expression),
    mirna_expression=fields.Nested(mirna_expression),
    corelation_detail=fields.Nested(corelation_detail),
)

# Response envelope for /api/mut_utr_loss.
mut_utr_loss = dict(
    mut_utr_loss_list=fields.Nested(mut_loss_utr_site),
    mut_utr_loss_count=fields.Integer,
)
class MutUtrLoss(Resource):
    """Loss-of-target-site records in 3'UTRs for one mutation id.

    COSMIC-style ids (prefix ``cos``, case-insensitive) are served from the
    ``utr_cosmic_loss*`` collections, everything else from the
    ``utr_clinvar_loss*`` ones.  For each source the SNV and indel
    collections are queried separately and the results concatenated.
    """

    @marshal_with(mut_utr_loss)
    def get(self):
        """GET /api/mut_utr_loss?mut_id=<id>&page=<n> (15 rows per page)."""
        parser = reqparse.RequestParser()
        parser.add_argument("mut_id")
        parser.add_argument("page")
        args = parser.parse_args()
        per_page = 15
        record_skip = 0
        if args["page"]:
            record_skip = (int(args["page"]) - 1) * per_page
        # Robustness fix: mut_id is optional, so guard against None before
        # the .lower() call below (previously raised AttributeError).
        mut_id = args["mut_id"] or ""
        condition = {}
        if mut_id:
            condition["mut_id"] = mut_id
        # Join gene- and miRNA-expression records and correlation details
        # onto each loss record.
        lookup_gene = {
            "$lookup": {
                "from": "gene_expression",
                "localField": "gene_symbol",
                "foreignField": "symbol",
                "as": "gene_expression",
            }
        }
        lookup_mirna = {
            "$lookup": {
                "from": "mirna_expression",
                "localField": "mirna_id",
                "foreignField": "mir_id",
                "as": "mirna_expression",
            }
        }
        lookup_corelation = {
            "$lookup": {
                "from": "corelation_cancer_detail",
                "localField": "cor_key",
                "foreignField": "mir_gene",
                "as": "corelation_detail",
            }
        }
        print(condition)
        pipline = [
            {"$match": condition},
            {"$skip": record_skip},
            {"$limit": per_page},
            lookup_gene,
            lookup_mirna,
            lookup_corelation,
        ]
        # Pick the source collections once instead of duplicating the whole
        # query block per branch.
        if mut_id.lower().startswith("cos"):
            snv_coll = mongo.db.utr_cosmic_loss_redundancy
            indel_coll = mongo.db.utr_cosmic_loss_indel_redundancy
        else:
            snv_coll = mongo.db.utr_clinvar_loss_redundancy
            indel_coll = mongo.db.utr_clinvar_loss_indel_redundancy
        snv_list = snv_coll.aggregate(pipline)
        snv_count = snv_coll.find(condition).count()
        indel_list = indel_coll.aggregate(pipline)
        indel_count = indel_coll.find(condition).count()
        return {
            "mut_utr_loss_list": list(snv_list) + list(indel_list),
            "mut_utr_loss_count": snv_count + indel_count,
        }
api.add_resource(MutUtrLoss, "/api/mut_utr_loss")
# Marshal spec for one row of the per-chromosome miRNA browse table.
browse_info = {
    "mir_id": fields.String,
    "mir_acc": fields.String,
    "mir_chr": fields.String,
    "mir_start": fields.String,
    "mir_end": fields.String,
    "mir_strand": fields.String,
    "location": fields.String,
    "count_snp": fields.Integer,
    "snp_info": fields.String,
    "count_nutation": fields.Integer,
    "mutation_info": fields.String,
}
# Response envelope for /api/browsemir.
browse_list = {"browse_list": fields.List(fields.Nested(browse_info))}
class BrowseMir(Resource):
    """Paginated listing of miRNA browse records, optionally filtered by chromosome."""

    @marshal_with(browse_list)
    def get(self):
        """GET /api/browsemir?chr=<chrom>&page=<n>&per_page=<n>."""
        req = reqparse.RequestParser()
        req.add_argument("chr", type=str)
        req.add_argument("page", type=int, default=1)
        req.add_argument("per_page", type=int, default=30)
        params = req.parse_args()
        page_size = params["per_page"]
        offset = (params["page"] - 1) * page_size
        # Restrict to one chromosome only when the caller supplied one.
        query = {"mir_chr": params["chr"]} if params["chr"] else {}
        cursor = mongo.db.browseY.find(query).skip(offset).limit(page_size)
        return {"browse_list": list(cursor)}
api.add_resource(BrowseMir, "/api/browsemir")
# Marshal spec for one mature-miRNA summary record: genomic coordinates,
# sequences, and per-region variant counts (SNP / COSMIC / ClinVar / GWAS /
# DRV in seed, mature and precursor regions).
mir_summary = {
    "mir_id": fields.String,
    "mir_acc": fields.String,
    "mir_chr": fields.String,
    "mir_start": fields.String,
    "mir_end": fields.String,
    "mir_strand": fields.String,
    "matureSeq": fields.String,
    "pre_id": fields.String,
    "pre_acc": fields.String,
    "pre_chr": fields.String,
    "pre_start": fields.String,
    "pre_end": fields.String,
    "pre_strand": fields.String,
    "harpin_seq": fields.String,
    "snp_in_seed": fields.Integer,
    "snp_in_mature": fields.Integer,
    "snp_in_premir": fields.Integer,
    "cosmic_in_seed": fields.Integer,
    "cosmic_in_mature": fields.Integer,
    "cosmic_in_premir": fields.Integer,
    "clinvar_in_seed": fields.Integer,
    "clinvar_in_mature": fields.Integer,
    "clinvar_in_premir": fields.Integer,
    "snp_gwas_in_seed": fields.Integer,
    "snp_gwas_in_mature": fields.Integer,
    "snp_gwas_in_premir": fields.Integer,
    "drv_in_seed": fields.Integer,
    "drv_in_mature": fields.Integer,
    "drv_in_premir": fields.Integer,
}
# Response envelope shared by the miRNA summary endpoints.
mirna_summary_list = {
    "mirna_summary_list": fields.Nested(mir_summary),
    "mirna_summary_count": fields.Integer,
}
class MirSummary(Resource):
    """Paginated per-miRNA variant summary, filterable by chromosome and miRNA id."""

    @marshal_with(mirna_summary_list)
    def get(self):
        """GET /api/mirna_summary?page=<n>&chrome=<chrom|All>&mirna_id=<substr>."""
        req = reqparse.RequestParser()
        req.add_argument("page", type=int, default=1)
        req.add_argument("chrome", type=str)
        req.add_argument("mirna_id")
        params = req.parse_args()
        per_page = 15
        offset = (params["page"] - 1) * per_page
        print(params["mirna_id"])
        query = {}
        # "All" means no chromosome restriction.
        if params["chrome"] != "All":
            query["mir_chr"] = params["chrome"]
        # Case-insensitive substring match against the mature miRNA id.
        if params["mirna_id"]:
            query["mir_id"] = {"$regex": params["mirna_id"], "$options": "$i"}
        coll = mongo.db.seed_mature_pre_var_v1
        rows = coll.find(query).skip(offset).limit(per_page)
        total = coll.find(query).count()
        return {
            "mirna_summary_list": list(rows),
            "mirna_summary_count": total,
        }
# Route registration for the paginated miRNA summary endpoint.
api.add_resource(MirSummary, "/api/mirna_summary")
class MirInfo(Resource):
    """Exact (case-insensitive) lookup of a single mature-miRNA summary record."""

    @marshal_with(mirna_summary_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("search_ids", type=str)
        query_id = req.parse_args()["search_ids"]
        print(query_id)
        if query_id:
            # Anchor the regex so only whole-id, case-insensitive matches hit.
            query = {
                "mir_id": {
                    "$regex": "".join(["^", query_id, "$"]),
                    "$options": "$i",
                }
            }
            coll = mongo.db.seed_mature_pre_var_v1
            rows = coll.find(query)
            total = coll.find(query).count()
        else:
            rows = {}
            total = 0
        return {
            "mirna_summary_list": list(rows),
            "mirna_summary_count": total,
        }
api.add_resource(MirInfo, "/api/mirinfo")
# Drug identity record joined from the nscid_psid collection.
drug_name = {
    "pubchem_sid": fields.String,
    "drug_name": fields.String,
    "fda_status": fields.String,
    "nsc_id": fields.String,
    "machanism_of_action": fields.String,
}
# One NCI-60 miRNA/drug correlation row, with the joined drug identity.
nci60_item = {
    "miRNA": fields.String,
    "NSC": fields.String,
    "pubchem": fields.String,
    "cor": fields.String,
    "pv": fields.String,
    "fdr": fields.String,
    "drug_name": fields.Nested(drug_name),
}
# Response envelope for /api/mirdrug.
drug_cor = {"nci60_list": fields.Nested(nci60_item), "nci60_count": fields.Integer}
class MirDrug(Resource):
    """NCI-60 drug/miRNA expression correlations for one mature miRNA.

    Joins nci60_drug_correlation rows (FDR filtered) with drug identities
    from nscid_psid via the NSC id.
    """

    @marshal_with(drug_cor)
    def get(self):
        """GET /api/mirdrug?mature_id=<mir_id>."""
        parser = reqparse.RequestParser()
        parser.add_argument("mature_id", type=str)
        args = parser.parse_args()
        mature_id = args["mature_id"]
        if mature_id:
            # NOTE(review): fdr is stored as a string, so this is a
            # lexicographic comparison, not numeric — kept for compatibility.
            condition_nci60 = {
                "miRNA": mature_id,
                "fdr": {"$lt": "0.05"},
            }
            lookup_name = {
                "$lookup": {
                    "from": "nscid_psid",
                    "localField": "NSC",
                    "foreignField": "nsc_id",
                    "as": "drug_name",
                }
            }
            print(condition_nci60)
            pipeline = [{"$match": condition_nci60}, lookup_name]
            nci60_list = mongo.db.nci60_drug_correlation.aggregate(pipeline)
            # Historical behavior: count is a constant flag, not a real total.
            nci60_count = 1
        else:
            nci60_list = []
            nci60_count = 0
        return {"nci60_list": list(nci60_list), "nci60_count": nci60_count}
api.add_resource(MirDrug, "/api/mirdrug")
# Response envelope for /api/mirna_key (mature-id and precursor-id matches).
mirna_key_list = {
    "mirna_key_list": fields.Nested(mir_summary),
    "premir_key_list": fields.Nested(mir_summary),
}
# One GO annotation attached to a precursor miRNA.
mirnago_item = {
    "go_name": fields.String,
    "go_id": fields.String,
    "precursor_id": fields.String,
    "reference": fields.String,
}
# Response envelope for /api/mirnago.
mirnago_list = {
    "mirnago_list": fields.Nested(mirnago_item),
    "mirnago_count": fields.Integer,
}
class MirnaGo(Resource):
    """GO annotations attached to one precursor miRNA."""

    @marshal_with(mirnago_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("precursor_id", type=str)
        pre_id = req.parse_args()["precursor_id"]
        if not pre_id:
            return {"mirnago_list": [], "mirnago_count": 0}
        query = {"precursor_id": pre_id}
        rows = mongo.db.mirnago.find(query)
        total = mongo.db.mirnago.find(query).count()
        return {"mirnago_list": list(rows), "mirnago_count": total}
# Route registration for the precursor GO-annotation endpoint.
api.add_resource(MirnaGo, "/api/mirnago")
class MirnaKey(Resource):
    """Substring search over mature (mir_id) and precursor (pre_id) identifiers."""

    @marshal_with(mirna_key_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("mirna_id", type=str)
        key = req.parse_args()["mirna_id"]
        if key:
            # Case-insensitive substring match against both id columns.
            mature_q = {"mir_id": {"$regex": key, "$options": "$i"}}
            pre_q = {"pre_id": {"$regex": key, "$options": "$i"}}
            print(mature_q)
            matures = mongo.db.pri_mir_summary.find(mature_q)
            precursors = mongo.db.pri_mir_summary.find(pre_q)
        else:
            matures = {}
            precursors = {}
        return {
            "mirna_key_list": list(matures),
            "premir_key_list": list(precursors),
        }
api.add_resource(MirnaKey, "/api/mirna_key")
# Precursor coordinates and per-source variant counts.
pri_id = {
    "pre_id": fields.String,
    "pre_chr": fields.String,
    "pre_acc": fields.String,
    "pre_start": fields.String,
    "pre_end": fields.String,
    "pre_strand": fields.String,
    "snp_in_premir": fields.Integer,
    "cosmic_in_premir": fields.Integer,
    "clinvar_in_premir": fields.Integer,
}
# Mature ids/accessions nested under one precursor.
mature_info = {
    "mir_id": fields.List(fields.String),
    "mir_acc": fields.List(fields.String),
}
# Generic {_id, count} aggregation result.
pri_count = {"_id": fields.String, "count": fields.String}
# Marshal spec for one pre-miRNA summary row (coordinates, variant counts,
# and the nested mature-miRNA ids).
primir_summary = {
    "pre_id": fields.String,
    "pre_chr": fields.String,
    "pre_acc": fields.String,
    "pre_start": fields.String,
    "pre_end": fields.String,
    "pre_strand": fields.String,
    "snp_in_premir": fields.Integer,
    "cosmic_in_premir": fields.Integer,
    "clinvar_in_premir": fields.Integer,
    "drv_in_premir": fields.Integer,
    "mature_info": fields.Nested(mature_info),
}
# Response envelope for /api/primir_summary.
primir_summary_list = {
    "primir_summary_list": fields.Nested(primir_summary),
    "primir_summary_count": fields.Integer,
}
class PrimirSummary(Resource):
    """Paginated pre-miRNA variant summary, filterable by chromosome and pre_id.

    GET /api/primir_summary?page=<n>&chrome=<chrom|All>&pre_id=<substr>
    """

    @marshal_with(primir_summary_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("page", type=int, default=1)
        parser.add_argument("chrome", type=str)
        parser.add_argument("pre_id")
        args = parser.parse_args()
        page = args["page"]
        per_page = 15
        record_skip = (page - 1) * per_page
        print(page)
        condition = {}
        # "All" means no chromosome restriction.
        if args["chrome"] != "All":
            condition["pre_chr"] = args["chrome"]
        # Case-insensitive substring match against the precursor id.
        if args["pre_id"]:
            condition["pre_id"] = {"$regex": args["pre_id"], "$options": "$i"}
        print(condition)
        premir_summary_list = (
            mongo.db.premir_summary_v1.find(condition).skip(record_skip).limit(per_page)
        )
        premir_summary_count = mongo.db.premir_summary_v1.find(condition).count()
        print("done serch")
        return {
            "primir_summary_list": list(premir_summary_list),
            "primir_summary_count": premir_summary_count,
        }
api.add_resource(PrimirSummary, "/api/primir_summary")
# (Removed: dead commented-out premir_genome / mir_cluster5k / mir_cluster10k
# field specs that were kept here as a no-op string literal.)
# Marshal spec for a single mutation record (nested inside premir_info).
mut_item = {
    "mut_id": fields.String,
    "chr": fields.String,
    "position": fields.String,
    "ref": fields.String,
    "alt": fields.String,
    "structure_analys": fields.Integer,
}
# Cluster membership of one precursor (5 kb / 10 kb windows).
premir_cluster = {
    "pre_id": fields.String,
    "cluster10k_id": fields.String,
    "cluster5k_id": fields.String,
}
# Curated function / HMDD disease annotations for one precursor.
mirset_v9_item = {
    "Function": fields.List(fields.String),
    "precurser_id": fields.String,
    "HMDD": fields.List(fields.String),
}
# Genomic context (host gene and region) of one precursor.
premir_context = {
    "precursor_id": fields.String,
    "host_gene": fields.String,
    "region": fields.String,
}
# Full pre-miRNA detail record, with nested variant, context, summary and
# function sub-documents.
premir_info = {
    "pre_id": fields.String,
    "cluster10k_id": fields.List(fields.List(fields.String)),
    "cluster5k_id": fields.List(fields.List(fields.String)),
    "sequence": fields.String,
    "dotfold": fields.String,
    "cosmic": fields.Nested(mut_item),
    "clinvar": fields.Nested(mut_item),
    "snv": fields.Nested(mut_item),
    "mfe": fields.String,
    "host_gene": fields.Nested(premir_context),
    "mirinfo": fields.Nested(mir_summary),
    "mature_position": fields.List(fields.List(fields.String)),
    "mirset_v9": fields.Nested(mirset_v9_item),
}
# Response envelope for /api/premir_info.
premir_info_list = {"premir_info": fields.Nested(premir_info)}
class PremirInfo(Resource):
    """Full detail for one pre-miRNA, joined with summary, function and host-gene data."""

    @marshal_with(premir_info_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("search_ids", type=str)
        pre_id = req.parse_args()["search_ids"]
        print(pre_id)
        if not pre_id:
            return {"premir_info": []}
        # Join mature summaries, curated functions and genomic context onto
        # the precursor record (all keyed on the local pre_id field).
        joins = [
            ("pri_mir_summary", "pre_id", "mirinfo"),
            ("mirset_v9", "precurser_id", "mirset_v9"),
            ("premir_context", "precursor_id", "host_gene"),
        ]
        stages = [{"$match": {"pre_id": pre_id}}]
        for coll, foreign, alias in joins:
            stages.append(
                {
                    "$lookup": {
                        "from": coll,
                        "localField": "pre_id",
                        "foreignField": foreign,
                        "as": alias,
                    }
                }
            )
        print(stages)
        records = mongo.db.premir_info_addindel_v1.aggregate(stages)
        return {"premir_info": list(records)}
api.add_resource(PremirInfo, "/api/premir_info")
# Marshal spec for one SNP-altered precursor sequence/structure record.
pri_alt = {
    "pre_id": fields.String,
    "pre_start": fields.String,
    "pre_end": fields.String,
    "snp_id": fields.String,
    "snp_chr": fields.String,
    "snp_position": fields.String,
    "ref": fields.String,
    "snp_ref_freq": fields.String,
    "alt": fields.String(attribute="snp_alt"),
    "snp_alt_freq": fields.String,
    "curalt": fields.String,
    "pre_altseq": fields.String,
    "dotfold": fields.String,
    "mfe": fields.String,
    "pre_strand": fields.String,
    "pre_acc": fields.String,
    "rela_loc": fields.String,
    "insert": fields.Integer,
    "delete": fields.Integer,
    "alt_start": fields.String,
    "alt_end": fields.String,
}
# Response envelope for /api/primir_altseq.
primir_alt_list = {
    "primir_alt_list": fields.Nested(pri_alt),
    "primir_alt_count": fields.Integer,
}
class PrimirAlt(Resource):
    """Altered precursor sequence/structure records for one SNP on one pre-miRNA.

    GET /api/primir_altseq?search_ids=<snp_id>&pre_id=<pre_id>
    """

    @marshal_with(primir_alt_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("search_ids", type=str)
        parser.add_argument("pre_id", type=str)
        args = parser.parse_args()
        search_ids = args["search_ids"]
        print(search_ids)
        if search_ids:
            condition = {"snp_id": search_ids, "pre_id": args["pre_id"]}
            primir_alt_list = mongo.db.primary_altseq_indel.find(condition)
            primir_alt_count = mongo.db.primary_altseq_indel.find(condition).count()
        else:
            primir_alt_list = {}
            # BUG FIX: was `primit_alt_count = 0` (typo), which left
            # `primir_alt_count` undefined and raised NameError below.
            primir_alt_count = 0
        return {
            "primir_alt_list": list(primir_alt_list),
            "primir_alt_count": primir_alt_count,
        }
api.add_resource(PrimirAlt, "/api/primir_altseq")
# Marshal spec for one mutation-altered precursor sequence record.
primir_mut = {
    "pre_id": fields.String,
    "pre_start": fields.String,
    "pre_end": fields.String,
    "mut_id": fields.String,
    "mut_chr": fields.String,
    "mut_position": fields.String,
    "ref": fields.String,
    "curalt": fields.String,
    "pre_altseq": fields.String,
    "dotfold": fields.String,
    "mfe": fields.String,
    "pre_strand": fields.String,
    "pre_acc": fields.String,
    "rela_loc": fields.String,
    "source": fields.String,
    "insert": fields.Integer,
    "delete": fields.Integer,
    "alt_start": fields.String,
    "alt_end": fields.String,
}
# Response envelope for /api/primir_altseq_mut.
primir_mut_list = {
    "primir_mut_list": fields.Nested(primir_mut),
    "primir_mut_count": fields.Integer,
}
class PrimirMut(Resource):
    """Altered precursor sequence records for one mutation on one pre-miRNA."""

    @marshal_with(primir_mut_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("mut_id", type=str)
        req.add_argument("pre_id", type=str)
        params = req.parse_args()
        if not params["mut_id"]:
            return {"primir_mut_list": [], "primir_mut_count": 0}
        query = {"mut_id": params["mut_id"], "pre_id": params["pre_id"]}
        rows = mongo.db.primir_altseq_mut_indel.find(query)
        total = mongo.db.primir_altseq_mut_indel.find(query).count()
        return {
            "primir_mut_list": list(rows),
            "primir_mut_count": total,
        }
api.add_resource(PrimirMut, "/api/primir_altseq_mut")
# Marshal spec for one SNP summary row.
snpinfo_line = {
    "snp_id": fields.String,
    "snp_chr": fields.String,
    "snp_coordinate": fields.String,
    "ref": fields.String,
    "alt": fields.String,
    "ref_freq": fields.String,
    "alt_freq": fields.String,
    "location": fields.String,
    "identifier": fields.String,
    "ldsnp": fields.Integer,
    "mutation_rela": fields.Integer,
    "gain_count": fields.String,
    "loss_count": fields.String,
}
# Response envelope for /api/snpinfo.
snpinfo = {"snpinfo": fields.Nested(snpinfo_line), "snpinfo_count": fields.Integer}
class SnpInfo(Resource):
    """SNP summary lookup: paginated full listing or a single rs id.

    GET /api/snpinfo?query_snp=summary&page=<n>  -> paginated listing
    GET /api/snpinfo?query_snp=rsNNN&page=1      -> records for one SNP
    """

    @marshal_with(snpinfo)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("query_snp", type=str)
        parser.add_argument("page")
        args = parser.parse_args()
        # Robustness fix: both arguments are optional, so default them here
        # instead of crashing on int(None) / None.startswith(...).
        page = args["page"] or 1
        query_snp = args["query_snp"] or ""
        per_page = 15
        record_skip = (int(page) - 1) * int(per_page)
        if query_snp == "summary":
            snpinfo = mongo.db.snp_summary.find().skip(record_skip).limit(per_page)
            snpinfo_count = mongo.db.snp_summary.find().count()
        elif query_snp.startswith("rs"):
            condition = {"snp_id": query_snp}
            snpinfo = mongo.db.snp_summary.find(condition)
            snpinfo_count = mongo.db.snp_summary.find(condition).count()
        else:
            snpinfo = {}
            snpinfo_count = 0
        return {"snpinfo": list(snpinfo), "snpinfo_count": snpinfo_count}
api.add_resource(SnpInfo, "/api/snpinfo")
# Marshal spec for one GWAS Catalog association row; attribute= maps the
# Catalog's upper-case column names onto friendlier output keys.
catalog_line = {
    "snp_id": fields.String(attribute="SNPS"),
    "risk_allele": fields.String(attribute="STRONGEST_SNP-RISK_ALLELE"),
    "risk_allele_fre": fields.String(attribute="RISK_ALLELE_FREQUENCY"),
    "disease": fields.String(attribute="DISEASE/TRAIT"),
    "reported_gene": fields.String(attribute="REPORTED_GENE"),
    "p_value": fields.String(attribute="P-VALUE"),
    "or_beta": fields.String(attribute="OR_or_BETA"),
    "ci95": fields.String(attribute="CI_95_TEXT"),
    "pubmed_id": fields.String(attribute="PUBMEDID"),
    "pubmed_link": fields.String(attribute="LINK"),
}
# Response envelope for /api/gwas_catalog.
catalog_list = {
    "catalog_list": fields.Nested(catalog_line),
    "catalog_count": fields.Integer,
}
class GwasCatalog(Resource):
    """GWAS Catalog associations for one SNP id."""

    @marshal_with(catalog_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("search_ids", type=str)
        snp = req.parse_args()["search_ids"]
        print(snp)
        if not snp:
            return {"catalog_list": [], "catalog_count": 0}
        query = {"SNPS": snp}
        rows = mongo.db.gwas_catalog_alternative.find(query)
        total = mongo.db.gwas_catalog_alternative.find(query).count()
        return {"catalog_list": list(rows), "catalog_count": total}
api.add_resource(GwasCatalog, "/api/gwas_catalog")
# Per-population LD window for a tag SNP.
tag_info = {
    "population": fields.String,
    "ld_start": fields.String,
    "ld_end": fields.String,
}
# Per-population LD metrics relative to the related tag SNP.
relate_tag_info = {
    "population": fields.String,
    "relate_tag_chr": fields.String,
    "relate_tag_ld_start": fields.String,
    "relate_tag_ld_end": fields.String,
    "d_prime": fields.String,
    "r2": fields.String,
}
# Grouped-document key emitted by the $group stage in LDinfo.get.
ld_info_id = {
    "snp_id": fields.String,
    "snp_chr": fields.String(attribute="chrome"),
    "snp_position": fields.String(attribute="position"),
    "is_tag": fields.String,
    "is_ld": fields.String,
    "location": fields.String,
    "rela_tag": fields.String,
    "relate_tag_pos": fields.String,
}
# One grouped LD record with nested windows and joined Catalog hits.
ld_info = {
    "_id": fields.Nested(ld_info_id),
    "tag_info": fields.Nested(tag_info),
    "relate_tag_info": fields.Nested(relate_tag_info),
    "catalog_info": fields.Nested(catalog_line),
}
# Response envelope for /api/ldinfo.
ld_info_list = {"ld_list": fields.Nested(ld_info), "ld_item_lenth": fields.Integer}
class LDinfo(Resource):
    """Linkage-disequilibrium context for one SNP: per-population tag/LD
    windows grouped into a single document plus joined GWAS Catalog hits."""

    @marshal_with(ld_info_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("search_ids", type=str)
        args = parser.parse_args()
        search_ids = args["search_ids"]
        print(search_ids)
        # condition = {}
        match = {"$match": {"snp_id": search_ids}}
        # Collapse the per-population ld_region rows into one document per
        # SNP.  NOTE: the field order inside "_id" is significant to Mongo
        # grouping (documents compare field-order-sensitively), so do not
        # reorder the literal below.
        group = {
            "$group": {
                "_id": {
                    "snp_id": "$snp_id",
                    "chrome": "$chrome",
                    "position": "$position",
                    "is_tag": "$is_tag",
                    "is_ld": "$is_ld",
                    "location": "$location",
                    "rela_tag": "$rela_tag",
                    "relate_tag_pos": "$relate_tag_pos",
                },
                "tag_info": {
                    "$push": {
                        "population": "$population",
                        "ld_start": "$ld_start",
                        "ld_end": "$ld_end",
                    }
                },
                "relate_tag_info": {
                    "$push": {
                        "population": "$population",
                        "relate_tag_chr": "$relate_tag_chr",
                        "relate_tag_ld_start": "$relate_tag_ld_start",
                        "relate_tag_ld_end": "$relate_tag_ld_end",
                        "d_prime": "$d_prime",
                        "r2": "$r2",
                    }
                },
            }
        }
        # Join GWAS Catalog associations for the related tag SNP.
        lookup = {
            "$lookup": {
                "from": "gwas_catalog_alternative",
                "localField": "_id.rela_tag",
                "foreignField": "SNPS",
                "as": "catalog_info",
            }
        }
        pipline = [match, group, lookup]
        print(pipline)
        ld_list = mongo.db.ld_region.aggregate(pipline)
        # Raw (ungrouped) row count for the same SNP.
        ld_item_lenth = mongo.db.ld_region.find({"snp_id": search_ids}).count()
        return {"ld_list": list(ld_list), "ld_item_lenth": ld_item_lenth}
api.add_resource(LDinfo, "/api/ldinfo")
# Disease/PubMed pair embedded in mutation records.
disease_pubmed_item = {"disease": fields.String, "pubmed_id": fields.String}
# Marshal spec for one disease-related-variant (mutation) row.
mutation_line = {
    "analysis": fields.Integer,
    "mut_chr": fields.String,
    "mut_position": fields.String,
    "mut_id": fields.String,
    "ref": fields.String,
    "alt": fields.String,
    "rela_tag_snp": fields.String,
    "location": fields.String,
    "source": fields.String,
    "gain_count": fields.String,
    "loss_count": fields.String,
    "mature_id": fields.String,
    "gene": fields.String,
    "identifier_lower": fields.String,
    "pre_id": fields.String,
    "energy_change": fields.String,
    "expression_change": fields.String,
    "snp_id": fields.String,
    "disease_pubmed": fields.Nested(disease_pubmed_item),
}
# Generic {_id, count} aggregation result used for totals.
count_group = {"_id": fields.String, "count": fields.Integer}
# Shared response envelope for the mutation-summary endpoints (seed,
# mature, premir, utr3 and combined variants).
mutation_summary_list = {
    "mutation_seed_list": fields.Nested(mutation_line),
    "mutation_seed_count": fields.Nested(count_group),
    "mutation_mature_list": fields.Nested(mutation_line),
    "mutation_mature_count": fields.Nested(count_group),
    "mutation_premir_list": fields.Nested(mutation_line),
    "mutation_premir_count": fields.Nested(count_group),
    "mutation_utr3_list": fields.Nested(mutation_line),
    #'mutation_utr3_count':fields.Nested(count_group),
    "mutation_utr3_count": fields.Integer,
    "mutation_summary_list": fields.Nested(mutation_line),
    "mutation_summary_count": fields.Nested(count_group),
}
# (Removed: a dead commented-out MutationSummary resource that was kept as a
# no-op triple-quoted string; the live mutation-summary endpoints supersede it.)
# Gene-symbol lookup record (original casing plus pre-lowercased key).
gene_symbol = {"gene_symbol": fields.String, "gene_symbol_lower": fields.String}
# Response envelope shared by the gene autocomplete endpoints.
gene_list = {
    "gene_list": fields.Nested(gene_symbol),
    "gene_query": fields.Nested(gene_symbol),
}
class GetGene(Resource):
    """Gene-symbol autocomplete plus exact lookup against snp_summary_genelist."""

    @marshal_with(gene_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("gene", type=str)
        gene = req.parse_args()["gene"]
        print(gene)
        if not gene:
            return {"gene_list": [], "gene_query": []}
        # Case-insensitive substring match for suggestions (capped at 10)
        # and an exact match against the pre-lowercased column.
        fuzzy = {"gene_symbol": {"$regex": gene.lower(), "$options": "$i"}}
        exact = {"gene_symbol_lower": gene.lower()}
        print(exact)
        suggestions = mongo.db.snp_summary_genelist.find(fuzzy).limit(10)
        matches = mongo.db.snp_summary_genelist.find(exact)
        return {"gene_list": list(suggestions), "gene_query": list(matches)}
# Route registration for the SNP-summary gene autocomplete endpoint.
api.add_resource(GetGene, "/api/snp_summary_gene")
class MutGetGene(Resource):
    """Gene-symbol autocomplete plus exact lookup against mutation_summary_genelist."""

    @marshal_with(gene_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("gene", type=str)
        gene = req.parse_args()["gene"]
        print(gene)
        if not gene:
            return {"gene_list": [], "gene_query": []}
        # Case-insensitive substring match for suggestions (capped at 10)
        # and an exact match against the pre-lowercased column.
        fuzzy = {"gene_symbol": {"$regex": gene.lower(), "$options": "$i"}}
        exact = {"gene_symbol_lower": gene.lower()}
        print(exact)
        suggestions = mongo.db.mutation_summary_genelist.find(fuzzy).limit(10)
        matches = mongo.db.mutation_summary_genelist.find(exact)
        return {"gene_list": list(suggestions), "gene_query": list(matches)}
api.add_resource(MutGetGene, "/api/mutation_summary_gene")
# Phenotype autocomplete record and response envelope.
phenotype_line = {"phenotype": fields.String}
phenotype_list = {"phenotype_list": fields.Nested(phenotype_line)}
class GetPhenotype(Resource):
    """Phenotype autocomplete (case-insensitive substring, max 10 hits)."""

    @marshal_with(phenotype_list)
    def get(self):
        req = reqparse.RequestParser()
        req.add_argument("phenotype", type=str)
        term = req.parse_args()["phenotype"]
        print(term)
        if not term:
            return {"phenotype_list": []}
        query = {"phenotype": {"$regex": term, "$options": "$i"}}
        rows = mongo.db.phenotype_list.find(query).limit(10)
        return {"phenotype_list": list(rows)}
# Route registration for the phenotype autocomplete endpoint.
api.add_resource(GetPhenotype, "/api/mutation_summary_phenotype")
class MutationSummarySeed(Resource):
    """Disease-related variants located in miRNA seed regions, paginated."""

    @marshal_with(mutation_summary_list)
    def get(self):
        """Assemble an aggregation pipeline from the optional filters and run
        it against drv_in_seed_v3_redundancy (rows plus a $group count)."""
        parser = reqparse.RequestParser()
        parser.add_argument("mut_id", type=str)
        parser.add_argument("page")
        # parser.add_argument('chrome')
        parser.add_argument("location")
        parser.add_argument("resource")
        # parser.add_argument('snp_rela')
        # parser.add_argument('pubmed_id')
        parser.add_argument("histology")
        parser.add_argument("pathology")
        parser.add_argument("gene")
        args = parser.parse_args()
        # print(args['chrome'])
        page = 1
        per_page = 15
        record_skip = (int(page) - 1) * per_page
        condition = {}
        histology_dict = {}
        pathology_dict = {}
        match_histology = {}
        match_pathology = {}
        pipline = []
        if args["page"]:
            page = args["page"]
            record_skip = (int(page) - 1) * per_page
        if args["gene"]:
            # Gene filter matches the pre-lowercased identifier column.
            condition["identifier_lower"] = args["gene"].lower()
        # if args['chrome']!='All' and args['chrome']:
        # condition['chrome']=args['chrome']
        # if args['location'] != 'All'and args['location']:
        # condition['location']=args['location']
        if args["resource"] != "All" and args["resource"]:
            condition["source"] = args["resource"]
        if args["histology"] and args["histology"] != "All":
            # Case-insensitive substring match inside the embedded
            # disease_pubmed documents.
            histology_dict["disease_pubmed.disease"] = {
                "$regex": args["histology"],
                "$options": "$i",
            }
            match_histology = {"$match": histology_dict}
        if args["pathology"] and args["pathology"] != "All":
            pathology_dict["disease_pubmed.disease"] = {
                "$regex": args["pathology"],
                "$options": "$i",
            }
            match_pathology = {"$match": pathology_dict}
        if args["mut_id"]:
            # mut_id=args['mut_id']
            # if mut_id.startswith('COS') or re.match('[0-9]*',mut_id):
            condition["mut_id"] = args["mut_id"]
        # if args['snp_rela']:
        # condition['snp_rela']=args['snp_rela']
        # if args['pubmed_id']:
        # condition['pubmed_id']={'$exists':True}
        match_condition = {"$match": condition}
        skip = {"$skip": record_skip}
        limit = {"$limit": per_page}
        count_group = {"$group": {"_id": "null", "count": {"$sum": 1}}}
        # Only include $match stages whose filters are non-empty; the order
        # condition -> histology -> pathology is preserved.
        if condition:
            pipline.append(match_condition)
        if histology_dict:
            pipline.append(match_histology)
        if pathology_dict:
            pipline.append(match_pathology)
        # The count pipeline shares the match stages but replaces paging with
        # a single $group that tallies matching rows; it must be captured
        # BEFORE skip/limit are appended to pipline.
        pipline_count = pipline + [count_group]
        pipline.append(skip)
        pipline.append(limit)
        print("search srv seed")
        print(condition)
        print(histology_dict)
        print(pathology_dict)
        # if condition or histology_dict or pathology_dict:
        mutation_seed_list = mongo.db.drv_in_seed_v3_redundancy.aggregate(pipline)
        # else:
        # mutation_summary_list=mongo.db.mutation_summary_addtarget.find(condition).skip(record_skip).limit(per_page)
        mutation_seed_count = mongo.db.drv_in_seed_v3_redundancy.aggregate(
            pipline_count
        )
        return {
            "mutation_seed_list": list(mutation_seed_list),
            "mutation_seed_count": list(mutation_seed_count),
        }
# Route registration for the seed-region mutation summary endpoint.
api.add_resource(MutationSummarySeed, "/api/mutation_summary_seed")
class MutationSummaryMature(Resource):
    """Mutations in mature miRNA sequence: the union of records tagged
    ``location == "Mature"`` in ``drv_in_premir_v3_redundancy`` and records
    tagged ``location == "Seed"`` in ``drv_in_premir_v2``.

    Bug fix: the second ("Seed") query used to append its $match/$skip/$limit
    stages onto the pipeline list that had already been used for the first
    query, producing [match, skip, limit, match, skip, limit] — i.e. the
    second filter was applied *after* pagination — and both $match stages
    shared one mutated condition dict.  Each location now gets a fresh
    pipeline built from its own filter copy.
    """

    @marshal_with(mutation_summary_list)
    def get(self):
        """Handle GET /api/mutation_summary_mature.

        Optional query args: mut_id, page, resource, histology, pathology,
        gene.  Returns one page (15 rows per location class) plus count
        aggregation results.
        """
        parser = reqparse.RequestParser()
        parser.add_argument("mut_id", type=str)
        parser.add_argument("page")
        parser.add_argument("resource")
        parser.add_argument("histology")
        parser.add_argument("pathology")
        parser.add_argument("gene")
        args = parser.parse_args()

        per_page = 15  # fixed page size
        page = args["page"] or 1
        record_skip = (int(page) - 1) * per_page

        condition = {}
        histology_dict = {}
        pathology_dict = {}
        if args["gene"]:
            condition["identifier_lower"] = args["gene"].lower()
        if args["resource"] != "All" and args["resource"]:
            condition["resource"] = args["resource"]
        if args["histology"] and args["histology"] != "All":
            # Case-insensitive substring match on the 'pathology' field.
            histology_dict["pathology"] = {
                "$regex": args["histology"],
                "$options": "$i",
            }
        if args["pathology"] and args["pathology"] != "All":
            pathology_dict["disease"] = {"$regex": args["pathology"], "$options": "$i"}
        if args["mut_id"]:
            condition["mut_id"] = args["mut_id"]

        skip = {"$skip": record_skip}
        limit = {"$limit": per_page}
        count_group = {"$group": {"_id": "null", "count": {"$sum": 1}}}

        def build_pipelines(location):
            # Fresh (page_pipeline, count_pipeline) pair for one location
            # class; the filter dict is copied so later calls cannot mutate
            # a pipeline that was already sent to the server.
            loc_condition = dict(condition, location=location)
            stages = [{"$match": loc_condition}]
            if histology_dict:
                stages.append({"$match": histology_dict})
            if pathology_dict:
                stages.append({"$match": pathology_dict})
            return stages + [skip, limit], stages + [count_group]

        print(condition)
        print(histology_dict)
        print(pathology_dict)

        pipeline_mature, pipeline_mature_count = build_pipelines("Mature")
        mature_rows = mongo.db.drv_in_premir_v3_redundancy.aggregate(pipeline_mature)
        mature_count = mongo.db.drv_in_premir_v3_redundancy.aggregate(
            pipeline_mature_count
        )

        pipeline_seed, pipeline_seed_count = build_pipelines("Seed")
        seed_rows = mongo.db.drv_in_premir_v2.aggregate(pipeline_seed)
        seed_count = mongo.db.drv_in_premir_v2.aggregate(pipeline_seed_count)

        mutation_mature_list = list(mature_rows) + list(seed_rows)
        mutation_mature_count = list(mature_count) + list(seed_count)
        return {
            "mutation_mature_list": mutation_mature_list,
            "mutation_mature_count": mutation_mature_count,
        }
api.add_resource(MutationSummaryMature, "/api/mutation_summary_mature")
class MutationSummaryPremir(Resource):
    """Paginated mutation records located in pre-miRNA hairpins
    (collection ``drv_in_premir_v3_redundancy``)."""

    @marshal_with(mutation_summary_list)
    def get(self):
        """Handle GET /api/mutation_summary_premir.

        Optional filters: mut_id, page, resource, histology, pathology,
        gene (matched against either identifier_lower or pre_id).
        """
        parser = reqparse.RequestParser()
        parser.add_argument("mut_id", type=str)
        for optional_arg in ("page", "resource", "histology", "pathology", "gene"):
            parser.add_argument(optional_arg)
        args = parser.parse_args()

        page_size = 15
        current_page = args["page"] or 1
        skip_count = (int(current_page) - 1) * page_size

        query = {}
        histology_filter = {}
        pathology_filter = {}
        if args["gene"]:
            gene_lower = args["gene"].lower()
            # A gene term may name either the mature identifier or the precursor.
            query["$or"] = [
                {"identifier_lower": gene_lower},
                {"pre_id": gene_lower},
            ]
        if args["resource"] and args["resource"] != "All":
            query["source"] = args["resource"]
        if args["histology"] and args["histology"] != "All":
            histology_filter["disease_pubmed.disease"] = {
                "$regex": args["histology"],
                "$options": "$i",
            }
        if args["pathology"] and args["pathology"] != "All":
            pathology_filter["disease_pubmed.disease"] = {
                "$regex": args["pathology"],
                "$options": "$i",
            }
        if args["mut_id"]:
            query["mut_id"] = args["mut_id"]

        # One $match stage per non-empty filter, in the same precedence order
        # as the sibling endpoints; the count pipeline shares the match stages
        # but replaces pagination with a $group counter.
        pipeline = []
        for stage_filter in (query, histology_filter, pathology_filter):
            if stage_filter:
                pipeline.append({"$match": stage_filter})
        pipeline_count = pipeline + [
            {"$group": {"_id": "null", "count": {"$sum": 1}}}
        ]
        pipeline.append({"$skip": skip_count})
        pipeline.append({"$limit": page_size})

        print(query)
        print(histology_filter)
        print(pathology_filter)

        premir_rows = mongo.db.drv_in_premir_v3_redundancy.aggregate(pipeline)
        premir_total = mongo.db.drv_in_premir_v3_redundancy.aggregate(pipeline_count)
        return {
            "mutation_premir_list": list(premir_rows),
            "mutation_premir_count": list(premir_total),
        }
api.add_resource(MutationSummaryPremir, "/api/mutation_summary_premir")
class MutationSummaryUtr3(Resource):
    # GET /api/mutation_summary_utr3 -- paginated mutations falling in
    # 3'UTR target sites (collection drv_in_utr_v3_redundancy).
    @marshal_with(mutation_summary_list)
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("mut_id", type=str)
        parser.add_argument("page")
        parser.add_argument("resource")
        parser.add_argument("histology")
        parser.add_argument("pathology")
        parser.add_argument("gene")
        args = parser.parse_args()
        page = 1
        per_page = 15  # fixed page size
        record_skip = (int(page) - 1) * per_page
        condition = {}
        page_condition = {}   # unused leftover of a paging experiment
        histology_dict = {}   # never populated below; see NOTE on match_histology
        pathology_dict = {}
        match_histology = {}
        match_pathology = {}
        pipline = []          # only referenced by the disabled aggregation block below
        if args["page"]:
            page = args["page"]
            record_skip = (int(page) - 1) * per_page
        if args["gene"]:
            condition["identifier_lower"] = args["gene"].lower()
        if args["resource"] != "All" and args["resource"]:
            condition["source"] = args["resource"]
        if args["histology"] and args["histology"] != "All":
            # Case-insensitive substring match on the joined disease field.
            condition["disease_pubmed.disease"] = {
                "$regex": args["histology"],
                "$options": "$i",
            }
            # NOTE(review): histology_dict is still empty here, so this stage
            # is {'$match': {}}; it is unused anyway because the aggregation
            # path below is disabled.
            match_histology = {"$match": histology_dict}
        if args["pathology"] and args["pathology"] != "All":
            # NOTE(review): writes the same key as the histology branch above,
            # so supplying both filters keeps only the pathology regex --
            # confirm whether an $and was intended.
            condition["disease_pubmed.disease"] = {
                "$regex": args["pathology"],
                "$options": "$i",
            }
        if args["mut_id"]:
            condition["mut_id"] = args["mut_id"]
        # Disabled aggregation-based implementation kept for reference
        # (inert string literal; the live code below uses a plain find()).
        """
        match_condition={'$match':condition}
        #skip={'$skip':record_skip}
        limit={'$limit':per_page}
        skip={'$skip':record_skip}
        count_group={'$group':{'_id':'null','count':{'$sum':1}}}
        if condition:
            pipline.append(match_condition)
        if histology_dict:
            pipline.append(match_histology)
        if pathology_dict:
            pipline.append(match_pathology)
        pipline_count=pipline+[count_group]
        #pipline.append(skip)
        if args['gene'] or (args['resource']!='All' and args['resource']) or (args['pathology'] and args['pathology']!='All') or (args['histology'] and args['histology'] != 'All') or args['mut_id']:
            pipline.append(skip)
        else:
            #pipline.append({'$match':page_condition})
            pipline.append(skip)
        pipline.append(limit)
        print('get mutation summary UTR3')
        print(condition)
        print(histology_dict)
        print(pathology_dict)
        print(pipline)
        #if condition or histology_dict or pathology_dict:
        mutation_utr3_list=mongo.db.drv_in_utr_v3_redundancy.aggregate(pipline)
        #print(list(mutation_utr3_list))
        #else:
        #    mutation_summary_list=mongo.db.mutation_summary_addtarget.find(condition).skip(record_skip).limit(per_page)
        mutation_utr3_count=mongo.db.drv_in_utr_v3_redundancy.aggregate(pipline_count)
        """
        # Live implementation: simple filtered find with cursor pagination.
        mutation_utr3_list = (
            mongo.db.drv_in_utr_v3_redundancy.find(condition)
            .skip(record_skip)
            .limit(per_page)
        )
        mutation_utr3_count = mongo.db.drv_in_utr_v3_redundancy.find(condition).count()
        return {
            "mutation_utr3_list": list(mutation_utr3_list),
            "mutation_utr3_count": mutation_utr3_count,
        }
api.add_resource(MutationSummaryUtr3, "/api/mutation_summary_utr3")
snp_line = {
"snp_id": fields.String,
"snp_chr": fields.String,
"snp_position": fields.String,
"ref": fields.String,
"alt": fields.String,
"curalt": fields.String,
"ref_freq": fields.String,
"alt_freq": fields.String,
"location": fields.String,
"gene": fields.String,
"mature_chr": fields.String,
"mature_start": fields.String,
"mature_end": fields.String,
"mature_strand": fields.String,
"mature_id": fields.String,
"is_ld": fields.String,
"gain_count": fields.String,
"loss_count": fields.String,
"pre_id": fields.String,
"energy_change": fields.String,
"expression_change": fields.String,
"analysis": fields.Integer,
"snp_energy": fields.String,
"wild_energy": fields.String,
}
"""
indel_line={
'chr':fields.String,
'position':fields.String,
'snp_id':fields.String,
'ref':fields.String,
'alt':fields.String,
'ref_freq':fields.String,
'alt_freq':fields.String,
'transcript_chr':fields.String,
'trnascript_start':fields.String,
'transcript_end':fields.String,
'transcript_strand':fields.String,
'enst_id':fields.String,
'ref_seq':fields.String,
'identifier':fields.String,
'location':fields.String,
'identifier_lower':fields.String,
'mir_chr':fields.String,
'mir_start':fields.String,
'mir_end':fields.String,
'mir_strand':fields.String
}
snp_summary_list={
'snp_seed_list':fields.Nested(snp_line),
'snp_seed_count':fields.Integer,
'snp_mature_list':fields.Nested(snp_line),
'snp_mature_count':fields.Integer,
'snp_premir_list':fields.Nested(snp_line),
'snp_premir_count':fields.Integer,
'snp_utr3_list':fields.Nested(snp_line),
'snp_utr3_count':fields.Integer,
'snp_summary_list':fields.Nested(snp_line),
'snp_summary_count':fields.Integer,
'indel_seed_list':fields.Nested(indel_line),
'indel_seed_count':fields.Integer,
'indel_premir_list':fields.Nested(indel_line),
'indel_premir_count':fields.Integer,
'indel_utr_list':fields.Nested(indel_line),
'indel_utr_count':fields.Integer
}
"""
snp_summary_list = {
"snp_seed_list": fields.Nested(snp_line),
"snp_seed_count": fields.Integer,
"snp_premir_list": fields.Nested(snp_line),
"snp_premir_count": fields.Integer,
"snp_utr3_list": fields.Nested(snp_line),
"snp_utr3_count": fields.Integer,
"snp_mature_list": fields.Nested(snp_line),
"snp_mature_count": fields.Integer,
"snp_summary_list": fields.Nested(snp_line),
"snp_summary_count": fields.Integer,
}
class SnpSummary(Resource):
    """Cross-region SNP lookup: concatenates matches from the seed,
    pre-miRNA and 3'UTR collections into one summary list."""

    @marshal_with(snp_summary_list)
    def get(self):
        """Handle GET /api/snp_summary.

        Filters: snp_id (exact) and/or identifier (lower-cased exact match
        on identifier_lower).  No pagination -- all matches are returned.
        """
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("identifier")
        args = parser.parse_args()
        print(args)

        query = {}
        if args["snp_id"]:
            query["snp_id"] = args["snp_id"]
        if args["identifier"]:
            query["identifier_lower"] = args["identifier"].lower()

        seed_rows = mongo.db.snp_in_seed_v2.find(query)
        premir_rows = mongo.db.snp_in_premir_v2.find(query)
        utr_rows = mongo.db.snp_in_utr_v2.find(query)
        total = (
            mongo.db.snp_in_seed_v2.find(query).count()
            + mongo.db.snp_in_premir_v2.find(query).count()
            + mongo.db.snp_in_utr_v2.find(query).count()
        )
        combined = list(seed_rows) + list(premir_rows) + list(utr_rows)
        return {
            "snp_summary_list": combined,
            "snp_summary_count": total,
        }
api.add_resource(SnpSummary, "/api/snp_summary")
class SnpSummarySeed(Resource):
    """Paginated SNPs that fall inside miRNA seed regions
    (collection ``snp_in_seed_v2``).

    Cleanup: the previous revision built an aggregation pipeline
    ([$match, $skip, $limit]) and several placeholder result variables that
    were never used (the aggregate call was commented out in favour of a
    plain find); the dead locals are removed.  Behaviour is unchanged.
    """

    @marshal_with(snp_summary_list)
    def get(self):
        """Handle GET /api/snp_summary_seed.

        Filters: gene, snp_id, identifier, ldsnp (stored as string in
        ``is_ld``), gmaf (first character stripped -- presumably a
        comparator prefix such as '>'; confirm against the client).
        """
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("page")
        parser.add_argument("chrome")
        parser.add_argument("location")
        parser.add_argument("identifier")
        parser.add_argument("gmaf")
        parser.add_argument("ldsnp")
        parser.add_argument("gene")
        parser.add_argument("spe_snp_id")
        args = parser.parse_args()
        page = 1
        per_page = 15
        record_skip = (int(page) - 1) * per_page
        condition = {}
        print(args["page"])
        print(record_skip)
        print(args)
        if args["page"]:
            page = args["page"]
            record_skip = (int(page) - 1) * per_page
        if args["gene"]:
            condition["identifier_lower"] = args["gene"].lower()
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        if args["identifier"]:
            # identifier shares the key with gene, so it wins if both are given.
            condition["identifier_lower"] = args["identifier"].lower()
        if args["ldsnp"]:
            condition["is_ld"] = str(args["ldsnp"])
        if args["gmaf"] != "All" and args["gmaf"]:
            condition["alt_freq"] = {"$gt": args["gmaf"][1:]}
        snp_seed_count = mongo.db.snp_in_seed_v2.find(condition).count()
        snp_seed_list = (
            mongo.db.snp_in_seed_v2.find(condition).skip(record_skip).limit(per_page)
        )
        return {"snp_seed_list": list(snp_seed_list), "snp_seed_count": snp_seed_count}
api.add_resource(SnpSummarySeed, "/api/snp_summary_seed")
class SnpSummaryMature(Resource):
    """SNPs overlapping mature miRNA sequence: union of the 'Seed' and
    'Mature' location classes in ``snp_in_premir_v2``.

    Bug fixes:
    * the LD filter wrote the typo key ``id_ld`` -- every other endpoint and
      the ``snp_line`` field map use ``is_ld`` -- so filtering by ``ldsnp``
      silently matched nothing;
    * both location queries shared one mutable filter dict while the 'Seed'
      find cursor was still lazy, so by the time it was iterated the filter
      had been mutated to ``location == "Mature"``.  Each query now uses its
      own filter copy.
    """

    @marshal_with(snp_summary_list)
    def get(self):
        """Handle GET /api/snp_summary_mature (page size 15 per location class)."""
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("page")
        parser.add_argument("chrome")
        parser.add_argument("location")
        parser.add_argument("identifier")
        parser.add_argument("gmaf")
        parser.add_argument("ldsnp")
        parser.add_argument("mutation_rela")
        parser.add_argument("gene")
        parser.add_argument("spe_snp_id")
        args = parser.parse_args()
        page = 1
        per_page = 15
        record_skip = (int(page) - 1) * per_page
        condition = {}
        print(args["page"])
        print(record_skip)
        print(args)
        if args["page"]:
            page = args["page"]
            record_skip = (int(page) - 1) * per_page
        if args["gene"]:
            condition["identifier_lower"] = args["gene"].lower()
        if args["chrome"] != "All" and args["chrome"]:
            condition["snp_chr"] = args["chrome"]
        if args["spe_snp_id"]:
            condition["snp_id"] = args["spe_snp_id"]
        if args["snp_id"]:
            # snp_id overrides spe_snp_id when both are supplied.
            condition["snp_id"] = args["snp_id"]
        if args["identifier"]:
            condition["identifier_lower"] = args["identifier"].lower()
        if args["ldsnp"]:
            condition["is_ld"] = args["ldsnp"]  # was the dead key 'id_ld'
        if args["mutation_rela"]:
            condition["mutation_rela"] = args["mutation_rela"]
        if args["gmaf"] != "All" and args["gmaf"]:
            # First char stripped -- presumably a comparator prefix; confirm.
            condition["alt_freq"] = {"$gt": args["gmaf"][1:]}
        # Independent filter copies so the lazy cursors cannot observe later
        # mutations of the shared base condition.
        seed_condition = dict(condition, location="Seed")
        mature_condition = dict(condition, location="Mature")
        snp_seed_count = mongo.db.snp_in_premir_v2.find(seed_condition).count()
        snp_seed_list = (
            mongo.db.snp_in_premir_v2.find(seed_condition)
            .skip(record_skip)
            .limit(per_page)
        )
        snp_mature_tmp_list = (
            mongo.db.snp_in_premir_v2.find(mature_condition)
            .skip(record_skip)
            .limit(per_page)
        )
        snp_mature_tmp_count = mongo.db.snp_in_premir_v2.find(mature_condition).count()
        snp_mature_list = list(snp_seed_list) + list(snp_mature_tmp_list)
        snp_mature_count = snp_seed_count + snp_mature_tmp_count
        return {
            "snp_mature_list": list(snp_mature_list),
            "snp_mature_count": snp_mature_count,
        }
api.add_resource(SnpSummaryMature, "/api/snp_summary_mature")
class SnpSummaryPremir(Resource):
    """Paginated SNPs located in pre-miRNA hairpins (``snp_in_premir_v2``)."""

    @marshal_with(snp_summary_list)
    def get(self):
        """Handle GET /api/snp_summary_premir with optional filters."""
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        for optional_arg in (
            "page",
            "chrome",
            "location",
            "identifier",
            "gmaf",
            "ldsnp",
            "mutation_rela",
            "gene",
            "spe_snp_id",
        ):
            parser.add_argument(optional_arg)
        args = parser.parse_args()

        page_size = 15
        query = {}
        print(args)
        print(query)

        current_page = args["page"] or 1
        skip_count = (int(current_page) - 1) * page_size

        if args["gene"]:
            gene_lower = args["gene"].lower()
            # A gene term may match the mature identifier or the precursor id.
            query["$or"] = [
                {"identifier_lower": gene_lower},
                {"pre_id": gene_lower},
            ]
        if args["spe_snp_id"]:
            query["snp_id"] = args["spe_snp_id"]
        if args["snp_id"]:
            query["snp_id"] = args["snp_id"]
        if args["identifier"]:
            query["identifier_lower"] = args["identifier"].lower()
        if args["ldsnp"]:
            query["is_ld"] = args["ldsnp"]
        if args["gmaf"] and args["gmaf"] != "All":
            query["alt_freq"] = {"$gt": args["gmaf"][1:]}
        print(query)

        premir_rows = (
            mongo.db.snp_in_premir_v2.find(query).skip(skip_count).limit(page_size)
        )
        premir_total = mongo.db.snp_in_premir_v2.find(query).count()
        return {
            "snp_premir_list": list(premir_rows),
            "snp_premir_count": premir_total,
        }
api.add_resource(SnpSummaryPremir, "/api/snp_summary_premir")
class SnpSummaryUtr3(Resource):
    """Paginated SNPs located in 3'UTR target sites (``snp_in_utr_v2``).

    Cleanup: the filtered branch and the ``page <= 50000`` branch executed
    byte-identical queries, so they are merged into one condition; unused
    locals (condition_indel, pipline, placeholder result vars) are removed.
    Behaviour is unchanged.
    """

    @marshal_with(snp_summary_list)
    def get(self):
        """Handle GET /api/snp_summary_utr3."""
        parser = reqparse.RequestParser()
        parser.add_argument("snp_id", type=str)
        parser.add_argument("page")
        parser.add_argument("chrome")
        parser.add_argument("location")
        parser.add_argument("identifier")
        parser.add_argument("gmaf")
        parser.add_argument("ldsnp")
        parser.add_argument("gene")
        parser.add_argument("spe_snp_id")
        args = parser.parse_args()
        page = 1
        per_page = 15
        record_skip = (int(page) - 1) * per_page
        condition = {}
        print(args["page"])
        print(record_skip)
        print(args)
        if args["page"]:
            page = args["page"]
            record_skip = (int(page) - 1) * per_page
        if args["gene"]:
            condition["identifier_lower"] = args["gene"].lower()
        if args["snp_id"]:
            condition["snp_id"] = args["snp_id"]
        if args["identifier"]:
            condition["identifier_lower"] = args["identifier"].lower()
        if args["ldsnp"]:
            condition["is_ld"] = args["ldsnp"]
        if args["gmaf"] != "All" and args["gmaf"]:
            condition["alt_freq"] = {"$gt": args["gmaf"][1:]}
        # ``condition`` is non-empty exactly when at least one filter above
        # fired, which is what the previous explicit args re-test checked.
        if condition or int(page) <= 50000:
            snp_utr3_list = (
                mongo.db.snp_in_utr_v2.find(condition)
                .skip(record_skip)
                .limit(per_page)
            )
            snp_utr3_count = mongo.db.snp_in_utr_v2.find(condition).count()
        else:
            # Deep unfiltered page: jump via the stored per-row counter
            # (item_number, apparently precomputed -- confirm) instead of a
            # costly cursor.skip().
            condition["item_number"] = {"$gt": str(record_skip)}
            snp_utr3_list = mongo.db.snp_in_utr_v2.find(condition).limit(per_page)
            snp_utr3_count = mongo.db.snp_in_utr_v2.find(condition).count()
        print(condition)
        return {"snp_utr3_list": list(snp_utr3_list), "snp_utr3_count": snp_utr3_count}
api.add_resource(SnpSummaryUtr3, "/api/snp_summary_utr3")
# Marshalling schema for one COSMIC non-coding variant row.  Mongo keys that
# contain spaces are exposed under underscore names via ``attribute=``.
cosmic_line = {
    "ID_NCV": fields.String,
    "snp_rela": fields.String,
    "Primary_histology": fields.String(attribute="Primary histology"),
    "chrome": fields.String,
    "Mutation_somatic_status": fields.String(attribute="Mutation somatic status"),
    "Primary_site": fields.String(attribute="Primary site"),
    "PUBMED_PMID": fields.String,
    "SNP": fields.String,
    "snp_id": fields.String,
    "position": fields.String,
    "alt": fields.String,
    "ref": fields.String,
    "location": fields.String,
}
# Response envelope returned by CosmicInfo.get().
cosmic_list = {"cosmic_list": fields.Nested(cosmic_line), "data_length": fields.Integer}
class CosmicInfo(Resource):
    """COSMIC annotation lookup: paginated full summary, or all rows for a
    single snp_id."""

    @marshal_with(cosmic_list)
    def get(self):
        """Handle GET /api/cosmicinfo.

        ``search_ids == "summary"`` pages through the whole collection
        (30 rows/page); any other non-empty value is treated as an exact
        snp_id; empty yields an empty result.
        """
        parser = reqparse.RequestParser()
        parser.add_argument("search_ids", type=str)
        parser.add_argument("page")
        args = parser.parse_args()
        search_ids = args["search_ids"]
        page = args["page"]
        per_page = 30
        print(page)
        print(search_ids)
        record_skip = (int(page) - 1) * per_page
        print(search_ids)
        if search_ids == "summary":
            rows = mongo.db.cosmic_summary.find().skip(record_skip).limit(per_page)
            total = mongo.db.cosmic_summary.find().count()
        elif search_ids:
            query = {"snp_id": search_ids}
            rows = mongo.db.cosmic_summary.find(query)
            total = mongo.db.cosmic_summary.find(query).count()
        else:
            rows = {}
            total = 0
        return {"cosmic_list": list(rows), "data_length": total}
api.add_resource(CosmicInfo, "/api/cosmicinfo")
# Marshalling schema for one ClinVar variant row.
clinvar_line = {
    "chrome": fields.String,
    "position": fields.String,
    "clinvar_id": fields.String,
    "disease": fields.String,
    "snp_rela": fields.String,
    "snp_id": fields.String,
    "ref": fields.String,
    "alt": fields.String,
    "location": fields.String,
}
# Response envelope returned by ClinvarInfo.get().
clinvar_list = {
    "clinvar_list": fields.Nested(clinvar_line),
    "data_length": fields.Integer,
}
class ClinvarInfo(Resource):
    """ClinVar annotation lookup: paginated full summary, or all rows for a
    single snp_id."""

    @marshal_with(clinvar_list)
    def get(self):
        """Handle GET /api/clinvarinfo (15 rows/page in summary mode)."""
        parser = reqparse.RequestParser()
        parser.add_argument("search_ids", type=str)
        parser.add_argument("page")
        args = parser.parse_args()
        search_ids = args["search_ids"]
        per_page = 15
        skip_records = (int(args["page"]) - 1) * per_page
        if search_ids == "summary":
            rows = mongo.db.clinvar_summary.find().skip(skip_records).limit(per_page)
            total = mongo.db.clinvar_summary.find().count()
        elif search_ids:
            query = {"snp_id": search_ids}
            rows = mongo.db.clinvar_summary.find(query)
            total = mongo.db.clinvar_summary.find(query).count()
        else:
            rows = {}
            total = 0
        return {"clinvar_list": list(rows), "data_length": total}
api.add_resource(ClinvarInfo, "/api/clinvarinfo")
# One row of an enrichment CSV joined in by EnrichResult's $lookup stage.
# Column names (ONTOLOGY, GeneRatio, BgRatio, qvalue, geneID, Count) are
# remapped to snake_case response keys -- presumably clusterProfiler-style
# output; confirm against the enrichment_csv_v2 collection.
csv_table = {
    "op": fields.String(attribute="ONTOLOGY_pathway"),
    "id": fields.String(attribute="ID"),
    "description": fields.String(attribute="Description"),
    "gene_ratio": fields.String(attribute="GeneRatio"),
    "bg_ratio": fields.String(attribute="BgRatio"),
    "pvalue": fields.String,
    "padjust": fields.String,
    "qvalue": fields.String,
    "gene_id": fields.String(attribute="geneID"),
    "gene_count": fields.String(attribute="Count"),
    "csv_file": fields.String,
}
# One enrichment summary row, with its joined CSV rows nested under csv_table.
enrich_line = {
    "mirna_id": fields.String,
    "variation_id": fields.String,
    "alt": fields.String,
    "ref": fields.String,
    "enrich_type": fields.String,
    "effect": fields.String,
    "csv_file": fields.String,
    "dot_file": fields.String,
    "csv_table": fields.Nested(csv_table),
    "go_pathway_count": fields.String,
}
# Response envelope returned by EnrichResult.get().
enrich_result_list = {
    "enrich_result_list": fields.Nested(enrich_line),
    "enrich_result_count": fields.Integer,
}
class EnrichResult(Resource):
    """GO/pathway enrichment results for a miRNA and/or a variant.

    Bug fix: ``match`` was only created inside the ``mirna_id`` branch, so a
    request supplying only ``variate_id`` raised UnboundLocalError when the
    ``variation_id`` key was added.  The $match stage is now initialised
    before either filter is applied.
    """

    @marshal_with(enrich_result_list)
    def get(self):
        """Handle GET /api/enrich_result.

        At least one of mirna_id / variate_id must be supplied; otherwise an
        empty result set is returned instead of dumping the whole table.
        """
        parser = reqparse.RequestParser()
        parser.add_argument("mirna_id", type=str)
        parser.add_argument("variate_id")
        args = parser.parse_args()
        condition = {}
        match = {"$match": {}}  # always defined; filled by the filters below
        search = 0
        if args["mirna_id"]:
            search = 1
            condition["mirna_id"] = args["mirna_id"]
            match["$match"]["mirna_id"] = args["mirna_id"]
        if args["variate_id"]:
            search = 1
            condition["variation_id"] = args["variate_id"]
            match["$match"]["variation_id"] = args["variate_id"]
        # Join each summary row to its per-CSV detail rows on csv_file.
        lookup_csv = {
            "$lookup": {
                "from": "enrichment_csv_v2",
                "localField": "csv_file",
                "foreignField": "csv_file",
                "as": "csv_table",
            }
        }
        if search:
            pipline = [match, lookup_csv]
            enrich_result_list = mongo.db.enrichment_summary_v2.aggregate(pipline)
            enrich_result_count = mongo.db.enrichment_summary_v2.find(condition).count()
        else:
            enrich_result_list = {}
            enrich_result_count = 0
        return {
            "enrich_result_list": list(enrich_result_list),
            "enrich_result_count": enrich_result_count,
        }
api.add_resource(EnrichResult, "/api/enrich_result")
# One variant overlapping a single base of a mature miRNA.
var_item = {
    "var_id": fields.String,
    "ref": fields.String,
    "alt": fields.String,
    "color": fields.String,
    "count": fields.Integer,
}
# One base position of a mature miRNA with the variants stacked on it.
snp_distribute = {
    "base": fields.String,
    "pos": fields.Integer,
    "var_list": fields.Nested(var_item),
    "mature_id": fields.String,
}
# Response envelope returned by SnpDistribute.get().
snp_distribute_list = {
    "snp_distribute_list": fields.Nested(snp_distribute),
    "snp_distribute_count": fields.Integer,
}
class SnpDistribute(Resource):
    """Per-base variant distribution along one mature miRNA
    (collection ``variation_distribute_deduplicate``)."""

    @marshal_with(snp_distribute_list)
    def get(self):
        """Handle GET /api/snp_distribute; requires mirna_id, else empty."""
        parser = reqparse.RequestParser()
        parser.add_argument("mirna_id", type=str)
        args = parser.parse_args()
        mirna_id = args["mirna_id"]
        if not mirna_id:
            return {"snp_distribute_list": [], "snp_distribute_count": 0}
        query = {"mature_id": mirna_id}
        rows = mongo.db.variation_distribute_deduplicate.find(query)
        total = mongo.db.variation_distribute_deduplicate.find(query).count()
        return {
            "snp_distribute_list": list(rows),
            "snp_distribute_count": total,
        }
api.add_resource(SnpDistribute, "/api/snp_distribute")
class BIGDIndexBS(Resource):
    """Serve the raw ``index.bs`` file from the application working directory."""

    def get(self):
        # Deliver as text/plain so browsers render rather than download it.
        return send_file("index.bs", mimetype="text/plain")
api.add_resource(BIGDIndexBS, "/index.bs")
| 2.203125 | 2 |
resnet50/predict.py | TerenceChen95/Bladder-Cancer-Stage-Detection | 5 | 12764578 | <gh_stars>1-10
# Single-image inference script: load a saved ResNet50 checkpoint, run one
# bladder-cancer image through it, and save a bar plot of the top-3 stage
# probabilities to predict_img.png.
import torch
from PIL import Image, ImageFilter
import seaborn
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.autograd import Variable
device = torch.device('cuda:0')
# load model
# NOTE(review): assumes the checkpoint dict stores the full model object
# under the 'model' key -- confirm against the training script.
model = torch.load('../BEST_checkpoint_resnet50.pth.tar')['model']
model.to(device)
model.eval()
# Tumour stage label <-> class index mappings.
class_to_idx = {'T0':0, 'T1':1, 'T2': 2, 'T3':3, 'T4':4}
cat_to_name = {class_to_idx[i]: i for i in list(class_to_idx.keys())}
img_pth = '/home/tianshu/bladder-cancer/dataset/bbox_images/TCGA-ZF-AA5P40_bbox.jpg'
img_i = Image.open(img_pth)
# NOTE(review): RandomHorizontalFlip/RandomRotation are train-time
# augmentations; applying them here makes predictions non-deterministic --
# likely unintended at inference time.
data_transforms = transforms.Compose([
    transforms.Lambda(lambda image: image.convert('RGB')),
    transforms.Lambda(lambda image: image.filter(ImageFilter.EDGE_ENHANCE_MORE)),
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])
img = data_transforms(img_i)
data = img.unsqueeze_(0)  # add batch dimension
data = Variable(data)
data = data.to(device)
# NOTE(review): exp() of the forward output assumes the model emits
# log-probabilities (e.g. LogSoftmax head) -- confirm.
probs = torch.exp(model.forward(data))
top_probs, top_labs = probs.topk(3)
top_probs = top_probs.cpu().detach().numpy().tolist()[0]
top_labs = top_labs.cpu().detach().numpy().tolist()[0]
top_cat = [cat_to_name[lab] for lab in top_labs]
# Figure: input image on top, top-3 probability bars below.
plt.figure(figsize=(6, 10))
plt.subplot(2,1,1)
plt.imshow(img_i)
plt.subplot(2,1,2)
seaborn.barplot(x=top_probs, y=top_cat, color=seaborn.color_palette()[0])
plt.savefig('predict_img.png')
| 2.265625 | 2 |
transposonmapper/mapping/get_insertions_and_reads.py | EKingma/Transposonmapper | 2 | 12764579 | <gh_stars>1-10
import numpy as np
def get_insertions_and_reads(coordinates, tn_coordinates, readnumb_array):
    """Count transposon insertions and reads per gene.

    Parameters
    ----------
    coordinates : dict
        Maps gene name -> [chromosome, start position, end position, ...].
    tn_coordinates : numpy.array
        Insertion table whose second column holds absolute insertion positions.
    readnumb_array : numpy.array
        Read count for each insertion (row-aligned with ``tn_coordinates``).

    Returns
    -------
    dict
        Gene -> number of insertions found inside that gene.
    dict
        Gene -> total reads for the gene's insertions, with the single largest
        read count removed to suppress noise.
    dict
        Gene -> [chromosome, start, end, insertion positions, reads per insertion].
    """
    tn_per_gene = {}
    reads_per_gene = {}
    tn_coordinates_per_gene = {}
    positions = tn_coordinates[:, 1]
    for gene, gene_info in coordinates.items():
        chrom, start, end = gene_info[0], gene_info[1], gene_info[2]
        hits = np.where((positions >= start) & (positions <= end))
        hit_count = np.size(hits)
        gene_reads = readnumb_array[hits]
        tn_per_gene[gene] = hit_count
        # Drop the single largest value to reduce noise.
        reads_per_gene[gene] = sum(gene_reads) - max(gene_reads, default=0)
        if hit_count > 0:
            idx = hits[0]
            insert_positions = list(positions[idx[0]:idx[-1] + 1])
            tn_coordinates_per_gene[gene] = [chrom, start, end, insert_positions, list(gene_reads)]
        else:
            tn_coordinates_per_gene[gene] = [chrom, start, end, [], []]
    return tn_per_gene, reads_per_gene, tn_coordinates_per_gene
src/score.py | hudua/mlops | 7 | 12764580 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import pickle
import numpy as np
import pandas as pd
import azureml.train.automl
from sklearn.externals import joblib
from azureml.core.model import Model
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
import xgboost as xgb
# Example payloads consumed by the inference-schema decorators on run().
input_sample = pd.DataFrame(data=[{'winddirabs': 0.34244, 'winddirrel': 0.324235,'windspeedrel':1.3213}])
output_sample = np.array([0])
def init():
    """Load the registered 'Model' artifact into the module-level ``model`` used by ``run``."""
    global model
    # Resolve the registered model's on-disk path and deserialize it back
    # into a scikit-learn estimator.
    model = joblib.load(Model.get_model_path(model_name = 'Model'))
@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    """Score *data* with the loaded model.

    Returns a JSON string: ``{"result": [...]}`` on success or
    ``{"error": "<message>"}`` if prediction or serialization fails.
    """
    try:
        return json.dumps({"result": model.predict(data).tolist()})
    except Exception as exc:
        return json.dumps({"error": str(exc)})
| 2.25 | 2 |
fasttask/apps.py | huanjoyous/FasterRunner20190716 | 2 | 12764581 | from django.apps import AppConfig
class FasttaskConfig(AppConfig):
    """Django application configuration for the ``fasttask`` app."""
    name = 'fasttask'
| 0.945313 | 1 |
main.py | GokuGunZ/KingCardGame | 0 | 12764582 | import pygame
from pygame.locals import *
from pygame.event import wait
from deck import *
from game import *
from init import *
# --- Game state -------------------------------------------------------------
deck = Deck()
King = Game("Pit","Dotti","Lella","Rob")
giocata=0  # index of the current trick (0-12)
position=[0,0,0,0]  # currently selected card index for each player
carteGiocate=[[],[],[],[],[],[],[],[],[],[],[],[],[]]  # cards played, per trick
timerScomparsa=0  # frames elapsed since the 4th card of a trick was played
timerGiocata=0  # frames elapsed since the last AI play
primaCarta = None  # first card of the current trick (fixes the suit to follow)
Turno = init(deck, King)
# Initialize pygame
pygame.init()
clock = pygame.time.Clock()
# Create the window
screen = pygame.display.set_mode((800,600))
# Window settings
pygame.display.set_caption("King")
# BUGFIX: raw string avoids the invalid "\i" escape sequence that triggered a
# DeprecationWarning (SyntaxWarning on newer Pythons); the value is unchanged.
icon = pygame.image.load(r"img\icon.png")
pygame.display.set_icon(icon)
font = pygame.font.SysFont("monospace", 16)
# Function to render a player's hand on screen
def mostraMano(self, ypos, sel):
    """Draw a player's hand horizontally centred at *ypos*.

    The card at index *sel* is raised by 35 px to show the current selection.
    """
    x = 400 - len(self.Mano) * 50 / 2
    for idx, card in enumerate(self.Mano):
        y = ypos - 35 if idx == sel else ypos
        screen.blit(card.img, (x, y))
        x += 50
def primaGiocata(Turno,giocata):
    """Play the first card of a trick.

    When the human player (index 0) leads, only the selected card and the
    advanced turn are returned; the caller records the play itself.  Otherwise
    an AI player picks a random card, which is appended to ``carteGiocate`` and
    removed from its hand.

    NOTE(review): the two branches return different arities (2 vs 3 values);
    every caller must check ``King.Primo`` before unpacking -- confirm this
    invariant holds at all call sites.
    """
    if King.Primo == 0 :
        primaCarta=King.g1.Mano[position[0]]
        Turno=(Turno+1)%4
        return primaCarta, Turno
    # AI lead: pick a random card from the leading player's hand.
    position[King.Primo]=random.randint(0,len(King.allg[King.Primo].Mano)-1)
    primaCarta=King.allg[King.Primo].Mano[position[King.Primo]]
    carteGiocate[giocata].append(primaCarta)
    King.allg[Turno].Mano.pop(position[Turno])
    Turno=(Turno+1)%4
    return primaCarta, Turno, giocata
def altraGiocata(Turno, primaCarta, position):
    """Make the AI player at index *Turno* play a random legal card.

    Cards are re-drawn until one satisfies the follow-suit rule enforced by
    ``King.checkSuit``.  Returns the index of the next player.
    """
    hand = King.allg[Turno].Mano
    while True:
        position[Turno] = random.randint(0, len(hand) - 1)
        chosen = hand[position[Turno]]
        if not King.checkSuit(position, primaCarta, Turno):
            break
    carteGiocate[giocata].append(chosen)
    hand.pop(position[Turno])
    return (Turno + 1) % 4
def checkVincitore(primaCarta, giocata, carteGiocate, Primo):
    """Return the index of the player who won the trick.

    The winner holds the highest-valued card matching the lead card's suit.
    """
    winning_card = primaCarta
    winner = Primo
    for offset in (1, 2, 3):
        candidate = carteGiocate[giocata][offset]
        # Only cards of the lead suit can beat the current winner.
        if candidate.suit == winning_card.suit and winning_card.value < candidate.value:
            winning_card = candidate
            winner = (offset + Primo) % 4
    print("La mano è stata vinta da {} con la carta ".format(King.allg[winner].Nome), end="")
    winning_card.show()
    return winner
def mostraGiocata(giocata):
    """Render the cards already played in the current trick at each seat's table spot."""
    seat_coords = [(375, 310), (425, 230), (375, 150), (325, 230)]
    for offset, card in enumerate(carteGiocate[giocata]):
        screen.blit(card.img, seat_coords[(King.Primo + offset) % 4])
def stampaUHD():
    """Draw the HUD: each player's name and current score next to their seat."""
    name_pos = [(350, 540), (600, 300), (350, 40), (100, 300)]
    score_pos = [(350, 556), (600, 316), (350, 56), (100, 316)]
    fg, bg = (0, 0, 0), (160, 160, 160)
    for player, npos, spos in zip(King.allg, name_pos, score_pos):
        screen.blit(font.render('{}'.format(player.Nome), 1, fg, bg), npos)
        screen.blit(font.render('Punti: {}'.format(player.Punti), 1, fg, bg), spos)
# Game loop
running = True
while running:
    screen.fill((0,255,0))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        # Move the selection between the cards in hand (wraps around)
        if (event.type == pygame.KEYDOWN) & (len(carteGiocate[giocata]) < 4):
            if (event.key == pygame.K_LEFT) :
                if position[0] > 0:
                    position[0]-=1
                else:
                    position[0]=len(King.g1.Mano)-1
            elif (event.key == pygame.K_RIGHT) :
                if (position[0]<len(King.g1.Mano)-1) :
                    position[0]+=1
                else :
                    position[0]= 0
            # ENTER: human player plays the selected card
            elif (event.key == pygame.K_RETURN):
                if (Turno == 0):
                    if (King.Primo == 0):
                        primaCarta, Turno = primaGiocata(Turno, giocata)
                        carteGiocate[giocata].append(King.g1.Mano[position[0]])
                        King.g1.Mano.pop(position[0])
                        position[0]=0
                    else:
                        if King.checkSuit(position, primaCarta, 0):
                            print("Devi rispondere a seme")
                            continue
                        carteGiocate[giocata].append(King.g1.Mano[position[0]])
                        King.g1.Mano.pop(position[0])
                        position[0]=0
                        Turno += 1
                else :
                    print('Non è il tuo turno')
            elif (event.key == pygame.K_ESCAPE):
                pygame.quit()
                quit()
            # Press a key to make the next player play
            # First player of the trick
            elif (event.key == pygame.K_p and len(carteGiocate[giocata]) == 0):
                if (Turno == 0 ):
                    print('è il tuo turno')
                    continue
                primaCarta, Turno, giocata = primaGiocata(Turno, giocata)
            # Other players
            elif (event.key == pygame.K_n):
                if (Turno == 0):
                    print('è il tuo turno')
                    continue
                if (primaCarta == None):
                    print('deve giocare il primo di mano')
                    continue
                Turno = altraGiocata(Turno, primaCarta, position)
            # Press S to print debug state
            elif (event.key == pygame.K_s):
                print(carteGiocate)
                print(giocata)
            # Press T to print the selected card
            elif (event.key == pygame.K_t):
                King.allg[0].Mano[position[0]].show()
    # Game progression: AI players play automatically on a timer
    if (Turno != 0):
        if timerGiocata>10:
            if (len(carteGiocate[giocata]) == 0):
                primaCarta, Turno, giocata = primaGiocata(Turno, giocata)
            elif (len(carteGiocate[giocata]) < 4):
                Turno = altraGiocata(Turno, primaCarta, position)
            timerGiocata = 0
        timerGiocata += 1
    # End-of-trick check: after a short delay, score the trick and reset
    if (len(carteGiocate[giocata])>3):
        if timerScomparsa>16 :
            King.Primo=checkVincitore(primaCarta,giocata, carteGiocate,King.Primo)
            King.allg[King.Primo].Punti+=1
            giocata+=1
            Turno=King.Primo
            timerScomparsa=0
            primaCarta=None
            King.contaSemi()
            King.punteggio()
        timerScomparsa+=1
    # All 13 tricks played: deal a new hand and reset the trick state
    if (giocata == 13):
        Turno = init(deck, King)
        giocata=0
        position=[0,0,0,0]
        carteGiocate=[[],[],[],[],[],[],[],[],[],[],[],[],[]]
        timerScomparsa=0
        primaCarta = None
    mostraMano(King.g1,450,position[0])
    stampaUHD()
    # mostraMano(King.g2,50,position[0])
    # mostraMano(King.g3,100,position[0])
    # mostraMano(King.g4,150,position[0])
    mostraGiocata(giocata)
    pygame.display.update()
    clock.tick(10)
automaton/plugins/translate.py | nemec/Automaton | 6 | 12764583 | <reponame>nemec/Automaton
from urllib2 import urlopen, URLError
from urllib import urlencode
import unicodedata
import automaton.lib.plugin
from automaton.lib.data.abbreviations import language_codes
# pylint: disable-msg=W0101
# Abort plugin loading immediately: the free Google Translate API this module
# relied on was discontinued, so everything below is effectively dead code
# until the plugin is ported to the keyed API.
raise automaton.lib.plugin.PluginLoadError(
    "Translations from google are now a paid service, this plugin must "
    "be rewritten to use their new API keys.")
def platform():
  """Return the list of platforms the plugin is available for."""
  supported_platforms = ('linux', 'mac', 'windows')
  return [name for name in supported_platforms]
class Translate(automaton.lib.plugin.PluginInterface):
  """Translate text from one language to another by name or language code."""

  def __init__(self, registrar):
    super(Translate, self).__init__(registrar)
    # Register a single "translate" service understanding text/from/to slots.
    registrar.register_service(
      "translate", self.execute,
      grammar={
        "text": [],
        "from": ["from"],
        "to": ["to"],
      },
      namespace=__name__)

  def disable(self):
    """Disable all of Translate's services."""
    self.registrar.unregister_service("translate", namespace=__name__)

  def execute(self, **kwargs):
    """Translate text via the (legacy) Google AJAX language API.

    The result is normalized to ASCII before being returned.

    Keyword arguments:
    from -- source language, either the language name or the language code
    to -- destination language, either the name or language code
    text -- the text to translate
    """
    # API reference (historical):
    # http://code.google.com/apis/ajaxlanguage/documentation/#Examples
    if "to" not in kwargs:
      return "Please provide a destination language."
    if "from" not in kwargs:
      return "Please provide the language of the message."

    def to_code(name):
      # Accept either a full language name or an ISO language code.
      return language_codes[name] if name in language_codes.long else name

    langpair = '{0}|{1}'.format(to_code(kwargs['from']), to_code(kwargs['to']))
    text = kwargs["text"]

    base_url = 'http://ajax.googleapis.com/ajax/services/language/translate?'
    params = urlencode((('v', 1.0),
                        ('q', text),
                        ('langpair', langpair),))
    url = base_url + params
    try:
      content = urlopen(url).read()
    except URLError:
      return "Could not contact translation server."

    # Crude extraction of the translatedText field from the JSON response.
    marker = '"translatedText":"'
    start_idx = content.find(marker) + len(marker)
    translation = content[start_idx:]
    translation = translation[:translation.find('"}, "')]
    return unicodedata.normalize('NFKD',
      unicode(translation, "utf-8")).encode('ascii', 'ignore')
| 2.796875 | 3 |
dynamic_subdomains/templatetags/dynamic_subdomains.py | Oksamies/django-dynamic-subdomains-ai | 6 | 12764584 | <reponame>Oksamies/django-dynamic-subdomains-ai
from django import template
from django.conf import settings
from django.template import TemplateSyntaxError
from django.utils.encoding import smart_str
from django.template.defaulttags import kwarg_re
from ..reverse import reverse_crossdomain
register = template.Library()

@register.tag
def domain_url(parser, token, mangle=True):
    """Template tag: ``{% domain_url view [args] [on domain [domain args]] %}``.

    Reverses *view* on *domain* (or ``settings.SUBDOMAIN_DEFAULT`` when no
    ``on <domain>`` clause is present) and returns the cross-domain URL.

    Raises:
        TemplateSyntaxError: when no view is given or 'on' has no domain after it.
    """
    bits = token.split_contents()
    tag_name = bits[0]
    if len(bits) < 2:
        raise TemplateSyntaxError("'%s' takes at least 1 argument" % tag_name)

    view = parser.compile_filter(bits[1])
    bits = bits[1:] # Strip off the tag name; bits[0] is now the view token

    try:
        pivot = bits.index('on')

        try:
            domain = bits[pivot + 1]
        except IndexError:
            # BUGFIX: report the tag name here; the original used bits[0],
            # which after the slice above is the view token, not the tag name.
            raise TemplateSyntaxError(
                "'%s' arguments must include a domain after 'on'" % tag_name
            )

        view_args, view_kwargs = parse_args_kwargs(parser, bits[1:pivot])
        domain_args, domain_kwargs = parse_args_kwargs(parser, bits[pivot+2:])

    except ValueError:
        # No "on <subdomain>" was specified so use the default domain
        domain = settings.SUBDOMAIN_DEFAULT
        view_args, view_kwargs = parse_args_kwargs(parser, bits[1:])
        domain_args, domain_kwargs = (), {}

    return DomainURLNode(
        domain, view, domain_args, domain_kwargs, view_args, view_kwargs, mangle
    )
@register.tag
def domain_url_no_mangle(parser, token):
    """Variant of ``{% domain_url %}`` that disables domain-label mangling."""
    return domain_url(parser, token, mangle=False)
class DomainURLNode(template.Node):
    """Template node that resolves its filter expressions at render time and
    reverses a cross-domain URL via ``reverse_crossdomain``."""

    def __init__(self, subdomain, view, subdomain_args, subdomain_kwargs, view_args, view_kwargs, mangle):
        self.subdomain = subdomain
        self.view = view
        self.subdomain_args = subdomain_args
        self.subdomain_kwargs = subdomain_kwargs
        self.view_args = view_args
        self.view_kwargs = view_kwargs
        self.mangle = mangle

    def render(self, context):
        def resolve_all(args, kwargs):
            # Evaluate every stored FilterExpression against the current context.
            resolved_args = [expr.resolve(context) for expr in args]
            resolved_kwargs = {smart_str(key, 'ascii'): expr.resolve(context)
                               for key, expr in kwargs.items()}
            return resolved_args, resolved_kwargs

        sub_args, sub_kwargs = resolve_all(self.subdomain_args, self.subdomain_kwargs)
        view_args, view_kwargs = resolve_all(self.view_args, self.view_kwargs)

        return reverse_crossdomain(
            self.subdomain,
            self.view.resolve(context),
            sub_args,
            sub_kwargs,
            view_args,
            view_kwargs,
            self.mangle,
        )
def parse_args_kwargs(parser, bits):
    """Split template-tag tokens into positional and keyword filter expressions.

    Raises TemplateSyntaxError for tokens that do not match ``kwarg_re``.
    """
    args, kwargs = [], {}
    for token in bits:
        match = kwarg_re.match(token)
        if match is None:
            raise TemplateSyntaxError("Malformed arguments to domain_url tag")
        name, value = match.groups()
        compiled = parser.compile_filter(value)
        if name:
            kwargs[name] = compiled
        else:
            args.append(compiled)
    return args, kwargs
| 2.25 | 2 |
event_handlers/bot_computer_status_event_handler.py | sly9/VoyagerTelegramBot | 12 | 12764585 | <gh_stars>10-100
import datetime
import os
import threading
from collections import deque
from time import sleep
from typing import Dict
import psutil
from data_structure.special_battery_percentage import SpecialBatteryPercentageEnum, MemoryUsage
from event_emitter import ee
from event_handlers.voyager_event_handler import VoyagerEventHandler
from event_names import BotEvent
class BotComputerStatusEventHandler(VoyagerEventHandler):
    """
    An event handler which monitors the battery status, and bot memory usages of the bot computer.
    If Voyager and bot are running on the same computer, Voyager memory usages are also monitored.
    """

    def __init__(self, config):
        super().__init__(config=config)
        self.thread = None  # daemon polling thread, created lazily by start_gathering()
        self.last_battery_warning_timestamp = datetime.datetime.now()
        self.battery_check_enabled = True  # kept for compatibility; not consulted yet
        self.last_battery_status = None  # previous psutil battery snapshot, for change detection
        # 1 data point per 10 sec duration => 1 day of usage history.
        self.memory_usage_history = deque(maxlen=8640)
        self.start_gathering()

    def interested_event_names(self):
        """Voyager events this handler subscribes to."""
        return ['LogEvent', 'ShotRunning', 'ControlData']

    def handle_event(self, event_name: str, message: Dict):
        """Handle an incoming Voyager event.

        Emits a NOT_MONITORED battery update when local monitoring is
        disabled; otherwise refreshes the battery status and scans log events
        for out-of-memory markers.
        """
        if not self.config.monitor_local_computer:
            ee.emit(BotEvent.UPDATE_BATTERY_PERCENTAGE.name,
                    battery_percentage=SpecialBatteryPercentageEnum.NOT_MONITORED, update=False)
            return

        self.check_battery_status()

        # Check log content and see if there's an OOM exception.
        if event_name == 'LogEvent':
            text = message['Text'].lower()
            if 'insufficient memory' in text or 'outofmemoryexception' in text:
                self.maybe_add_memory_datapoint(oom_observed=True)

    def start_gathering(self):
        """Start the background polling thread (idempotent)."""
        if self.thread:
            return
        self.thread = threading.Thread(target=self.run_loop)
        self.thread.daemon = True
        self.thread.start()

    def run_loop(self):
        """Poll memory every 10 seconds and battery at most once per minute, forever."""
        while True:
            self.maybe_add_memory_datapoint()
            try:
                if datetime.datetime.now() - self.last_battery_warning_timestamp > datetime.timedelta(minutes=1):
                    self.check_battery_status()
                    self.last_battery_warning_timestamp = datetime.datetime.now()
            except Exception:
                # Best-effort: battery polling must never kill the monitoring thread.
                pass
            sleep(10)

    def maybe_add_memory_datapoint(self, oom_observed: bool = False):
        """Sample VMS/RSS (in MB) for Voyager and this bot, record and emit an update."""
        voyager_vms_usage = 0
        voyager_rss_usage = 0
        bot_vms_usage = 0
        bot_rss_usage = 0
        bot_pid = os.getpid()
        mb = 1024 * 1024
        for proc in psutil.process_iter():
            try:
                # Fetch process details as dict.
                pinfo = proc.as_dict(attrs=['pid', 'name', 'username'])
                if pinfo['name'] == 'Voyager2.exe':
                    voyager_vms_usage = proc.memory_info().vms / mb
                    voyager_rss_usage = proc.memory_info().rss / mb
                if pinfo['pid'] == bot_pid:
                    bot_vms_usage = proc.memory_info().vms / mb
                    bot_rss_usage = proc.memory_info().rss / mb
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # Processes can vanish between iteration and inspection; skip them.
                pass

        timestamp = datetime.datetime.now().timestamp()
        memory_usage = MemoryUsage(timestamp=timestamp, voyager_vms=voyager_vms_usage, voyager_rss=voyager_rss_usage,
                                   bot_vms=bot_vms_usage, bot_rss=bot_rss_usage, oom_observed=oom_observed)
        self.memory_usage_history.append(memory_usage)
        ee.emit(BotEvent.UPDATE_MEMORY_USAGE.name,
                memory_history=self.memory_usage_history, memory_usage=memory_usage)

    def check_battery_status(self):
        """Emit a battery update whenever the charge level or AC state changes.

        Sends a Telegram warning while running on battery power.
        """
        battery = psutil.sensors_battery()
        if battery and self.last_battery_status and \
                battery.percent == self.last_battery_status.percent and \
                battery.power_plugged == self.last_battery_status.power_plugged:
            # Nothing really changed since the last snapshot.
            return
        if not battery:
            # No battery present -- nothing to watch for.
            return

        if battery.power_plugged:
            ee.emit(BotEvent.UPDATE_BATTERY_PERCENTAGE.name,
                    battery_percentage=SpecialBatteryPercentageEnum.ON_AC_POWER, update=True)
            # NOTE(review): this message is computed but never sent anywhere --
            # confirm whether an "AC restored" Telegram notification was intended.
            battery_msg = 'Power is back on AC again'
        else:
            if battery.percent > 30:
                battery_msg = f'Battery ({battery.percent}%)'
            else:
                battery_msg = f'!!Critical battery ({battery.percent}%)!!'
            telegram_message = f'<b><pre>{battery_msg}</pre></b>'
            # BUGFIX: report the actual charge level while discharging; this
            # previously emitted SpecialBatteryPercentageEnum.ON_AC_POWER, so
            # the UI never saw the real percentage while on battery.
            ee.emit(BotEvent.UPDATE_BATTERY_PERCENTAGE.name,
                    battery_percentage=battery.percent, update=True)
            ee.emit(BotEvent.SEND_TEXT_MESSAGE.name, telegram_message)
        self.last_battery_status = battery
| 2.296875 | 2 |
python-decompresser/formatter.py | maxwellb/rdm-native-value-formatters | 12 | 12764586 | <gh_stars>10-100
from abc import ABC, abstractmethod
import binascii
import json
import logging
import argparse
import base64
import time
import os
import sys
import gzip
import lz4.block
import lzma
# Snappy is an optional dependency; degrade gracefully when it is missing.
try:
    import snappy
    SNAPPY_SUPPORT = True
except ImportError:
    SNAPPY_SUPPORT = False
# Make the parent directory importable when run as a standalone script.
sys.path.append(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
__version__ = '0.0.1'
DESCRIPTION = 'Python native decompressing formatter with gzip, lzma, lz4 ' \
              'and snappy support'
# Seconds to wait for a base64 value on stdin before giving up.
TIMEOUT = 5
def get_arg_parser(description, version, actions):
    """Build an ArgumentParser with one positional 'action' argument limited to *actions*."""
    full_description = '{} {}'.format(description, version)
    parser = argparse.ArgumentParser(description=full_description)
    parser.add_argument(
        'action',
        choices=actions,
        help='Available actions: {}'.format(actions),
    )
    return parser
def wait_for_stdin_value(timeout=TIMEOUT):
    """Read a base64 value from stdin and return the decoded bytes.

    Retries every 0.1 s until *timeout* seconds have elapsed; returns None
    when no decodable value arrived in time.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            encoded = sys.stdin.read()
        except Exception:
            time.sleep(0.1)
            continue
        try:
            return base64.b64decode(encoded)
        except Exception:
            time.sleep(0.1)
    return None
class BaseFormatter(ABC):
    """Skeleton for RDM value formatters driven by a CLI 'action' argument.

    Subclasses provide ``description``, ``version`` and ``format``; the base
    class handles argument parsing, base64 stdin input and JSON stdout output.
    """
    ACTION_DECODE = 'decode'
    ACTION_INFO = 'info'
    ACTION_VALIDATE = 'validate'
    actions = (ACTION_DECODE, ACTION_INFO, ACTION_VALIDATE)

    def __init__(self, debug=True):
        self.logger = logging.getLogger()
        level = logging.DEBUG if debug else logging.INFO
        self.logger.setLevel(level)

    @property
    @abstractmethod
    def description(self):
        return DESCRIPTION

    @property
    @abstractmethod
    def version(self):
        return __version__

    @abstractmethod
    def format(self, value):
        raise NotImplementedError()

    def process_error(self, message):
        """Report *message* per the active action's protocol and exit with status 2."""
        if self.action == self.ACTION_VALIDATE:
            print(json.dumps({'valid': False, 'error': message}))
        else:
            self.logger.error(message)
        sys.exit(2)

    def validate_action(self, action):
        """Ensure *action* is supported (exit(1) otherwise) and store it on self."""
        if action not in self.actions:
            self.logger.error('Error: Invalid action {}'.format(action))
            sys.exit(1)
        self.action = action

    @staticmethod
    def valid_output():
        """Print the JSON payload meaning 'value is valid'."""
        print(json.dumps({'valid': True, 'error': ''}))

    def info_output(self):
        """Print formatter metadata (version and description) as JSON."""
        print(json.dumps({'version': self.version, 'description': self.description}))

    @staticmethod
    def formatted_output(output):
        """Print the formatted value as a read-only plain-text JSON payload."""
        def as_payload(text):
            return {'output': text, 'read-only': True, 'format': 'plain_text'}

        if hasattr(output, 'decode'):
            output = output.decode()
        try:
            serialized = json.dumps(as_payload(output))
        except (TypeError, OverflowError):
            # Fall back to repr() for values json cannot serialize.
            serialized = json.dumps(as_payload(repr(output)))
        print(serialized)

    def main(self, *args):
        """CLI entry point: parse args, run the requested action, print the result."""
        parser = get_arg_parser(description=self.description,
                                version=self.version,
                                actions=self.actions)
        parsed = parser.parse_args(args) if args else parser.parse_args()
        self.validate_action(parsed.action)

        if self.action == self.ACTION_INFO:
            return self.info_output()

        try:
            value = wait_for_stdin_value()
        except binascii.Error as e:
            return self.process_error('Cannot decode value: {}'.format(e))

        if not value:
            return self.process_error('No value to format.')

        try:
            output = self.format(value=value)
        except Exception as e:
            return self.process_error('Cannot format value: {}'.format(e))

        if self.action == self.ACTION_VALIDATE:
            return self.valid_output()

        return self.formatted_output(output)
class DecompressingFormatter(BaseFormatter):
    """Formatter that auto-detects gzip/xz/snappy payloads and decompresses
    them, falling back to lz4 block decompression."""
    description = DESCRIPTION
    version = __version__

    def format(self, value):
        """Decompress *value*, selecting the codec from its magic bytes."""
        def is_gzip(value):
            # gzip magic (\x1f\x8b) followed by the deflate method byte.
            return len(value) >= 3 and value[:3] == b'\x1f\x8b\x08'

        def is_lzma(value):
            # BUGFIX: compare only the 6-byte xz stream header magic. The
            # original compared 26 bytes that included block-header content,
            # so most real xz payloads were not recognized and fell through
            # to the lz4 decoder.
            return len(value) >= 6 and value[:6] == b'\xfd7zXZ\x00'

        def is_snappy(value):
            # BUGFIX: guard on SNAPPY_SUPPORT. Without the snappy module
            # installed, calling snappy.isValidCompressed raised NameError
            # for every non-gzip/xz value.
            return SNAPPY_SUPPORT and snappy.isValidCompressed(value)

        try:
            if is_gzip(value):
                output = gzip.decompress(value)
            elif is_lzma(value):
                output = lzma.decompress(value)
            elif is_snappy(value):
                output = snappy.uncompress(value)
            else:
                output = lz4.block.decompress(value)
            return output
        except OSError as e:
            return self.process_error('Cannot decompress value: {}'.format(e))
# Run the formatter when executed as a standalone script.
if __name__ == "__main__":
    DecompressingFormatter().main()
| 2.203125 | 2 |
Previous_State_On_Repo/StrokeRecoveryOffline/Data/code/combineWordHinEngBan.py | rohun-tripati/pythonRepo | 1 | 12764587 | #!/usr/bin/env python
import netcdf_helpers
from scipy import *
from optparse import OptionParser
from xml.dom.minidom import parse
import sys, time, os
# import tamil_extract as TE
# import bangla_extract as BE
import hindi_extract as HE
import english_extract as EE
import engword_extract as EWE
import hinword_extract as HWE
import banword_extract as BWE
#command line options
parser = OptionParser()
(options, args) = parser.parse_args()
if (len(args)<1):
    print "usage: test/train/val"
    sys.exit(2)
function = args [0]
if not function in ["test", "train", "val"]:
    print "usage: test/train/val"
    sys.exit(2)
# Accumulators shared (and mutated in place) by every *_extract module below.
labels = ["hindi", "english", "bangla"]
seqDims = []
seqLengths = []
targetStrings = []
wordTargetStrings = []
seqTags = []
inputs = []
#Here begins the module functional call for each of the respective indic scripts
# NOTE(review): the means/stds below are hard-coded per-script normalisation
# statistics -- confirm they still match the datasets being combined.
inputhinword = []
HWE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputhinword, True)
inputMeans = array([ 43.5716277755 , 72.728701988 , 0.0151754027826 ])
inputStds = array([ 27.3972575236 , 51.9577234449 , 0.122250194 ])
inputhinword = ((array(inputhinword)-inputMeans)/inputStds).tolist()
inputs.extend(inputhinword)
inputbanword = []
BWE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputbanword, True)
inputMeans = array([ 39.3020429273 , 64.3542876398 , 0.0174984915094 ])
inputStds = array([ 24.220588125 , 45.5887552493 , 0.131119389505 ])
inputbanword = ((array(inputbanword)-inputMeans)/inputStds).tolist()
inputs.extend(inputbanword)
inputengword = []
EWE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputengword, True)
inputMeans = array([ 44.2994163835 , 68.7957830052 , 0.01821566173 ])
inputStds = array([ 24.4149708067 , 70.159852713 , 0.133730517825 ])
inputengword = ((array(inputengword)-inputMeans)/inputStds).tolist()
inputs.extend(inputengword)
# inputenglish = []
# EE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputenglish, True)
# inputMeans = array([ 21.5401437051 , 19.095532646 , 0.0197438300531 ])
# inputStds = array([ 15.2712299058 , 14.35175744 , 0.139118694746 ])
# inputenglish = ((array(inputenglish)-inputMeans)/inputStds).tolist()
# inputs.extend(inputenglish)
# inputhindi = []
# HE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputhindi, True)
# inputMeans = array([ 116.181545791 , 117.589252273 , 0.0311165710348 ])
# inputStds = array([ 95.3247873525 , 86.246804645 , 0.173632744728 ])
# inputhindi = ((array(inputhindi)-inputMeans)/inputStds).tolist()
# inputs.extend(inputhindi)
# inputbangla = []
# BE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputbangla, True)
# inputMeans = array([ 26.1452919339 , 38.2040724491 , 0.0170435369558 ])
# inputStds = array([ 19.3466051312 , 23.8909551492 , 0.129433592254 ])
# inputbangla = ((array(inputbangla)-inputMeans)/inputStds).tolist()
# inputs.extend(inputbangla)
# inputtamil = []
# TE.main(function, labels, seqDims, seqLengths, targetStrings, wordTargetStrings, seqTags, inputtamil, True)
# inputMeans = array([ 57.8497793792 , 78.1069514634 , 0.00850420629953 ])
# inputStds = array([ 32.9270365136 , 59.0435324226 , 0.0918252948525 ])
# inputtamil = ((array(inputtamil)-inputMeans)/inputStds).tolist()
# inputs.extend(inputtamil)
# print inputs
# print len(labels), labels
# print labels
#create a new .nc file
ncFilename = "combine" + function + ".nc"
file = netcdf_helpers.NetCDFFile(ncFilename, 'w')
#create the dimensions
netcdf_helpers.createNcDim(file,'numSeqs',len(seqLengths))
netcdf_helpers.createNcDim(file,'numTimesteps',len(inputs))
netcdf_helpers.createNcDim(file,'inputPattSize',len(inputs[0]))
netcdf_helpers.createNcDim(file,'numDims',1)
netcdf_helpers.createNcDim(file,'numLabels',len(labels))
#create the variables
netcdf_helpers.createNcStrings(file,'seqTags',seqTags,('numSeqs','maxSeqTagLength'),'sequence tags')
netcdf_helpers.createNcStrings(file,'labels',labels,('numLabels','maxLabelLength'),'labels')
netcdf_helpers.createNcStrings(file,'targetStrings',targetStrings,('numSeqs','maxTargStringLength'),'target strings')
netcdf_helpers.createNcStrings(file,'wordTargetStrings',wordTargetStrings,('numSeqs','maxWordTargStringLength'),'word target strings')
netcdf_helpers.createNcVar(file,'seqLengths',seqLengths,'i',('numSeqs',),'sequence lengths')
netcdf_helpers.createNcVar(file,'seqDims',seqDims,'i',('numSeqs','numDims'),'sequence dimensions')
netcdf_helpers.createNcVar(file,'inputs',inputs,'f',('numTimesteps','inputPattSize'),'input patterns')
#write the data to disk
print "closing file", ncFilename
file.close()
| 2.296875 | 2 |
apps/carts/migrations/0003_cart_products.py | feliamunda/Django_Store | 0 | 12764588 | # Generated by Django 3.0.3 on 2020-02-07 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the M2M ``products`` field (through CartProducts) to Cart."""

    dependencies = [
        ('products', '0001_initial'),
        ('carts', '0002_cart_user'),
    ]

    operations = [
        migrations.AddField(
            model_name='cart',
            name='products',
            field=models.ManyToManyField(through='carts.CartProducts', to='products.Product'),
        ),
    ]
| 1.8125 | 2 |
person/models.py | rgrlinux/nss | 0 | 12764589 | from django.db import models
from django.db.models import base
from django.db.models.deletion import CASCADE
from django.db.models.expressions import F
from localflavor.br.models import BRCPFField
from localflavor.br.validators import BRCPFValidator
class PersonType(models.Model):
    """Lookup table for the kind/category of a Person."""
    id = models.AutoField(primary_key=True, editable=False)
    name = models.CharField(max_length=32, blank=False, null=False)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['name']
class PersonMediaType(models.Model):
    """Lookup table for the kind of media attached to a Person.

    NOTE(review): not referenced by PersonMedia below -- confirm whether a
    type foreign key was intended there.
    """
    id = models.AutoField(primary_key=True, editable=False)
    name = models.CharField(max_length=32, blank=False, null=False)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['name']
class Person(models.Model):
    """A person record with CPF (Brazilian taxpayer id), contact and company data."""
    id = models.AutoField(primary_key=True, editable=False)
    name = models.CharField(max_length=32, blank=False, null=False)
    type = models.ForeignKey(PersonType, on_delete=models.CASCADE, related_name='type', blank=False, null=False)
    # Brazilian CPF; format validation is provided by django-localflavor.
    cpf = BRCPFField()
    phone = models.CharField(max_length=15, null=True, blank=True)
    company = models.CharField(max_length=32, null=False, blank=False)
    # Automatically refreshed on every save.
    last_update = models.DateField(auto_now=True, null=False, blank=False)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['name']
class PersonMedia(models.Model):
    """Media payload (stored as text) attached to a Person.

    NOTE(review): ``related_name='person'`` on ``person_id`` means reverse
    access is ``person.person`` -- confirm that naming is intended.
    """
    id = models.AutoField(primary_key=True, editable=False)
    person_id = models.ForeignKey(Person, on_delete=models.CASCADE, related_name='person', null=True, blank=True)
    object_media = models.TextField(null=False, blank=False)
class PersonAudit(models.Model):
    """Audit trail of CPF changes for a Person (old vs new value, with timestamp)."""
    id = models.AutoField(primary_key=True, editable=False)
    person_id = models.ForeignKey(Person,on_delete=models.CASCADE, null=False, blank=False, editable=False)
    cpf_new = models.CharField(max_length=14, null=False, blank=False, editable=False)
    cpf_old = models.CharField(max_length=14, null=True, blank=False, editable=False)
    last_update = models.DateField(auto_now=True, null=False, blank=False, editable=False)
bindings/python/test/time/test_time.py | robinpdm/open-space-toolkit-physics | 7 | 12764590 | <filename>bindings/python/test/time/test_time.py<gh_stars>1-10
################################################################################################################################################################
# @project Open Space Toolkit ▸ Physics
# @file bindings/python/test/time/test_time.py
# @author <NAME> <<EMAIL>>
# @license Apache License 2.0
################################################################################################################################################################
import pytest
from ostk.core.types import String
from ostk.physics.time import Time
################################################################################################################################################################
def test_time_constructors ():
    """Smoke test: Time is constructible from hour/minute/second."""
    assert Time(0, 0, 0) is not None

################################################################################################################################################################

def test_time_undefined ():
    """Smoke test: the undefined-Time factory returns an object."""
    assert Time.undefined() is not None

################################################################################################################################################################

def test_time_midnight ():
    """Smoke test: the midnight factory returns an object."""
    assert Time.midnight() is not None

################################################################################################################################################################

def test_time_noon ():
    """Smoke test: the noon factory returns an object."""
    assert Time.noon() is not None
################################################################################################################################################################
def test_time_parse ():
    """Time.parse accepts python str and ostk String in every supported format."""

    ## Using python strings

    # Testing with default format argument (Time::Format::Undefined)
    time: Time = Time.parse('00:00:00')

    assert time is not None
    assert isinstance(time, Time)
    assert time.is_defined()

    # Testing with Time.Format.Standard
    time: Time = Time.parse('00:00:00', Time.Format.Standard)

    assert time is not None
    assert isinstance(time, Time)
    assert time.is_defined()

    # Testing with Time.Format.ISO8601
    time: Time = Time.parse('00:00:00', Time.Format.ISO8601)

    assert time is not None
    assert isinstance(time, Time)
    assert time.is_defined()

    ## Using String class

    # Testing with default format argument (Time::Format::Undefined)
    time: Time = Time.parse(String('00:00:00'))

    assert time is not None
    assert isinstance(time, Time)
    assert time.is_defined()

    # Testing with Time.Format.Standard
    time: Time = Time.parse(String('00:00:00'), Time.Format.Standard)

    assert time is not None
    assert isinstance(time, Time)
    assert time.is_defined()

    # Testing with Time.Format.ISO8601
    time: Time = Time.parse(String('00:00:00'), Time.Format.ISO8601)

    assert time is not None
    assert isinstance(time, Time)
    assert time.is_defined()
################################################################################################################################################################
def test_time_operators ():
time = Time(0, 0, 0)
assert (time == time) is not None
assert (time != time) is not None
################################################################################################################################################################
def test_time_is_defined ():
time = Time(0, 0, 0)
assert time.is_defined() is not None
################################################################################################################################################################
def test_time_get_hour ():
time = Time(0, 0, 0)
assert time.get_hour() is not None
################################################################################################################################################################
def test_time_get_minute ():
time = Time(0, 0, 0)
assert time.get_minute() is not None
################################################################################################################################################################
def test_time_get_second ():
time = Time(0, 0, 0)
assert time.get_second() is not None
################################################################################################################################################################
def test_time_get_millisecond ():
time = Time(0, 0, 0)
assert time.get_millisecond() is not None
################################################################################################################################################################
def test_time_get_microsecond ():
time = Time(0, 0, 0)
assert time.get_microsecond() is not None
################################################################################################################################################################
def test_time_get_nanosecond ():
time = Time(0, 0, 0)
assert time.get_nanosecond() is not None
################################################################################################################################################################
def test_time_get_floating_seconds ():
time = Time(0, 0, 0)
assert time.get_floating_seconds() is not None
################################################################################################################################################################
def test_time_to_string ():
time = Time(0, 0, 0)
assert time.to_string() is not None
assert time.to_string(Time.Format.Standard) is not None
assert time.to_string(Time.Format.ISO8601) is not None
################################################################################################################################################################
def test_time_set_hour ():
time = Time(0, 0, 0)
time.set_hour(1)
################################################################################################################################################################
def test_time_set_minute ():
time = Time(0, 0, 0)
time.set_minute(1)
################################################################################################################################################################
def test_time_set_second ():
time = Time(0, 0, 0)
time.set_second(1)
################################################################################################################################################################
def test_time_set_millisecond ():
time = Time(0, 0, 0)
time.set_millisecond(1)
################################################################################################################################################################
def test_time_set_microsecond ():
time = Time(0, 0, 0)
time.set_microsecond(1)
################################################################################################################################################################
def test_time_set_nanosecond ():
time = Time(0, 0, 0)
time.set_nanosecond(1)
################################################################################################################################################################
| 1.734375 | 2 |
messenger.py | mtahmed/antnest | 1 | 12764591 | # Standard imports
import collections
import json
import select
import socket
import threading
import zmq
# Custom imports
import job
import message
import taskunit
import utils.logger
class Messenger:
    '''Base class for a messenger that handles all communication.

    Maintains two deques of ``(address, message)`` tuples -- one inbound,
    one outbound -- each paired with a counting semaphore so consumers can
    block until an item is available, plus a bidirectional
    identity <-> address mapping for known peers.  Transport subclasses
    (UDPMessenger, ZMQMessenger) implement the actual I/O.
    '''
    def __init__(self):
        # identity <--> address maps.
        self.identity_to_address = {}
        self.address_to_identity = {}
        # Both inbound_queue and outbound_queue contain tuples of
        # (address, message) that are received or need to be sent out.
        self.inbound_queue = collections.deque()
        self.outbound_queue = collections.deque()
        # Semaphores count queued items; acquire() blocks until release().
        self.outbound_queue_sem = threading.Semaphore(value=0)
        self.inbound_queue_sem = threading.Semaphore(value=0)
        # This dict is used to keep track of MessageTracker objects which can
        # be used to track message status.  Keyed by msg_id.
        self.trackers = {}
        self.logger = utils.logger.Logger('MESSENGER')
        return
    def start(self):
        '''Start the messenger.  Overridden by transport subclasses.'''
        pass
    def get_host_by_name(self, name):
        '''Return the address registered for identity ``name``.

        Raises:
            KeyError: if the identity was never registered.
        '''
        return self.identity_to_address[name]
    def register_destination(self, name, address):
        '''
        Store the hostname as key with address as value for this destination
        so that the caller can later only supply destination as hostname
        to communicate with the destination.  Both lookup directions are
        recorded.
        '''
        self.identity_to_address[name] = address
        self.address_to_identity[address] = name
        return
    def send(self, msg, address):
        '''Queue a single ``msg`` for delivery to ``address``.'''
        self.outbound_queue.append((address, msg))
        self.outbound_queue_sem.release()
        return
    def receive(self, deserialize=True):
        '''Yield the next message from the inbound_queue, blocking when empty.

        :param deserialize: If True, the message payload is deserialized
        and generated instead of the Message object itself.

        NOTE(review): UDPMessenger enqueues ``(address, msg)`` tuples, so the
        deserializing path here appears to expect bare Message objects --
        verify which producer feeds this consumer.
        '''
        while self.inbound_queue_sem.acquire():
            msg = self.inbound_queue.popleft()
            if not deserialize:
                yield msg
                continue
            msg_type = msg.msg_type
            decoded_msg = msg.msg_payload.decode('UTF-8')
            if msg_type == message.Message.MSG_STATUS:
                yield int(decoded_msg)
            elif msg_type == message.Message.MSG_TASKUNIT:
                yield taskunit.TaskUnit.deserialize(decoded_msg)
            elif msg_type == message.Message.MSG_TASKUNIT_RESULT:
                yield taskunit.TaskUnit.deserialize(decoded_msg)
            elif msg_type == message.Message.MSG_JOB:
                yield job.Job.deserialize(decoded_msg)
    def queue_for_sending(self, messages, address):
        '''Add messages to the outbound queue for sending.

        NOTE: This method takes a list of messages and not a single message.
        '''
        # The loop variable is named ``msg`` on purpose: the previous name
        # ``message`` shadowed the imported ``message`` module.
        for msg in messages:
            self.outbound_queue.append((address, msg))
            self.outbound_queue_sem.release()
        return
    def delete_tracker(self, tracker):
        '''
        The tracker for msg_id is no longer needed. Delete it.
        '''
        msg_id = tracker.msg_id
        del self.trackers[msg_id]
        return
    def sender(self):
        '''Send messages out through the sender socket. Forever.'''
        pass
    def receiver(self):
        '''Receive messages on the receiver socket. Forever.'''
        pass
class UDPMessenger(Messenger):
    '''A Messenger that uses UDP sockets for communication.
    This messenger implements custom fragmentation, ack etc.

    Outbound payloads are split into fixed-size fragments by
    message.Message.packed_fragments(); the receiver thread reassembles
    them in handle_received_msg() and acknowledges each complete message.
    '''
    # Constants
    DEFAULT_IP = '0.0.0.0'
    DEFAULT_PORT = 33310
    def __init__(self, ip=DEFAULT_IP, port=DEFAULT_PORT):
        # ip: the address to report for this node; port: UDP port to bind.
        super().__init__()
        self.ip = ip
        self.port = port
        # Fragments map for inbound messages.
        # Maps msg_id -> list of Message fragments (None = slot not filled).
        self.fragments_map = {}
        return
    def start(self):
        '''Start the messenger.

        Binds the UDP socket and spawns the sender and receiver threads.
        '''
        # Create the sockets.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(('0.0.0.0', self.port))
        # Create and start the receiver and sender threads now.
        receiver_thread = threading.Thread(target=self.receiver,
                                           name='receiver_thread')
        sender_thread = threading.Thread(target=self.sender,
                                         name='sender_thread')
        receiver_thread.start()
        sender_thread.start()
        return
    def send_status(self, status, address, track=False):
        '''
        Send a status update to a remote node.
        If track is True, then this method returns a MessageTracker object
        which can be used to check the state of the message sending.
        '''
        # Trivially serializeable.
        serialized_status = str(status)
        msg_id, messages = message.Message.packed_fragments(
                               message.Message.MSG_STATUS,
                               serialized_status,
                               address)
        tracker = message.MessageTracker(msg_id, isinuse=track)
        self.trackers[msg_id] = tracker
        self.queue_for_sending(messages, address)
        if track:
            return tracker
    def send_ack(self, msg, address, track=False):
        '''
        Send an ack for msg to a remote node.
        If track is True, then this method returns a MessageTracker object
        which can be used to check the state of the message sending.
        '''
        # The payload of an ack is the id of the message being acked.
        msg_id = msg.msg_id
        msg_id, messages = message.Message.packed_fragments(
                               message.Message.MSG_ACK,
                               msg_id,
                               address)
        tracker = message.MessageTracker(msg_id, isinuse=track)
        self.trackers[msg_id] = tracker
        self.queue_for_sending(messages, address)
        if track:
            return tracker
    def send_job(self, job, address, track=False):
        '''
        Send a job to a remote node.
        If track is True, then this method returns a MessageTracker object
        which can be used to check the state of the message sending.
        '''
        serialized_job = job.serialize(json_encode=True)
        msg_id, messages = message.Message.packed_fragments(
                               message.Message.MSG_JOB,
                               serialized_job,
                               address)
        tracker = message.MessageTracker(msg_id, isinuse=track)
        self.trackers[msg_id] = tracker
        self.queue_for_sending(messages, address)
        if track:
            return tracker
    def send_taskunit(self, tu, address, track=False,
                      attrs=['id', 'job_id', 'data', 'retries', 'state',
                             'result']):
        '''
        Send a taskunit to a remote node.
        If track is True, then this method returns a MessageTracker object
        which can be used to check the state of the message sending.

        NOTE(review): ``attrs`` is a mutable default argument -- safe only
        as long as neither this method nor serialize() mutates it; verify.
        '''
        serialized_taskunit = tu.serialize(include_attrs=attrs,
                                           json_encode=True)
        msg_id, messages = message.Message.packed_fragments(
                               message.Message.MSG_TASKUNIT,
                               serialized_taskunit,
                               address)
        tracker = message.MessageTracker(msg_id, isinuse=track)
        self.trackers[msg_id] = tracker
        self.queue_for_sending(messages, address)
        if track:
            return tracker
    def send_taskunit_result(self, tu, address, track=False,
                             attrs=['id', 'job_id', 'state', 'result']):
        '''
        Send the result of running taskunit.

        NOTE(review): ``attrs`` is a mutable default argument -- see
        send_taskunit().
        '''
        serialized_result = tu.serialize(include_attrs=attrs, json_encode=True)
        msg_id, messages = message.Message.packed_fragments(
                               message.Message.MSG_TASKUNIT_RESULT,
                               serialized_result,
                               address)
        tracker = message.MessageTracker(msg_id, isinuse=track)
        self.trackers[msg_id] = tracker
        self.queue_for_sending(messages, address)
        if track:
            return tracker
    def sender(self):
        '''Send messages out through the sender socket. Forever.

        Pops (address, packed fragment) pairs from outbound_queue and
        writes them to the UDP socket when epoll reports it writable.
        When the last fragment of a tracked message goes out, its
        MessageTracker is marked MSG_SENT.
        '''
        poller = select.epoll()
        poller.register(self.socket.fileno(),
                        select.EPOLLOUT | select.EPOLLET)  # Edge-triggered.
        self.logger.log("Sender up!")
        while True:
            # Block until there is something to send.
            self.outbound_queue_sem.acquire()
            address, msg = self.outbound_queue.popleft()
            self.logger.log("Sending message to %s:%d" % address)
            # While the msg is still not sent...
            while msg is not None:
                # Poll with timeout of 1.0 seconds.
                poll_responses = poller.poll(1.0)
                for _, event in poll_responses:
                    # If we can send...
                    if event & select.EPOLLOUT:
                        bytes_sent = self.socket.sendto(msg, address)
                        if bytes_sent == 0:
                            raise Exception("Couldn't send out the message.")
                        # If we have a tracker for this msg, then we need to
                        # mark it as sent if this is the last frag for the msg
                        # being sent out.
                        try:
                            msg_object = message.Message(packed_msg=msg)
                            if msg_object.is_last_frag():
                                tracker = self.trackers[msg_object.msg_id]
                                tracker.set_state(
                                    message.MessageTracker.MSG_SENT)
                        except KeyError:
                            # No tracker registered for this msg_id.
                            pass
                        msg = None
                        break
                    else:
                        self.logger.log("Unexpected event on sender socket.")
    def handle_received_msg(self, msg, address):
        '''Handle a raw received datagram (one fragment) from ``address``.

        Fragments are collected per msg_id; once all fragments of a
        message are present they are glued together, the complete message
        is queued (or, for acks, used to update the tracked state) and an
        ack is sent back for non-ack messages.

        NOTE(review): non-final fragments are appended in arrival order,
        so reassembly assumes in-order delivery of the leading fragments --
        verify against the fragmenting side.
        '''
        fragments_map = self.fragments_map
        msg = message.Message(packed_msg=msg)
        # Ensure a fragment list exists for this msg_id.
        try:
            fragments_map[msg.msg_id]
        except KeyError:
            fragments_map[msg.msg_id] = []
        if not msg.is_last_frag():
            fragments_map[msg.msg_id].append(msg)
        else:
            # The last fragment carries its fragment index in msg_meta1;
            # pad the list so there is one slot per expected fragment and
            # pin the final fragment into the last slot.
            msg_frag_id = msg.msg_meta1
            total_frags = msg_frag_id + 1
            current_frags = len(fragments_map[msg.msg_id])
            fragments_map[msg.msg_id].extend(
                [None] * (total_frags - current_frags))
            fragments_map[msg.msg_id][-1] = msg
            # If all the frags for this message have already been received.
        if None not in fragments_map[msg.msg_id]:
            if fragments_map[msg.msg_id][-1].is_last_frag():
                msg = message.Message.glue_fragments(fragments_map[msg.msg_id])
                # If it is an ack message, then we don't need to put it on the
                # inbound_queue.
                msg_id = msg.msg_id
                # If this message is an ack, then update the tracker.
                if msg.msg_type == message.Message.MSG_ACK:
                    MSG_ACKED = message.MessageTracker.MSG_ACKED
                    acked_msg_id = msg.msg_payload
                    tracker = self.trackers[acked_msg_id]
                    tracker.set_state(MSG_ACKED)
                    # If the tracker is not being used, delete it.
                    if not tracker.isinuse:
                        self.delete_tracker(tracker)
                    # NOTE(review): returning here leaves the ack's own
                    # fragments in fragments_map (the ``del`` below is
                    # skipped) -- looks like a slow leak; verify intent.
                    return
                self.inbound_queue.append((address, msg))
                self.inbound_queue_sem.release()
                # Send an ack now that we have received the msg.
                self.send_ack(msg, address)
                del fragments_map[msg_id]
        return
    def receiver(self):
        '''Receive messages on the receiver socket. Forever.

        Polls the UDP socket (edge-triggered epoll) and hands each
        datagram to handle_received_msg().
        '''
        poller = select.epoll()
        poller.register(self.socket.fileno(),
                        select.EPOLLIN | select.EPOLLET)  # Edge-triggered.
        self.logger.log("Receiver up!")
        while True:
            poll_responses = poller.poll()
            for fileno, event in poll_responses:
                if not event & select.EPOLLIN:
                    self.logger.log(
                        "Unexpected event on receiver socket.")
                    continue
                data, address = self.socket.recvfrom(message.Message.MSG_SIZE)
                self.logger.log("Received message from %s:%d" % address)
                self.handle_received_msg(data, address)
class ZMQMessenger(Messenger):
    '''A Messenger built on a ZeroMQ ROUTER socket.

    Peers are addressed by a 'tcp://ip:port' socket identity; a SERVER
    messenger binds, a CLIENT messenger connects and verifies the link
    with a PING/PONG handshake.
    '''
    # Constants
    DEFAULT_PORT = 33310
    NUM_TRIES = 3
    # Messenger types
    TYPE_SERVER = 0  # Listener socket. Accepts connections.
    TYPE_CLIENT = 1  # Client socket. Connects to server.
    VALID_TYPES = [TYPE_SERVER, TYPE_CLIENT]
    def __init__(self, type, ip=None, port=DEFAULT_PORT):
        '''
        :param type: The type of Messenger. Can be SERVER or CLIENT messenger.
        :param ip: The ip of the interface the socket should use.
        :param port: The port the socket should use.
        '''
        super().__init__()
        self.type = type
        self.ip = ip
        self.port = port
        self.context = zmq.Context()
        return
    def start(self):
        '''Create the ROUTER socket; a SERVER messenger additionally binds.'''
        if self.ip:
            # BUGFIX: this read the undefined local ``ip`` (NameError);
            # the constructor stores the value on ``self.ip``.
            public_ip = self.ip
        else:
            public_ip = self.get_public_ip()
        # The socket identity doubles as our routable address.
        identity = 'tcp://%s:%d' % (public_ip, self.port)
        bind_addr = 'tcp://*:%d' % self.port
        self.socket = self.context.socket(zmq.ROUTER)
        self.socket.setsockopt(zmq.IDENTITY, bytes(identity, 'UTF-8'))
        if self.type == self.TYPE_SERVER:
            self.socket.bind(bind_addr)
        return
    def connect(self, address):
        '''Connect to address and PING NUM_TRIES times till PONG received.
        Raises ConnectionError if failed to connect after NUM_TRIES tries. None
        otherwise.
        '''
        self.socket.connect('tcp://%s:%d' % address)
        for _ in range(self.NUM_TRIES):
            self.ping(address)
            try:
                msg_address, msg = next(self.receive(block=False, timeout=0.2))
                if msg_address == address and msg == 'PONG':
                    return
            except Exception:
                # Timeout / nothing received yet -- try pinging again.
                # (Narrowed from a bare ``except:`` so KeyboardInterrupt
                # and SystemExit are not swallowed.)
                pass
        else:
            # Loop exhausted without a successful handshake.
            raise ConnectionError("Failed to connect.")
    def ping(self, address):
        '''Send a PING probe to ``address``.'''
        self.send(json.dumps('PING'), address)
        return
    def pong(self, address):
        '''Answer a PING from ``address``.'''
        self.send(json.dumps('PONG'), address)
        return
    def receive(self, deserialize=False, block=True, timeout=0):
        '''Yield (address, message) pairs read from the ROUTER socket.

        :param deserialize: If True, yield deserialized payloads instead
            of the raw JSON-decoded message.
        :param block: If False, use non-blocking receives.
        :param timeout: Seconds to wait for readability before raising
            TimeoutError (0 disables the poll).
        '''
        while True:
            flags = 0 if block else zmq.NOBLOCK
            if timeout > 0.0:
                # zmq poll takes milliseconds.
                if self.socket.poll(timeout=timeout*1000) == 0:
                    raise TimeoutError()
            address = self.socket.recv_string(flags=flags)
            assert self.socket.recv() == b""  # Empty delimiter frame.
            msg = self.socket.recv_json()
            # The sender identity is 'tcp://ip:port'; unpack it back into
            # an (ip, port) tuple.
            # FIXME(mtahmed): This would probably fail for IPV6.
            address = address.split(':')[1:]
            address[0] = address[0][2:]
            address[1] = int(address[1])
            address = tuple(address)
            # FIXME(mtahmed): The PING-PONG should be taken care of in Messenger.
            if not deserialize:
                yield (address, msg)
                continue
            # FIXME
            msg_type = msg.msg_type
            decoded_msg = msg.msg_payload.decode('UTF-8')
            if msg_type == message.Message.MSG_STATUS:
                yield (address, int(decoded_msg))
            elif msg_type == message.Message.MSG_TASKUNIT:
                yield (address, taskunit.TaskUnit.deserialize(decoded_msg))
            elif msg_type == message.Message.MSG_TASKUNIT_RESULT:
                yield (address, taskunit.TaskUnit.deserialize(decoded_msg))
            elif msg_type == message.Message.MSG_JOB:
                yield (address, job.Job.deserialize(decoded_msg))
    def send(self, msg, address):
        '''Send ``msg`` to ``address`` as an identity/delimiter/body frame set.'''
        address = 'tcp://%s:%d' % address
        self.socket.send_string(address, zmq.SNDMORE)
        self.socket.send_string("", zmq.SNDMORE)
        self.socket.send_string(msg)
        return
    def send_job(self, job, address):
        '''Send a job to a remote node.
        '''
        serialized_job = job.serialize(json_encode=True)
        self.send(serialized_job, address)
        return
    def send_taskunit(self, tu, address,
                      attrs=['id', 'job_id', 'data', 'retries', 'state',
                             'result']):
        '''Send a taskunit to a remote node.

        NOTE(review): ``attrs`` is a mutable default argument -- safe only
        while nothing mutates it; verify serialize() does not.
        '''
        serialized_taskunit = tu.serialize(include_attrs=attrs,
                                           json_encode=True)
        self.send(serialized_taskunit, address)
        return
    def send_taskunit_result(self, tu, address,
                             attrs=['id', 'job_id', 'state', 'result']):
        '''Send the result of running taskunit.
        '''
        serialized_result = tu.serialize(include_attrs=attrs, json_encode=True)
        self.send(serialized_result, address)
        return
    @staticmethod
    def get_public_ip():
        '''Get the ip address of the external interface.
        This tries to connect to some public service to try to see what
        interface the socket binds to and uses that interface's address.
        '''
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        google_addr = socket.gethostbyname('www.google.com')
        s.connect((google_addr, 80))
        addr = s.getsockname()[0]
        s.close()
        return addr
| 2.515625 | 3 |
video_info.py | ondrasouk/encoders-comparison-tool | 1 | 12764592 | <filename>video_info.py
import subprocess
# Mapping of tool name (e.g. "ffmpeg", "ffprobe") -> executable path,
# populated via set_defaults().
binaries = {}
# Videofiles properties. Key is video file path.
videofiles_frame_num = {}
videofiles_duration = {}       # seconds, float
videofiles_framerate = {}      # "numerator/denominator" strings
videofiles_resolution = {}     # "WIDTHxHEIGHT" strings
videofiles_pix_fmt = {}        # ffmpeg pix_fmt names
pix_fmts_bpp = {}              # NOTE(review): never read/written below -- verify use
def set_defaults(binaries_ent):
    """Merge ``binaries_ent`` into the module-level ``binaries`` mapping.

    Entries in ``binaries_ent`` override existing keys.  The global is
    rebound to a fresh dict (not mutated in place).
    """
    global binaries
    merged = dict(binaries)
    merged.update(binaries_ent)
    binaries = merged
# Functions for getting the video info.
def video_length_seconds(videofile_path, binaries_ent=None):
    """ Get length of video in seconds.

    The probed value is cached per path in the module-level
    ``videofiles_duration`` dict, so ffprobe runs at most once per file.

    Args:
        videofile_path: Path to video file.
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffprobe.  Defaults to the module-level
            ``binaries`` mapping.

    Returns: Length of video in seconds (float).

    Raises:
        TypeError: if binaries_ent is neither None, str nor dict.
        ValueError: if ffprobe produced no parseable duration (the ffprobe
            stderr text becomes the error message).
    """
    if binaries_ent is None:
        global binaries
        ffprobepath = binaries["ffprobe"]
    elif isinstance(binaries_ent, str):
        ffprobepath = binaries_ent
    elif isinstance(binaries_ent, dict):
        ffprobepath = binaries_ent["ffprobe"]
    else:
        raise TypeError(
            "Passed binary can only be in format string or dictionary")
    global videofiles_duration
    try:
        # Cache hit: no need to run ffprobe again.
        return videofiles_duration[videofile_path]
    except KeyError:
        pass
    result = subprocess.run(
        [
            ffprobepath,
            "-v",
            "error",
            "-show_entries",
            "format=duration",
            "-of",
            "default=noprint_wrappers=1:nokey=1",
            videofile_path,
        ],
        capture_output=True,
        text=True,
    )
    try:
        duration = float(result.stdout)
    except ValueError:
        raise ValueError(result.stderr.rstrip("\n"))
    videofiles_duration[videofile_path] = duration
    return duration
def video_framerate_str(videofile_path, binaries_ent=None):
    """ Get framerate of video as string.

    The probed value is cached per path in the module-level
    ``videofiles_framerate`` dict, so ffprobe runs at most once per file.

    Args:
        videofile_path: Path to video file.
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffprobe.  Defaults to the module-level
            ``binaries`` mapping.

    Returns: Framerate of video as string of "numerator/denominator".

    Raises:
        TypeError: if binaries_ent is neither None, str nor dict.
        ValueError: if ffprobe produced no output (its stderr becomes the
            error message; previously an empty string was silently cached).
    """
    if binaries_ent is None:
        global binaries
        ffprobepath = binaries["ffprobe"]
    elif isinstance(binaries_ent, str):
        ffprobepath = binaries_ent
    elif isinstance(binaries_ent, dict):
        ffprobepath = binaries_ent["ffprobe"]
    else:
        raise TypeError(
            "Passed binary can only be in format string or dictionary")
    global videofiles_framerate
    try:
        # Cache hit: no need to run ffprobe again.
        return videofiles_framerate[videofile_path]
    except KeyError:
        pass
    result = subprocess.run(
        [
            ffprobepath,
            "-v",
            "error",
            "-show_entries",
            "stream=r_frame_rate",
            "-of",
            "default=noprint_wrappers=1:nokey=1",
            videofile_path,
        ],
        capture_output=True,
        text=True,
    )
    # First output line holds the rate of the first stream.
    framerate_str = result.stdout.split("\n")[0]
    if not framerate_str:
        raise ValueError(result.stderr.rstrip("\n"))
    videofiles_framerate[videofile_path] = framerate_str
    return framerate_str
def video_framerate(videofile_path, binaries_ent=None):
    """ Get framerate of video.

    Args:
        videofile_path: Path to video file.
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffprobe.

    Returns: Framerate of video as number.
    """
    global videofiles_framerate
    if videofile_path in videofiles_framerate:
        framerate_str = videofiles_framerate[videofile_path]
    else:
        # Probe (and cache) the "num/den" string first.
        framerate_str = video_framerate_str(videofile_path,
                                            binaries_ent=binaries_ent)
        videofiles_framerate[videofile_path] = framerate_str
    numerator, denominator = framerate_str.split("/")
    return int(numerator) / int(denominator)
def video_frames(videofile_path, binaries_ent=None):
    """ Calculate number of frames of video.

    Args:
        videofile_path: Path to video file.
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffprobe.

    Returns: Number of frames of video (int, truncated).
    """
    fps = video_framerate(videofile_path, binaries_ent)
    duration = video_length_seconds(videofile_path, binaries_ent)
    return int(fps * duration)
def video_stream_size(videofile_path, binaries_ent=None):
    """ Get size of the first video stream in KB.

    Runs ffmpeg with the stream copied to the null muxer and parses the
    video size out of the final summary line on stderr.

    Args:
        videofile_path: Path to video file.
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffmpeg.  Defaults to the module-level
            ``binaries`` mapping.

    Returns: Size of stream in KB (float).

    Raises:
        TypeError: if binaries_ent is neither None, str nor dict.
        ValueError: if the ffmpeg output could not be parsed (its stderr
            becomes the error message).
    """
    if binaries_ent is None:
        global binaries
        # BUGFIX: this branch looked up binaries["ffprobe"], but the
        # command below runs ffmpeg, not ffprobe.
        ffmpegpath = binaries["ffmpeg"]
    elif isinstance(binaries_ent, str):
        ffmpegpath = binaries_ent
    elif isinstance(binaries_ent, dict):
        ffmpegpath = binaries_ent["ffmpeg"]
    else:
        raise TypeError(
            "Passed binary can only be in format string or dictionary")
    result = subprocess.run(
        [
            ffmpegpath,
            "-hide_banner",
            "-i", videofile_path,
            "-map", "0:v:0",
            "-c", "copy",
            "-f", "null", "-"
        ],
        capture_output=True,
        text=True,
    )
    try:
        # The second-to-last stderr line looks like "video:1234kB ...";
        # take the first field, split off the "video:" label and strip
        # the trailing "kB" unit.
        size = (result.stderr.rsplit("\n")[-2].rsplit(" ")[0].rsplit(":")[1][0:-2])
        return float(size)
    except (ValueError, IndexError):
        # IndexError covers a stderr too short/odd to slice.
        raise ValueError(result.stderr.rstrip("\n"))
def video_dimensions(videofile_path, binaries_ent=None):
    """ Get dimensions of video in pixels.

    The probed value is cached per path in the module-level
    ``videofiles_resolution`` dict, so ffprobe runs at most once per file.

    Args:
        videofile_path: Path to video file.
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffprobe.  Defaults to the module-level
            ``binaries`` mapping.

    Returns: Dimensions of video as a string, e.g. "1920x1080".

    Raises:
        TypeError: if binaries_ent is neither None, str nor dict.
        ValueError: if the ffprobe output is not "WIDTHxHEIGHT" with
            integer fields (the ffprobe stderr becomes the message).
    """
    if binaries_ent is None:
        global binaries
        ffprobepath = binaries["ffprobe"]
    elif isinstance(binaries_ent, str):
        ffprobepath = binaries_ent
    elif isinstance(binaries_ent, dict):
        ffprobepath = binaries_ent["ffprobe"]
    else:
        raise TypeError(
            "Passed binary can only be in format string or dictionary")
    global videofiles_resolution
    try:
        # Cache hit: no need to run ffprobe again.
        return videofiles_resolution[videofile_path]
    except KeyError:
        pass
    result = subprocess.run(
        [
            ffprobepath,
            "-v",
            "error",
            "-select_streams",
            "v:0",
            "-show_entries",
            "stream=width,height",
            "-of",
            "csv=s=x:p=0",
            videofile_path,
        ],
        capture_output=True,
        text=True,
    )
    try:
        resolution_str = result.stdout.split("\n")[0]
        # Validate both fields are integers before caching; the parsed
        # values themselves are unused (the string form is what callers
        # expect -- the previous code built and discarded the int list).
        width, height = (int(side) for side in resolution_str.split("x"))
    except ValueError:
        raise ValueError(result.stderr.rstrip("\n"))
    videofiles_resolution[videofile_path] = resolution_str
    return resolution_str
def video_pix_fmt(videofile_path, binaries_ent=None):
    """ Get pix_fmt of video in ffmpeg's format.

    The probed value is cached per path in the module-level
    ``videofiles_pix_fmt`` dict, so ffprobe runs at most once per file.

    Args:
        videofile_path: Path to video file.
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffprobe.  Defaults to the module-level
            ``binaries`` mapping.

    Returns: String with ffmpeg pix_fmt format. (eg. "yuv420p10le")

    Raises:
        TypeError: if binaries_ent is neither None, str nor dict.
        ValueError: if ffprobe produced no output (its stderr becomes the
            error message).
    """
    if binaries_ent is None:
        global binaries
        ffprobepath = binaries["ffprobe"]
    elif isinstance(binaries_ent, str):
        ffprobepath = binaries_ent
    elif isinstance(binaries_ent, dict):
        ffprobepath = binaries_ent["ffprobe"]
    else:
        raise TypeError(
            "Passed binary can only be in format string or dictionary")
    global videofiles_pix_fmt
    try:
        # Cache hit: no need to run ffprobe again.
        return videofiles_pix_fmt[videofile_path]
    except KeyError:
        pass
    result = subprocess.run(
        [
            ffprobepath,
            "-v",
            "error",
            "-select_streams",
            "v:0",
            "-show_entries",
            "stream=pix_fmt",
            "-of",
            "default=noprint_wrappers=1:nokey=1",
            videofile_path,
        ],
        capture_output=True,
        text=True,
    )
    pix_fmt = result.stdout.split("\n")[0]
    if not pix_fmt:
        raise ValueError(result.stderr.rstrip("\n"))
    # BUGFIX: the cache was consulted above but never populated, so every
    # call re-ran ffprobe.
    videofiles_pix_fmt[videofile_path] = pix_fmt
    return pix_fmt
def video_get_info_for_yuv(videofile_path, binaries_ent=None):
    """Return (framerate, "WxH" string, pix_fmt) describing the video.

    Convenience bundle of the three properties needed to interpret a raw
    YUV stream of this file.
    """
    framerate = video_framerate(videofile_path, binaries_ent)
    dimensions = video_dimensions(videofile_path, binaries_ent)
    pix_fmt = video_pix_fmt(videofile_path, binaries_ent)
    return (framerate, dimensions, pix_fmt)
def pix_fmt_bpp(pix_fmt, binaries_ent=None):
    """ Get bits per pixel for an ffmpeg pixel format.

    Parses the table printed by ``ffmpeg -pix_fmts``.

    Args:
        pix_fmt: Pixel format name in ffmpeg notation (e.g. "yuv420p").
        binaries_ent: Dictionary with binaries and their path, or a string
            with the path to ffmpeg.  Defaults to the module-level
            ``binaries`` mapping.

    Returns: Bits per pixel as int, or -1 for hardware-accelerated
        formats (flag 'H' in the ffmpeg flags column), which have no
        meaningful bpp.

    Raises:
        TypeError: if binaries_ent is neither None, str nor dict.
        IndexError: if pix_fmt does not appear in the ffmpeg output.
    """
    if binaries_ent is None:
        global binaries
        ffmpegpath = binaries["ffmpeg"]
    elif isinstance(binaries_ent, str):
        ffmpegpath = binaries_ent
    elif isinstance(binaries_ent, dict):
        ffmpegpath = binaries_ent["ffmpeg"]
    else:
        raise TypeError(
            "Passed binary can only be in format string or dictionary")
    result = subprocess.run(
        [
            ffmpegpath,
            "-pix_fmts",
            "-hide_banner",
        ],
        capture_output=True,
        text=True,
    )
    # Pick the first line containing "<pix_fmt> " -- the trailing space
    # avoids prefix hits (e.g. "yuv420p" matching "yuv420p10le").
    line = [s for s in result.stdout.split("\n") if pix_fmt + " " in s][0]
    if line[2] == "H":
        # NOTE(review): assumes the third flags character is 'H' for
        # hardware-accelerated formats -- verify against the ffmpeg build.
        return -1
    # BITS_PER_PIXEL is the right-aligned last column of the table.
    return int(line[-4:].lstrip())
def calculate_size_raw(num_frames, video_dimensions, pix_fmt, binaries_ent=None):
    """Return the size in bits of ``num_frames`` of raw video.

    ``video_dimensions`` is a "WIDTHxHEIGHT" string; the per-pixel cost
    comes from pix_fmt_bpp().
    """
    dims = video_dimensions.split("x")
    width, height = int(dims[0]), int(dims[1])
    bits_per_pixel = pix_fmt_bpp(pix_fmt, binaries_ent)
    return num_frames * width * height * bits_per_pixel
| 2.671875 | 3 |
back/slidingpuzzle.py | monoklabs/Econ-Puzzle-Experiment | 2 | 12764593 | # -*- coding: utf-8 -*-
"""
A* sliding puzzle solver
Solver script for the fifteen-puzzle and derivations of it (8-puzzle etc.),
based on what we've learnt on ai-class.com so far. Don't expect it to run very
fast, certain puzzle states take ages to solve. I have documented and
commented the code thoroughly, so hopefully it's easy to understand what's
going on.
Written by <NAME>. Released into the public domain.
Example usage:
>>> from slidingpuzzle import Board
>>> b = Board(3, "1,8,7,3,0,5,4,6,2")
    >>> print(b)
1 8 7
3 5
4 6 2
>>> b.get_solution()
Solution found!
Moves: 22
Nodes visited: 601
Time: 0.856076
All moves: (1, 0), (2, 0), ..., (2, 2)
"""
import copy
import math
import time
import bisect
import random
import itertools
class Board:
    """
    Contains the state of a sliding puzzle board, as well as some methods for
    manipulating it.

    Tiles are stored row-major in ``self._tiles`` with ``None`` marking the
    empty slot; ``self._empty`` caches the empty slot's (x, y) position and
    ``self.goals`` maps each tile value to its goal (x, y) position.

    Ported to Python 3: ``print`` statements, integer division (``//``) and
    ``time.clock()`` (removed in Python 3.8) were updated, and the input
    validation now compares against ``list(range(...))`` (comparing a list
    with a ``range`` object is always unequal in Python 3).
    """
    def __init__(self, size=4, text=None):
        """
        Initialize a new Board object.

        Keyword arguments:
        size -- the width/height of the board to create (default: 4)
        text -- string representation of the board; a comma-separated
                string of numbers where 0 represents the empty tile
                (optional; if left out a board at the goal state will be
                generated)

        Raises ValueError on a size below 2 or tile values that are not a
        permutation of 0..size*size-1.
        """
        if size < 2:
            raise ValueError("Board has to be at least 2 by 2 tiles large")
        self._size = size
        size_sq = size * size
        if text is not None:
            values = [int(n) for n in text.split(",")]
            # make sure we have valid input: a permutation of 0..size_sq-1
            if sorted(values) != list(range(size_sq)):
                raise ValueError("Invalid tile values supplied")
        else:
            # no string input given: create a board at the goal state
            values = list(range(1, size_sq)) + [0]
        # put the values into a nested row-major list; 0 becomes None
        self._tiles = [[n if n > 0 else None
                        for n in values[y * size:(y + 1) * size]]
                       for y in range(size)]
        # store the location of the empty tile as (x, y)
        self._empty = values.index(0) % size, values.index(0) // size
        # store the goal location of each tile value
        self.goals = {}
        for x in range(size_sq):
            self.goals[x + 1] = x % size, x // size
        # the empty tile's goal is the bottom-right cell
        self.goals[None] = self.goals[size_sq]
    def get_solution(self):
        """
        Solve a sliding puzzle board with A*. Note that this only prints the
        actual moves, it does not change the board to its solved state.
        """
        start_time = time.perf_counter()  # time.clock() was removed in 3.8
        frontier = [Node(self, None, 0, None)]
        explored = []
        visited = 0
        while True:
            visited += 1
            # frontier is kept sorted by f-estimate via bisect.insort, so
            # pop(0) yields the most promising node
            node = frontier.pop(0)
            # if the current node is at the goal state, we're done!
            if node.board.h() == 0:
                # walk the parent chain to compile the list of moves
                moves = []
                while node.parent:
                    moves.append(node.action)
                    node = node.parent
                moves.reverse()
                print("Solution found!")
                print("Moves:", len(moves))
                print("Nodes visited:", visited)
                print("Time:", time.perf_counter() - start_time)
                print("All moves:", ", ".join(str(move) for move in moves))
                break
            else:
                # not done yet: expand the node and enqueue any state not
                # already on the frontier or in the explored list
                for new_node in node.expand():
                    if new_node not in frontier and new_node not in explored:
                        # bisect keeps the frontier sorted on insertion
                        bisect.insort(frontier, new_node)
                explored.append(node)
    def h(self):
        """
        The heuristic function for A*: sum of the Manhattan distances of the
        numbered tiles to their goal positions.  The empty slot is excluded
        so the heuristic stays admissible (counting it could overestimate
        the remaining cost); the goal test h() == 0 is unaffected because
        the blank is forced into place once all numbered tiles are.
        """
        total = 0
        for y, row in enumerate(self._tiles):
            for x, tile in enumerate(row):
                if tile is None:
                    continue
                goal_x, goal_y = self.goals[tile]
                total += abs(x - goal_x) + abs(y - goal_y)
        return total
    def apply_action(self, action):
        """
        Apply an action (a move) to the board.

        Arguments:
        action -- a 2-tuple containing the x,y coordinate of the tile to move

        Raises a ValueError on invalid moves (the tile must be orthogonally
        adjacent to the empty slot).
        """
        x, y = action
        e_x, e_y = self._empty
        # adjacency: exactly one step away along exactly one axis.
        # (The previous XOR test accepted e.g. a tile several rows away.)
        if abs(x - e_x) + abs(y - e_y) == 1:
            # swap the tile with the empty slot
            self._tiles[y][x], self._tiles[e_y][e_x] = None, self._tiles[y][x]
            self._empty = x, y  # empty tile has moved; store new location
        else:
            raise ValueError("Invalid move")
    def actions(self):
        """Return a list of possible actions to perform on the board."""
        x, y = self._empty
        actions = []
        if x > 0: actions.append((x - 1, y))
        if y > 0: actions.append((x, y - 1))
        if x < self._size - 1: actions.append((x + 1, y))
        if y < self._size - 1: actions.append((x, y + 1))
        return actions
    def randomize(self, moves=1000):
        """
        Randomize the board by applying random legal moves.

        Arguments:
        moves -- the amount of random moves to perform (default: 1000)
        """
        for _ in range(moves):
            self.apply_action(random.choice(self.actions()))
    def __str__(self):
        # render every cell right-aligned to width 2, then blank out the
        # "None" that the empty slot formats to
        grid = "\n".join([" ".join(["{:>2}"] * self._size)] * self._size)
        values = itertools.chain(*self._tiles)
        return grid.format(*values).replace("None", "  ")
class Node:
    """
    Represents a node in the A* search algorithm graph: a board state plus
    the path (action chain) that produced it.
    """
    def __init__(self, board, action, cost, parent):
        """
        Initialize a new Node object.

        Arguments:
        board  -- the board state at this node (Board object)
        action -- the action that took us here from the previous node
        cost   -- the total cost of the path from the initial node to this
                  node (the "g" component of the A* algorithm)
        parent -- the previous Node object (None for the root)
        """
        self.board = board
        self.action = action
        self.cost = cost
        self.parent = parent
        self.estimate = cost + board.h()  # A* "f" function: f = g + h
    def expand(self):
        """Return a list of possible nodes to move to from this node."""
        nodes = []
        for action in self.board.actions():
            # each successor works on its own deep copy of the board
            board = copy.deepcopy(self.board)
            board.apply_action(action)
            nodes.append(Node(board, action, self.cost + 1, self))
        return nodes
    def __eq__(self, rhs):
        # Nodes compare equal when their board configurations match, so
        # membership tests on the frontier/explored lists dedupe by state.
        if isinstance(rhs, Node):
            return self.board._tiles == rhs.board._tiles
        # Let the other operand's reflected __eq__ decide.  (The previous
        # ``return rhs == self`` re-entered this comparison and could
        # recurse indefinitely for operands that defer back to us.)
        return NotImplemented
    def __lt__(self, rhs):
        # order nodes by f-estimate so bisect keeps the frontier sorted
        return self.estimate < rhs.estimate
| 3.796875 | 4 |
python/fds.protobuf.stach.v2/fds/protobuf/stach/v2/table/TableData_pb2.py | saigiridhar21/stachschema-sdks | 0 | 12764594 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fds/protobuf/stach/v2/table/TableData.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from fds.protobuf.stach.v2.table import ColumnData_pb2 as fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2
from fds.protobuf.stach.v2.table import MetadataCollection_pb2 as fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2
from fds.protobuf.stach.v2.table import RowDefinition_pb2 as fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2
# Generated descriptor data: DESCRIPTOR wraps the compiled TableData.proto
# file (serialized_pb is the raw FileDescriptorProto bytes emitted by protoc).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='fds/protobuf/stach/v2/table/TableData.proto',
  package='factset.protobuf.stach.v2.table',
  syntax='proto3',
  serialized_options=b'\n#com.factset.protobuf.stach.v2.tableB\016TableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\252\002\037FactSet.Protobuf.Stach.V2.Table',
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n+fds/protobuf/stach/v2/table/TableData.proto\x12\x1f\x66\x61\x63tset.protobuf.stach.v2.table\x1a,fds/protobuf/stach/v2/table/ColumnData.proto\x1a\x34\x66\x64s/protobuf/stach/v2/table/MetadataCollection.proto\x1a/fds/protobuf/stach/v2/table/RowDefinition.proto\"\xb7\x02\n\tTableData\x12<\n\x04rows\x18\x01 \x03(\x0b\x32..factset.protobuf.stach.v2.table.RowDefinition\x12H\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x37.factset.protobuf.stach.v2.table.TableData.ColumnsEntry\x12\x45\n\x08metadata\x18\x03 \x01(\x0b\x32\x33.factset.protobuf.stach.v2.table.MetadataCollection\x1a[\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12:\n\x05value\x18\x02 \x01(\x0b\x32+.factset.protobuf.stach.v2.table.ColumnData:\x02\x38\x01\x42\x9b\x01\n#com.factset.protobuf.stach.v2.tableB\x0eTableDataProtoZBgithub.com/factset/stachschema-sdks/go/fds/protobuf/stach/v2/table\xaa\x02\x1f\x46\x61\x63tSet.Protobuf.Stach.V2.Tableb\x06proto3'
  ,
  dependencies=[fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2.DESCRIPTOR,fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2.DESCRIPTOR,fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2.DESCRIPTOR,])

# Descriptor for the synthetic map-entry message that backs the proto3
# map<string, ColumnData> field TableData.columns.
_TABLEDATA_COLUMNSENTRY = _descriptor.Descriptor(
  name='ColumnsEntry',
  full_name='factset.protobuf.stach.v2.table.TableData.ColumnsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='factset.protobuf.stach.v2.table.TableData.ColumnsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='factset.protobuf.stach.v2.table.TableData.ColumnsEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=b'8\001',
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=450,
  serialized_end=541,
)

# Descriptor for the TableData message itself (rows, columns map, metadata).
_TABLEDATA = _descriptor.Descriptor(
  name='TableData',
  full_name='factset.protobuf.stach.v2.table.TableData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='rows', full_name='factset.protobuf.stach.v2.table.TableData.rows', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='columns', full_name='factset.protobuf.stach.v2.table.TableData.columns', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='factset.protobuf.stach.v2.table.TableData.metadata', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR,  create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_TABLEDATA_COLUMNSENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=230,
  serialized_end=541,
)

# Wire message-typed fields to their descriptors and register the file.
_TABLEDATA_COLUMNSENTRY.fields_by_name['value'].message_type = fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_ColumnData__pb2._COLUMNDATA
_TABLEDATA_COLUMNSENTRY.containing_type = _TABLEDATA
_TABLEDATA.fields_by_name['rows'].message_type = fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_RowDefinition__pb2._ROWDEFINITION
_TABLEDATA.fields_by_name['columns'].message_type = _TABLEDATA_COLUMNSENTRY
_TABLEDATA.fields_by_name['metadata'].message_type = fds_dot_protobuf_dot_stach_dot_v2_dot_table_dot_MetadataCollection__pb2._METADATACOLLECTION
DESCRIPTOR.message_types_by_name['TableData'] = _TABLEDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes built at import time via the reflection API.
TableData = _reflection.GeneratedProtocolMessageType('TableData', (_message.Message,), {

  'ColumnsEntry' : _reflection.GeneratedProtocolMessageType('ColumnsEntry', (_message.Message,), {
    'DESCRIPTOR' : _TABLEDATA_COLUMNSENTRY,
    '__module__' : 'fds.protobuf.stach.v2.table.TableData_pb2'
    # @@protoc_insertion_point(class_scope:factset.protobuf.stach.v2.table.TableData.ColumnsEntry)
    })
  ,
  'DESCRIPTOR' : _TABLEDATA,
  '__module__' : 'fds.protobuf.stach.v2.table.TableData_pb2'
  # @@protoc_insertion_point(class_scope:factset.protobuf.stach.v2.table.TableData)
  })
_sym_db.RegisterMessage(TableData)
_sym_db.RegisterMessage(TableData.ColumnsEntry)

DESCRIPTOR._options = None
_TABLEDATA_COLUMNSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 1.210938 | 1 |
config_writer/__init__.py | glennib/py-config | 0 | 12764595 | <reponame>glennib/py-config<filename>config_writer/__init__.py
from .config import ConfigWriter
| 1.320313 | 1 |
app/models.py | Diogosantosecastro/bo_sales | 0 | 12764596 | <filename>app/models.py
import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app, request, url_for
from flask_login import UserMixin, AnonymousUserMixin
from . import db, login_manager
from sqlalchemy import desc
class Role(db.Model):
    """A user role (e.g. admin); one role can be shared by many users."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # One-to-many relationship: User rows get a `role` backref.
    users = db.relationship('User', backref='role', lazy='dynamic')
class User(UserMixin, db.Model):
    """Back-office user accounts (original docstring was Portuguese:
    'This class defines the BO users')."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    id_hubspot = db.Column(db.Integer, unique=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))  # not in use yet
    password_hash = db.Column(db.String(128))
    # Fix: the pin setter/verifier below read and write self.pin_hash, but no
    # such column was declared, so the PIN hash was never persisted.
    # (Requires a schema migration when deployed.)
    pin_hash = db.Column(db.String(128))

    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Store only a salted hash, never the plaintext password.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Compare *password* against the stored hash for this user row."""
        return check_password_hash(self.password_hash, password)

    @property
    def pin(self):
        # Fix: the original error message said 'password' (copy-paste).
        raise AttributeError('pin is not a readable attribute')

    @pin.setter
    def pin(self, pin):
        self.pin_hash = generate_password_hash(pin)

    def verify_pin(self, password):
        """Compare the given PIN against the stored PIN hash."""
        return check_password_hash(self.pin_hash, password)

    def generate_auth_token(self, expiration):
        """Return a signed token carrying this user's id, valid for
        *expiration* seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration)
        return s.dumps({'id': self.id}).decode('utf-8')

    def confirm(self, token):
        """Validate an account-confirmation token and mark the user confirmed.

        Returns True on success, False for invalid/expired tokens or tokens
        issued for a different user.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            return False
        if data.get('confirm') != self.id:
            # Fix: the original returned True here, which accepted tokens
            # issued for *other* users without confirming anything.
            return False
        # NOTE(review): no `confirmed` column is declared on this model and no
        # generator produces a {'confirm': id} token -- verify the schema and
        # the token-issuing code path.
        self.confirmed = True
        db.session.add(self)
        return True

    @staticmethod
    def verify_auth_token(token):
        """Return the User encoded in a valid auth token, or None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return None
        return User.query.get(data['id'])

    def to_json(self):
        """Return a JSON-serialisable summary of this user."""
        json_user = {
            'url': url_for('api.get_user', id=self.id),
            'username': self.username
        }
        return json_user

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Flask-Login user object used for requests with no authenticated session."""
    def can(self, permissions):
        # NOTE(review): returning True grants anonymous visitors *every*
        # permission check -- confirm this is intentional (False is the usual
        # default for anonymous users).
        return True

    def is_administrator(self):
        return False
# Tell Flask-Login to use our class for unauthenticated requests.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the id stored in the session."""
    return User.query.get(int(user_id))
class Restaurant(db.Model):
    """Restaurant records, populated from Zomato / HubSpot identifiers."""
    __tablename__ = 'restaurants'
    id = db.Column(db.Integer(), primary_key=True)
    id_zomato = db.Column(db.Integer())
    id_hubspot = db.Column(db.Integer())
    url = db.Column(db.String(128))
    # Row creation timestamp (UTC).
    addTime = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    name = db.Column(db.String(128))
    address = db.Column(db.String(128))
    city = db.Column(db.String(128))
    latitude = db.Column(db.String(128))
    longitude = db.Column(db.String(128))
    locality = db.Column(db.String(128))
    zipcode = db.Column(db.String(128))
    price_range = db.Column(db.String(128))
    all_reviews_count = db.Column(db.String(128))
    # Fix: average_cost_for_two was declared twice; keep a single column.
    average_cost_for_two = db.Column(db.String(128))
    phone_numbers = db.Column(db.String(128))
    cuisines = db.Column(db.String(128))

    def __repr__(self):
        # Fix: the original referenced self.restaurant, which does not exist
        # on this model and raised AttributeError whenever repr() was taken.
        return '<Restaurant %r>' % self.name
| 2.609375 | 3 |
problems/primes.py | JoshKarpel/Euler | 1 | 12764597 | import math
def is_prime(n):
    """Return True iff *n* is a prime number.

    Uses trial division by odd divisors up to sqrt(n). Integer arithmetic
    (divisor*divisor <= n) replaces the original float math.sqrt bound,
    which can be off by one for very large integers.
    """
    if n <= 1:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
def find_n_primes(n):
    """Return a list of the first *n* primes (empty list for n <= 0).

    Fix: the original seeded the list with 2 unconditionally, so it
    returned [2] even when n <= 0.
    """
    if n <= 0:
        return []
    primes = [2]
    candidate = 2
    while len(primes) < n:
        candidate += 1
        for p in primes:
            if candidate % p == 0:
                # composite: a smaller prime divides it
                break
            elif p > math.sqrt(candidate):
                # no prime <= sqrt(candidate) divides it, so it is prime
                primes.append(candidate)
                break
    return primes
def find_primes_less_than_n(n):
    """Return all primes strictly less than *n*, in increasing order.

    Fixes two boundary bugs in the original: it included n itself when n
    was prime (the loop incremented past the bound before testing), and it
    returned [2] for every n <= 2 because the list was pre-seeded.
    """
    primes = []
    for candidate in range(2, n):
        # candidate is prime iff no previously-found prime <= sqrt(candidate)
        # divides it.
        if all(candidate % p for p in primes if p * p <= candidate):
            primes.append(candidate)
    return primes
def generate_primes():
    """Lazily yield every prime number in ascending order, without bound."""
    yield 2
    known = [2]
    candidate = 2
    while True:
        candidate += 1
        limit = math.sqrt(candidate)
        for p in known:
            if candidate % p == 0:
                # divisible by a smaller prime: not prime, try the next number
                break
            if p > limit:
                # no prime up to sqrt(candidate) divides it: it is prime
                known.append(candidate)
                yield candidate
                break
def sieve_of_eratosthenes(n):
    """Return all primes strictly less than *n* using the classic sieve.

    The original used Python lists for membership tests (accidentally
    O(n^2)), tested the wrong variable inside its marking loop, and
    appended marks beyond n. This version uses a boolean array, giving the
    textbook O(n log log n) behaviour with identical output.
    """
    if n < 2:
        return []
    # is_composite[k] becomes True once k is known to be a multiple of a prime.
    is_composite = [False] * n
    primes = []
    for candidate in range(2, n):
        if not is_composite[candidate]:
            primes.append(candidate)
            # Start at candidate^2: smaller multiples were already marked.
            for multiple in range(candidate * candidate, n, candidate):
                is_composite[multiple] = True
    return primes
def prime_factorization(n):
    """Return the prime factors of *n* in non-decreasing order, e.g.
    prime_factorization(12) == [2, 2, 3].

    Fixes in this version: `n //= divisor` keeps everything in exact
    integer arithmetic (the original's `n = n / divisor` produced floats,
    losing precision for large inputs), and n < 2 returns [] instead of
    looping forever on n == 0.
    """
    if n < 2:
        # 0 and 1 have no prime factorization (the original hung on 0).
        return []
    factors = []
    divisor = 2
    while divisor * divisor <= n:
        while n % divisor == 0:
            factors.append(divisor)
            n //= divisor
        divisor += 1
    if n > 1:
        # Whatever remains has no factor <= sqrt, so it is itself prime.
        factors.append(n)
    return factors
def relatively_prime(n):
    """Return the set of integers in [1, n) that are coprime to *n*
    (1 is always included, matching the original behaviour).

    Uses math.gcd per candidate instead of the original approach, which
    re-ran a full prime factorization of n on *every* loop iteration and
    intersected factor lists -- same results, drastically cheaper.
    """
    relatives = {1}
    for k in range(2, n):
        if math.gcd(n, k) == 1:
            relatives.add(k)
    return relatives
| 3.953125 | 4 |
honeybot/manager.py | Zernerus/honeybot | 0 | 12764598 | from tkinter import *
import tkinter as tk
import os
import inspect
import configparser
#Create a window with a title
window = tk.Tk()
window.geometry("650x670")
window.title("Manager")
#Gets the system path for the manager file
filePath = os.path.abspath(inspect.getfile(inspect.currentframe()))
# Single character 11 places from the end, i.e. just before "manager.py"
# (10 chars): the platform path separator. NOTE(review): "extenstion" looks
# like a typo for "extension"/"separator" -- confirm intent before renaming.
extenstion = filePath[-11]
#gets the system path for the plugin folder
pluginPath = filePath[0:filePath.index("manager.py")] + "plugins"
#Switches to the config file dir
configPath = filePath[0:filePath.index("manager.py")] + "settings" + extenstion + "PLUGINS.conf"
def saveFile():
    """Persist the current contents of the editor widget to PLUGINS.conf.

    Reads the whole Text widget (tt) and overwrites the config file at the
    module-level configPath.
    """
    s = tt.get(1.0, END)
    # Context manager guarantees the handle is closed even if write() fails
    # (the original used open/close and leaked the handle on error).
    with open(configPath, 'wt') as f:
        f.write(s)
def getPlugins(dirName):
    """Return the names of every entry inside the directory *dirName*."""
    return os.listdir(dirName)
def clicked():
    """Append each plugin currently selected in the listbox to the editor."""
    for pos in listbox.curselection():
        tt.insert(END, listbox.get(pos) + "\n")
# Everything inside the plugins directory; filtered to *.py files below.
pluginList = getPlugins(pluginPath);
MainLabel = Label(window, text="Select the plugins you wish to load and add them to the config file")
MainLabel.grid(row=0, column=0)
saveBtn = Button(window, text="Save File", width=10, command=saveFile)
saveBtn.grid(row=6, column=0)
addBtn = Button(window, text="Add Plugin", width=10, command=clicked)
addBtn.grid(row=2, column=0)
label2 = Label(window, text="Editable PLUGINS.conf file:")
label2.grid(row=3,column=0)
label3 = Label(window, text="Don't forget to hit save!")
label3.grid(row=5,column=0)
# Editor widget pre-loaded with the current config file contents.
tt = Text(window, width= 80)
tt.grid(row=4,column=0)
tt.insert(END, open(configPath).read())
listbox = Listbox(window, width=60)
listbox.grid(row=1, column=0)
# Only Python source files are offered as selectable plugins.
for name in pluginList:
    if(name[-2:] == "py"):
        listbox.insert(END, name)
window.mainloop()
api/server/swagger_server/code_templates/serve_kfserving.TEMPLATE.py | srishtipithadia/mlx | 0 | 12764599 | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl
from kfp_tekton.compiler import TektonCompiler
from kfp_tekton import TektonClient
from os import path
from tempfile import gettempdir
############################################################
# Define the pipeline method
############################################################
@dsl.pipeline(
    name='KFServing pipeline',
    description='A pipeline for serving models with KFServing'
)
def model_pipeline(model_id='${model_identifier}'):
    """A pipeline to serve models with KFServing.

    Note: '${model_identifier}' is a placeholder substituted when this
    TEMPLATE file is rendered by the code generator.
    """
    import ai_pipeline_params as params
    from kfp import components
    from kfp import dsl

    secret_name = 'e2e-creds'
    template_url = 'https://raw.githubusercontent.com/Tomcli/pipelines/35112b844ff3c9cc92a186fcb9abac646271ef02/components/kubeflow/kfserving/component.yaml'
    kfserving_op = components.load_component_from_url(template_url)

    # Step 1: extract the model's serving configuration (custom model spec,
    # deployment name, container port) from the cluster secret.
    model_config = dsl.ContainerOp(
        name='model_config',
        image='tomcli/model-config',
        command=['python'],
        arguments=[
            '-u', 'model-config.py',
            '--secret_name', secret_name,
            '--model_id', model_id
        ],
        file_outputs={
            'default-custom-model-spec': '/tmp/default_custom_model_spec',
            'deployment-name': '/tmp/deployment_name',
            'container-port': '/tmp/container_port'
        }
    )

    # Step 2: apply a KFServing custom InferenceService built from that spec.
    kfserving = kfserving_op(action='apply',
                             model_name=model_id,
                             namespace='mlx', # TODO: use a variable 'namespace' for multi-user deployment
                             framework='custom',
                             default_custom_model_spec=model_config.outputs['default-custom-model-spec']).set_image_pull_policy('Always')
############################################################
#    Compile the pipeline
############################################################
pipeline_function = model_pipeline
pipeline_filename = path.join(gettempdir(),
                              pipeline_function.__name__ + '.tar.gz')
TektonCompiler().compile(pipeline_function, pipeline_filename)

############################################################
#    Run the pipeline
############################################################
# NOTE: '${pipeline_server}' and '${run_name}' are template placeholders;
# this file is not valid Python until the template renderer substitutes them.
client = TektonClient(${pipeline_server})
# Get or create an experiment and submit a pipeline run
experiment = client.create_experiment('MODEL_RUNS')
# Submit the experiment to run in a pipeline
run_name = '${run_name}'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename)
| 2.171875 | 2 |
export_results.py | bergercookie/demo-selfdriving | 0 | 12764600 | <filename>export_results.py
from basic_types import Problem
from utils import print_, print_sec, print_ssec
def export_results(prob: Problem, outfile: str):
    """Export the results to the file specified.

    Writes one line per assignment: the assignment size followed by the
    space-separated ids of its items.
    """
    print_ssec("Exporting results to {}".format(outfile))

    out_lines = []
    for a in prob.assigns:
        out_lines.append("{} {}\n".format(len(a), " ".join(str(i.id) for i in a)))

    # Fix: the original reopened (and truncated) the file on every loop
    # iteration and then called writelines on the already-closed handle
    # after the loop, raising ValueError. Open once and write everything.
    with open(outfile, 'w') as f_out:
        f_out.writelines(out_lines)

    print_("Exported.")
| 2.796875 | 3 |
python/save_cookies.py | ruilisi/weibo-crawler | 3 | 12764601 | <filename>python/save_cookies.py
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
import pickle
import json
driver = webdriver.Firefox()
wait = WebDriverWait(driver, 10)
driver.get('https://www.weibo.com/')
# Locate the login form fields and the submit button.
input_username = wait.until(EC.presence_of_element_located((By.ID, 'loginname')))
input_password = wait.until(EC.presence_of_element_located((By.NAME, 'password')))
login = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="pl_login_form"]/div/div[3]/div[6]/a')))
input_username.clear()
input_password.clear()
# Placeholder credentials -- fill in real values before running.
input_username.send_keys("your username")
sleep(1)
input_password.send_keys("<PASSWORD>")
sleep(1)
login.click()
sleep(15)
# Long pause: allows manual captcha / verification before dumping cookies.
sleep(80)
with open("utils/cookies.txt","w") as f:
    for cookie in driver.get_cookies():
        # NOTE(review): converting the cookie dict to JSON-ish text via
        # str() + replace() is brittle (breaks on values containing quotes);
        # json.dumps would be safer -- left unchanged here.
        f.write(str(cookie).replace("\'","\"").replace("False","false").replace("True","true")+"\n")
driver.quit()
| 2.8125 | 3 |
tests/test_all.py | thilp/ecological | 0 | 12764602 | #!/usr/bin/python3
import typing
import pytest
import ecological
def test_regular_types(monkeypatch):
    """Env vars are parsed into the annotated builtin and typing types."""
    monkeypatch.setenv("INTEGER", "42")
    monkeypatch.setenv("BOOLEAN", "False")
    monkeypatch.setenv("ANY_STR", "AnyStr Example")
    monkeypatch.setenv("TEXT", "Text Example")
    monkeypatch.setenv("DICT", "{'key': 'value'}")
    monkeypatch.setenv("LIST", "[1, 2, 3]")

    class Configuration(ecological.AutoConfig):
        integer: int
        boolean: bool
        any_str: typing.AnyStr
        default: str = "Default Value"
        text: typing.Text
        dict: typing.Dict[str, str]
        list: typing.List[int]

    assert Configuration.integer == 42
    assert Configuration.boolean is False
    assert Configuration.any_str == "AnyStr Example"
    assert Configuration.default == "Default Value"
    assert Configuration.text == "Text Example"
    assert Configuration.dict == {'key': 'value'}
    assert Configuration.list == [1, 2, 3]
def test_nested(monkeypatch):
    """A nested AutoConfig class reads variables with its own prefix."""
    monkeypatch.setenv("INTEGER", "42")
    monkeypatch.setenv("NESTED_BOOLEAN", "False")

    class Configuration(ecological.AutoConfig):
        integer: int

        class Nested(ecological.AutoConfig, prefix='nested'):
            boolean: bool

    assert Configuration.integer == 42
    assert Configuration.Nested.boolean is False
def test_explicit_variable(monkeypatch):
    """Explicit Variable() declarations override the class prefix, apply
    transforms, and honour defaults for missing variables."""
    monkeypatch.setenv("TEST_Integer", "42")

    class Configuration(ecological.AutoConfig, prefix="this_is_going_to_be_ignored"):
        var1a = ecological.Variable("TEST_Integer", transform=lambda v, wt: int(v))
        var1b: str = ecological.Variable("TEST_Integer", transform=lambda v, wt: v * 2)
        var2: bool = ecological.Variable("404", default=False)

    assert Configuration.var1a == 42
    assert Configuration.var1b == "4242"
    assert Configuration.var2 is False
def test_prefix(monkeypatch):
    """The class prefix is prepended (upper-cased) to every variable name."""
    monkeypatch.setenv("PREFIX_INTEGER", "42")
    monkeypatch.setenv("PREFIX_BOOLEAN", "False")
    monkeypatch.setenv("PREFIX_NOT_DEFAULT", "Not Default")

    class Configuration(ecological.AutoConfig, prefix="prefix"):
        integer: int
        boolean: bool
        default: str = "Default"
        not_default: typing.AnyStr

    assert Configuration.integer == 42
    assert Configuration.boolean is False
    assert Configuration.default == "Default"
    assert Configuration.not_default == "Not Default"
def test_invalid_value_regular_type(monkeypatch):
    """A value that cannot be coerced to the annotated type raises ValueError."""
    monkeypatch.setenv("PARAM_REGULAR_TYPE", "not an integer")

    with pytest.raises(ValueError):
        class Configuration(ecological.AutoConfig):
            param_regular_type: int
def test_invalid_value_parsed_type(monkeypatch):
    """A value that fails literal parsing for a container type raises ValueError."""
    monkeypatch.setenv("PARAM_PARSED_TYPE", "not a list")

    with pytest.raises(ValueError):
        class Configuration(ecological.AutoConfig):
            param_parsed_type: list = ['param_1', 'param_2']
def test_no_default():
    """A missing env var with no default raises AttributeError at class creation."""
    with pytest.raises(AttributeError):
        class Configuration(ecological.AutoConfig):
            no_default: int
            bool_var: bool = False
| 2.796875 | 3 |
sdk/python/pulumi_google_native/apigee/v1/get_override.py | AaronFriel/pulumi-google-native | 44 | 12764603 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetOverrideResult',
'AwaitableGetOverrideResult',
'get_override',
'get_override_output',
]
@pulumi.output_type
class GetOverrideResult:
    """Result values returned by the getOverride invoke (generated code)."""
    def __init__(__self__, api_proxy=None, name=None, sampling_config=None):
        # Defensive type checks: the engine supplies these via RPC.
        if api_proxy and not isinstance(api_proxy, str):
            raise TypeError("Expected argument 'api_proxy' to be a str")
        pulumi.set(__self__, "api_proxy", api_proxy)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if sampling_config and not isinstance(sampling_config, dict):
            raise TypeError("Expected argument 'sampling_config' to be a dict")
        pulumi.set(__self__, "sampling_config", sampling_config)

    @property
    @pulumi.getter(name="apiProxy")
    def api_proxy(self) -> str:
        """
        ID of the API proxy that will have its trace configuration overridden.
        """
        return pulumi.get(self, "api_proxy")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        ID of the trace configuration override specified as a system-generated UUID.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="samplingConfig")
    def sampling_config(self) -> 'outputs.GoogleCloudApigeeV1TraceSamplingConfigResponse':
        """
        Trace configuration to override.
        """
        return pulumi.get(self, "sampling_config")
class AwaitableGetOverrideResult(GetOverrideResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Generator-based await shim: yields nothing and immediately returns
        # a plain result, so the invoke result can be awaited or used directly.
        if False:
            yield self
        return GetOverrideResult(
            api_proxy=self.api_proxy,
            name=self.name,
            sampling_config=self.sampling_config)
def get_override(environment_id: Optional[str] = None,
                 organization_id: Optional[str] = None,
                 override_id: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOverrideResult:
    """
    Gets a trace configuration override.
    """
    # Build the RPC argument dict expected by the provider (camelCase keys).
    __args__ = dict()
    __args__['environmentId'] = environment_id
    __args__['organizationId'] = organization_id
    __args__['overrideId'] = override_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the provider plugin version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:apigee/v1:getOverride', __args__, opts=opts, typ=GetOverrideResult).value

    return AwaitableGetOverrideResult(
        api_proxy=__ret__.api_proxy,
        name=__ret__.name,
        sampling_config=__ret__.sampling_config)
@_utilities.lift_output_func(get_override)
def get_override_output(environment_id: Optional[pulumi.Input[str]] = None,
                        organization_id: Optional[pulumi.Input[str]] = None,
                        override_id: Optional[pulumi.Input[str]] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOverrideResult]:
    """
    Gets a trace configuration override.
    """
    # Body intentionally empty: lift_output_func wraps get_override so it
    # accepts pulumi Inputs and returns an Output.
    ...
| 1.664063 | 2 |
miepy/particles/regular_prism.py | johnaparker/MiePy | 3 | 12764604 | <reponame>johnaparker/MiePy
import miepy
from .particle_base import particle
import numpy as np
class regular_prism(particle):
    """A particle shaped as an extruded regular N-gon (regular prism)."""
    def __init__(self, position, N, height, material, width=None, radius=None, orientation=None, tmatrix_lmax=0):
        """A regular prism object (an extruded regular polygon). By convention, the initial orientation is such that
        the bottom edge is parallel to the x-axis.

        Arguments:
            position[3]    x,y,z position of particle
            N              number of vertices
            height         height of prism
            material       particle material (miepy.material object)
            width          width (side length) of prism (specifiy width or radius)
            radius         radius of prism, from center to vertex (specifiy radius or width)
            orientation    particle orientation
        """
        if width is None and radius is None:
            raise ValueError('A width or a radius must be specified')

        super().__init__(position, orientation, material)
        self.N = N
        self.height = height

        # Side/circumradius ratio of a regular N-gon:
        # sqrt(2*(1 - cos(2*pi/N))) == 2*sin(pi/N), so width = radius*factor.
        factor = np.sqrt(2*(1 - np.cos(2*np.pi/N)))
        # If both width and radius are passed, width takes precedence here.
        if width is None:
            self.radius = radius
            self.width = radius*factor
        else:
            self.radius = width/factor
            self.width = width

        self.tmatrix_lmax = tmatrix_lmax

    def __repr__(self):
        return f'''{self.__class__.__name__}:
    position = {self.position} m
    orientation = {self.orientation}
    vertices = {self.N}
    width = {self.width:.2e} m
    height = {self.height:.2e} m
    material = {self.material}'''

    def is_inside(self, pos):
        # TODO: not implemented -- currently returns None for every point.
        pass

    def compute_tmatrix(self, lmax, wavelength, eps_m, **kwargs):
        # Compute with some headroom (lmax+2, or a user-forced tmatrix_lmax),
        # then truncate back down to the requested lmax for accuracy.
        calc_lmax = max(lmax+2, self.tmatrix_lmax)
        # NOTE(review): self.conducting is presumably set by the particle base
        # class -- confirm.
        self.tmatrix_fixed = miepy.tmatrix.tmatrix_regular_prism(self.N, self.width, self.height, wavelength,
                self.material.eps(wavelength), eps_m, calc_lmax, extended_precision=False, conducting=self.conducting)
        if lmax < calc_lmax:
            self.tmatrix_fixed = miepy.tmatrix.tmatrix_reduce_lmax(self.tmatrix_fixed, lmax)
        self._rotate_fixed_tmatrix()

        return self.tmatrix

    def enclosed_radius(self):
        # Radius of the smallest sphere about the center containing the prism.
        return np.sqrt(self.radius**2 + (self.height/2)**2)

    def _dict_key(self, wavelength):
        # Hashable key used to cache/share T-matrices across identical particles.
        return (regular_prism, self.N, self.width, self.height, self.material.eps(wavelength).item(), self.material.mu(wavelength).item())
def cube(position, width, material, orientation=None, tmatrix_lmax=0):
    """Construct a cube: a 4-sided regular prism whose height equals its width.

    Arguments:
        position[3]    x,y,z position of particle
        width          width (side length) of cube
        material       particle material (miepy.material object)
        orientation    particle orientation
    """
    side = width
    return regular_prism(position,
                         N=4,
                         width=side,
                         height=side,
                         material=material,
                         orientation=orientation,
                         tmatrix_lmax=tmatrix_lmax)
| 2.953125 | 3 |
SSH-Honeypot-master/SSH.py | Zusyaku/Termux-And-Lali-Linux-V2 | 2 | 12764605 | <gh_stars>1-10
#Author:D4Vinci
#Squnity Developers
import socket
our_log=open("Attackers_Data.txt","w") #Our text file to save attackers data in it
def ssh(msg="",listeners=2):
welcome="""Welcome to BackBox Linux 4.5 (GNU/Linux 4.2.0-30-generic i686)\n
* Documentation: http://www.backbox.org/\n\n
The programs included with the BackBox/Ubuntu system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.\n
BackBox/Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.\n
"""
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 22))#binding for the ssh port
print "\nSSH Honeypot ready(Waiting For Attackers)..\n"
s.listen(int(listeners))
stat=0
n=0
rqs=["http","HTTP/1.0","GET","bind","version","OPTIONS"]
while True:
n+=1
c,attacker= s.accept()
port=attacker[1]
ip=attacker[0]
c.send("login as: ")
login=c.recv(1024)
c.send(login+"@host's password: ")
a=c.recv(1024)
PROMPT = login+"@host:~$"
c.send(welcome)
ips.append(ip)
our_log.write("\n ["+str(n)+"] IP: "+str(ip)+"\tPort: "+str(port)+"\n")
print "\n ["+str(n)+"] IP: "+str(ip)+"\tPort: "+str(port)+"\n"
c.send(PROMPT)
data = c.recv(1024)
for rq in rqs:
if rq in data.split(" ") or data.split(" ")=="" or data==" " :
our_log.write(" ["+str(ip)+"] is Scanning us With nmap looking for service info.!"+"\n")
print " ["+str(ip)+"] is Scanning us With nmap looking for service info.!"+"\n"
if ip in ips:c.close()
stat=1
break
if data.split(" ")[0] == "id":
our_log.write(" ["+str(ip)+"][!]Command: "+str(data)+"\n")
print " ["+str(ip)+"][!]Command: "+str(data)+"\n"
c.send("\nuid=0(root) gid=0(root) groups=0(root)")
our_log.write(" ["+str(ip)+"]>Output: uid=0(root) gid=0(root) groups=0(root)\n")
print " ["+str(ip)+"]>Output: uid=0(root) gid=0(root) groups=0(root)\n"
c.send(str(msg)+'\n')
stat=1
c.close()
elif data.split(" ")[0] == "uname":
our_log.write(" ["+str(ip)+"]!]Command: "+str(data)+"\n")
print " ["+str(ip)+"][!]Command: "+str(data)+"\n"
c.send("\nLinux f001 3.13.3-7-high-octane-fueled #3000-LPG SMPx4 Fri Jun 31 25:24:23 UTC 2200 x86_64 x64_86 x13_37 GNU/Linux")
our_log.write(" ["+str(ip)+"]>Output: Linux f001 3.13.3-7-high-octane-fueled #3000-LPG SMPx4 Fri Jun 31 25:24:23 UTC 2200 x86_64 x64_86 x13_37 GNU/Linux\n")
print " ["+str(ip)+"]>Output: Linux f001 3.13.3-7-high-octane-fueled #3000-LPG SMPx4 Fri Jun 31 25:24:23 UTC 2200 x86_64 x64_86 x13_37 GNU/Linux\n"
c.send(str(msg)+'\n')
stat=1
c.close()
elif stat==0:
our_log.write("\t[!]Command: "+str(data)+"\n")
print " ["+str(ip)+"][!]Command: "+str(data)+"\n"
c.send("\n"+str(data.split(" ")[0]) + ": command not found")
our_log.write(" ["+str(ip)+"]>Output: "+ data.split(" ")[0] + ": command not found\n")
print " ["+str(ip)+"]>Output: "+ data.split(" ")[0] + ": command not found\n"
c.send(str(msg)+'\n')
c.close()
our_log.write("="*10)
print "="*10
our_log.close()
ssh() | 2.703125 | 3 |
cloudwatchbridge/dt_cw_bridge.py | dlopes7/dynatrace-api | 80 | 12764606 | #
# Script for transferring Dynatrace timeseries into AWS CloudWatch.
#
import requests, datetime, time, sched, subprocess, shlex
# Enter your own environment id and API key token here
YOUR_ENV_ID = 'ENTER_YOUR_ENV_ID_HERE';
YOUR_API_TOKEN = 'ENTER_YOUR_API_TOKEN_HERE';
# Configure a list of monitored components you would like to transfer timeseries for.
# Please mind that the component has to support the requested tye of timeseries and
# that the timeseries also supports the requested aggregation type.
# Find details on metric types within our Dynatrace API help documentation here:
# https://help.dynatrace.com/api-documentation/v1/
CONFIG = [
{'timeseriesId':'com.dynatrace.builtin:appmethod.useractionsperminute', 'aggregation':'COUNT', 'entities':['APPLICATION_METHOD-13A2457ABF20CF35', 'APPLICATION_METHOD-322A1F8DD1984123']},
{'timeseriesId':'com.dynatrace.builtin:host.mem.used', 'aggregation':'AVG', 'entities':['HOST-F5D85B7DCDD8A93C']}
]
scheduler = sched.scheduler(time.time, time.sleep)

def export_metric(name):
    """Pull every configured Dynatrace timeseries (last 5 minutes) and push
    the datapoints to AWS CloudWatch (namespace "Dynatrace") via the aws CLI.

    Re-schedules itself on the module-level scheduler every 360 seconds.
    """
    scheduler.enter(360, 1, export_metric, ('first',))
    for conf in CONFIG:
        print('Pull timeseries ' + conf['timeseriesId'])
        headers = {'Content-Type': 'application/json', 'Authorization': 'Api-Token ' + YOUR_API_TOKEN}
        url = 'https://' + YOUR_ENV_ID + '.live.dynatrace.com/api/v1/timeseries/'
        data = {
            'relativeTime': '5mins',
            'timeseriesId': conf['timeseriesId'],
            'aggregationType': conf['aggregation'],
            'entities': conf['entities']
        }
        r = requests.post(url, json=data, headers=headers)
        if r.status_code == 200:
            j = r.json()
            for entity in conf['entities']:
                for dp in j['result']['dataPoints'][entity]:
                    val = ""
                    print(datetime.datetime.utcfromtimestamp(int(dp[0] / 1000)).isoformat())
                    if str(dp[1]) != 'None':
                        val = str(dp[1])
                    # NOTE(review): when the datapoint value is None, val stays
                    # "" and the CLI call below fails on --value; consider
                    # skipping such datapoints.
                    # NOTE(review): cmd interpolates strings returned by the
                    # Dynatrace API into a command line; shlex.split avoids a
                    # shell, but quotes in entity names could still break the
                    # argument boundaries -- prefer building the argv list
                    # directly.
                    cmd = 'aws cloudwatch put-metric-data --metric-name "' + j['result']['entities'][entity] + ' (' + conf['timeseriesId'] + ')" --namespace "Dynatrace" --value ' + val + ' --timestamp ' + datetime.datetime.utcfromtimestamp(int(dp[0] / 1000)).isoformat()
                    subprocess.call(shlex.split(cmd))
        elif r.status_code == 401:
            print('Dynatrace authentication failed, please check your API token!')
        elif r.status_code == 400:
            print('Wrong timeseriesid, aggregation type or entity combination, please check Dynatrace API help for valid combinations!')
        else:
            # Fix: the original did 'Error ' + r, which raises TypeError
            # (cannot concatenate str and a Response object).
            print('Error ' + str(r))

scheduler.enter(1, 1, export_metric, ('first',))
scheduler.run()
| 2.03125 | 2 |
fedlab_benchmarks/fedmgda+/client.py | KarhouTam/FedLab-benchmarks | 0 | 12764607 | from logging import log
import torch
import argparse
import sys
import os
import tqdm
from copy import deepcopy
import torchvision
from torchvision import transforms
from torch import nn
from fedlab.core.client.manager import PassiveClientManager
from fedlab.core.client.trainer import SGDClientTrainer
from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.core.network import DistNetwork
from fedlab.utils import Logger, SerializationTool
from fedlab.utils.functional import load_dict
from fedlab.utils.dataset import SubsetSampler
from setting import get_model, get_dataloader
class SerialProxTrainer(SubsetSerialTrainer):
    """Serial FedProx trainer: simulates several clients inside one process.

    Identical to SubsetSerialTrainer except that local training adds the
    FedProx proximal term 0.5 * mu * ||w - w_global||^2 to the task loss.
    """

    def __init__(self,
                 model,
                 dataset,
                 data_slices,
                 optimizer,
                 criterion,
                 logger=None,
                 cuda=False,
                 args=None) -> None:
        super().__init__(model, dataset, data_slices, logger, cuda, args)
        self.optimizer = optimizer
        self.criterion = criterion

    @property
    def uplink_package(self):
        # Same payload as the parent class; kept explicit for readability.
        return super().uplink_package

    def _get_dataloader(self, client_id):
        """Build a DataLoader restricted to the given client's sample indices."""
        sampler = SubsetSampler(indices=self.data_slices[client_id],
                                shuffle=True)
        return torch.utils.data.DataLoader(self.dataset,
                                           sampler=sampler,
                                           batch_size=self.args.batch_size)

    def _train_alone(self, model_parameters, train_loader):
        """Run FedProx local training starting from the given global weights."""
        # Frozen snapshot of the global weights used by the proximal penalty.
        global_model = deepcopy(self._model)
        SerializationTool.deserialize_model(global_model, model_parameters)
        SerializationTool.deserialize_model(self._model, model_parameters)
        self._LOGGER.info("Local train procedure is running")
        for _ in range(self.args.epochs):
            self._model.train()
            for inputs, labels in train_loader:
                if self.cuda:
                    inputs, labels = inputs.cuda(self.gpu), labels.cuda(
                        self.gpu)
                outputs = self._model(inputs)
                task_loss = self.criterion(outputs, labels)
                prox_term = 0.0
                for w_global, w_local in zip(global_model.parameters(),
                                             self._model.parameters()):
                    prox_term += torch.sum(torch.pow(w_local - w_global, 2))
                total_loss = task_loss + 0.5 * self.args.mu * prox_term
                self.optimizer.zero_grad()
                total_loss.backward()
                self.optimizer.step()
        self._LOGGER.info("Local train procedure is finished")
        return self.model_parameters
class ProxTrainer(SGDClientTrainer):
    """Single-client FedProx trainer.

    Refer to GitHub implementation https://github.com/WwZzz/easyFL
    """

    def __init__(self,
                 model,
                 data_loader,
                 epochs,
                 optimizer,
                 criterion,
                 cuda=True,
                 logger=Logger(),
                 args=None):
        super().__init__(model,
                         data_loader,
                         epochs,
                         optimizer,
                         criterion,
                         cuda=cuda,
                         logger=logger)
        self.delta_w = None
        self.args = args

    @property
    def uplink_package(self):
        # The server only needs the updated model weights.
        return self.model_parameters

    def local_process(self, payload) -> None:
        """Train locally starting from the downloaded global model (payload[0])."""
        global_weights = payload[0]
        # Frozen copy of the global model used by the proximal penalty.
        reference_model = deepcopy(self._model)
        SerializationTool.deserialize_model(reference_model, global_weights)
        SerializationTool.deserialize_model(self._model, global_weights)
        self._LOGGER.info("Local train procedure is running")
        for _ in range(self.epochs):
            self._model.train()
            for inputs, labels in self._data_loader:
                if self.cuda:
                    inputs, labels = inputs.cuda(self.gpu), labels.cuda(
                        self.gpu)
                outputs = self._model(inputs)
                task_loss = self.criterion(outputs, labels)
                prox_term = 0.0
                for w_ref, w_cur in zip(reference_model.parameters(),
                                        self._model.parameters()):
                    prox_term += torch.sum(torch.pow(w_cur - w_ref, 2))
                loss = task_loss + 0.5 * self.args.mu * prox_term
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
        self._LOGGER.info("Local train procedure is finished")
if __name__ == "__main__":
    # CLI driver for a FedProx client worker: connects to the FL server over
    # torch.distributed and runs either one client (default) or a serial
    # simulation of 10 clients (--scale).
    parser = argparse.ArgumentParser(description="Distbelief training example")
    parser.add_argument("--ip", type=str)
    parser.add_argument("--port", type=str)
    parser.add_argument("--world_size", type=int)
    parser.add_argument("--rank", type=int)
    parser.add_argument("--lr", type=float, default=0.1)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--dataset", type=str, default="mnist")
    parser.add_argument("--batch_size", type=int, default=100)
    parser.add_argument("--mu", type=float, default=0.1)
    # NOTE(review): argparse `type=bool` turns ANY non-empty string into True,
    # so "--scale False" still enables scale mode -- confirm intended usage.
    parser.add_argument("--scale", type=bool, default=False)
    parser.add_argument("--gpu", type=str, default="0,1,2,3")
    parser.add_argument("--ethernet", type=str, default=None)
    args = parser.parse_args()
    # "-1" selects CPU-only mode; anything else restricts visible GPUs.
    if args.gpu != "-1":
        args.cuda = True
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    else:
        args.cuda = False
    model = get_model(args)
    network = DistNetwork(
        address=(args.ip, args.port),
        world_size=args.world_size,
        rank=args.rank,
        ethernet=args.ethernet,
    )
    LOGGER = Logger(log_name="client " + str(args.rank))
    if not args.scale:
        # Single-client mode: one FedProx trainer over the local dataloader.
        trainloader, _ = get_dataloader(args)
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
        criterion = nn.CrossEntropyLoss()
        trainer = ProxTrainer(model,
                              trainloader,
                              epochs=args.epochs,
                              optimizer=optimizer,
                              criterion=criterion,
                              cuda=args.cuda,
                              logger=LOGGER,
                              args=args)
    else:
        # Scale mode: this process simulates clients 10*(rank-1) .. 10*(rank-1)+9
        # over a non-IID MNIST partition loaded from disk.
        data_slices = load_dict("mnist_noniid_200_100.pkl")
        #data_slices = load_dict("mnist_iid_100.pkl")
        client_id_list = [
            i for i in range((args.rank - 1) * 10, (args.rank - 1) * 10 + 10)
        ]
        # get corresponding data partition indices
        sub_data_indices = {
            idx: data_slices[cid]
            for idx, cid in enumerate(client_id_list)
        }
        root = '../datasets/mnist/'
        trainset = torchvision.datasets.MNIST(root=root,
                                              train=True,
                                              download=True,
                                              transform=transforms.ToTensor())
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
        criterion = nn.CrossEntropyLoss()
        trainer = SerialProxTrainer(model,
                                    trainset,
                                    data_slices=sub_data_indices,
                                    optimizer=optimizer,
                                    criterion=criterion,
                                    cuda=args.cuda,
                                    logger=LOGGER,
                                    args=args)
    manager_ = PassiveClientManager(trainer=trainer,
                                    network=network,
                                    logger=LOGGER)
    manager_.run()
| 2.0625 | 2 |
people_counter_app/inference.py | serisaigeetha/Edge_AI | 0 | 12764608 | <filename>people_counter_app/inference.py<gh_stars>0
#!/usr/bin/env python3
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import logging as log
from openvino.inference_engine import IENetwork, IECore
# Default path to the OpenVINO SSE4 CPU extension library (2019.x layout).
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
class Network:
    """
    Load and configure inference plugins for the specified target devices
    and performs synchronous and asynchronous modes for the specified infer requests.
    """

    def __init__(self):
        self.out = None            # last extracted inference output
        self.infer_status = None   # status code of the last wait() call
        self.plugin = None         # IECore instance
        self.net = None            # parsed IR network
        self.exec_network = None   # network loaded onto the target device
        self.input_blob = None     # name of the first input layer
        self.output_blob = None    # name of the first output layer

    def load_model(self, model, CPU_EXTENSION, DEVICE):
        """Load the IR model, add the CPU extension when layers are
        unsupported, and prepare the executable network on DEVICE.

        `model` is the path to the .xml IR file; the matching .bin is derived.
        NOTE: the CPU_EXTENSION parameter shadows the module-level constant.
        """
        self.plugin = IECore()
        model_bin = os.path.splitext(model)[0] + ".bin"
        self.net = IENetwork(model=model, weights=model_bin)
        # BUG FIX: the DEVICE argument was previously ignored; "CPU" was
        # hard-coded for both the layer query and network loading.
        supported_layers = self.plugin.query_network(network=self.net, device_name=DEVICE)
        unsupported_layers = [l for l in self.net.layers.keys() if l not in supported_layers]
        if len(unsupported_layers) != 0:
            # Fall back to the provided CPU extension for unsupported layers.
            self.plugin.add_extension(CPU_EXTENSION, "CPU")
        self.exec_network = self.plugin.load_network(self.net, DEVICE)
        self.input_blob = next(iter(self.net.inputs))
        self.output_blob = next(iter(self.net.outputs))
        return

    def get_input_shape(self):
        """Return the shape of the network's input layer.

        NOTE(review): assumes the input layer is named 'image_tensor'
        (true for the detection models this app uses) -- confirm before
        reusing with other topologies; self.input_blob would be more general.
        """
        return self.net.inputs['image_tensor'].shape

    def exec_net(self, input_shapes):
        """Start an asynchronous inference request (request id 0)."""
        self.exec_network.start_async(request_id=0, inputs=input_shapes)
        return

    def wait(self):
        """Block until request 0 completes; return its status code."""
        self.infer_status = self.exec_network.requests[0].wait(-1)
        return self.infer_status

    def get_output(self):
        """Return the output tensor of the completed request."""
        out = self.exec_network.requests[0].outputs[self.output_blob]
        return out
| 1.5 | 2 |
main.py | imdany/pyconway | 0 | 12764609 | <filename>main.py
# Conway's Game of Life - Python Implementation
# Author: Daniel: <EMAIL>
import pygame
from pygame.locals import *
from cell import Cell
from config import *
import time
from random import choice
# Initialising PyGame
# Module-level setup: window, caption, and the frame-rate clock used by main().
pygame.init()
pygame.display.set_caption("PyConway")
gameDisplay = pygame.display.set_mode((MAX_W, MAX_H))
clock = pygame.time.Clock()
# Initialise the Grid as random
# TODO: Add an easy way of setting up situations
def createCellList():
    """Build the initial grid: GRID_H rows of GRID_W cells, each randomly
    alive (1) or dead (0)."""
    return [
        [Cell(gameDisplay, col, row, choice([0, 1])) for col in range(GRID_W)]
        for row in range(GRID_H)
    ]
# Logic for updating the board on each generation
def update(cellList):
    """Compute the next generation of the board.

    BUG FIX: replacement cells were created as Cell(display, r, c, ...)
    while createCellList builds Cell(display, col, row, ...); the row and
    column arguments were transposed every generation. Pass (c, r) so the
    new cell keeps the same (x, y) convention.
    """
    newList = []
    for r, row in enumerate(cellList):
        newList.append([])
        for c, cell in enumerate(row):
            newList[r].append(Cell(gameDisplay, c, r, cell.checkNeighbors(cellList)))
    return newList
def main():
    """Run the game loop until the player presses Enter."""
    cellList = createCellList()
    generation = 0
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN and event.key == K_RETURN:
                running = False
        # Clear the previous frame.
        gameDisplay.fill(BLACK)
        # Advance the simulation one generation.
        cellList = update(cellList)
        # Draw every cell of the new grid.
        for row in cellList:
            for cell in row:
                cell.display()
        generation += 1
        clock.tick(3)  # cap at three frames per second
        pygame.display.flip()
        print(f"Generation: {generation}")
    pygame.quit()
# Entry point: launch the game only when run as a script.
if __name__ == "__main__":
    main()
| 3.9375 | 4 |
roi_extractor.py | IstoVisio/script_roi_extractor | 0 | 12764610 | <reponame>IstoVisio/script_roi_extractor
import os
import sys
import syglass as sy
from syglass import pyglass
import numpy as np
import tifffile
import subprocess
import tkinter as tk
# Module-level holder for the ROI number typed into the Tk dialog
# (written by get_roi_number, read by main).
returnString = ""
def extract_roi(projectPath, roi_index):
    """Export one ROI's raw volume and integer label mask as TIFF files.

    Files are written next to the project file; the containing folder is
    then revealed in Windows Explorer (Windows-only).
    """
    project = sy.get_project(projectPath)
    head, tail = os.path.split(projectPath)
    # Write outputs alongside the project file.
    os.chdir(os.path.dirname(projectPath))
    roi_block = project.get_roi_data(int(roi_index))
    roi_mask = project.get_mask(int(roi_index))
    print(roi_mask.data.dtype)
    if roi_block.data.shape == (0, 0, 0, 1):
        # An empty block means no ROI exists with that index.
        print("No ROI detected for that number, please double check the ROI number!")
    else:
        tifffile.imsave(projectPath[:-4] + "_ROI_" + str(roi_index) + "_rawData.tiff", roi_block.data)
        # BUG FIX: the label file name contained a stray '.'
        # ("._integerLabels"), inconsistent with the raw-data file name.
        tifffile.imsave(projectPath[:-4] + "_ROI_" + str(roi_index) + "_integerLabels.tiff", roi_mask.data)
        subprocess.run(['explorer', head])
def get_roi_number():
    """Show a small Tk dialog asking for an ROI number; store the entered
    text in the module-level `returnString`."""
    root = tk.Tk()
    entry_value = tk.StringVar()

    def on_extract():
        global returnString
        returnString = entry_value.get()
        root.destroy()

    tk.Label(root, text="ROI #").grid(row=0)                               # label
    tk.Entry(root, textvariable=entry_value).grid(row=0, column=1)         # entry textbox
    # NOTE(review): .grid() returns None, so this also stores None on the
    # tk module -- preserved from the original code.
    tk.WSignUp = tk.Button(root, text="Extract", command=on_extract).grid(row=3, column=0)
    root.mainloop()
def main():
    # NOTE(review): this script is intended to run from the syGlass Script
    # Launcher, which appears to pass the highlighted project path(s) via
    # sys.argv (argv[0] = project path) -- confirm against syGlass docs.
    print("ROI Extractor, by <NAME>")
    print("Attempts to extract a specific ROI volume from a syGlass project")
    print("and write it to a series of TIFF files")
    print("---------------------------------------")
    print("Usage: Highlight a project and use the Script Launcher in syGlass.")
    print("---------------------------------------")
    doExtract = True
    # An empty argv[0] means no project was highlighted.
    if len(sys.argv[0]) < 1:
        print("Highlight a project before running to select a project!")
        doExtract = False
    # More than one argv entry means multiple projects were selected.
    if len(sys.argv) > 1:
        print("This script only supports 1 project at a time, please select only one project before running.")
        doExtract = False
    if doExtract:
        syGlassProjectPath = sys.argv[0]
        get_roi_number()
        global returnString
        print("Extracting ROI " + str(returnString) + " from: " + syGlassProjectPath)
        extract_roi(syGlassProjectPath, returnString)
if __name__== "__main__":
main() | 2.8125 | 3 |
FeatureVectorGeneration/Kmeans.py | raja21068/Android_Malware_Detection | 2 | 12764611 | import re
import numpy as np
#numerical operation
import matplotlib.pyplot as plt
#matploit provides functions that draws graphs or etc.
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
import array
import numpy as np
def findminmax(dirname, filename):
    """Scan a file of integer feature tuples "(v1,v2,...)" and record ranges.

    Writes "Noofinstance_minmax_<filename>" in the same directory:
      line 1: "<number of instances> <number of features>"
      line 2: space-separated per-feature minima
      line 3: space-separated per-feature maxima (no trailing newline)

    Improvements over the original character-by-character parser:
    - tuples are extracted with a regular expression in one pass;
    - min/max are initialised from the data itself instead of the +/-9999
      sentinels, so values outside that range are handled correctly.
    """
    print('findminmax')
    with open(dirname + filename, 'r') as mf:
        text = mf.read()

    num_instances = 0
    num_features = 0
    minlist = []
    maxlist = []
    for group in re.findall(r'\(([^)]*)\)', text):
        values = [int(tok) for tok in group.split(',')]
        num_instances += 1
        num_features = len(values)
        if not minlist:
            # First instance seeds both running extrema.
            minlist = list(values)
            maxlist = list(values)
        else:
            for i, v in enumerate(values):
                if v < minlist[i]:
                    minlist[i] = v
                if v > maxlist[i]:
                    maxlist[i] = v

    with open(dirname + "Noofinstance_minmax_" + filename, 'w') as fminmax:
        fminmax.write(str(num_instances))
        fminmax.write(' ')
        fminmax.write(str(num_features))
        fminmax.write('\n')
        for minv in minlist:
            fminmax.write(str(minv))
            fminmax.write(' ')
        fminmax.write('\n')
        for maxv in maxlist:
            fminmax.write(str(maxv))
            fminmax.write(' ')
def convertToNormVals(dirname, filename):
    """Min-max normalise each feature tuple in `filename` using the ranges
    previously written by findminmax, producing "Norm_<filename>".

    Degenerate features (min == max, including the all-zero case) are
    written as the integer 0, matching the historical output format.
    """
    print('convertToNormVals')
    with open(dirname + "Noofinstance_minmax_" + filename, 'r') as fminmax:
        lines = fminmax.readlines()
    minlist = [float(s) for s in lines[1].split()]
    maxlist = [float(s) for s in lines[2].split()]

    with open(dirname + filename, 'r') as mf:
        text = mf.read()

    with open(dirname + "Norm_" + filename, 'w') as fnorm:
        for group in re.findall(r'\(([^)]*)\)', text):
            values = [float(tok) for tok in group.split(',')]
            for i in range(len(values)):
                if minlist[i] > maxlist[i]:
                    # Corrupt min/max file: bail out like the original did.
                    exit()
                if minlist[i] == maxlist[i]:
                    values[i] = 0  # degenerate feature
                else:
                    values[i] = float((values[i] - minlist[i]) / (maxlist[i] - minlist[i]))
            for v in values:
                fnorm.write(str(v))
                fnorm.write(' ')
            fnorm.write('\n')
def convertToNTemplate(dirname, filename):
    """Rewrite the feature-tuple file as whitespace-separated rows
    ("NewTemp_<filename>"), dropping all-zero instances.

    Returns (number of non-zero instances written, number of features).

    BUG FIX: the original counted only non-zero instances but still wrote
    every instance (including all-zero rows) to the output file, so
    readNormInstances -- which sizes its array from the returned count --
    overflowed whenever an all-zero instance existed. All-zero instances
    are now skipped when writing, keeping file and count consistent.
    """
    print('convertToTemplate')
    with open(dirname + filename, 'r') as mf:
        text = mf.read()

    noinstances = 0
    nofeattype = 0
    with open(dirname + "NewTemp_" + filename, 'w') as f:
        for group in re.findall(r'\(([^)]*)\)', text):
            values = [float(tok) for tok in group.split(',')]
            nofeattype = len(values)
            if all(v == 0.0 for v in values):
                continue  # skip all-zero instances
            noinstances += 1
            for v in values:
                f.write(str(v))
                f.write(' ')
            f.write('\n')
    return noinstances, nofeattype
def readNormInstances(dirname, filename, numberofinsatnces, numoffeattype):
    """Load the whitespace-separated instance file into a float64 matrix of
    shape (numberofinsatnces, numoffeattype)."""
    print('readNormInstances')
    with open(dirname + filename, 'r') as f:
        tokens = f.read().split()
    values = np.asarray([float(t) for t in tokens], dtype='float64')
    return np.reshape(values, (numberofinsatnces, numoffeattype))
def divideIntoTwoSets(TotalInstances, numoffeattypeA, numoffeattypeB):
    """Split the feature matrix column-wise: the first A columns and the
    following B columns (any remaining columns are discarded)."""
    left = TotalInstances[:, :numoffeattypeA]
    right = TotalInstances[:, numoffeattypeA:numoffeattypeA + numoffeattypeB]
    return left, right
def minikmeanGo(TotalInstances, dirname, filename, nocluster):
    """Cluster the instances with MiniBatchKMeans and write the cluster
    centres (one space-separated row per centre) to dirname+filename."""
    np.random.seed(5)
    clusterer = MiniBatchKMeans(n_clusters=nocluster)
    print(clusterer)
    clusterer.fit(TotalInstances)
    print('fitting done')
    with open(dirname + filename, 'w') as resultF:
        for centroid in clusterer.cluster_centers_:
            for component in centroid:
                resultF.write(str(component) + ' ')
            resultF.write('\n')
def KmeanGo(TotalInstances, dirname, filename, nocluster):
    """Cluster the instances with full-batch KMeans (5 parallel jobs) and
    write the cluster centres (one space-separated row per centre) to
    dirname+filename."""
    np.random.seed(5)
    clusterer = KMeans(n_clusters=nocluster, n_jobs=5)
    print(clusterer)
    clusterer.fit(TotalInstances)
    print('fitting done')
    with open(dirname + filename, 'w') as resultF:
        for centroid in clusterer.cluster_centers_:
            for component in centroid:
                resultF.write(str(component) + ' ')
            resultF.write('\n')
# Script body: builds codebooks for the smali METHOD features.
# (Earlier runs over the 40000TotalSets data and the FUNCTIONS pipeline are
# kept below, commented out / disabled, for reference.)
#findminmax('./40000TotalSets/','Funcs.txt')
#findminmax('./40000TotalSets/','Methods.txt')
#noinstances, nofeattype = convertToNTemplate('./40000TotalSets/','Funcs.txt')
#t = readNormInstances('./40000TotalSets/', 'NewTemp_Funcs.txt', noinstances, nofeattype)
#minikmeanGo(t, './40000TotalSets/', 'F13_FUNCTIONS_so.txt')
"""
noinstances, nofeattype = convertToNTemplate('./','Funcs.txt')
t = readNormInstances('./', 'NewTemp_Funcs.txt', noinstances, nofeattype)
ta, tb = divideIntoTwoSets(t, 1321, 555)
minikmeanGo(tb, './', 'F13_FUNCTIONS_so_SYS.txt', 200)
minikmeanGo(ta, './', 'F13_FUNCTIONS_so_OP.txt', 2500)
"""
# Active pipeline: filter Methods.txt, normalise-free template, split into
# opcode (217 cols) and API (238 cols) features, then cluster each.
noinstances, nofeattype = convertToNTemplate('./','Methods.txt')
t = readNormInstances('./', 'NewTemp_Methods.txt', noinstances, nofeattype)
ta, tb = divideIntoTwoSets(t, 217, 238)
KmeanGo(tb, './', 'F12_METHOD_smali_API.txt', 1000)
KmeanGo(ta, './', 'F12_METHOD_smali_OP.txt', 5000)
| 2.90625 | 3 |
example_configs/custom_export_formats/custom_exporters.py | stuartcampbell/tiled | 1 | 12764612 | # This file will be (temporarily) included in the Python sys.path
# when config.yml is loaded by the Tiled server.
import io
from PIL import Image
from tiled.structures.image_serializer_helpers import img_as_ubyte
def smiley_separated_variables(array, metadata):
    """Render a 2-D array as text: 🙂 between values, newline between rows.

    `metadata` is accepted for exporter-interface compatibility and ignored.
    """
    rows = ["🙂".join(map(str, row)) for row in array]
    return "\n".join(rows)
def to_jpeg(array, metadata):
    """Encode an image array as JPEG bytes.

    `metadata` is accepted for exporter-interface compatibility and ignored.
    """
    # PIL requires a compatible (uint8) data type, so convert first.
    pil_image = Image.fromarray(img_as_ubyte(array))
    buffer = io.BytesIO()
    pil_image.save(buffer, format="jpeg")
    return buffer.getbuffer()
| 2.6875 | 3 |
scripts/mixPpcaDemo.py | VaibhaviMishra04/pyprobml | 2 | 12764613 | # Author: <NAME>
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import logsumexp
'''
z = Wx + µ + E
the equation above represents the latent variable model which
relates a d-dimensional data vector z to a corresponding q-dimensional
latent variables x
with q < d, for isotropic noise E ∼ N (0, σ2I)
z : latent
x : data
W : latent_to_observation matrix
µ : centres_of_clusters
E : var_of_latent
This code is an implementation of generative model of mixture of PPCA
Given the number of clusters, data_dim(D) and latent_dim(L)
we generate the data for every cluster n,
we sample zn from a Gaussian prior and pass it through the
Wk matrix and add noise, where Wk maps from the L-dimensional subspace to the D-dimensional
visible space. Using the expectation maximization algorithm we estimate the parameters
and then we plot the PC vectors
'''
def mixture_ppca_parameter_initialization(data, n_clusters, latent_dim,
                                          n_iterations):
    """
    The k-means algorithm is used to determine the centres. The
    priors are computed from the proportion of examples belonging to each
    cluster. The covariance matrices are calculated as the sample
    covariance of the points associated with (i.e. closest to) the
    corresponding centres. For a mixture of PPCA model, the PPCA
    decomposition is calculated for the points closest to a given centre.
    This initialisation can be used as the starting point for training
    the model using the EM algorithm.
    W : latent_to_observation matrix
    µ/mu : centres_of_clusters
    pi : proportion of data in each cluster
    sigma2 : variance of latent
    covars : covariance of the points associated with (i.e. closest to) the
    corresponding centres
    """
    n_datapts, data_dim = data.shape
    # initialization of the centres of clusters
    init_centers = np.random.randint(0, n_datapts, n_clusters)
    # Randomly choose distinct initial centres for the clusters
    # (re-draw until all n_clusters indices are unique).
    while (len(np.unique(init_centers)) != n_clusters):
        init_centers = np.random.randint(0, n_datapts, n_clusters)
    # Fancy indexing copies the rows, so the centres can be updated in place.
    mu = data[init_centers, :]
    distance_square = np.zeros((n_datapts, n_clusters))
    clusters = np.zeros(n_datapts, dtype=np.int32)
    # Running iterations for K means algorithm to assign centres for clusters
    for k in range(n_iterations):
        # assign clusters
        for c in range(n_clusters):
            distance_square[:, c] = np.power(data - mu[c, :], 2).sum(1)
        clusters = np.argmin(distance_square, axis=1)
        # compute distortion
        # distmin[n] = squared distance of point n to its assigned centre;
        # reused below to estimate the per-cluster noise variance sigma2.
        distmin = distance_square[range(n_datapts), clusters]
        # compute new centers
        for c in range(n_clusters):
            mu[c, :] = data[clusters == c, :].mean(0)
    # parameter initialization
    pi = np.zeros(n_clusters)  # Sum should be equal to 1
    W = np.zeros((n_clusters, data_dim, latent_dim))
    sigma2 = np.zeros(n_clusters)
    for c in range(n_clusters):
        W[c, :, :] = np.random.randn(data_dim, latent_dim)
        pi[c] = (clusters == c).sum() / n_datapts
        sigma2[c] = (distmin[clusters == c]).mean() / data_dim
    covars = np.zeros(n_clusters)
    # NOTE: assumes 2-D data -- only columns 0 and 1 enter the covariance
    # estimate used for plotting.
    for i in range(n_clusters):
        covars[i] = (np.var(data[clusters == i, 0]) +
                     np.var(data[clusters == i, 1])) / 2
    return pi, mu, W, sigma2, covars, clusters
def mixture_ppca_expectation_maximization(data, pi, mu, W, sigma2, niter):
    '''
    we can find the p(latent|data) with the assumption that data is gaussian
    z : latent
    x : data
    W : latent_to_observation matrix
    µ/mu : centres_of_clusters
    d : data_dimension
    q : latent_dimention
    σ2/ sigma2 : variance of latent
    π/pi : cluster proportion
    p(z|x) = (2πσ2)^−d/2 * exp(−1/(2σ2) * ||z − Wx − µ||)
    p(z) = ∫p(z|x)p(x)dx
    Solving for p(z) and then using the result we can find the p(x|z)
    through which we can find
    the log likelihood function which is
    log_likelihood = −N/2 * (d ln(2π) + ln |Σ| + tr(Σ−1S))
    We can develop an iterative EM algorithm for
    optimisation of all of the model parameters µ,W and σ2
    If Rn,i = p(zn, i) is the posterior responsibility of
    mixture i for generating data point zn,given by
    Rn,i = (p(zn|i) * πi) / p(zn)
    Using EM, the parameter estimates are as follows:
    µi = Σ (Rn,i * zn) / Σ Rn,i
    Si = 1/(πi*N) * ΣRn,i*(zn − µi)*(zn − µi)'
    Using Si we can estimate W and σ2
    For more information on EM algorithm for mixture of PPCA
    visit Mixtures of Probabilistic Principal Component Analysers
    by <NAME> and <NAME>:
    page 5-10 of http://www.miketipping.com/papers/met-mppca.pdf
    '''
    n_datapts, data_dim = data.shape
    n_clusters = len(sigma2)
    _, latent_dim = W[0].shape
    M = np.zeros((n_clusters, latent_dim, latent_dim))
    Minv = np.zeros((n_clusters, latent_dim, latent_dim))
    Cinv = np.zeros((n_clusters, data_dim, data_dim))
    logR = np.zeros((n_datapts, n_clusters))
    R = np.zeros((n_datapts, n_clusters))
    # Redundant re-zeroing (np.zeros already initialises); kept as-is.
    M[:] = 0.
    Minv[:] = 0.
    Cinv[:] = 0.
    log_likelihood = np.zeros(niter)
    for i in range(niter):
        print('.', end='')
        # E-step: per-cluster precision matrices and log-responsibilities.
        for c in range(n_clusters):
            # M
            '''
            M = σ2I + WT.W
            '''
            M[c, :, :] = sigma2[c] * np.eye(latent_dim) + np.dot(W[c, :, :].T, W[c, :, :])
            Minv[c, :, :] = np.linalg.inv(M[c, :, :])
            # Cinv
            # Inverse data covariance via the Woodbury identity.
            Cinv[c, :, :] = (np.eye(data_dim)
                             - np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
                             ) / sigma2[c]
            # R_ni
            deviation_from_center = data - mu[c, :]
            logR[:, c] = (np.log(pi[c])
                          + 0.5 * np.log(
                              np.linalg.det(
                                  np.eye(data_dim) - np.dot(np.dot(W[c, :, :],
                                                                   Minv[c, :, :]), W[c, :, :].T)
                              )
                          )
                          - 0.5 * data_dim * np.log(sigma2[c])
                          - 0.5 * (deviation_from_center * np.dot(deviation_from_center,
                                                                  Cinv[c, :, :].T)).sum(1)
                          )
        '''
        Using the log-sum-trick, visit Section 2.5.4 in "Probabilistic Machine Learning: An Introduction" by <NAME> for more information
        logsumexp(logR - myMax, axis=1) can be replaced by logsumexp(logR, axis=1)
        myMax + logsumexp((logR - myMax), axis=0) can be replaced by logsumexp(logR, axis=0)
        myMax in the above equations refer to
        myMax = logR.max(axis=0) & myMax = logR.max(axis=1).reshape((n_datapts, 1))
        '''
        log_likelihood[i] = (
            (logsumexp(logR, axis=1)).sum(axis=0)
            - n_datapts * data_dim * np.log(2 * math.pi) / 2.
        )
        # Normalise the responsibilities in log space.
        logR = logR - np.reshape(logsumexp(logR, axis=1),
                                 (n_datapts, 1))
        logpi = logsumexp(logR, axis=0) - np.log(n_datapts)
        logpi = logpi.T
        pi = np.exp(logpi)
        R = np.exp(logR)
        # M-step: update mu, W, sigma2 for each mixture component.
        for c in range(n_clusters):
            mu[c, :] = (R[:, c].reshape((n_datapts, 1)) * data).sum(axis=0) / R[:, c].sum()
            deviation_from_center = data - mu[c, :].reshape((1, data_dim))
            '''
            Si = 1/(πi*N) * ΣRn,i*(zn − µi)*(zn − µi)'
            Si is used to estimate
            '''
            Si = ((1 / (pi[c] * n_datapts))
                  * np.dot((R[:, c].reshape((n_datapts, 1)) * deviation_from_center).T,
                           np.dot(deviation_from_center, W[c, :, :]))
                  )
            Wnew = np.dot(Si, np.linalg.inv(sigma2[c] * np.eye(latent_dim)
                                            + np.dot(np.dot(Minv[c, :, :], W[c, :, :].T), Si)))
            sigma2[c] = (1 / data_dim) * (
                (R[:, c].reshape(n_datapts, 1) * np.power(deviation_from_center, 2)).sum()
                /
                (n_datapts * pi[c])
                -
                np.trace(np.dot(np.dot(Si, Minv[c, :, :]), Wnew.T))
            )
            W[c, :, :] = Wnew
    return pi, mu, W, sigma2, log_likelihood
def generate_data():
    """Sample 500 points uniformly-in-parameters from an annulus with radius
    in [1, 2) centred at the origin; returns an (n, 2) array."""
    count = 500
    radius = np.random.rand(1, count) + 1
    angle = np.random.rand(1, count) * (2 * math.pi)
    coords = np.vstack((radius * np.sin(angle), radius * np.cos(angle)))
    return np.transpose(coords)
def mixppcademo(data, n_clusters):
    '''
    W : latent to observation matrix
    mu : centres_of_clusters
    pi : proportions of data in each of the cluster
    sigma2 : variance of latent
    L : log likelihood after each iteration
    covars : covariance of the points associated with (i.e. closest to) the
    corresponding centres
    '''
    # Scatter the raw 2-D data as open blue circles.
    plt.plot(data[:, 0], data[:, 1], 'o', c='blue', mfc='none')
    pi, mu, W, sigma2, covars, clusters = mixture_ppca_parameter_initialization(
        data, n_clusters, latent_dim=1, n_iterations=10)
    pi, mu, W, sigma2, L = mixture_ppca_expectation_maximization(data, pi, mu,
                                                                 W, sigma2, 10)
    for i in range(n_clusters):
        # v is the (2, 1) principal-axis vector of cluster i.
        v = W[i, :, :]
        #Plotting the pc vectors using 2 standard deviations
        start = mu[i].reshape((2, 1)) - (v * 2 * np.sqrt(sigma2[i]))
        endpt = mu[i].reshape((2, 1)) + (v * 2 * np.sqrt(sigma2[i]))
        linex = [start[0], endpt[0]]
        liney = [start[1], endpt[1]]
        plt.plot(linex, liney, linewidth=3, c='black')
        theta = np.arange(0, 2 * math.pi, 0.02)
        #Plotting the confidence interval ellipse using 2 standard deviations
        x = 2 * np.sqrt(sigma2[i]) * np.cos(theta)
        y = np.sqrt(covars[i]) * np.sin(theta)
        # Rotation matrix aligning the ellipse with the principal axis v.
        rot_matrix = np.vstack((np.hstack((v[0], -v[1])), np.hstack((v[1], v[0]))))
        ellipse = np.dot(rot_matrix, np.vstack((x, y)))
        ellipse = np.transpose(ellipse)
        # Translate the ellipse to the cluster centre.
        ellipse = ellipse + np.dot(np.ones((len(theta), 1)), mu[i, :].reshape((1, 2)))
        plt.plot(ellipse[:, 0], ellipse[:, 1], c='crimson')
def main():
    """Run the demo twice (1 cluster, then 10 clusters) and save both figures."""
    np.random.seed(61)
    data = generate_data()
    plt.figure(0)
    mixppcademo(data, n_clusters=1)
    plt.savefig("mixppca_k-1.png", dpi=300)
    np.random.seed(7)
    data = generate_data()
    plt.figure(1)
    mixppcademo(data, n_clusters=10)
    plt.savefig("mixppca_k-10.png", dpi=300)
    plt.show()


if __name__ == "__main__":
    main()
| 3.15625 | 3 |
dino/genetic.py | mateusnbm/chrome-dinosaur | 0 | 12764614 | <filename>dino/genetic.py
#
# genetic.py
#
import copy
import random
'''
'''
class Genome():
    """A candidate solution: a list of weights in [-1, 1] plus a short
    history of recent fitness scores (most recent last)."""

    def __init__(self, length):
        self.fitnesses = [0]
        self.genes = [random.uniform(-1, 1) for _ in range(length)]

    def __str__(self):
        """Comma-separated genes followed by the latest fitness."""
        parts = [str(gene) + ", " for gene in self.genes]
        return "".join(parts) + str(self.fitnesses[-1])

    def add_fitness(self, fitness):
        """Record a fitness score, keeping only the five most recent."""
        self.fitnesses.append(fitness)
        if len(self.fitnesses) > 5:
            self.fitnesses = self.fitnesses[-5:]

    def get_fitness(self):
        """Return the latest recorded fitness."""
        return self.fitnesses[-1]
        #return sum(self.fitnesses)/len(self.fitnesses)
        #return max(self.fitnesses)

    def recombine(self, mate, probability):
        """Single-point crossover with `mate`, returning two new genomes.

        NOTE(review): crossover fires when randint(1, 100) EXCEEDS
        probability * 100, i.e. with chance roughly 1 - probability --
        confirm this inverted convention is intentional.
        """
        roll = random.randint(1, 100)
        threshold = probability * 100
        child_a = copy.deepcopy(self)
        child_b = copy.deepcopy(mate)
        if roll > threshold:
            cut = random.randint(0, len(self.genes) - 1)
            a_head, a_tail = child_a.genes[:cut], child_a.genes[cut:]
            b_head, b_tail = child_b.genes[:cut], child_b.genes[cut:]
            child_a.fitnesses = [0]
            child_a.genes = a_head + b_tail
            child_b.fitnesses = [0]
            child_b.genes = b_head + a_tail
        return child_a, child_b

    def mutate(self, probability):
        """Perturb every gene by up to +/-50% of its own value.

        NOTE(review): like recombine, mutation fires when the roll EXCEEDS
        probability * 100.
        """
        roll = random.randint(1, 100)
        if roll > probability * 100:
            for i in range(len(self.genes)):
                self.genes[i] += self.genes[i] * random.uniform(-0.5, 0.5)
| 3.140625 | 3 |
apps/home.py | eri3l/skinks | 0 | 12764615 | import streamlit as st
def app():
    """Render the home page: overview of the skink search tool plus help text
    for the Toes and Search pages."""
    st.write("## Welcome to the Skink Search Tool app")
    st.write("""
             The app filters existing skink data by multiple criteria in order to help with the identification of skinks.
             Latest data update: 10 Apr 2020. \n
             Use the navigation bar to select the type of search you would like to perform.
             """)
    # Help for the "Toes" search mode.
    st.markdown("### Toes")
    st.write("Use this option to search by missing toes only")
    with st.beta_expander("More information"):
        st.markdown("""
        - This search filters by all possible combinations of **`missing toes`** and excludes other missing toes. \n
        Example:
        > `selected toes` = [LF1, LF2] \n
        > Results: \n
        > The search returns all skinks where [LF1], [LF2], [LF1, LF2] or [none] toes are missing.
        """)
    # Help for the multi-criteria "Search" mode.
    st.markdown("### Search")
    st.write("Use this option to search by multiple criteria:")
    st.markdown("""
    - SVL (snout to vent length) (mm) \n
    Existing skinks above 70mm are classified as adults and labelled with `projected_SVL`=100
    """)
    with st.beta_expander("More information"):
        st.markdown("""
        The search considers matches within 5 mm of the selected length. All skinks above 70 mm (`@adult`) are classified as adults.
        In finding matches, it is assumed that skinks grow by **10** mm per year (`@delta`) and reach adult size at **70** mm (`@adult`).
        Search is performed on a calculated variable, `projected_SVL`:
        ```python
        projected_SVL= skink_SVL + delta*(current_year – skink_Year)
        ```
        """)
    st.markdown("""
    - Paddock/traplist \n
    Each paddock contains multiple traps, click below to view the full list of traps
    """)
    with st.beta_expander("See traps"):
        st.markdown("""
        | Paddock | Traps |
        | ------ | ------ |
        | pdk_R66 | ['R66', 'board', 'R67', 'M14', 'R68', 'R69', 'R70', 'M11', 'PR1'] |
        | pdk_R71 | ['R71', 'PR2', 'R72', 'M9', 'P3', 'PR3', 'R73', 'M8', 'PR4', 'R74', 'M7', 'PR5', 'R75', 'PR6', 'R76', 'M5', 'PR7'] |
        | pdk_R77 | ['R2', 'PR13', 'R3', 'PR14', 'R4', 'PR15', 'P16', 'PR16', 'R6', 'PR17'] |
        | pdk_R02 | ['W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'W7', 'W8', 'W9', 'W10', 'W11', 'W12', 'W13'] |
        | ... | ... |
        """)
    st.markdown("""
    - Toes \n
    Search by intact or missing toes.
    """)
    # Decorative photo at the bottom of the page.
    image = 'data/P1060519.jpg'
    st.image(image, caption='El pretty skinko', use_column_width = True)
# Module-level: sidebar "About" box rendered on every page.
with st.sidebar.beta_expander("About"):
    st.markdown(''' Copyright © 2021 <NAME>.
    This app is open source. You can find it on [GitHub](https://github.com/eri3l/skinks) ''')
| 3.703125 | 4 |
ci/infra/testrunner/utils/logger.py | Klaven/skuba | 0 | 12764616 | <gh_stars>0
import logging
import os
class Logger:
    """Thin wrapper that configures the shared "testrunner" logger."""

    def __init__(self, conf):
        # Configuration is applied via the static config_logger(); nothing
        # to do at construction time.
        pass

    @staticmethod
    def config_logger(conf, level=None):
        """Configure the "testrunner" logger from `conf`.

        Adds a file handler when conf.log.file is set (truncating the file
        when conf.log.overwrite is true, appending otherwise) and a console
        handler unless conf.log.quiet. `level` overrides conf.log.level for
        the console handler only; the logger itself stays at DEBUG.
        """
        logger = logging.getLogger("testrunner")
        logger.setLevel(logging.getLevelName("DEBUG"))

        if conf.log.file:
            mode = 'a'
            if conf.log.overwrite:
                mode = 'w'
            log_file = os.path.join(conf.workspace, conf.log.file)
            # BUG FIX: `mode` was computed but never passed to FileHandler,
            # so conf.log.overwrite had no effect (files were always appended).
            file_handler = logging.FileHandler(log_file, mode=mode)
            logger.addHandler(file_handler)

        if not conf.log.quiet:
            if not level:
                level = conf.log.level
            console = logging.StreamHandler()
            console.setLevel(logging.getLevelName(level.upper()))
            logger.addHandler(console)
| 2.703125 | 3 |
Assignment/wordCalculator.py | TungTNg/itc110_python | 0 | 12764617 | <filename>Assignment/wordCalculator.py
# wordCalculator.py
# A program to calculate the number of words in a sentence
# by <NAME>
def main():
    """Prompt for a sentence and report how many words it contains."""
    # Announce what the program does.
    print("This program calculates the number of words in a sentence.")
    print()

    # Read the sentence from the user.
    phrase = input("Enter a phrase: ")

    # str.split() with no arguments splits on any whitespace run,
    # so the word count is just the length of the resulting list.
    word_count = len(phrase.split())

    # Report the result.
    print()
    print("Number of words:", word_count)

main()
foundation/users/tests/factories.py | pilnujemy/foundation-manager | 1 | 12764618 | <gh_stars>1-10
from __future__ import absolute_import
from .. import models
import factory
class UserFactory(factory.django.DjangoModelFactory):
    # factory_boy fixture factory for ``models.User``.
    # Usernames are generated as "user-0", "user-1", ...
    username = factory.Sequence('user-{0}'.format)
    # NOTE(review): this line was corrupted by source anonymization
    # ('<EMAIL>' placeholders) and is not valid Python as written —
    # restore the original e-mail sequence expression before use.
    email = factory.Sequence('<EMAIL>'.<EMAIL>)
    # Hashes the fixed password 'password' after instance creation.
    password = factory.PostGenerationMethodCall('set_password', 'password')

    class Meta:
        model = models.User
        # Reuse an existing row with the same username instead of
        # inserting a duplicate.
        django_get_or_create = ('username', )
| 2.109375 | 2 |
converters/COCOVisualize.py | mateoKutnjak/yolact | 0 | 12764619 | import json
import sys
import imageio
import matplotlib.pyplot as plt
import cv2
import random
def search_images_by_id(_id):
    """Return the COCO image record whose 'id' equals *_id*, or None."""
    return next((img for img in valid['images'] if img['id'] == _id), None)
def search_categories_by_id(_id):
    """Return the COCO category record whose 'id' equals *_id*, or None."""
    return next((cat for cat in valid['categories'] if cat['id'] == _id), None)
def plot_polygon(mask, polygons):
    """Show *mask* with each polygon's vertices scattered on top.

    Each polygon is a flat coordinate list [x0, y0, x1, y1, ...], the
    COCO segmentation format.
    """
    plt.imshow(mask)
    for vertices in polygons:
        xs = vertices[0::2]
        ys = vertices[1::2]
        plt.scatter(xs, ys, s=2)
    plt.show()
# Load the COCO-format annotation file given as the first CLI argument.
with open(sys.argv[1], 'r') as f:
    valid = json.load(f)

# Visualize a ~1% random sample of the annotations.
for ann in valid['annotations']:
    if random.random() < 0.01:
        ann_images = search_images_by_id(ann['image_id'])
        ann_category = search_categories_by_id(ann['category_id'])
        # COCO bboxes are [x, y, width, height] floats; cast for cv2.
        bbox = list(map(int, ann['bbox']))
        # NOTE(review): images are resolved relative to the annotation
        # file ('<json>/../images/') — confirm this matches the dataset
        # layout.
        rgb = imageio.imread(sys.argv[1] + '/../images/' + str(ann_images['file_name']))
        # Draw the bounding box in red, 5 px thick.
        rgb = cv2.rectangle(rgb, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (255, 0, 0), 5)
        plot_polygon(rgb, ann['segmentation'])
| 2.484375 | 2 |
footy/domain/Result.py | dallinb/footy | 2 | 12764620 | """Result - Data structure for a result."""
# Is prediction before game is played, then actual once game ahs been played
# Return the outcome Briers score, home/away goals scored, Predictions if available, and actual
# result if game has been played
class Result:
    """Result - Data structure for a result.

    Holds the match status plus the goals scored by each side; acts as a
    prediction before the game is played and as the actual result after.
    """

    def __init__(self, status: str = 'SCHEDULED', home_team_goals_scored: int = 0,
                 away_team_goals_scored: int = 0):
        """
        Construct a Result object.

        Parameters
        ----------
        status : str, optional
            The status of the result of the result. SCHEDULED or FINISHED. Defaults to SCHEDULED
        home_team_goals_scored : int, optional
            The number of goals scored by the home team. Defaults to 0.
        away_team_goals_scored : int, optional
            The number of goals scored by the away team. Defaults to 0.
        """
        self._status = status  # TODO: Can we use an enum?
        self._home_team_goals_scored = home_team_goals_scored
        self._away_team_goals_scored = away_team_goals_scored

    def __eq__(self, other) -> bool:
        """
        Override the __eq__ method for the Result class to allow for object value comparison.

        Parameters
        ----------
        other : footy.domain.Result.Result
            The result object to compare to.

        Returns
        -------
        bool
            True/False if the values in the two objects are equal.
        """
        return (
            self.__class__ == other.__class__ and
            self._status == other._status and
            self._home_team_goals_scored == other._home_team_goals_scored and
            self._away_team_goals_scored == other._away_team_goals_scored
        )

    @property
    def status(self) -> str:
        """
        Getter method for property status.

        Returns
        -------
        str
            The value of property status.
        """
        return self._status

    @status.setter
    def status(self, status: str):
        """
        Setter method for property status.

        Parameters
        ----------
        status : str
            The value you wish to set the status property to.
        """
        self._status = status

    @property
    def home_team_goals_scored(self) -> int:
        """
        Getter method for property home_team_goals_scored.

        Returns
        -------
        int
            The value of property home_team_goals_scored.
        """
        return self._home_team_goals_scored

    @home_team_goals_scored.setter
    def home_team_goals_scored(self, home_team_goals_scored: int):
        """
        Setter method for property home_team_goals_scored.

        Parameters
        ----------
        home_team_goals_scored : int
            The value you wish to set the home_team_goals_scored property to.
        """
        self._home_team_goals_scored = home_team_goals_scored

    @property
    def away_team_goals_scored(self) -> int:
        """
        Getter method for property away_team_goals_scored.

        Returns
        -------
        int
            The value of property away_team_goals_scored.
        """
        return self._away_team_goals_scored

    @away_team_goals_scored.setter
    def away_team_goals_scored(self, away_team_goals_scored: int):
        """
        Setter method for property away_team_goals_scored.

        Parameters
        ----------
        away_team_goals_scored : int
            The value you wish to set the away_team_goals_scored property to.
        """
        self._away_team_goals_scored = away_team_goals_scored
| 3.734375 | 4 |
core/apps.py | techacademypython/hackaton_amada | 0 | 12764621 | from django.apps import AppConfig
class CoreConfig(AppConfig):
    """Django application configuration for the ``core`` app."""

    name = 'core'

    def ready(self):
        # Start the background MQTT network loop once Django has finished
        # loading apps. The import is local so that the MQTT client is not
        # created at module-import time (before Django is fully set up).
        from mqtt.mqtt_file import client
        client.loop_start()
| 1.84375 | 2 |
deepctr_torch/models/pnn.py | sngweicong/DeepCTR-Torch | 4 | 12764622 | # -*- coding:utf-8 -*-
"""
Author:
<NAME>,<EMAIL>
Reference:
[1] <NAME>, <NAME>, <NAME>, et al. Product-based neural networks for user response prediction[C]//Data Mining (ICDM), 2016 IEEE 16th International Conference on. IEEE, 2016: 1149-1154.(https://arxiv.org/pdf/1611.00144.pdf)
"""
import torch
import torch.nn as nn
from .basemodel import BaseModel
from ..inputs import combined_dnn_input
from ..layers import DNN, concat_fun, InnerProductLayer, OutterProductLayer
class PNN(BaseModel):
    """Instantiates the Product-based Neural Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
    :param l2_reg_embedding: float . L2 regularizer strength applied to embedding vector
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param init_std: float,to use as the initialize std of embedding vector
    :param seed: integer ,to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN
    :param use_inner: bool,whether use inner-product or not.
    :param use_outter: bool,whether use outter-product or not.
    :param kernel_type: str,kernel_type used in outter-product,can be ``'mat'`` , ``'vec'`` or ``'num'``
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :param device: str, ``"cpu"`` or ``"cuda:0"``
    :return: A PyTorch model instance.

    """

    def __init__(self, dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-5, l2_reg_dnn=0,
                 init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', use_inner=True, use_outter=False,
                 kernel_type='mat', task='binary', device='cpu', ):
        # PNN has no wide/linear part, hence the empty linear feature list
        # and l2_reg_linear=0.
        super(PNN, self).__init__([], dnn_feature_columns, l2_reg_linear=0, l2_reg_embedding=l2_reg_embedding,
                                  init_std=init_std, seed=seed, task=task, device=device)

        if kernel_type not in ['mat', 'vec', 'num']:
            raise ValueError("kernel_type must be mat,vec or num")

        self.use_inner = use_inner
        self.use_outter = use_outter
        self.kernel_type = kernel_type
        self.task = task

        # Each enabled product layer contributes one value per unordered
        # pair of sparse feature fields: C(num_inputs, 2).
        product_out_dim = 0
        num_inputs = self.compute_input_dim(dnn_feature_columns, include_dense=False, feature_group=True)
        num_pairs = int(num_inputs * (num_inputs - 1) / 2)

        if self.use_inner:
            product_out_dim += num_pairs
            self.innerproduct = InnerProductLayer(device=device)

        if self.use_outter:
            product_out_dim += num_pairs
            self.outterproduct = OutterProductLayer(
                num_inputs, self.embedding_size, kernel_type=kernel_type, device=device)

        # DNN consumes the product-layer outputs concatenated with the raw
        # (flattened embedding + dense) inputs.
        self.dnn = DNN(product_out_dim + self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
                       activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False,
                       init_std=init_std, device=device)

        # Final scoring layer: last hidden layer -> single logit.
        self.dnn_linear = nn.Linear(
            dnn_hidden_units[-1], 1, bias=False).to(device)
        # Register DNN weights (excluding batch-norm params) and the output
        # layer for L2 regularization.
        self.add_regularization_weight(
            filter(lambda x: 'weight' in x[0] and 'bn' not in x[0], self.dnn.named_parameters()), l2=l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2=l2_reg_dnn)

        self.to(device)

    def forward(self, X):
        # Split the raw input tensor into per-field sparse embeddings and
        # dense values.
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(X, self.dnn_feature_columns,
                                                                                  self.embedding_dict)
        # "z" signal: flattened concatenation of the field embeddings.
        linear_signal = torch.flatten(
            concat_fun(sparse_embedding_list), start_dim=1)

        if self.use_inner:
            inner_product = torch.flatten(
                self.innerproduct(sparse_embedding_list), start_dim=1)

        if self.use_outter:
            outer_product = self.outterproduct(sparse_embedding_list)

        # Assemble the product layer from whichever signals are enabled.
        if self.use_outter and self.use_inner:
            product_layer = torch.cat(
                [linear_signal, inner_product, outer_product], dim=1)
        elif self.use_outter:
            product_layer = torch.cat([linear_signal, outer_product], dim=1)
        elif self.use_inner:
            product_layer = torch.cat([linear_signal, inner_product], dim=1)
        else:
            product_layer = linear_signal

        # Append dense features and run the deep network to a single logit.
        dnn_input = combined_dnn_input([product_layer], dense_value_list)
        dnn_output = self.dnn(dnn_input)
        dnn_logit = self.dnn_linear(dnn_output)
        logit = dnn_logit

        # self.out applies the task-specific output transform (e.g. sigmoid
        # for 'binary').
        y_pred = self.out(logit)

        return y_pred
| 2.953125 | 3 |
neutron/db/models/ndp_proxy.py | dangervon/neutron | 0 | 12764623 | <reponame>dangervon/neutron<filename>neutron/db/models/ndp_proxy.py
# Copyright 2022 Troila
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import l3_ndp_proxy as apidef
from neutron_lib.db import constants as db_const
from neutron_lib.db import model_base
from neutron_lib.db import standard_attr
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db.models import l3
class NDPProxy(standard_attr.HasStandardAttributes,
               model_base.BASEV2, model_base.HasId,
               model_base.HasProject):
    """Database model for an NDP proxy entry (l3-ndp-proxy extension)."""

    __tablename__ = 'ndp_proxies'

    # Optional display name of the NDP proxy.
    name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE))
    # Router that provides the proxy; the row is deleted with the router.
    router_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE),
                          sa.ForeignKey('routers.id',
                                        ondelete="CASCADE"),
                          nullable=False)
    # Internal port whose address is proxied; deleted with the port.
    port_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE),
                        sa.ForeignKey('ports.id',
                                      ondelete="CASCADE"),
                        nullable=False)
    # IP address being proxied.
    ip_address = sa.Column(sa.String(db_const.IP_ADDR_FIELD_SIZE),
                           nullable=False)

    # Hook this model into the standard-attributes API machinery.
    api_collections = [apidef.COLLECTION_NAME]
    collection_resource_map = {apidef.COLLECTION_NAME:
                               apidef.RESOURCE_NAME}
class RouterNDPProxyState(model_base.BASEV2):
    """Per-router flag recording whether NDP proxying is enabled."""

    __tablename__ = 'router_ndp_proxy_state'

    # One row per router; removed when the router is deleted.
    router_id = sa.Column(sa.String(db_const.UUID_FIELD_SIZE),
                          sa.ForeignKey('routers.id',
                                        ondelete="CASCADE"),
                          nullable=False, primary_key=True)
    enable_ndp_proxy = sa.Column(sa.Boolean(), nullable=False)
    # Back-reference so Router objects expose `.ndp_proxy_state`.
    router = orm.relationship(
        l3.Router, load_on_pending=True,
        backref=orm.backref("ndp_proxy_state",
                            lazy='subquery', uselist=False,
                            cascade='delete')
    )
| 1.882813 | 2 |
OpenFOAM-wrapper/utils/files.py | Lenferd/ANSYS-OpenFOAM | 0 | 12764624 | import os
import shutil
from utils.logger import Logger, LogLvl
_logger = Logger(LogLvl.LOG_ERROR)
# Creation
def is_directory_exists(dir_name):
    """Return True if *dir_name* exists, logging a note when it does not."""
    exists = os.path.exists(dir_name)
    if not exists:
        _logger.info("Directory \"{}\" not exists".format(dir_name))
    return exists
def create_directory(dir_name):
    """Ensure *dir_name* exists on disk and return its name."""
    # Guard clause: nothing to do when the directory is already there.
    if is_directory_exists(dir_name):
        return dir_name
    os.makedirs(dir_name)
    _logger.info("Creating directory {}".format(dir_name))
    return dir_name
def remove_directory(dir_name):
    """Delete *dir_name* and all of its contents if it exists."""
    # Guard clause: silently return when there is nothing to remove.
    if not is_directory_exists(dir_name):
        return
    shutil.rmtree(dir_name)
    _logger.info("Removing directory {}".format(dir_name))
# Query
def list_all_dirs_in_folder(folder_name):
    """Return the names of all entries in *folder_name*.

    Despite the name, this lists files as well as directories — it is a
    thin wrapper over os.listdir().
    """
    entries = os.listdir(folder_name)
    return entries
# Comparators
def equal(fname1, fname2):
    """Compare two text files line by line, ignoring trailing whitespace.

    Prints a diff-style report of mismatching lines ('>' marks lines from
    *fname1*, '<' lines from *fname2*, with a '+' suffix when the other
    file has already ended) and returns True when the files match.
    """
    files_equal = True

    # Print confirmation
    _logger.info("-----------------------------------")
    _logger.info("Comparing files\n > " + fname1 + "\n < " + fname2)
    _logger.info("-----------------------------------")

    # Context managers guarantee both files are closed even if an
    # exception is raised mid-comparison (the original leaked handles).
    with open(fname1) as f1, open(fname2) as f2:
        # Read the first line from each file.
        f1_line = f1.readline()
        f2_line = f2.readline()

        # Line counter for the report.
        line_no = 1

        # Keep going until *both* files reach EOF, so extra trailing lines
        # in either file are still reported.
        while f1_line != '' or f2_line != '':
            # Strip trailing whitespace/newlines before comparing.
            f1_line = f1_line.rstrip()
            f2_line = f2_line.rstrip()

            if f1_line != f2_line:
                # Line exists only in file1: mark with '+'.
                if f2_line == '' and f1_line != '':
                    print(">+", "Line-%d" % line_no, f1_line)
                # Otherwise report file1's differing line.
                elif f1_line != '':
                    print(">", "Line-%d" % line_no, f1_line)

                # Line exists only in file2: mark with '+'.
                if f1_line == '' and f2_line != '':
                    print("<+", "Line-%d" % line_no, f2_line)
                # Otherwise report file2's differing line.
                elif f2_line != '':
                    print("<", "Line-%d" % line_no, f2_line)

                # Blank line separates mismatch reports.
                print()
                files_equal = False

            # Advance to the next line pair.
            f1_line = f1.readline()
            f2_line = f2.readline()
            line_no += 1

    return files_equal
Tools/idle/FrameViewer.py | 1byte2bytes/cpython | 5 | 12764625 | <reponame>1byte2bytes/cpython<gh_stars>1-10
from repr import Repr
from Tkinter import *
class FrameViewer:
    """Tkinter window listing the local and global variables of a Python
    stack frame, one (name, value) row per variable.

    NOTE(review): legacy Python 2 code — ``from repr import Repr`` and
    ``dict.keys().sort()`` (in load_names) do not work on Python 3.
    """

    def __init__(self, root, frame):
        # root: parent Tk widget; frame: the frame object to inspect.
        self.root = root
        self.frame = frame
        self.top = Toplevel(self.root)
        # Repr truncates long values (60 chars) so rows stay readable.
        self.repr = Repr()
        self.repr.maxstring = 60
        self.load_variables()

    def load_variables(self):
        """Populate the window with the frame's locals (if distinct from
        its globals) followed by its globals, under section headers."""
        row = 0
        # At module level f_locals *is* f_globals; skip the duplicate section.
        if self.frame.f_locals is not self.frame.f_globals:
            l = Label(self.top, text="Local Variables",
                      borderwidth=2, relief="raised")
            l.grid(row=row, column=0, columnspan=2, sticky="ew")
            row = self.load_names(self.frame.f_locals, row+1)
        l = Label(self.top, text="Global Variables",
                  borderwidth=2, relief="raised")
        l.grid(row=row, column=0, columnspan=2, sticky="ew")
        row = self.load_names(self.frame.f_globals, row+1)

    def load_names(self, dict, row):
        """Add one (name, truncated value) grid row per entry of *dict*,
        starting at grid row *row*; return the next free row index."""
        names = dict.keys()
        names.sort()
        for name in names:
            value = dict[name]
            svalue = self.repr.repr(value)
            l = Label(self.top, text=name)
            l.grid(row=row, column=0, sticky="w")
            # Borderless Entry so the value looks like text but is selectable.
            l = Entry(self.top, width=60, borderwidth=0)
            l.insert(0, svalue)
            l.grid(row=row, column=1, sticky="w")
            row = row+1
        return row
| 2.84375 | 3 |
src/game_create.py | Boromir-the-Brave/Adventure_Game | 0 | 12764626 | from things import Room, Item
def build_rooms():
    """Create the game world's rooms (currently only the front yard)."""
    print("Building world...", end="")
    front_yard = Room("house_front_yard")
    print(" done.")
    return
def generate_items():
    """Populate the world with items (placeholder — nothing generated yet)."""
    # Single print producing the same output as the two-step original.
    print("Generating items...", " done.", sep="")
    return
sphinxsimulink/diagram/directives.py | dekalinowski/sphinx-simulink | 2 | 12764627 | """
sphinx-simulink.directives
~~~~~~~~~~~~~~~~~~~~~~~
Embed Simulink diagrams on your documentation.
:copyright:
Copyright 2016 by <NAME> <<EMAIL>>.
:license:
MIT, see LICENSE for details.
"""
import hashlib
import os
import tempfile
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
from sphinx.util.osutil import ensuredir
from sphinxsimulink.diagram import nodes
def pathlist(argument):
    """Option converter: split a ';'-separated value into validated paths."""
    return [directives.path(part) for part in argument.split(';')]
class SimulinkDiagramDirective(images.Figure):
    """reST directive that embeds a Simulink system as a figure.

    Inherits all figure options (width, align, caption, ...) from
    ``images.Figure`` and adds MATLAB-specific options consumed later when
    the diagram node is rendered.
    """

    required_arguments = 1   # the Simulink system/model name
    optional_arguments = 0

    option_spec = dict(
        images.Figure.option_spec, **{
            'dir': directives.path,             # working directory for MATLAB
            'addpath': pathlist,                # extra MATLAB search paths
            'preload': directives.path,         # script run before loading
            'subsystem': directives.unchanged,  # subsystem to render
        }
    )

    # content used by images.Figure as caption
    has_content = True

    @staticmethod
    def generate_uri(app, diagram_options, fileformat):
        """Build a deterministic output path for the rendered diagram.

        The path encodes a hash of the source directory (one cache folder
        per project) and a hash of the diagram options (one file per
        distinct option set).
        """
        # give a unique folder name for the specific srcdir, housed under the
        # system's temporary directory
        outdir = os.path.join(
            tempfile.gettempdir(),
            'sphinxsimulink',
            hashlib.sha1(
                os.path.abspath( app.builder.srcdir ).encode('utf-8')
            ).hexdigest()
        )

        # FIXME: change filename hash to include contents of preload script,
        # simulink system model, and other dependencies...
        # use as mechanism to reuse cache, and delete on clean job

        # make a unique filename for the Simulink model
        hash = hashlib.sha1( repr( sorted( diagram_options.items() ) )
            .encode('utf-8') ).hexdigest()
        filename = "simulink-diagram-{}.{}".format( hash, fileformat )

        # combine the directory and filename
        uri = os.path.join(outdir, filename)

        return uri

    def run(self):
        """Process the directive: reserve the output image file, delegate
        option parsing to images.Figure, then wrap the figure in a diagram
        node carrying the MATLAB rendering options."""
        env = self.state.document.settings.env
        app = env.app

        # pop these keys out of self.options;
        # place into diagram_options
        diagram_options = dict(
            (popped_key, self.options.pop(popped_key, None))
            for popped_key in
            ('dir','addpath','preload','subsystem')
        )

        # generate image at this location; Sphinx will relocate later
        uri = SimulinkDiagramDirective.generate_uri(
            app, diagram_options, 'png'
        )

        # make an empty file, if needed, to avoid warning from Sphinx's image
        # processing
        ensuredir( os.path.dirname( uri ) )
        open( uri, 'a' ).close()

        # SimulinkDiagramDirective takes system from argument[0]
        system = self.arguments[0]

        # images.Figure expects uri in argument[0]
        self.arguments[0] = uri;

        (figure_node,) = images.Figure.run(self)

        # escalate system messages
        if isinstance(figure_node, nodes.system_message):
            return [figure_node]

        diagram_node = nodes.diagram('', figure_node, **diagram_options)
        diagram_node['uri'] = uri
        diagram_node['system'] = system

        return [diagram_node]
| 2.140625 | 2 |
pcbhdl/test/library/test_package.py | pcbhdl/pcbhdl | 0 | 12764628 | <reponame>pcbhdl/pcbhdl
import unittest
from pcbhdl.library.package.passive import *
class TestPackage(unittest.TestCase):
    """Sanity checks for the passive-component package definitions."""

    def test_eia_two_terminal(self):
        # EIA 0603 (imperial): two 0.8 x 1.0 mm pads centred 1.8 mm apart.
        pkg = EIA_I_0603
        self.assertEqual(pkg.name, "EIA_I_0603")
        self.assertEqual(pkg.pads[0].name, "1")
        self.assertEqual(pkg.pads[0].center, (-0.9, 0.0))
        self.assertEqual(pkg.pads[0].width, 0.8)
        self.assertEqual(pkg.pads[0].height, 1.0)
        self.assertEqual(pkg.pads[1].name, "2")
        self.assertEqual(pkg.pads[1].center, (0.9, 0.0))
        self.assertEqual(pkg.pads[1].width, 0.8)
        self.assertEqual(pkg.pads[1].height, 1.0)
| 2.734375 | 3 |
src/services/sspanel_mining/__init__.py | lkmvip/sspanel-mining | 1 | 12764629 | """
- 集爬取、清洗、分类与测试为一体的STAFF采集队列自动化更新组件
- 需要本机启动系统全局代理,或使用“国外”服务器部署
"""
from .sspanel_checker import SSPanelStaffChecker
from .sspanel_classifier import SSPanelHostsClassifier
from .sspanel_collector import SSPanelHostsCollector
__version__ = 'v0.2.2'
__all__ = ['SSPanelHostsCollector', "SSPanelStaffChecker", "SSPanelHostsClassifier"]
| 1.476563 | 1 |
tests/data/program_analysis/crop_yield.py | mikiec84/delphi | 25 | 12764630 | import sys
from typing import List
import math
from delphi.translators.for2py.format import *
from delphi.translators.for2py.arrays import *
from delphi.translators.for2py.static_save import *
from delphi.translators.for2py.strings import *
from dataclasses import dataclass
from delphi.translators.for2py.types_ext import Float32
import delphi.translators.for2py.math_ext as math
from numbers import Real
from random import random
def update_est(rain: List[float], total_rain: List[float], yield_est: List[float]):
    """Accumulate today's rainfall and recompute the crop-yield estimate.

    All arguments are one-element lists used as mutable scalars (for2py
    Fortran-translation idiom); total_rain[0] and yield_est[0] are updated
    in place.
    """
    total_rain[0] += rain[0]
    accumulated = total_rain[0]
    if accumulated <= 40:
        # Parabolic response peaking at 100% when total rain reaches 40.
        yield_est[0] = 100 - ((accumulated - 40) ** 2) / 16
    else:
        # Beyond 40, yield declines linearly with additional rain.
        yield_est[0] = 140 - accumulated
def crop_yield():
    """Simulate 30 days of rainfall and print a running crop-yield estimate.

    NOTE: this module is machine-translated from Fortran (delphi for2py),
    hence the one-element-list "scalar" idiom for every variable.
    """
    day: List[int] = [None]
    rain: List[float] = [None]
    yield_est: List[float] = [None]
    total_rain: List[float] = [None]
    max_rain: List[float] = [None]
    consistency: List[float] = [None]
    absorption: List[float] = [None]
    # Model constants: peak rainfall, spread of the rain curve, and the
    # fraction of rainfall absorbed.
    max_rain[0] = 4.0
    consistency[0] = 64.0
    absorption[0] = 0.6
    yield_est[0] = 0
    total_rain[0] = 0
    # Daily rainfall follows an inverted parabola peaking on day 16.
    for day[0] in range(1, 31+1):
        rain[0] = ((-((((day[0] - 16) ** 2) / consistency[0])) + max_rain[0]) * absorption[0])
        update_est(rain, total_rain, yield_est)
        # NOTE(review): prints the raw one-element lists (e.g. "[1]"),
        # mirroring the generated code's output format.
        print("Day ", day, " Estimate: ", yield_est)
    print("Crop Yield(%): ", yield_est)

crop_yield()
| 2.765625 | 3 |
trachours/db.py | superyaro/trachoursplugin | 0 | 12764631 | <filename>trachours/db.py<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 <NAME> <<EMAIL>>
# Copyright (C) 2017 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from datetime import datetime
from trac.core import Component, implements
from trac.db.schema import Column, Index, Table
from trac.env import IEnvironmentSetupParticipant
from trac.util.datefmt import to_utimestamp, utc
from tracsqlhelper import *
from usermanual import *
# Ideally 'totalhours' would be a computed field, but computed fields
# don't yet exist in Trac, so both are stored as plain text custom fields.
custom_fields = {
    'estimatedhours': {
        'type': 'text',
        'label': 'Estimated Hours',
        'value': '0'
    },
    'totalhours': {
        'type': 'text',
        'label': 'Total Hours',
        'value': '0'
    }
}
class SetupTracHours(Component):
    """Trac environment-setup participant for the TracHours plugin.

    Installs and upgrades the plugin's database schema, custom ticket
    fields and wiki user manual via an ordered list of upgrade steps.
    """

    implements(IEnvironmentSetupParticipant)

    # IEnvironmentSetupParticipant methods

    # Schema version found in the environment (read once at init).
    db_installed_version = None
    # Latest schema version shipped with this plugin release.
    db_version = 4

    def __init__(self):
        self.db_installed_version = self.version()

    def environment_created(self, db=None):
        # A fresh environment is treated like an upgrade from version 0.
        if self.environment_needs_upgrade():
            self.upgrade_environment()

    def environment_needs_upgrade(self, db=None):
        return self._system_needs_upgrade()

    def _system_needs_upgrade(self):
        return self.db_installed_version < self.db_version

    def upgrade_environment(self, db=None):
        # Run every pending upgrade step in order, then record the new
        # schema version in the Trac system table.
        for version in range(self.version(), len(self.steps)):
            for step in self.steps[version]:
                step(self)
        self.env.db_transaction("""
            UPDATE system SET value=%s
            WHERE name='trachours.db_version'
            """, (len(self.steps),))
        self.db_installed_version = len(self.steps)

    def _needs_user_manual(self):
        # Compare the newest installed manual page version against the
        # version bundled with this plugin release.
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute("""
                SELECT MAX(version) FROM wiki WHERE name=%s
                """, (user_manual_wiki_title,))
            # rows = self.env.db_query("""
            #    SELECT MAX(version) FROM wiki WHERE name=%s
            #    """, (user_manual_wiki_title,)
            for maxversion in cursor.fetchone():
                maxversion = int(maxversion) \
                    if isinstance(maxversion, (int, long)) \
                    else 0
                break
            else:
                maxversion = 0

        return maxversion < user_manual_version

    def _do_user_man_update(self):
        # Insert the bundled user-manual content as a new wiki page version.
        when = to_utimestamp(datetime.now(utc))
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute("""
                INSERT INTO wiki
                 (name,version,time,author,ipnr,text,comment,readonly)
                VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
                """, (user_manual_wiki_title, user_manual_version,
                      when, 'TracHours Plugin', '127.0.0.1',
                      user_manual_content, '', 0,))

    def version(self):
        """Return the schema version recorded in the environment (0 if none)."""
        for value, in self.env.db_query("""
                SELECT value FROM system WHERE name = 'trachours.db_version'
                """):
            return int(value)
        else:
            return 0

    def create_db(self):
        # Upgrade step 1a: create the ticket_time table and record version 1.
        ticket_time_table = Table('ticket_time', key='id')[
            Column('id', auto_increment=True),
            Column('ticket', type='int'),
            Column('time_submitted', type='int'),
            Column('worker'),
            Column('submitter'),
            Column('time_started', type='int'),
            Column('seconds_worked', type='int'),
            Column('comments'),
            Index(['ticket']),
            Index(['worker']),
            Index(['time_started'])]
        create_table(self.env, ticket_time_table)
        execute_non_query(self.env, """
            INSERT INTO system (name, value)
            VALUES ('trachours.db_version', '1')
            """)

    def update_custom_fields(self):
        # Upgrade step 1b: register the plugin's custom ticket fields in
        # trac.ini without clobbering existing settings.
        ticket_custom = 'ticket-custom'
        for name in custom_fields:
            field = custom_fields[name].copy()
            field_type = field.pop('type', 'text')
            if not self.config.get(ticket_custom, field_type):
                self.config.set(ticket_custom, name, field_type)
            for key, value in field.items():
                self.config.set(ticket_custom, '%s.%s' % (name, key), value)
        self.config.save()

    def add_query_table(self):
        # Upgrade step 2: table for saved hours queries.
        time_query_table = Table('ticket_time_query', key='id')[
            Column('id', auto_increment=True),
            Column('title'),
            Column('description'),
            Column('query')]
        create_table(self.env, time_query_table)

    def initialize_old_tickets(self):
        # Upgrade step 3: give pre-existing tickets a zero 'totalhours' value.
        execute_non_query(self.env, """
            INSERT INTO ticket_custom (ticket, name, value)
            SELECT id, 'totalhours', '0' FROM ticket WHERE id NOT IN (
            SELECT ticket FROM ticket_custom WHERE name='totalhours');""")

    def install_manual(self):
        # Upgrade step 4: (re)install the wiki user manual if outdated.
        if self._needs_user_manual():
            self._do_user_man_update()

    # ordered steps for upgrading; index n holds the steps for version n+1
    steps = [
        [create_db, update_custom_fields],  # version 1
        [add_query_table],  # version 2
        [initialize_old_tickets],  # version 3
        [install_manual],  # version 4
    ]
]
| 2.09375 | 2 |
setup.py | tpatja/opensoar | 9 | 12764632 | from setuptools import setup, find_packages
# Executing version.py defines __version__ in this namespace without
# importing the package (which would require its dependencies).
exec(open('opensoar/version.py').read())

# The README doubles as the PyPI long description.
with open("README.rst", "r") as f:
    long_description = f.read()

setup(
    name='opensoar',
    version=__version__,  # defined by the exec() of version.py above
    license='MIT',
    description='Open source python library for glider flight analysis',
    url='https://github.com/glidergeek/opensoar',
    packages=find_packages(exclude=['tests']),
    long_description=long_description,
    install_requires=[
        'pygeodesy>=17.11.26',
        'aerofiles>=0.4.1',
        'beautifulsoup4>=4.6.0'
    ]
)
| 1.335938 | 1 |
Lista2/Lista2ex3.py | hugo-paiva/IntroducaoCienciasDaComputacao | 0 | 12764633 | cargo = input().lower().strip()
tempo = int(input())
salarioAtual = float(input())
if salarioAtual < 1039:
print('Salário inválido!')
else:
if cargo == 'gerente':
if tempo <= 3:
reajuste = salarioAtual * 0.12
elif tempo <= 6:
reajuste = salarioAtual * 0.13
else:
reajuste = salarioAtual * 0.15
elif cargo == 'engenheiro':
if tempo <= 3:
reajuste = salarioAtual * 0.07
elif tempo <= 6:
reajuste = salarioAtual * 0.11
else:
reajuste = salarioAtual * 0.14
else:
reajuste = salarioAtual * 0.05
#Trecho chamado apenas se salarioAtual >= 1039
salarioReajustado = salarioAtual + reajuste
print(f'{reajuste:.2f}')
print(f'{salarioReajustado:.2f}')
| 3.65625 | 4 |
tests/test_dfa.py | UCSCFormalMethods/CIToolkit | 0 | 12764634 | """ Tests for the Dfa class"""
import math
import random
import itertools
import pytest
from citoolkit.specifications.spec import AbstractSpec
from citoolkit.specifications.dfa import Dfa, State, DfaCycleError
###################################################################################################
# Basic Tests
###################################################################################################
def test_dfa_complete():
    """Constructing a fully specified Dfa should succeed without error."""
    # Language: strings over {0,1,2} containing "111" with no "2" after it.
    alphabet = {"0", "1", "2"}
    states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}

    # Default every transition back to the start state, then overlay the
    # transitions that track consecutive "1" symbols.
    transitions = {(state, symbol): "0_Seen"
                   for state in states for symbol in alphabet}
    transitions.update({
        ("0_Seen", "1"): "1_Seen",
        ("1_Seen", "1"): "2_Seen",
        ("2_Seen", "1"): "3_Seen",
        ("3_Seen", "0"): "3_Seen",
        ("3_Seen", "1"): "3_Seen",
    })

    # Construction should not raise.
    Dfa(alphabet, states, {"3_Seen"}, "0_Seen", transitions)
def test_dfa_not_complete():
    """Constructing a Dfa with missing transitions must raise ValueError."""
    alphabet = {"0", "1", "2"}
    states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}

    # Only a subset of (state, symbol) pairs is mapped, so the transition
    # function is not total.
    transitions = {
        ("0_Seen", "1"): "1_Seen",
        ("1_Seen", "1"): "2_Seen",
        ("2_Seen", "1"): "3_Seen",
        ("3_Seen", "0"): "3_Seen",
        ("3_Seen", "1"): "3_Seen",
    }

    with pytest.raises(ValueError):
        Dfa(alphabet, states, {"3_Seen"}, "0_Seen", transitions)
def test_dfa_string_states():
    """Accept/reject checks for a Dfa built from plain string states."""
    alphabet = {"0", "1", "2"}
    states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}

    # Default everything to the start state, then add the "111" tracker.
    transitions = {(state, symbol): "0_Seen"
                   for state in states for symbol in alphabet}
    transitions.update({
        ("0_Seen", "1"): "1_Seen",
        ("1_Seen", "1"): "2_Seen",
        ("2_Seen", "1"): "3_Seen",
        ("3_Seen", "0"): "3_Seen",
        ("3_Seen", "1"): "3_Seen",
    })

    dfa = Dfa(alphabet, states, {"3_Seen"}, "0_Seen", transitions)

    rejected = ["", "0", "1", "2", "1112", "000", "222",
                "00000011000020011100020001"]
    accepted = ["111", "01110", "0000001100002001110002000111"]

    for word in rejected:
        assert not dfa.accepts(list(word))
    for word in accepted:
        assert dfa.accepts(list(word))
def test_dfa_class_states():
    """Accept/reject checks for a Dfa built from State class objects."""
    alphabet = {"0", "1", "2"}
    states = {State(name) for name in ("0_Seen", "1_Seen", "2_Seen", "3_Seen")}

    # Default everything to the start state, then add the "111" tracker.
    transitions = {(state, symbol): State("0_Seen")
                   for state in states for symbol in alphabet}
    transitions.update({
        (State("0_Seen"), "1"): State("1_Seen"),
        (State("1_Seen"), "1"): State("2_Seen"),
        (State("2_Seen"), "1"): State("3_Seen"),
        (State("3_Seen"), "0"): State("3_Seen"),
        (State("3_Seen"), "1"): State("3_Seen"),
    })

    dfa = Dfa(alphabet, states, {State("3_Seen")}, State("0_Seen"), transitions)

    rejected = ["", "0", "1", "2", "1112", "000", "222",
                "00000011000020011100020001"]
    accepted = ["111", "01110", "0000001100002001110002000111"]

    for word in rejected:
        assert not dfa.accepts(list(word))
    for word in accepted:
        assert dfa.accepts(list(word))
def test_dfa_mixed_states():
    """ Creates a simple Dfa and ensures that select
    words are correctly accepted or rejected. Dfa is
    constructed with a mix of string and State class
    states.
    """
    # Create a DFA that only accepts strings that contain 3 "1"
    # symbols in a row with no "2" inputs after them.
    alphabet = {"0", "1", "2"}
    # NOTE: string names and State objects are mixed deliberately; this
    # test relies on Dfa treating "X" and State("X") as the same state.
    states = {State("0_Seen"), "1_Seen", "2_Seen", State("3_Seen")}
    accepting_states = {"3_Seen"}
    start_state = State("0_Seen")
    # Initialize transitions map so that all transitions go
    # to "0_Seen"
    transitions = {}
    for state in states:
        for symbol in alphabet:
            transitions[(state, symbol)] = "0_Seen"
    # Complete transitions map. Sources/targets again mix str and State
    # forms on purpose.
    transitions[(State("0_Seen"), "1")] = State("1_Seen")
    transitions[("1_Seen", "1")] = State("2_Seen")
    transitions[(State("2_Seen"), "1")] = "3_Seen"
    transitions[(State("3_Seen"), "0")] = State("3_Seen")
    transitions[("3_Seen", "1")] = "3_Seen"
    # Create the DFA and check select strings against Dfa
    dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    assert not dfa.accepts([])
    assert not dfa.accepts(list("0"))
    assert not dfa.accepts(list("1"))
    assert not dfa.accepts(list("2"))
    assert dfa.accepts(list("111"))
    assert not dfa.accepts(list("1112"))
    assert not dfa.accepts(list("000"))
    assert not dfa.accepts(list("222"))
    assert dfa.accepts(list("01110"))
    assert not dfa.accepts(list("00000011000020011100020001"))
    assert dfa.accepts(list("0000001100002001110002000111"))
def test_dfa_topological_ordering():
    """ Builds an acyclic DFA and verifies that states_topological
    returns the unique correct topological ordering of its states.
    """
    alphabet = {"0", "1"}
    states = {"A", "B", "C", "D", "E", "F", "Sink"}
    accepting_states = {"F"}
    start_state = "A"
    # Edges chosen so that exactly one topological order exists;
    # "Sink" absorbs everything after "F".
    transitions = {
        ("A", "0"): "B", ("A", "1"): "C",
        ("B", "0"): "C", ("B", "1"): "C",
        ("C", "0"): "D", ("C", "1"): "E",
        ("D", "0"): "E", ("D", "1"): "F",
        ("E", "0"): "F", ("E", "1"): "F",
        ("F", "0"): "Sink", ("F", "1"): "Sink",
        ("Sink", "0"): "Sink", ("Sink", "1"): "Sink",
    }
    dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    # Ensures that the one correct topological sort is generated.
    assert dfa.states_topological() == ["A", "B", "C", "D", "E", "F"]
def test_dfa_topological_ordering_cycle():
    """ Builds a DFA containing a reachable and accepting cycle and
    verifies that states_topological raises a DfaCycleError.
    """
    alphabet = {"0", "1"}
    states = {"A", "B", "C", "D", "Sink"}
    accepting_states = {"D"}
    start_state = "A"
    # "B" -> "D" -> "C" -> "B" forms the cycle; a "1" symbol always
    # escapes to "Sink".
    transitions = {
        ("A", "0"): "B", ("A", "1"): "C",
        ("B", "0"): "D", ("B", "1"): "Sink",
        ("C", "0"): "B", ("C", "1"): "Sink",
        ("D", "0"): "C", ("D", "1"): "Sink",
        ("Sink", "0"): "Sink", ("Sink", "1"): "Sink",
    }
    dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    # A cyclic DFA has no well defined topological ordering, so a
    # DfaCycleError must be raised.
    with pytest.raises(DfaCycleError):
        dfa.states_topological()
def test_dfa_language_size():
    """ Checks language_size on a Dfa that accepts exactly the
    binary words of length 7.
    """
    length_7_dfa = Dfa.exact_length_dfa({"0", "1"}, 7)
    # There are 2^7 binary words of length 7.
    assert length_7_dfa.language_size() == 2**7
def test_dfa_language_size_abstract():
    """ Checks language_size on an abstract specification that is
    the union of two exact-length Dfas.
    """
    short_dfa = Dfa.exact_length_dfa({"0", "1"}, 5)
    long_dfa = Dfa.exact_length_dfa({"0", "1"}, 7)
    combined = short_dfa | long_dfa
    # The union accepts every binary word of length 5 or length 7.
    assert combined.language_size() == (2**5 + 2**7)
def test_dfa_language_size_param():
    """ Checks the min_length/max_length parameters of language_size
    on a Dfa accepting all binary words of length at most 7.
    """
    capped_dfa = Dfa.max_length_dfa({"0", "1"}, 7)
    # Restricting to lengths 5..7 leaves 2^5 + 2^6 + 2^7 words.
    expected_count = 2**5 + 2**6 + 2**7
    assert capped_dfa.language_size(min_length=5, max_length=7) == expected_count
def test_dfa_sample():
    """ Create a simple Dfa that when uniformly sampled
    should generate the following words with relatively
    uniform probabilities: [[], ["A"], ["A", "A"], ["B"]].
    Then verify that the sampling is over the correct
    words and reasonably accurate.
    """
    # Create test Dfa
    alphabet = {"A", "B"}
    states = {"Start", "Top", "Bottom1", "Bottom2", "Sink"}
    accepting_states = {"Start", "Top", "Bottom1", "Bottom2"}
    start_state = "Start"
    transitions = dict()
    transitions[("Start", "A")] = "Bottom1"
    transitions[("Start", "B")] = "Top"
    transitions[("Top", "A")] = "Sink"
    transitions[("Top", "B")] = "Sink"
    transitions[("Bottom1", "A")] = "Bottom2"
    transitions[("Bottom1", "B")] = "Sink"
    transitions[("Bottom2", "A")] = "Sink"
    transitions[("Bottom2", "B")] = "Sink"
    transitions[("Sink", "A")] = "Sink"
    transitions[("Sink", "B")] = "Sink"
    dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    # Sample 100,000 words and keep track of
    # how many of each are sampled.
    # NOTE: tuple("A") == ("A",), so the language is
    # [(), ("A",), ("A", "A"), ("B",)].
    dfa_language = [tuple(), tuple("A"), ("A", "A"), tuple("B")]
    sample_counts = dict()
    for word in dfa_language:
        sample_counts[word] = 0
    for _ in range(100000):
        # Sample a word from our Dfa's language
        sampled_word = dfa.sample()
        # Ensure we didn't sample a word not in our language
        assert sampled_word in dfa_language
        # Increment the count for the word we sampled
        sample_counts[tuple(sampled_word)] += 1
    # Assert the sampled ratios are relatively correct: each of the
    # 4 words should appear roughly 25% of the time.
    for word in dfa_language:
        word_prob = sample_counts[word]/100000
        assert word_prob > .24
        assert word_prob < .26
def test_dfa_sample_abstract():
    """ Create a simple Dfa that when uniformly sampled
    should generate the following words with relatively
    uniform probabilities: [[], ["A"], ["A", "A"], ["B"]].
    Then intersect it with a Dfa that accepts only words
    of length 1. Then verify that the sampling is over the
    correct words and reasonably accurate.
    """
    # Create test Dfa
    alphabet = {"A", "B"}
    states = {"Start", "Top", "Bottom1", "Bottom2", "Sink"}
    accepting_states = {"Start", "Top", "Bottom1", "Bottom2"}
    start_state = "Start"
    transitions = dict()
    transitions[("Start", "A")] = "Bottom1"
    transitions[("Start", "B")] = "Top"
    transitions[("Top", "A")] = "Sink"
    transitions[("Top", "B")] = "Sink"
    transitions[("Bottom1", "A")] = "Bottom2"
    transitions[("Bottom1", "B")] = "Sink"
    transitions[("Bottom2", "A")] = "Sink"
    transitions[("Bottom2", "B")] = "Sink"
    transitions[("Sink", "A")] = "Sink"
    transitions[("Sink", "B")] = "Sink"
    main_dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    # Intersecting with the exact-length-1 Dfa leaves only the words
    # ("A",) and ("B",) in the language.
    length_dfa = Dfa.exact_length_dfa(alphabet, 1)
    dfa = main_dfa & length_dfa
    # Sample 100,000 words and keep track of
    # how many of each are sampled.
    dfa_language = [tuple("A"), tuple("B")]
    sample_counts = dict()
    for word in dfa_language:
        sample_counts[word] = 0
    for _ in range(100000):
        # Sample a word from our Dfa's language
        sampled_word = dfa.sample()
        # Ensure we didn't sample a word not in our language
        assert sampled_word in dfa_language
        # Increment the count for the word we sampled
        sample_counts[tuple(sampled_word)] += 1
    # Assert the sampled ratios are relatively correct: each of the
    # 2 words should appear roughly 50% of the time.
    for word in dfa_language:
        word_prob = sample_counts[word]/100000
        assert word_prob > .49
        assert word_prob < .51
def test_dfa_sample_param():
    """ Create a simple Dfa whose language is the empty word, ("B",),
    and "A" repeated up to four times. Then sample with min_length=1
    and max_length=3 and verify that the sampling is restricted to
    the correct words and is reasonably uniform over them.
    """
    # Create test Dfa
    alphabet = {"A", "B"}
    states = {"Start", "Top", "Bottom1", "Bottom2", "Bottom3", "Bottom4", "Sink"}
    accepting_states = {"Start", "Top", "Bottom1", "Bottom2", "Bottom3", "Bottom4"}
    start_state = "Start"
    transitions = dict()
    transitions[("Start", "A")] = "Bottom1"
    transitions[("Start", "B")] = "Top"
    transitions[("Top", "A")] = "Sink"
    transitions[("Top", "B")] = "Sink"
    transitions[("Bottom1", "A")] = "Bottom2"
    transitions[("Bottom1", "B")] = "Sink"
    transitions[("Bottom2", "A")] = "Bottom3"
    transitions[("Bottom2", "B")] = "Sink"
    transitions[("Bottom3", "A")] = "Bottom4"
    transitions[("Bottom3", "B")] = "Sink"
    transitions[("Bottom4", "A")] = "Sink"
    transitions[("Bottom4", "B")] = "Sink"
    transitions[("Sink", "A")] = "Sink"
    transitions[("Sink", "B")] = "Sink"
    dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    # Sample 100,000 words and keep track of
    # how many of each are sampled.
    # With lengths restricted to 1..3 the only sampleable words are
    # ("A",), ("B",), ("A", "A"), and ("A", "A", "A").
    dfa_language = [tuple("A"), tuple("B"), ("A", "A"), ("A", "A", "A")]
    sample_counts = dict()
    for word in dfa_language:
        sample_counts[word] = 0
    for _ in range(100000):
        # Sample a word from our Dfa's language
        sampled_word = dfa.sample(min_length=1, max_length=3)
        # Ensure we didn't sample a word not in our language
        assert sampled_word in dfa_language
        # Increment the count for the word we sampled
        sample_counts[tuple(sampled_word)] += 1
    # Assert the sampled ratios are relatively correct: each of the
    # 4 words should appear roughly 25% of the time.
    for word in dfa_language:
        word_prob = sample_counts[word]/100000
        assert word_prob > .24
        assert word_prob < .26
def test_dfa_minimize_no_reduction():
    """ Creates a simple Dfa that is already minimal,
    minimizes it, and ensures that select words are
    correctly accepted or rejected by both the original
    and the minimized Dfa.
    """
    # Create a DFA that only accepts strings that contain 3 "1"
    # symbols in a row with no "2" inputs after them.
    alphabet = {"0", "1", "2"}
    states = {"0_Seen", "1_Seen", "2_Seen", "3_Seen"}
    accepting_states = {"3_Seen"}
    start_state = "0_Seen"
    # Initialize transitions map so that all transitions go
    # to "0_Seen"
    transitions = {}
    for state in states:
        for symbol in alphabet:
            transitions[(state, symbol)] = "0_Seen"
    # Complete transitions map.
    transitions[("0_Seen", "1")] = "1_Seen"
    transitions[("1_Seen", "1")] = "2_Seen"
    transitions[("2_Seen", "1")] = "3_Seen"
    transitions[("3_Seen", "0")] = "3_Seen"
    transitions[("3_Seen", "1")] = "3_Seen"
    # Create the DFA and minimize it.
    dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    minimized_dfa = dfa.minimize()
    # Assert the minimized Dfa's size is the same as the original.
    assert len(dfa.states) == len(minimized_dfa.states)
    # BUG FIX: the original test only exercised `dfa`, so minimization
    # behavior was never actually checked. Check both automata.
    assert not dfa.accepts([]) and not minimized_dfa.accepts([])
    for word in ("111", "01110", "0000001100002001110002000111"):
        assert dfa.accepts(list(word))
        assert minimized_dfa.accepts(list(word))
    for word in ("0", "1", "2", "1112", "000", "222",
                 "00000011000020011100020001"):
        assert not dfa.accepts(list(word))
        assert not minimized_dfa.accepts(list(word))
def test_dfa_minimize_reduction():
    """ Creates a Dfa that has many redundancies,
    minimizes it, and ensures that select words are
    correctly accepted or rejected.
    """
    # Create a very redundant DFA that accepts if and only if the
    # string contains a "1" symbol before any 0 symbols.
    # Redundancy comes from three sources: equivalent copies of the
    # start/accept/reject states, and unreachable "dead" states.
    alphabet = {"0", "1", "2"}
    s_states = {"Start_A", "Start_B", "Start_C"}
    a_states = {"Accept_A", "Accept_B", "Accept_C"}
    r_states = {"Reject_A", "Reject_B", "Reject_C"}
    dr_states = {"DeadReject_A", "DeadReject_B"}
    da_states = {"DeadAccept_A", "DeadAccept_B"}
    states = s_states | a_states | r_states | dr_states |da_states
    accepting_states = a_states | da_states
    start_state = "Start_A"
    # Create transitions map
    transitions = {}
    # S state transitions: "0" decides reject, "1" decides accept,
    # "2" just cycles through the (equivalent) start copies.
    for s_state in s_states:
        transitions[(s_state, "0")] = "Reject_A"
        transitions[(s_state, "1")] = "Accept_A"
    transitions[("Start_A", "2")] = "Start_B"
    transitions[("Start_B", "2")] = "Start_C"
    transitions[("Start_C", "2")] = "Start_C"
    # A state transitions: all accept copies are absorbing and equivalent.
    for symbol in alphabet:
        transitions[("Accept_A", symbol)] = "Accept_B"
    for symbol in alphabet:
        transitions[("Accept_B", symbol)] = "Accept_C"
    for symbol in alphabet:
        transitions[("Accept_C", symbol)] = "Accept_C"
    # R state transitions: all reject copies are absorbing and equivalent.
    for symbol in alphabet:
        transitions[("Reject_A", symbol)] = "Reject_B"
    for symbol in alphabet:
        transitions[("Reject_B", symbol)] = "Reject_C"
    for symbol in alphabet:
        transitions[("Reject_C", symbol)] = "Reject_C"
    # Dead state transitions: these states are unreachable from the
    # start state and should be removed by minimization.
    transitions[("DeadReject_A", "0")] = "Accept_A"
    transitions[("DeadReject_A", "1")] = "Reject_A"
    transitions[("DeadReject_A", "2")] = "Start_A"
    transitions[("DeadReject_B", "0")] = "DeadReject_B"
    transitions[("DeadReject_B", "1")] = "DeadReject_B"
    transitions[("DeadReject_B", "2")] = "DeadReject_B"
    transitions[("DeadAccept_A", "0")] = "Accept_A"
    transitions[("DeadAccept_A", "1")] = "Reject_A"
    transitions[("DeadAccept_A", "2")] = "Start_A"
    transitions[("DeadAccept_B", "0")] = "DeadAccept_B"
    transitions[("DeadAccept_B", "1")] = "DeadAccept_B"
    transitions[("DeadAccept_B", "2")] = "DeadAccept_B"
    # Create the DFA and minimizes it.
    dfa = Dfa(alphabet, states, accepting_states, start_state, transitions)
    minimized_dfa = dfa.minimize()
    # Assert the minimized Dfa's size appropriately minimized
    # (one start, one accept, one reject state)
    # and check select strings against the two DFAs.
    assert len(minimized_dfa.states) == 3
    assert not minimized_dfa.accepts([]) and not dfa.accepts([])
    assert not minimized_dfa.accepts(list("0")) and not dfa.accepts(list("0"))
    assert minimized_dfa.accepts(list("1")) and dfa.accepts(list("1"))
    assert not minimized_dfa.accepts(list("2")) and not dfa.accepts(list("2"))
    assert not minimized_dfa.accepts(list("000")) and not dfa.accepts(list("000"))
    assert minimized_dfa.accepts(list("111")) and dfa.accepts(list("111"))
    assert not minimized_dfa.accepts(list("222")) and not dfa.accepts(list("222"))
    assert not minimized_dfa.accepts(list("2220000110011000020100002000")) and not dfa.accepts(list("2220000110011000020100002000"))
    assert minimized_dfa.accepts(list("222210000001100002001110002000111")) and dfa.accepts(list("222210000001100002001110002000111"))
def test_dfa_union():
    """ Builds two DFAs -- one accepting iff a string contains a "1"
    symbol and one accepting iff it contains a "2" symbol -- then
    checks that their symbolic and explicit unions have an
    equivalent and correct language.
    """
    alphabet = {"0", "1", "2"}

    def contains_symbol_dfa(symbol):
        # Two-state Dfa that accepts once `symbol` has been read.
        transitions = {}
        for sym in alphabet:
            transitions[("Reject", sym)] = "Reject"
            transitions[("Accept", sym)] = "Accept"
        transitions[("Reject", symbol)] = "Accept"
        return Dfa(alphabet, {"Reject", "Accept"}, {"Accept"}, "Reject", transitions)

    # Create abstract spec for the union of the two DFAs, then
    # compute its explicit form.
    abstract_union = contains_symbol_dfa("1") | contains_symbol_dfa("2")
    explicit_union = abstract_union.explicit()
    assert isinstance(abstract_union, AbstractSpec)
    assert isinstance(explicit_union, Dfa)
    # The union accepts any word containing a "1" or a "2".
    accepted = ["1", "2", "111", "222", "010", "020", "12"]
    rejected = ["", "0", "000"]
    for word in accepted:
        assert abstract_union.accepts(list(word)) and explicit_union.accepts(list(word))
    for word in rejected:
        assert not abstract_union.accepts(list(word)) and not explicit_union.accepts(list(word))
def test_dfa_intersection():
    """ Builds two DFAs -- one accepting iff a string contains a "1"
    symbol and one accepting iff it contains a "2" symbol -- then
    checks that their symbolic and explicit intersections have an
    equivalent and correct language.
    """
    alphabet = {"0", "1", "2"}

    def contains_symbol_dfa(symbol):
        # Two-state Dfa that accepts once `symbol` has been read.
        transitions = {}
        for sym in alphabet:
            transitions[("Reject", sym)] = "Reject"
            transitions[("Accept", sym)] = "Accept"
        transitions[("Reject", symbol)] = "Accept"
        return Dfa(alphabet, {"Reject", "Accept"}, {"Accept"}, "Reject", transitions)

    # Create abstract spec for the intersection of the two DFAs, then
    # compute its explicit form.
    abstract_intersection = contains_symbol_dfa("1") & contains_symbol_dfa("2")
    explicit_intersection = abstract_intersection.explicit()
    assert isinstance(abstract_intersection, AbstractSpec)
    assert isinstance(explicit_intersection, Dfa)
    # The intersection accepts only words containing both a "1" and a "2".
    accepted = ["12", "012210"]
    rejected = ["", "0", "1", "2", "000", "111", "222", "010", "020"]
    for word in accepted:
        assert abstract_intersection.accepts(list(word)) and explicit_intersection.accepts(list(word))
    for word in rejected:
        assert not abstract_intersection.accepts(list(word)) and not explicit_intersection.accepts(list(word))
def test_dfa_negation():
    """ Builds a DFA accepting iff a string contains a "1" symbol and
    checks that its symbolic and explicit negations have an
    equivalent and correct language.
    """
    alphabet = {"0", "1", "2"}
    # Two-state Dfa that accepts once a "1" has been read.
    transitions = {}
    for sym in alphabet:
        transitions[("Reject", sym)] = "Reject"
        transitions[("Accept", sym)] = "Accept"
    transitions[("Reject", "1")] = "Accept"
    dfa = Dfa(alphabet, {"Reject", "Accept"}, {"Accept"}, "Reject", transitions)
    # Create abstract spec for the negation of dfa and compute its
    # explicit form.
    abstract_negation = ~dfa
    explicit_negation = abstract_negation.explicit()
    assert isinstance(abstract_negation, AbstractSpec)
    assert isinstance(explicit_negation, Dfa)
    # The negation accepts exactly the words with no "1" symbol.
    accepted = ["", "0", "2", "000", "222", "020"]
    rejected = ["1", "111", "010", "12", "012210"]
    for word in accepted:
        assert abstract_negation.accepts(list(word)) and explicit_negation.accepts(list(word))
    for word in rejected:
        assert not abstract_negation.accepts(list(word)) and not explicit_negation.accepts(list(word))
def test_dfa_difference():
    """ Builds two DFAs -- one accepting iff a string contains a "1"
    symbol and one accepting iff it contains a "2" symbol -- then
    checks that their symbolic and explicit differences have an
    equivalent and correct language.
    """
    alphabet = {"0", "1", "2"}

    def contains_symbol_dfa(symbol):
        # Two-state Dfa that accepts once `symbol` has been read.
        transitions = {}
        for sym in alphabet:
            transitions[("Reject", sym)] = "Reject"
            transitions[("Accept", sym)] = "Accept"
        transitions[("Reject", symbol)] = "Accept"
        return Dfa(alphabet, {"Reject", "Accept"}, {"Accept"}, "Reject", transitions)

    # Create abstract spec for the difference of the two DFAs, then
    # compute its explicit form.
    abstract_difference = contains_symbol_dfa("1") - contains_symbol_dfa("2")
    explicit_difference = abstract_difference.explicit()
    assert isinstance(abstract_difference, AbstractSpec)
    assert isinstance(explicit_difference, Dfa)
    # The difference accepts words containing a "1" but no "2".
    accepted = ["1", "111", "010"]
    rejected = ["", "0", "2", "000", "222", "020", "12", "012210"]
    for word in accepted:
        assert abstract_difference.accepts(list(word)) and explicit_difference.accepts(list(word))
    for word in rejected:
        assert not abstract_difference.accepts(list(word)) and not explicit_difference.accepts(list(word))
def test_dfa_exact_length_constructor():
    """ Tests that the Dfa returned by the exact_length_dfa
    constructor accepts exactly the binary words of length 7.
    """
    dfa = Dfa.exact_length_dfa({"0","1"}, 7)
    # The single length-7 prefix is accepted.
    assert dfa.accepts("0110100")
    # Everything shorter or longer is rejected.
    rejected = ["", "0", "1", "01", "011", "0110", "01101", "011010",
                "01101000", "000000001111000000001100001000111100110110110"]
    for word in rejected:
        assert not dfa.accepts(word)
def test_dfa_min_length_constructor():
    """ Tests that the Dfa returned by the min_length_dfa
    constructor accepts exactly the binary words of length >= 7.
    """
    dfa = Dfa.min_length_dfa({"0", "1"}, 7)
    # Words shorter than 7 symbols are rejected.
    rejected = ["", "0", "1", "01", "011", "0110", "01101", "011010"]
    for word in rejected:
        assert not dfa.accepts(word)
    # Words of length 7 or more are accepted.
    accepted = ["0110100", "01101000",
                "000000001111000000001100001000111100110110110"]
    for word in accepted:
        assert dfa.accepts(word)
def test_dfa_max_length_constructor():
    """ Tests that the Dfa returned by the max_length_dfa
    constructor accepts exactly the binary words of length <= 7.
    """
    dfa = Dfa.max_length_dfa({"0", "1"}, 7)
    # Words of length 7 or less (including the empty word) are accepted.
    accepted = ["", "0", "1", "01", "011", "0110", "01101", "011010",
                "0110100"]
    for word in accepted:
        assert dfa.accepts(word)
    # Words longer than 7 symbols are rejected.
    rejected = ["01101000", "000000001111000000001100001000111100110110110"]
    for word in rejected:
        assert not dfa.accepts(word)
###################################################################################################
# Randomized Tests
###################################################################################################
# Randomized tests default parameters
RANDOM_TEST_NUM_ITERS = 1000 # Default to 1000, but can set lower when writing new tests.
# Bounds on the number of states in each randomly generated Dfa.
RANDOM_DFA_MIN_STATES = 1
RANDOM_DFA_MAX_STATES = 10
# Bounds on the alphabet size of each randomly generated Dfa.
RANDOM_DFA_MIN_SYMBOLS = 1
RANDOM_DFA_MAX_SYMBOLS = 3
@pytest.mark.slow
def test_dfa_minimize_random():
    """ For RANDOM_TEST_NUM_ITERS iterations, generates a random DFA with
    the number of states between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
    and the number of symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS.
    Then minimizes the dfa and ensures that the minimized version and
    the complete version either accept or reject all strings of length
    less than or equal to the number of states.
    """
    for _ in range(RANDOM_TEST_NUM_ITERS):
        # Generate random Dfa and calculate its minimized form.
        orig_dfa = generate_random_dfa(RANDOM_DFA_MIN_STATES, RANDOM_DFA_MAX_STATES, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
        min_dfa = orig_dfa.minimize()
        # Check that construction is valid
        assert isinstance(orig_dfa, Dfa)
        assert isinstance(min_dfa, Dfa)
        assert len(min_dfa.states) <= len(orig_dfa.states)
        # Iterate through every possible word that has length <= the number
        # of states in the original DFAs to ensure that the specs are equivalent.
        # (Two DFAs with at most n states each are equivalent iff they agree
        # on all sufficiently short words, so this bound is a sound check.)
        for word_length in range(len(orig_dfa.states)+1):
            for word in itertools.product(orig_dfa.alphabet, repeat=word_length):
                assert orig_dfa.accepts(word) == min_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_union_random():
    """ For RANDOM_TEST_NUM_ITERS iterations, generates 2 random DFAs with
    the number of states between the square root of RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
    (which puts the product construction size between these bounds) and the number of
    symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS. Then takes the
    logical and explicit union of the 2 DFAs and ensures that they are consistent
    on all strings of length less than or equal to the number of states.
    """
    for _ in range(RANDOM_TEST_NUM_ITERS):
        # Bound the component DFA sizes so the product construction stays
        # within the configured state limits.
        min_states_sqrt = int(math.sqrt(RANDOM_DFA_MIN_STATES))
        max_states_sqrt = int(math.sqrt(RANDOM_DFA_MAX_STATES))
        # Generate random Dfa and calculate its minimized form.
        # (The second Dfa reuses the first's alphabet so the union is defined.)
        dfa_1 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
        dfa_2 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS, alphabet=dfa_1.alphabet)
        abstract_dfa = dfa_1 | dfa_2
        explicit_dfa = abstract_dfa.explicit()
        # Check that construction is valid
        assert isinstance(abstract_dfa, AbstractSpec)
        assert isinstance(explicit_dfa, Dfa)
        # Iterate through every possible word that has length <= the number
        # of states in the new Dfa to ensure they are equivalent.
        for word_length in range(len(explicit_dfa.states)+1):
            for word in itertools.product(explicit_dfa.alphabet, repeat=word_length):
                assert abstract_dfa.accepts(word) == explicit_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_intersection_random():
    """ For RANDOM_TEST_NUM_ITERS iterations, generates 2 random DFAs with
    the number of states between the square root of RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
    (which puts the product construction size between these bounds) and the number of
    symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS. Then takes the
    logical and explicit intersection of the 2 DFAs and ensures that they are consistent
    on all strings of length less than or equal to the number of states.
    """
    for _ in range(RANDOM_TEST_NUM_ITERS):
        # Bound the component DFA sizes so the product construction stays
        # within the configured state limits.
        min_states_sqrt = int(math.sqrt(RANDOM_DFA_MIN_STATES))
        max_states_sqrt = int(math.sqrt(RANDOM_DFA_MAX_STATES))
        # Generate random Dfa and calculate its minimized form.
        # (The second Dfa reuses the first's alphabet so the intersection is defined.)
        dfa_1 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
        dfa_2 = generate_random_dfa(min_states_sqrt, max_states_sqrt, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS, alphabet=dfa_1.alphabet)
        abstract_dfa = dfa_1 & dfa_2
        explicit_dfa = abstract_dfa.explicit()
        # Check that construction is valid
        assert isinstance(abstract_dfa, AbstractSpec)
        assert isinstance(explicit_dfa, Dfa)
        # Iterate through every possible word that has length <= the number
        # of states in the new Dfa to ensure they are equivalent.
        for word_length in range(len(explicit_dfa.states)+1):
            for word in itertools.product(explicit_dfa.alphabet, repeat=word_length):
                assert abstract_dfa.accepts(word) == explicit_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_negation_random():
    """ For RANDOM_TEST_NUM_ITERS iterations, generates a random DFA with
    the number of states between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
    and the number of symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS.
    Then takes the logical and explicit negation of that DFA and ensure they are
    consistent on all strings of length less than or equal to the number of states.
    """
    for _ in range(RANDOM_TEST_NUM_ITERS):
        # Generate random Dfa and calculate its minimized form.
        dfa = generate_random_dfa(RANDOM_DFA_MIN_STATES, RANDOM_DFA_MAX_STATES, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
        abstract_dfa = ~dfa
        explicit_dfa = abstract_dfa.explicit()
        # Check that construction is valid
        assert isinstance(abstract_dfa, AbstractSpec)
        assert isinstance(explicit_dfa, Dfa)
        # Iterate through every possible word that has length <= the number
        # of states in the new DFA to ensure that the specs are equivalent.
        for word_length in range(len(explicit_dfa.states)+1):
            for word in itertools.product(explicit_dfa.alphabet, repeat=word_length):
                assert abstract_dfa.accepts(word) == explicit_dfa.accepts(word)
@pytest.mark.slow
def test_dfa_language_size_random():
    """ For RANDOM_TEST_NUM_ITERS iterations, generates a random DFA with
    the number of states between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES
    and the number of symbols between RANDOM_DFA_MIN_SYMBOLS and RANDOM_DFA_MAX_SYMBOLS.
    Then intersects this with a Dfa that accepts all words with length less than max_length,
    a random number between RANDOM_DFA_MIN_STATES and RANDOM_DFA_MAX_STATES. Enumerates
    all words in the alphabet of length at most max_length ensures the count is correct.
    """
    for _ in range(RANDOM_TEST_NUM_ITERS):
        max_length = random.randint(RANDOM_DFA_MIN_STATES,RANDOM_DFA_MAX_STATES)
        base_dfa = generate_random_dfa(RANDOM_DFA_MIN_STATES, RANDOM_DFA_MAX_STATES, RANDOM_DFA_MIN_SYMBOLS, RANDOM_DFA_MAX_SYMBOLS)
        # Capping the length makes the language finite, so brute-force
        # enumeration below terminates.
        length_limit_dfa = Dfa.max_length_dfa(base_dfa.alphabet, max_length)
        dfa = base_dfa & length_limit_dfa
        explicit_dfa = dfa.explicit()
        # Count the accepted words by brute-force enumeration and compare
        # against language_size().
        enumerated_count = 0
        for word_length in range(max_length+1):
            for word in itertools.product(base_dfa.alphabet, repeat=word_length):
                if explicit_dfa.accepts(word):
                    enumerated_count += 1
        assert explicit_dfa.language_size() == enumerated_count
###################################################################################################
# Helper Functions
###################################################################################################
def generate_random_dfa(min_states, max_states, min_symbols, max_symbols, alphabet = None):
    """ Generates a random Dfa object.

    :param min_states: The minimum number of states this Dfa can have.
    :param max_states: The maximum number of states this Dfa can have.
    :param min_symbols: The minimum number of symbols this Dfa can have.
        Ignored when *alphabet* is provided.
    :param max_symbols: The maximum number of symbols this Dfa can have.
        Ignored when *alphabet* is provided.
    :param alphabet: Optional explicit alphabet (collection of symbols). When
        given, it is used as-is instead of generating numeric string symbols.
        (Previously undocumented.)
    :returns: A Dfa with states named "State_1".."State_N", a uniformly random
        transition function, a random start state and a random (possibly
        empty) set of accepting states.
    """
    # Pick number of states and symbols.
    num_states = random.randint(min_states, max_states)
    if alphabet is None:
        num_symbols = random.randint(min_symbols, max_symbols)
        alphabet = set(map(str, range(num_symbols)))
    states = {"State_" + str(state_num) for state_num in range(1, num_states + 1)}
    # Pick a random number of accepting states. Sorting before shuffling makes
    # the result depend only on the RNG seed, not on set iteration order.
    shuffled_state_list = sorted(states)
    random.shuffle(shuffled_state_list)
    accepting_states = set(shuffled_state_list[0:random.randint(0, num_states)])
    # Pick a random start state.
    start_state = "State_" + str(random.randint(1, num_states))
    # Randomly generate transitions: every (state, symbol) pair gets a target.
    transitions = {}
    for symbol in alphabet:
        for state in states:
            transitions[(state, symbol)] = "State_" + str(random.randint(1, num_states))
    # Create and return the Dfa.
    return Dfa(alphabet, states, accepting_states, start_state, transitions)
| 2.828125 | 3 |
reporting/sortshootout.py | pinobatch/numism | 11 | 12764635 | #!/usr/bin/env python3
import os
import sys
import html5lib
from xml.etree import ElementTree as ET
import subprocess
from html import escape as H
"""
If you've found this, then you should help me report a bug in IDLE,
the official Python code editor. In IDLE 3.8.5 on Python 3.8.5 in
Xubuntu 20.04 LTS, if you open a blank file and edit it, you might
not be able to save it because IDLE couldn't tell at load time
whether it originally used UNIX newlines or CP/M newlines.
touch something.py && idle something.py
Trying to File > Save or File > Save As produces an exception on stderr:
Exception in Tkinter callback
Traceback (most recent call last):
[snip]
File "/usr/lib/python3.8/idlelib/iomenu.py", line 232, in writefile
text = self.fixnewlines()
File "/usr/lib/python3.8/idlelib/iomenu.py", line 252, in fixnewlines
text = text.replace("\n", self.eol_convention)
TypeError: replace() argument 2 must be str, not None
"""
# CSS embedded verbatim into the generated report page (see main()).
stylesheet = """
/* Original stylesheet by Daid */
table { border-collapse: collapse }
td, th { border: #333 solid 1px; text-align: center; line-height: 1.5}
.PASS { background-color: #6e2 }
.FAIL { background-color: #e44 }
.UNKNOWN { background-color: #fd6 }
td { font-size:80% }
th { background:#eee }
th:first-child { text-align:right; padding-right:4px }
body { font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif }
/* additions by Pino */
td>img { vertical-align: text-bottom }
"""
# Namespace map for ElementTree queries against html5lib's XHTML-namespaced tree.
htmlns = {
    'html': 'http://www.w3.org/1999/xhtml',
}
# Serialize elements with an unprefixed (default) XHTML namespace.
ET.register_namespace('', "http://www.w3.org/1999/xhtml")
def eldump(el):
    """Debug helper: print *el* serialized as a unicode XML string."""
    serialized = ET.tostring(el, encoding="unicode")
    print(serialized)
def iterdump(rows):
    """Debug helper: print one repr() per row, newline separated."""
    lines = [repr(row) for row in rows]
    print("\n".join(lines))
def destructive_iter(ls):
    """Destructive iterator over a mutable sequence.

    Yields each element of *ls* in order; once the consumer advances past an
    element, its slot is overwritten with None so the object can be released
    to the garbage collector."""
    for index in range(len(ls)):
        item = ls[index]
        yield item
        # Only clear the slot after the consumer resumes, so ls[index] is
        # still valid while the yielded element is being processed.
        ls[index] = None
def xdg_open(filename):
    """Open *filename* with the platform's default application
    ("start" on Windows, "xdg-open" elsewhere)."""
    if os.name == 'nt':
        opener = ["start", "", filename]
    else:
        opener = ["xdg-open", filename]
    subprocess.run(opener)
def load_shootout(filename):
    """
    Load an HTML file from Daid's Game Boy emulator shootout.
    Return a 3-tuple (emunames, testnames, allresults) where
    - emunames is [(name, num_tests_passed), ...]
    - testnames is [testname, ...]
    - allresults is {emuname: {testname: (True if passed, img[src]), ...}, ...}
    """
    with open(filename, "r", encoding="utf-8") as infp:
        doc = html5lib.parse(infp)
    # Find the table in this document with the most rows, where
    # a "row" is a tr child of a thead/tbody child of a table
    tables = (
        el.findall("./*/html:tr", htmlns)
        for el in doc.findall(".//html:table", htmlns)
    )
    table = max(tables, key=len)
    # Iterate destructively so each row's subtree can be garbage collected
    # as soon as it has been processed; drop the other references for the
    # same reason.
    rowit = destructive_iter(table)
    doc = tables = table = None  # drop variables
    # Header row: each <th> text looks like "Name (passed/total)"; split into
    # (name, passed-count) pairs, skipping the leading "Name of test" column.
    emunames = [th.text.split("(", 1) for th in next(rowit)][1:]
    emunames = [(l.rstrip(), int(r.split('/', 1)[0])) for l, r in emunames]
    allresults = {n: {} for n, _ in emunames}
    testnames = []
    for row in rowit:
        row = list(row)
        # To reduce excess width of the name column on the sub-1080p
        # displays in smaller laptops, the name in the table includes
        # a zero-width space after each slash. It shifts the
        # limiting factor to channel_3_wave_ram_locked_write.gb
        testname = row[0].text.replace("\u200b", "")
        testnames.append(testname)
        # Result cells hold a PASS/FAIL text node and optionally a screenshot.
        for (emuname, _), result in zip(emunames, row[1:]):
            tpass, img = result.text, result.find("./html:img", htmlns)
            ispass = tpass.upper() == 'PASS'
            imsrc = img.get("src") if img is not None else None
            allresults[emuname][testname] = ispass, imsrc
    return emunames, testnames, allresults
def input_emu(prompt, emunames):
xprompt = "\n".join(
"%4d: %s (%d)" % (i + 1, n, c) for i, (n, c) in enumerate(emunames)
)
xprompt = "\n".join((
prompt, xprompt, "Enter a number from 1 to %d: " % len(emunames)
))
while True:
num = input(xprompt).strip()
if num == '': return None
try:
num = int(num)
except ValueError:
print("%s: not a whole number" % num)
continue
if not 1 <= num <= len(emunames):
print("%s: not in range 1 to %d" % (num, len(emunames)))
continue
return num - 1
def shootoutkey(row, col1=None, coldiff=None):
    """Calculate the key for sorting a shootout row.

    row - a tuple (testname, results) where results is [(passing, ...), ...]
        and passing is a truthy or falsy value.
    col1 and coldiff - optional indices into results; None disables that
        component of the key. (The old docstring referred to a nonexistent
        "col2" parameter and misdescribed the return value.)

    Return a tuple (col12same, col1fail, failcount) where
    - col12same is True when results[col1] and results[coldiff] have the
      same pass/fail outcome, so rows where they differ sort first
    - col1fail is 0 if results[col1] passes else 1
    - failcount is the total number of failing results in the row
    """
    testname, results = row
    fails = [0 if x[0] else 1 for x in results]
    col1fail = 0 if col1 is None else fails[col1]
    col2fail = 0 if coldiff is None else fails[coldiff]
    return col2fail == col1fail, col1fail, sum(fails)
def format_row(row, emunames):
    """Render one shootout result row as an HTML <tr> string.

    row - a tuple (testname, [(passing, imgsrc), ...]); emunames supplies
    the (name, passcount) pair matching each result column. A zero-width
    space is inserted after each slash in the test name so long names can
    wrap."""
    testname, results = row
    cells = ["<tr>\n <th>", H(testname.replace("/", "/\u200b")), "</th>\n"]
    for (emuname, _), (passing, imgsrc) in zip(emunames, results):
        verdict = "PASS" if passing else "FAIL"
        cells.append(' <td class="%s">%s<br><img src="%s" title="%s %s"></td>\n'
                     % (verdict, verdict, imgsrc, emuname, verdict))
    cells.append("</tr>\n")
    return "".join(cells)
def main(argv=None):
    """Build and open a sorted HTML shootout report.

    Loads the cached shootout page, optionally asks the user for one or two
    emulators to prioritize, sorts the tests so the most interesting ones
    come first, writes sortshootout.html and opens it with the platform's
    default browser.

    NOTE(review): *argv* is accepted but never read.
    """
    mainshootout = load_shootout(".cache/Daid-shootout.html")
    emunames, testnames, allresults = mainshootout
    # Commented-out merge of a separate mGBA results page, kept for reference.
##    mgba_extra = load_shootout(".cache/Daid-shootout-mgba.html")
##    emunames = [x for x in emunames if x[0] != 'mGBA']
##    emunames.extend(mgba_extra[0])
##    allresults.update(mgba_extra[2])
##    del mgba_extra
    print("Sorting tests based on decreasing pass rate")
    print("Optional: Choose emulators that one or two pass")
    col1emu = input_emu("Choose an emulator for column 1", emunames)
    col2emu = (input_emu("Choose an emulator for column 2", emunames)
               if col1emu is not None
               else None)
    # Move the chosen emulator(s) to the front of the column order.
    new_emunames = []
    if col1emu is not None: new_emunames.append(emunames[col1emu])
    if col2emu is not None: new_emunames.append(emunames[col2emu])
    new_emunames.extend(x for i, x in enumerate(emunames)
                        if i != col1emu and i != col2emu)
    emunames = None
    rows = [
        (testname, [allresults[e[0]][testname] for e in new_emunames])
        for testname in testnames
    ]
    # After reordering, the chosen emulators occupy columns 0 and 1.
    col1ok = 0 if col1emu is not None else None
    col2ok = 1 if col2emu is not None else None
    rows.sort(key=lambda row: shootoutkey(row, col1ok, col2ok))
    # rows is of the form
    # [(testname, [(passing, image), ...]), ...]
    # Now make our own table based on this
    title, subtitle = "Game Boy emulator shootout", ""
    if col2ok is not None:
        # Calculate subtitle for pass/fail differences
        emu1, emu2 = new_emunames[0][0], new_emunames[1][0]
        title = "Shootout: %s vs. %s" % (emu1, emu2)
        pass1not2 = pass2not1 = 0
        for row in rows:
            pass1, pass2 = row[1][0][0], row[1][1][0]
            if pass1 and not pass2: pass1not2 += 1
            if pass2 and not pass1: pass2not1 += 1
        pass1not2_pl = "tests" if pass1not2 != 1 else "test"
        pass2not1_pl = "tests" if pass2not1 != 1 else "test"
        subtitle = ("%s passes %d %s that %s fails, and %s passes %d %s that %s fails."
                    % (emu1, pass1not2, pass1not2_pl, emu2,
                       emu2, pass2not1, pass2not1_pl, emu1))
    elif col1ok is not None:
        title = "Shootout: %s vs. other emulators" % (new_emunames[0][0])
        tests_pl = "tests" if new_emunames[0][1] != 1 else "test"
        subtitle = ("%s passes %d %s."
                    % (new_emunames[0][0], new_emunames[0][1], tests_pl))
    # NOTE(review): values1 is computed but never used anywhere below.
    values1 = sum(1 for v in allresults[new_emunames[0][0]].values() if v[0])
    print(subtitle)
    out = [
        """<!DOCTYPE HTML><html><head><meta charset="utf-8"><title>""",
        H(title),
        """</title><style type="text/css">""",
        stylesheet,
        """</style></head><body><h1>""",
        H(title),
        "</h1>\n<p>", H(subtitle), """
Based on test ROM results by Daid.
</p><table id="results"><thead>\n<tr><th>Name of test</th>"""
    ]
    out.extend("<th>%s (%d)</th>" % row for row in new_emunames)
    out.append("</tr>\n</thead><tbody>\n")
    out.extend(format_row(row, new_emunames) for row in rows)
    out.append("</tbody></table></body></html>")
    outfilename = "sortshootout.html"
    with open(outfilename, "w", encoding="utf-8") as outfp:
        outfp.writelines(out)
    xdg_open(outfilename)
if __name__=='__main__':
    # main() ignores argv, so both branches behave the same; the canned
    # argument list is only a leftover from running under IDLE (see the
    # IDLE bug note near the top of this file).
    if 'idlelib' in sys.modules:
        main(["./htmltotsv.py", ".cache/names1920s.html", "-"])
    else:
        main()
| 2.453125 | 2 |
htp/api/scripts/candles.py | kirkjules/machine-learned-timeseries | 1 | 12764636 | <filename>htp/api/scripts/candles.py
import click
from celery import chord
from copy import deepcopy
from uuid import uuid4 # , UUID
from htp.toolbox import dates
from htp.aux import tasks
from datetime import datetime
from htp.aux.database import db_session
from htp.aux.models import GetTickerTask, SubTickerTask, Candles, Indicators,\
IndicatorTask
# imported celery app for chord to recognise backend.
# unsure if this is required in production code, was working fine earlier.
# from htp import celery
# print(celery.conf.result_backend)
def arg_prep(queryParameters):
    """Split one query-parameter dict into a list of monthly dicts.

    Uses dates.Select to cut the [from, to] window (New York local time)
    into month-sized ranges, and returns one deep copy of *queryParameters*
    per range with its "from"/"to" values replaced by that month's bounds.
    The input dict is never mutated."""
    template = deepcopy(queryParameters)
    monthly_windows = dates.Select(
        from_=template["from"].strftime("%Y-%m-%d %H:%M:%S"),
        to=template["to"].strftime("%Y-%m-%d %H:%M:%S"),
        local_tz="America/New_York").by_month()
    prepared = []
    for window in monthly_windows:
        template["from"] = window["from"]
        template["to"] = window["to"]
        prepared.append(deepcopy(template))
    return prepared
def get_data(ticker, price, granularity, from_, to, smooth):
    """Function to initiate ticker data download and entry logging in a
    database.

    Parameters
    ----------
    ticker : str
        The target instrument to be queried using the preset function for a
        given endpoint.
    price : str
        The candle type(s) for which ticker data should be sourced.
    granularity : str
        The time interval(s) that define the period of the timeseries data.
    from_ : datetime.datetime
        The startpoint from which data should be downloaded.
    to : datetime.datetime
        The endpoint to which data should be downloaded.
    smooth : bool
        A flag that the api endpoint accepts to ensure the close and open
        values for adjacent candles match.

    Returns
    -------
    None

    Notes
    -----
    - If the data download is successful the timeseries will be saved in the
      'candles' table, with a foreign key relating each row to the ticker
      query (ticker, price, granularity, batch window) that produced it.
    - Pre-existing rows matching the same ticker/price/granularity are
      recycled: their window is updated and all dependent rows are deleted.
    - Bug fix: each SubTickerTask's ``to`` timestamp is now parsed from
      ``params["to"]``; previously it was parsed from ``params["from"]``,
      so every sub-task recorded a zero-length window.
    """
    for val in price:
        for interval in granularity:
            args = {"price": val, "granularity": interval, "from": from_,
                    "to": to, "smooth": smooth}
            entry = db_session.query(GetTickerTask).filter(
                GetTickerTask.ticker == ticker, GetTickerTask.price == val,
                GetTickerTask.granularity == interval).first()
            if entry is None:
                batch_id = uuid4()
                db_session.add(GetTickerTask(
                    id=batch_id, ticker=ticker, price=val, _from=from_, to=to,
                    granularity=interval))
            else:
                # Recycle the existing batch row: refresh its window and wipe
                # dependent rows so stale candles/indicators don't linger.
                batch_id = entry.id
                setattr(entry, "_from", from_)
                setattr(entry, "to", to)
                for table in [SubTickerTask, Candles, IndicatorTask,
                              Indicators]:
                    db_session.query(table).filter(table.batch_id == entry.id)\
                        .delete(synchronize_session=False)
            # One frozen task signature per monthly window; they all fan in
            # to a single merge_data callback via a celery chord.
            header = []
            param_set = arg_prep(args)
            for params in param_set:
                g = tasks.session_get_data.signature(
                    (ticker,), {"params": params, "timeout": 30})
                g.freeze()
                header.append(g)
                db_session.add(SubTickerTask(  # id=UUID(g.id),
                    batch_id=batch_id,
                    _from=datetime.strptime(
                        params["from"], '%Y-%m-%dT%H:%M:%S.%f000Z'),
                    to=datetime.strptime(
                        params["to"], '%Y-%m-%dT%H:%M:%S.%f000Z')))
            callback = tasks.merge_data.s(
                ticker, price, granularity, task_id=batch_id)
            chord(header)(callback)
    db_session.commit()
@click.command()
@click.argument("ticker", type=click.STRING)
@click.option("--price", default="M", type=click.STRING)
@click.option("--granularity", default="M15", type=click.STRING)
@click.option(
    "--from", "-f", "from_", default=None, type=click.DateTime(formats=None))
@click.option("--to", default=None, type=click.DateTime(formats=None))
@click.option("--smooth", default=False, type=click.BOOL)
def cli_get_data(ticker, price, granularity, from_, to, smooth):
    """Command-line wrapper around get_data().

    Bug fix: get_data() always returns None (the work is dispatched to
    celery), so the previous ``get_data(...).get()`` raised AttributeError
    on every invocation.
    """
    return get_data(ticker, price, granularity, from_, to, smooth)
if __name__ == "__main__":
    # Entry point: delegate to the click command (click parses sys.argv).
    cli_get_data()
| 2.328125 | 2 |
bot/player_commands/rank.py | UP929312/CommunityBot | 1 | 12764637 | <filename>bot/player_commands/rank.py
import discord # type: ignore
from discord.ext import commands # type: ignore
from discord.commands import Option # type: ignore
from typing import Optional
from database_manager import get_specific_networth_data, get_all_networth_data, get_sum_networth_data
from emojis import PAGE_ICON_EMOJIS
from parse_profile import input_to_uuid
from utils import error, guild_ids
def get_percent_categories(uuid: str, user_data: tuple) -> dict[str, float]:
    """
    For each networth category this player has data for, return the
    percentage of all stored players whose value is at or below the
    player's value — 100% = 0 money, 0.03% = extremely rich.

    ``user_data`` is a single database row indexed positionally in the
    category order below; the previous ``dict`` annotation was wrong (see
    the recorded mypy errors further down this module). Categories whose
    value is below 1 are skipped so empty containers don't appear as 100%.
    """
    data = get_all_networth_data()
    percent_categories = {}
    category_order = ["purse", "banking", "inventory", "accessories", "ender chest",
                      "armor", "vault", "wardrobe", "storage", "pets"]
    for i, category in enumerate(category_order):
        if user_data[i] < 1:
            continue
        # Count every stored row strictly below this player's value; +1
        # includes the player themselves in the percentile.
        below = [row[i] for row in data if row[i] < user_data[i]]
        percent_categories[category] = ((len(below) + 1) / len(data)) * 100
    return percent_categories
def overall_percent(uuid: str, user_data: tuple) -> float:
    """
    Return the percentage of stored players whose total networth is at or
    below the sum of this player's category values.

    ``user_data`` is a single database row (tuple of category values); the
    previous ``dict`` annotation was wrong and was flagged by mypy (see the
    recorded errors further down this module).
    """
    user_total = sum(user_data)
    data = get_sum_networth_data()
    below = [row[0] for row in data if row[0] < user_total]
    return ((len(below) + 1) / len(data)) * 100
def fix(number_tuple: tuple) -> float:
    """Turn a (label, percentile) pair into a "top X%" figure: 100 minus the
    percentile, rounded to 3 decimal places and floored at 0.01."""
    _, percentile = number_tuple
    top_percent = round(100 - percentile, 3)
    return top_percent if top_percent > 0.01 else 0.01
'''
60: error: Argument 2 to "get_percent_categories" has incompatible type "Tuple[Any, ...]"; expected "Dict[Any, Any]"
66: error: Argument 2 to "overall_percent" has incompatible type "Tuple[Any, ...]"; expected "Dict[Any, Any]"
'''
class rank_cog(commands.Cog):
    """Discord cog exposing the ``rank`` command (prefix and slash variants),
    which shows how a player's networth percentiles compare to everyone
    stored in the local database."""
    def __init__(self, bot) -> None:
        # Keep a handle on the bot; only used by the cog framework.
        self.client = bot
    @commands.command(name="rank")
    async def rank_command(self, ctx, provided_username: Optional[str] = None) -> None:
        """Prefix-command entry point; delegates to rank()."""
        await self.rank(ctx, provided_username, is_response=False)
    @commands.slash_command(name="rank", description="See how people's networth stacks up against everyone elses", guild_ids=guild_ids)
    async def rank_slash(self, ctx, username: Option(str, "username:", required=False)):
        """Slash-command entry point; checks send permissions then delegates
        to rank() with is_response=True so replies use ctx.respond()."""
        if not (ctx.channel.permissions_for(ctx.guild.me)).send_messages:
            return await ctx.respond("You're not allowed to do that here.", ephemeral=True)
        await self.rank(ctx, username, is_response=True)
    #==========================================================================================================================
    async def rank(self, ctx, provided_username: Optional[str] = None, is_response: bool = False) -> None:
        """Shared implementation: resolve the player, compute their networth
        percentiles and send an embed with the top three categories, the
        weakest category and the overall percentile."""
        player_data = await input_to_uuid(ctx, provided_username, is_response=is_response)
        if player_data is None:
            return None
        username, uuid = player_data
        user_data = get_specific_networth_data(uuid)
        if len(user_data) == 0:
            return await error(ctx, "Error, not enough data!", "We don't know how much your profile is worth right now, please use the networth command first!", is_response=is_response)
        categories = get_percent_categories(uuid, user_data[0])
        # Best (lowest "top X%") categories first.
        sorted_data = sorted(categories.items(), key=lambda x: x[1], reverse=True)
        if len(sorted_data) < 4:
            return await error(ctx, "Error, not enough data!", "This person's API settings are to restrictive, or they have lots of empty containers.", is_response=is_response)
        # Deliberately a (label, percentile) pair so fix() can unpack it.
        total_networth_percentage = None, overall_percent(uuid, user_data[0])
        string = [f"{PAGE_ICON_EMOJIS['overall']} Their overall networth is in the highest {fix(total_networth_percentage)}% of players.",
                  f"",
                  f"{PAGE_ICON_EMOJIS[sorted_data[0][0]]} For {sorted_data[0][0]}, they're in the top {fix(sorted_data[0])}% of players.",
                  f"{PAGE_ICON_EMOJIS[sorted_data[1][0]]} For {sorted_data[1][0]}, they're in the top {fix(sorted_data[1])}% of players.",
                  f"{PAGE_ICON_EMOJIS[sorted_data[2][0]]} For {sorted_data[2][0]}, they're in the top {fix(sorted_data[2])}% of players.",
                  f"",
                  f"{PAGE_ICON_EMOJIS[sorted_data[-1][0]]} For {sorted_data[-1][0]}, they're in the bottom {fix(sorted_data[-1])}% of players."]
        embed = discord.Embed(title=f"{username}'s stats:", description="\n".join(string), url=f"https://sky.shiiyu.moe/stats/{username}", colour=0x3498DB)
        embed.set_thumbnail(url=f"https://mc-heads.net/head/{username}")
        embed.set_footer(text=f"Command executed by {ctx.author.display_name} | Community Bot. By the community, for the community.")
        if is_response:
            await ctx.respond(embed=embed)
        else:
            await ctx.send(embed=embed)
| 2.703125 | 3 |
services/scrapping_service.py | fedsp/site2data | 0 | 12764638 | from bs4 import BeautifulSoup
import requests
from urllib.parse import urlsplit, urlunsplit
from config import settings
from logo_finder_service import LogoFinderService
from phone_finder_service import PhoneFinderService
from time import sleep
from selenium import webdriver
#from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
class ScrappingService():
    """Scrapes a website for its logo and contact phone numbers.

    A cheap ``requests`` pass is tried first; when it finds nothing, the
    slower Selenium pass is used for javascript-rendered pages. Results are
    stored on ``self.logo`` and ``self.phones``.
    """
    def __init__(self, website_url: str) -> None:
        """Normalize *website_url* down to ``scheme://netloc`` (no path,
        no trailing slash). Raises Exception when the url has no scheme."""
        self.website_url = website_url
        if self.website_url[-1] == '/':
            self.website_url = self.website_url[:-1]
        split = urlsplit(website_url)
        if split.scheme == "":
            raise Exception(f"Error: {website_url} url without scheme.")
        self.website_url = f"{split.scheme}://{split.netloc}"
    def check_if_website_exists(self) -> bool:
        '''Simple and fast requests get just to check if the domain returns something.'''
        response = requests.get(self.website_url)
        # Bug fix: requests.Response exposes ``status_code`` — the previous
        # ``response.code`` raised AttributeError on every call. Any 2xx
        # status counts as "exists".
        return 200 <= response.status_code <= 299
    def scrap(self) -> None:
        '''Main scrapping orchestrator'''
        self.scrap_using_simple_request()
        # NOTE(review): this assumes self.phones always has at least one
        # element after the simple pass — confirm PhoneFinderService
        # guarantees a non-empty list.
        if (self.logo == "NO LOGO FOUND") or (self.phones[0] == "NO PHONE FOUND"):
            self.scrap_using_selenium()
    def scrap_using_simple_request(self) -> None:
        '''Initial try to obtain the data. Faster than Selenium, but does not work at dynamic generated js pages'''
        response = requests.get(self.website_url)
        home_soup_obj = BeautifulSoup(response.content, 'html.parser')
        logo_seach_obj = LogoFinderService(
            soup_obj=home_soup_obj,
            website_url=self.website_url
        )
        self.logo = logo_seach_obj.find_logo()
        # Phones are looked for on the contact page (falls back to home).
        contact_url = self.find_contact_url(home_soup_obj)
        response = requests.get(contact_url)
        contact_soup_obj = BeautifulSoup(response.content, 'html.parser')
        phone_seach_obj = PhoneFinderService(
            soup_obj=contact_soup_obj,
            website_url=self.website_url
        )
        self.phones = phone_seach_obj.find_phones()
    def scrap_using_selenium(self) -> None:
        '''Slower than scrap_using_simple_request, but works for dynamic js sites'''
        chrome_options = Options()
        chrome_prefs = {}
        chrome_options.experimental_options["prefs"] = chrome_prefs
        # Skip image downloads to speed up page loads.
        chrome_prefs["profile.default_content_settings"] = {"images": 2}
        chrome_options.add_argument(
            f'user-agent={settings["ScrappingSettings"]["BrowserUserAgent"]}')
        # chrome_options.add_argument("--disable-extensions")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--log-level=3")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--no-sandbox")
        #driver = webdriver.Chrome(ChromeDriverManager().install(),options=chrome_options)
        driver = webdriver.Chrome(options=chrome_options)
        driver.get(self.website_url)
        # Give client-side javascript time to render the page.
        sleep(settings['SleepTimeToLoadJavascript'])
        home_body = driver.find_element_by_tag_name("body")
        home_body = home_body.get_attribute('innerHTML')
        home_soup_obj = BeautifulSoup(home_body, 'html.parser')
        logo_seach_obj = LogoFinderService(
            soup_obj=home_soup_obj,
            website_url=self.website_url
        )
        self.logo = logo_seach_obj.find_logo()
        contact_url = self.find_contact_url(soup_obj=home_soup_obj)
        driver.get(contact_url)
        sleep(settings['SleepTimeToLoadJavascript'])
        contact_body = driver.find_element_by_tag_name("body")
        contact_body = contact_body.get_attribute('innerHTML')
        contact_soup_obj = BeautifulSoup(contact_body, 'html.parser')
        phone_seach_obj = PhoneFinderService(
            soup_obj=contact_soup_obj,
            website_url=self.website_url
        )
        self.phones = phone_seach_obj.find_phones()
        driver.close()
        driver.quit()
    def find_contact_url(self, soup_obj: BeautifulSoup) -> str:
        '''In case of the page provided is the homepage, it gets the website contacts page'''
        all_links = soup_obj.find_all('a', href=True)
        contact_text = settings['ScrappingSettings']['ContactIdentifier']
        contact_links = [
            item for item in all_links if contact_text in item.text.lower()]
        if len(contact_links) < 1:
            # No contact link found: fall back to the homepage itself.
            return self.website_url
        contact_link = contact_links[0]
        contact_url = contact_link.attrs['href']
        if contact_url[0] == '/':
            # Relative link: resolve against the site root.
            contact_url = self.website_url + contact_url
        return contact_url
| 2.796875 | 3 |
Subtitles.py | ITNano/tech-home | 0 | 12764639 | <gh_stars>0
import requests
from html.parser import HTMLParser
import os
import zipfile
import codecs
import string
import random
class GoogleHTMLParser(HTMLParser):
    """Extracts the first search-result link from a Google results page.

    After feed()ing the page HTML, the link (if one was found) is available
    in the instance attribute ``link``; ``done`` reports whether a link was
    captured."""
    def __init__(self):
        HTMLParser.__init__(self)
        self.activated = False  # saw the first result container <div class="g">
        self.done = False       # captured the first <a> inside it
        self.link = None

    def handle_starttag(self, tag, attrs):
        # Arm on the first <div class="g">, then grab the very next anchor.
        if not self.activated and tag == "div" and self.has_attribute_value(attrs, "class", "g"):
            self.activated = True
        elif self.activated and not self.done and tag == "a":
            self.link = self.get_attribute(attrs, "href")
            self.done = True

    def get_attribute(self, attrs, attr):
        """Return the value of *attr*: a single value when exactly one match
        exists, otherwise the (possibly empty) list of matching values."""
        values = [value for name, value in attrs if name == attr]
        return values[0] if len(values) == 1 else values

    def has_attribute_value(self, attrs, attr, value):
        """True when *attrs* contains the exact (attr, value) pair."""
        return any(name == attr and val == value for name, val in attrs)
def get_series_subtitle(series, season, episode):
    """Retrieve the subtitle file of a series episode, either locally or from
    the internet (opensubtitles.com).

    series  -- the exact name of the series
    season  -- the season number
    episode -- the episode number"""
    season_str = get_two_digit_num(season)
    episode_str = get_two_digit_num(episode)
    target_file = "%s_s%s_e%s.srt" % (series.replace(' ', ''), season_str, episode_str)
    searchword = "%s s%se%s" % (series, season_str, episode_str)
    return get_subtitle(searchword, target_file)
def get_movie_subtitle(movie):
    """Retrieve the subtitle file of a movie, either locally or from the
    internet.

    movie -- search phrase for the movie; the more exact, the better."""
    return get_subtitle(movie, "%s.srt" % movie)
def get_subtitle(searchword, target_filename):
    """Retrieve a subtitle matching *searchword*, caching it locally.

    When <script_dir>/subtitles/<target_filename> already exists it is
    returned immediately; otherwise the subtitle is looked up and downloaded
    from opensubtitles.

    Returns the path of the subtitle file, or None when the lookup or the
    download failed (previously a path to a file that was never written
    could be returned).
    """
    target_dir = os.path.join(get_current_folder(), "subtitles")
    target_file = os.path.join(target_dir, target_filename)
    if os.path.exists(target_file):
        return target_file
    # Create the cache directory next to the script. (The old code created a
    # "subtitles" directory relative to the *current working directory*,
    # which did not match the path the file was written to.)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    subid = get_subtitle_id(searchword)
    if subid is None:
        return None
    # Propagate failure from the download instead of ignoring its result.
    return get_subtitle_from_opensubtitles(subid, target_file)
def get_subtitle_id(searchword):
    """Resolve *searchword* to an opensubtitles subtitle id via a Google
    search, or None when the search or parsing fails.

    searchword -- search phrase identifying a specific subtitle file."""
    base_url = "https://www.google.se/search?q="
    search_url = base_url + searchword.replace(' ', '+') + "+subs+opensubtitles&ie=utf-8&oe=utf-8"
    response = requests.get(search_url)
    if response.status_code != 200:
        print("Whoops, could not search for the file : Check internet connection")
        return None
    parser = GoogleHTMLParser()
    parser.feed(response.text)
    if not parser.done or parser.link is None:
        print("Invalid input from the search : Check your internet connection")
        return None
    # Result URLs end in .../<subid>/<slug>; the id is the second-to-last
    # path component.
    return parser.link.split("/")[-2]
# Retrieves a subtitle file from opensubtitles and stores it in the given file
# location if possible. Downloads the zip into a randomly named temporary
# folder, extracts the first .srt member it contains, moves it to *filename*
# and cleans the temporaries up. Returns *filename* on success, None on
# failure.
# NOTE(review): on the failure paths the temporary folder and zip are NOT
# removed, and the local name ``zip`` shadows the builtin — worth fixing.
# param subid: Opensubtitles subtitle ID for the wanted subtitle
# param filename: The path to store the subtitle to.
def get_subtitle_from_opensubtitles(subid, filename):
    file_url = "http://dl.opensubtitles.org/en/download/vrf-108d030f/sub/"+subid
    tmp_folder = get_random_string(16)
    tmp_file = tmp_folder + "/subtitle_tmp.zip"
    os.makedirs(tmp_folder)
    # Stream the zip to disk in small chunks.
    fetch_req = requests.get(file_url, stream=True)
    with open(tmp_file, 'wb') as tmp:
        for chunk in fetch_req.iter_content(chunk_size=512):
            if chunk:
                tmp.write(chunk)
    if not zipfile.is_zipfile(tmp_file):
        print("The subtitle could not be loaded!")
        return None
    zip = zipfile.ZipFile(tmp_file)
    members = zip.namelist()
    # Keep the last .srt member found in the archive.
    subtitle_file = None
    for member in members:
        if member[member.rfind('.'):] == ".srt":
            subtitle_file = member
    if subtitle_file is None:
        print("Invalid source file, could not find any subtitle file in it.")
        return None
    zip.extract(subtitle_file, path=tmp_folder)
    zip.close()
    # Move the extracted subtitle into place, then remove the temporaries.
    os.rename(tmp_folder+"/"+subtitle_file, filename)
    os.remove(tmp_file)
    os.rmdir(tmp_folder)
    return filename
def get_random_string(length):
    """Return a random string of *length* uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def get_current_folder():
    """Return the absolute path of the directory this script lives in
    (symlinks resolved)."""
    script_path = os.path.realpath(__file__)
    return os.path.dirname(script_path)
def get_two_digit_num(num):
    """Render *num* as a two-digit string: 4 becomes "04", 15 becomes "15".

    Raises ValueError when int(num) is negative or >= 100."""
    value = int(num)
    if value < 0:
        raise ValueError("Value must be greater than 0")
    if value >= 100:
        raise ValueError("Value must be less than 100")
    # str(num) (not str(value)) keeps the original's handling of string input.
    return ("0" + str(num)) if value < 10 else str(num)
| 3.15625 | 3 |
citcall/citcall.py | saggafarsyad/citcall-python | 0 | 12764640 | <gh_stars>0
import base64
import json
import re
import requests
class Citcall:
"""
This is the Python Class client library for use Citcall's API.
To use this, you'll need a Citcall account and Your IP has been filtered in citcall system.
See citcall documentation for more information. This is currently a beta release.
"""
URL_CITCALL = "https://gateway.citcall.com"
VERSION = "/v3"
METHOD_SMS = "/sms"
METHOD_SMS_OTP = "/smsotp"
METHOD_SYNC_MISCALL = "/call"
METHOD_ASYNC_MISCALL = "/asynccall"
METHOD_VERIFY_MOTP = "/verify"
    def __init__(self, userid, apikey):
        """
        The constructor for the Citcall class.

        The credentials are stored as-is and are combined into a
        base64-encoded "userid:apikey" Authorization header by
        send_request().

        Parameters :
            userid (str) : your Citcall userid
            apikey (str) : your Citcall apikey
        """
        self.userid = userid
        self.apikey = apikey
    def sync_miscall(self, param):
        """
        Synchronous miscall: validates the input and blocks on the gateway.

        Parameters :
            param (dict) : must contain "msisdn" (str) and "gateway"
                (int, valid range 0-5); may contain "valid_time" (positive
                int) together with "limit_try" to enable OTP verification.
        Returns :
            (dict) : decoded gateway response, or a local "rc"/"info" error
                dict when validation fails.
        """
        if "msisdn" in param.keys() and "gateway" in param.keys():
            msisdn = param["msisdn"]
            gateway = param["gateway"]
            # Gateways are numbered 0-5.
            if gateway > 5 or gateway < 0:
                ret = {
                    "rc": "06",
                    "info": "invalid gateway"
                }
                return ret
            else:
                _continue = False
                msisdn = self.clean_msisdn(msisdn)
                msisdn = re.sub('/[^0-9]/', '', msisdn)
                # Numbers with country code 62 (Indonesia) must be 11-14
                # digits; lengths above 13 are only accepted when the prefix
                # passes is_three() (apparently the "Three" operator —
                # confirm). Other countries only get a length sanity check.
                if msisdn[0:2] == "62":
                    if 10 < len(msisdn) < 15:
                        prefix = msisdn[0:5]
                        if len(msisdn) > 13:
                            if self.is_three(prefix):
                                _continue = True
                        else:
                            _continue = True
                else:
                    if 9 < len(msisdn) < 18:
                        _continue = True
                if _continue:
                    param_hit = {
                        "msisdn": msisdn,
                        "gateway": gateway,
                    }
                    # Optional OTP verification window: valid_time must be a
                    # positive int and must be accompanied by limit_try.
                    valid_verify = True
                    if "valid_time" in param.keys():
                        valid_time = param['valid_time']
                        if isinstance(valid_time, int) and valid_time > 0:
                            if "limit_try" in param.keys():
                                limit_try = param["limit_try"]
                                # NOTE(review): this re-checks valid_time,
                                # not limit_try — likely a copy/paste slip.
                                if not isinstance(valid_time, int) and valid_time <= 0:
                                    valid_verify = False
                                else:
                                    param_hit["valid_time"] = valid_time
                                    param_hit["limit_try"] = limit_try
                            else:
                                valid_verify = False
                    if valid_verify:
                        method = "sync_miscall"
                        ret = self.send_request(param_hit, method)
                    else:
                        ret = {
                            "rc": "06",
                            "info": "invalid verify data"
                        }
                        return ret
                else:
                    ret = {
                        "rc": "06",
                        "info": "invalid verify data"
                    }
                    return ret
        # NOTE(review): when "msisdn"/"gateway" are absent, execution falls
        # through to here with ``ret`` unbound and raises NameError — a
        # "missing parameter" error dict was presumably intended.
        return json.loads(ret)
    def async_miscall(self, param):
        """
        Asynchronous miscall: same validation as sync_miscall, but the
        gateway call returns immediately.

        Parameters :
            param (dict) : must contain "msisdn" (str) and "gateway"
                (int, valid range 0-5); may contain "valid_time" and
                "limit_try" for OTP verification.
        Returns :
            (dict) : gateway response (raw send_request() result) or a local
                "rc"/"info" error dict when validation fails.
        """
        if "msisdn" in param.keys() and "gateway" in param.keys():
            msisdn = param["msisdn"]
            gateway = param["gateway"]
            # Gateways are numbered 0-5.
            if gateway > 5 or gateway < 0:
                ret = {
                    "rc": "06", # gateway out of range
                    "info": "invalid gateway",
                }
                return ret
            else:
                _continue = False
                msisdn = self.clean_msisdn(msisdn)
                msisdn = re.sub('/[^0-9]/', '', msisdn)
                # Same msisdn validation as sync_miscall (country code 62 =>
                # 11-14 digits, >13 only for is_three() prefixes).
                if msisdn[0:2] == "62":
                    if 10 < len(msisdn) < 15:
                        prefix = msisdn[0:5]
                        if len(msisdn) > 13:
                            if self.is_three(prefix):
                                _continue = True
                        else:
                            _continue = True
                else:
                    if 9 < len(msisdn) < 18:
                        _continue = True
                if _continue:
                    param_hit = {
                        "msisdn": msisdn,
                        "gateway": gateway,
                    }
                    valid_verify = True
                    if "valid_time" in param.keys():
                        valid_time = param["valid_time"]
                        if isinstance(valid_time, int) and valid_time > 0:
                            if "limit_try" in param.keys():
                                limit_try = param["limit_try"]
                                # NOTE(review): sync_miscall uses ``<= 0``
                                # here; the ``<= 10`` threshold (and the
                                # re-check of valid_time instead of
                                # limit_try) looks like a bug — confirm.
                                if not isinstance(valid_time, int) and valid_time <= 10:
                                    valid_verify = False
                                else:
                                    param_hit["valid_time"] = valid_time
                                    param_hit["limit_try"] = limit_try
                        else:
                            valid_verify = False
                    if valid_verify:
                        method = "async_miscall"
                        ret = self.send_request(param_hit, method)
                        return ret
                    else:
                        ret = {
                            "rc": "06",
                            "info": "invalid verify data",
                        }
                        return ret
        else:
            ret = {}
        # NOTE(review): this line is only reached on the missing-parameter
        # path above, where ``ret`` is a dict — json.loads(dict) raises
        # TypeError. A "missing parameter" error dict was presumably meant.
        return json.loads(ret)
    def sms(self, param, method="sms"):
        """
        SMS: validates one or more comma-separated msisdns and sends a text.

        Parameters :
            param (dict)  : must contain "msisdn" (comma-separated str),
                "senderid" (str) and "text" (str); may contain
                "callback_url" when *method* is "sms-otp".
            method (str)  : "sms" (default) or "sms-otp".
        Returns :
            (dict) : decoded gateway response, or a local "rc"/"info" error
                dict when validation fails.
        """
        if "msisdn" in param.keys() and "senderid" in param.keys() and "text" in param.keys():
            msisdn = param["msisdn"]
            senderid = param["senderid"]
            text = param["text"]
            # Validate each comma-separated number and collect the cleaned
            # ones ("list_baru" = "new list").
            list_baru = []
            _list = msisdn.split(",")
            for val in _list:
                msisdn = self.clean_msisdn(val)
                msisdn = re.sub('/[^0-9]/', '', msisdn)
                # Same country-code-62 validation as the miscall methods.
                if msisdn[0:2] == "62":
                    if 10 < len(msisdn) < 15:
                        prefix = msisdn[0:5]
                        if len(msisdn) > 13:
                            if self.is_three(prefix):
                                list_baru.append(msisdn)
                            else:
                                ret = {
                                    "rc": "06",
                                    "info": "invalid msisdn or msisdn has invalid format!",
                                }
                                return ret
                else:
                    if 9 < len(msisdn) < 18:
                        list_baru.append(msisdn)
                    else:
                        ret = {
                            "rc": "06",
                            "info": "invalid msisdn or msisdn has invalid format!",
                        }
                        return ret
            msisdn = ",".join(list_baru)
            # Normalize the reserved sender id to uppercase.
            if senderid.lower().strip() == "citcall":
                senderid = senderid.upper()
            param_hit = {
                "msisdn": msisdn,
                "senderid": senderid,
                "text": text,
            }
            # If method sms-otp set callback url
            if method == "sms-otp" and "callback_url" in param:
                param_hit["callback_url"] = param["callback_url"]
            ret = self.send_request(param_hit, method)
        else:
            ret = {
                "rc": "88",
                "info": "missing parameter",
            }
            return ret
        return json.loads(ret)
def verify_motp(self, param):
"""
Verify Miscall OTP
Parameters :
param (dict)
Returns :
(dict)
"""
if "msisdn" in param.keys() and "trxid" in param.keys() and "token" in param.keys():
if param["token"].isnumeric():
if len(param["token"]) > 3:
msisdn = param["msisdn"]
trxid = param["trxid"]
token = param["token"]
_continue = False
msisdn = self.clean_msisdn(msisdn)
msisdn = re.sub('/[^0-9]/', '', msisdn)
if msisdn[0:2] == "62":
if 10 < len(msisdn) < 15:
prefix = msisdn[0:5]
if len(msisdn) > 13:
if self.is_three(prefix):
_continue = True
else:
_continue = True
else:
if 9 < len(msisdn) < 18:
_continue = True
if _continue:
param_hit = {
"msisdn": msisdn,
"trxid": trxid,
"token": token,
}
method = "verify_otp"
ret = self.send_request(param_hit, method)
else:
ret = {
"rc": "06",
"info": "invalid mobile number"
}
return ret
else:
ret = {
"rc": "06",
"info": "invalid token, token length minimum 4 digits",
}
return ret
else:
ret = {
"rc": "06",
"info": "invalid token, token length minimum 4 digits",
}
return ret
else:
ret = {
"rc": "88",
"info": "missing parameter",
}
return ret
return json.loads(ret)
def send_request(self, param, method):
"""
Sending request to Citcall API
Parameters :
param (dict)
method (str)
Returns :
res (str)
"""
userid = self.userid
apikey = self.apikey
tmp_auth = userid + ":" + apikey
auth = base64.b64encode(tmp_auth.encode())
if method == "sync_miscall":
action = Citcall.METHOD_SYNC_MISCALL
elif method == "async_miscall":
action = Citcall.METHOD_ASYNC_MISCALL
elif method == "sms":
action = Citcall.METHOD_SMS
elif method == "sms-otp":
action = Citcall.METHOD_SMS_OTP
elif method == "verify_otp":
action = Citcall.METHOD_VERIFY_MOTP
else:
raise Exception("unknown request method")
pass
url = Citcall.URL_CITCALL + Citcall.VERSION + action
content = json.dumps(param)
headers = {
"Content-Type": "application/json",
"Authorization": auth,
"Content-Length": str(len(content))
}
response = requests.post(url, data=content, headers=headers)
res = response.text
return res
@staticmethod
def clean_msisdn(msisdn):
"""
Clean Msisdn
Parameters :
msisdn (str)
Returns :
msisdn (str)
"""
if msisdn[0:1] != "+":
msisdn = "+" + msisdn
if msisdn[0:2] == "+0":
msisdn = "+62" + msisdn[2:]
if msisdn[0:1] == "0":
msisdn = "+62" + msisdn[2:]
return msisdn
@staticmethod
def is_three(prefix):
"""
Cek prefix is three
Parameters :
prefix (str)
Returns :
(boolean)
"""
if prefix == "62896":
return True
elif prefix == "62897":
return True
elif prefix == "62898":
return True
elif prefix == "62899":
return True
elif prefix == "62895":
return True
else:
return False
| 2.71875 | 3 |
demo/smallbatch.py | dw/acid | 15 | 12764641 | <reponame>dw/acid<filename>demo/smallbatch.py
#
# Compare behaviour when batches are large and record size is tiny.
#
import gzip
import random
import time
import acid
import acid.core
import acid.keylib
from demo_util import store_len
from demo_util import store_size
# Word fixture: the first 1000 sorted words from words.gz are used as keys.
words = sorted(line.decode().strip()
               for line in gzip.open('words.gz'))
words = words[:1000]
# Shuffled lookup order 1..len(words).  NOTE(review): Python 2 only --
# random.shuffle needs range() to return a list, and the demo loop below
# uses print statements.
rand = range(1, 1+len(words))
random.shuffle(rand)
def rands(coll, keys):
    """Time random lookups of *keys* in *coll* and return lookups/second.

    Asserts that every key is present (a raw get must not return None).
    NOTE(review): the rate is wall-clock based; for very small key sets the
    elapsed interval can be near zero -- confirm callers pass enough keys.
    """
    t = time.time()
    for k in keys:
        assert coll.get(k, raw=True) is not None, [k]
    t1 = time.time()
    return len(keys) / (t1 - t)
# For each batching strategy: build a fresh in-memory store, insert the word
# list one record per key, then rebatch into <=2000-byte groups and compare
# store size and lookup throughput before/after.
# NOTE(review): Python 2 script; `acid` is a project-local dependency.
for strat_klass in acid.core.BatchV2Strategy, acid.core.BatchStrategy, :
    compressor = acid.encoders.ZLIB
    store = acid.open('list:/')
    store.begin(write=True).__enter__()
    doink = store.add_collection('doink')
    prefix = acid.keylib.pack_int(doink.info['idx'], store.prefix)
    strat = strat_klass(prefix, store, compressor)
    doink.strategy = strat

    for word in words:
        doink.put(word)

    print 'done', strat, compressor
    print 'before len:', store_len(store)
    print 'before size:', store_size(store)
    print 'avgsz:', store_size(store)/store_len(store)
    print 'look/sec', rands(doink, rand)
    print

    # Coalesce records into batches of up to 2000 bytes and re-measure.
    strat.batch(max_bytes=2000)
    print 'done', strat, compressor
    print 'after len:', store_len(store)
    print 'after size:', store_size(store)
    print 'avgsz:', store_size(store)/store_len(store)
    print 'look/sec', rands(doink, rand)
    print

# Grab the last raw engine record for ad-hoc inspection.
# NOTE(review): indentation was lost in this copy -- these three lines are
# assumed to run once after the loop (they use the last `store`); confirm.
li = store.engine.items[-1]
lk = li[0]
lv = li[1]
| 2.328125 | 2 |
Cryptography/CaesarCipher/caesarcipher.py | AoWangDrexel/PiApproximationExperiments | 0 | 12764642 | <gh_stars>0
"""
Author: <NAME>
Date: 08/12/19
Description: Caesar cipher encryption and decryption, also a reverse encryption
"""
def reverse(plain_text):
    """Return *plain_text* reversed (simple reversal "cipher").

    Parameters :
        plain_text (str)
    Returns :
        (str): the input in reverse order
    """
    # Slicing with a negative step is the idiomatic O(n) reversal; the
    # original loop rebuilt the string with repeated concatenation.
    return plain_text[::-1]
def caesar_cipher(plain_text, key):
    """Shift alphabetic characters of *plain_text* by *key* positions.

    Case is preserved, non-letters pass through unchanged, and negative
    keys work because of the modulo-26 wrap.

    Parameters :
        plain_text (str)
        key (int)
    Returns :
        (str): the shifted text
    """
    cipher_text = ""
    for letter in plain_text:
        if "A" <= letter <= "Z":
            cipher_text += chr((ord(letter) + key - ord("A")) % 26 + ord("A"))
        # BUGFIX: the original tested `letter.islower` (a bound method,
        # always truthy) instead of calling it; the ord() range check
        # happened to mask the mistake for ASCII input.
        elif "a" <= letter <= "z":
            cipher_text += chr((ord(letter) + key - ord("a")) % 26 + ord("a"))
        else:
            cipher_text += letter
    return cipher_text
def letter_count(text):
    """Count character occurrences in *text*, ignoring spaces.

    The text is upper-cased first, so counting is case-insensitive.

    Parameters :
        text (str)
    Returns :
        (dict): mapping of character -> occurrence count
    """
    counts = {}
    for ch in text.upper():
        if ch == " ":
            continue
        counts[ch] = counts.get(ch, 0) + 1
    return counts
def decrypt(cipher_text):
    """Break a Caesar cipher with single-letter frequency analysis.

    Assumes the most frequent character of the ciphertext corresponds to
    plaintext "E".  Prints the recovered key and returns the decrypted text.

    Parameters :
        cipher_text (str)
    Returns :
        (str): the decrypted plaintext
    """
    alphabet = [chr(ord("A") + offset) for offset in range(26)]
    frequencies = letter_count(cipher_text)
    # First key (in insertion order) with the highest count -- this mirrors
    # the original strictly-greater scan's tie-breaking.
    most_common = max(frequencies, key=frequencies.get)
    # Key = distance from "E" to the most frequent cipher letter.
    break_key = alphabet.index(most_common) - alphabet.index("E")
    if break_key <= 0:
        break_key += 26
    print("The key is: " + str(break_key))
    # Shifting by 26 - key undoes the original shift (mod 26).
    return caesar_cipher(cipher_text, 26 - break_key)
def brute_force(cipher_text):
    """Print the decryption of *cipher_text* under every key from 0 to 25."""
    for candidate_key in range(26):
        print(caesar_cipher(cipher_text, candidate_key))
print("Welcome to the Caesar Cipher/Breaker!")
choice = input("Would you like to encrypt (e) or decrypt (d): ")
print()
if(choice == "e"):
text = input("What would you like to encrypt?: ")
key = int(input("What is the key?: "))
print()
print("Plaintext: " + text)
print("Ciphertext: " + caesar_cipher(text, key))
elif(choice == "d"):
text = input("What would you like to decrypt?: ")
print("Ciphertext: " + text)
print("Plaintext: " + decrypt(text))
else:
print("Oops there seems to be an error")
print()
# testing
with open("caesar.txt","r") as file:
f = file.read()
brute_force("Gdkkn sgdqd H gnod xnt zqd njzx")
print()
print(caesar_cipher("Hello there I hope you are okay", -1))
print()
print(decrypt("Gdkkn sgdqd H gnod xnt zqd njzx"))
print()
print(decrypt(f))
| 4.3125 | 4 |
tests/test_models.py | mishbahr/djangocms-responsive-wrapper | 18 | 12764643 | <reponame>mishbahr/djangocms-responsive-wrapper
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_djangocms-responsive-wrapper
------------
Tests for `djangocms-responsive-wrapper` models module.
"""
from django.conf import settings
from django.test import TestCase
from responsive_wrapper import models
class TestResponsive_wrapper(TestCase):
    """Placeholder test case for the responsive_wrapper models module."""

    def setUp(self):
        # No fixtures required yet.
        pass

    def test_something(self):
        # TODO: add real assertions against the imported `models`.
        pass

    def tearDown(self):
        pass
beatup/migrations/0012_alter_post_author.py | divmoe/beatup | 0 | 12764644 | # Generated by Django 4.0.1 on 2022-02-25 04:15
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django makemigrations -- avoid editing by hand; create
# a follow-up migration for further schema changes.
class Migration(migrations.Migration):

    dependencies = [
        ('beatup', '0011_alter_customer_photo'),
    ]

    operations = [
        # Post.author becomes a nullable FK to Customer; deleting the
        # customer cascades to their posts.
        migrations.AlterField(
            model_name='post',
            name='author',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='beatup.customer'),
        ),
    ]
| 1.4375 | 1 |
authenticate.py | kaamilmirza/UltimateTwitterScrapper | 1 | 12764645 | import tweepy
import pandas as pd
# Twitter API credentials are read from the first row of config.csv;
# expected columns: twitterApiKey, twitterApiSecret, twitterApiAccessToken,
# twitterApiAccessTokenSecret.
config = pd.read_csv("./config.csv")
twitterAPIkey = config['twitterApiKey'][0]
twitterAPIS = config['twitterApiSecret'][0]
twitterAPIAT = config['twitterApiAccessToken'][0]
twitterAPIATS = config['twitterApiAccessTokenSecret'][0]
# OAuth 1.0a handler.  NOTE(review): the access token/secret are loaded but
# never applied -- presumably auth.set_access_token(twitterAPIAT,
# twitterAPIATS) happens elsewhere; confirm.
auth = tweepy.OAuthHandler(twitterAPIkey, twitterAPIS)
backend/moderation/__init__.py | ranwise/djangochannel | 45 | 12764646 | <filename>backend/moderation/__init__.py
default_app_config = "backend.moderation.apps.ModerationConfig" | 1.164063 | 1 |
events/migrations/0019_eventspage.py | meagles/site | 4 | 12764647 | <gh_stars>1-10
# Generated by Django 3.2.6 on 2021-08-27 19:28
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django makemigrations -- avoid editing by hand.
class Migration(migrations.Migration):

    dependencies = [
        ('wagtailcore', '0062_comment_models_and_pagesubscription'),
        ('events', '0018_alter_apicalls_datetime'),
    ]

    operations = [
        # New EventsPage model: a wagtail Page subclass with optional
        # custom-link fields used when the page appears in menus.
        migrations.CreateModel(
            name='EventsPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')),
                ('link_url', models.CharField(blank=True, max_length=255, null=True, verbose_name='link to a custom URL')),
                ('url_append', models.CharField(blank=True, help_text='Use this to optionally append a #hash or querystring to the URL.', max_length=255, verbose_name='append to URL')),
                ('extra_classes', models.CharField(blank=True, help_text='Optionally specify css classes to be added to this page when it appears in menus.', max_length=100, verbose_name='menu item css classes')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.page', verbose_name='link to an internal page')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| 1.851563 | 2 |
src/image_predict/trainer.py | mina-moto/probspace_kiva_public | 2 | 12764648 | """
"""
import argparse
import os
import sys
import mlflow
import pandas as pd
import pytorch_lightning as pl
import yaml
from dotenv import load_dotenv
load_dotenv() # noqa
sys.path.append(f"{os.getenv('PROJECT_ROOT')}src/") # noqa
from image_predict.data_module.kiva_data_module import KivaDataModule
from image_predict.module import mlflow_module
from module.utils import set_seed
from pytorch_lightning import callbacks
from pytorch_lightning.loggers import MLFlowLogger
from sklearn.model_selection import KFold
from image_predict.models.swin_t_transfer_model import SwinTTransferModel
from image_predict.models.swin_t_finetune_model import SwinTFinetuneModel
class Trainer:
    """K-fold cross-validation trainer for Kiva image models.

    Splits the training CSV into ``validation_num`` folds, trains one model
    per fold with PyTorch Lightning, and logs fold datasets, checkpoints and
    hyper-parameters to the active MLflow run.
    """

    def __init__(
        self,
        train_path: str,
        validation_dataset_save_dir: str,
        model_dir_save_path: str,
        model_class_name: str = "SwinTTransferModel",
        seed: int = 0,
        validation_num: int = 4,
        model_params: dict = None,
        pl_trainer_params: dict = None,
        early_stopping_params: dict = None,
        train_loader_params: dict = None,
        val_loader_params: dict = None,
        *args,
        **kwargs,
    ):
        """
        Args:
            train_path: CSV file with at least an ``IMAGE_PATH`` column.
            validation_dataset_save_dir: directory for per-fold train/valid CSVs.
            model_dir_save_path: directory for per-fold checkpoints.
            model_class_name: key into the model registry
                ("SwinTTransferModel" or "SwinTFinetuneModel").
            seed: global RNG seed.
            validation_num: number of K-fold splits.
            model_params: forwarded to the model constructor.
            pl_trainer_params: forwarded to ``pl.Trainer``.
            early_stopping_params: forwarded to ``EarlyStopping``.
            train_loader_params: DataLoader settings for training folds.
            val_loader_params: DataLoader settings for validation folds.
        """
        self.model_class_name = model_class_name
        self.data_df = pd.read_csv(train_path)
        self.validation_dataset_save_dir = validation_dataset_save_dir
        self.model_dir_save_path = model_dir_save_path
        self.seed = seed
        self.validation_num = validation_num
        self.model_params = model_params
        self.pl_trainer_params = pl_trainer_params
        self.early_stopping_params = early_stopping_params
        self.train_loader_params = train_loader_params
        self.val_loader_params = val_loader_params

    def _resolve_model_class(self):
        """Return the model class named by ``self.model_class_name``.

        SECURITY/BUGFIX: replaces the previous ``eval(self.model_class_name)``
        call -- evaluating a config-supplied string could execute arbitrary
        code.  Only explicitly registered classes are allowed now.
        """
        registry = {
            "SwinTTransferModel": SwinTTransferModel,
            "SwinTFinetuneModel": SwinTFinetuneModel,
        }
        try:
            return registry[self.model_class_name]
        except KeyError:
            raise ValueError(
                f"unknown model_class_name: {self.model_class_name!r}"
            ) from None

    def __train(self, train, valid, fold_name):
        """Train a single fold and log its metrics and best checkpoint."""
        model = self._resolve_model_class()(
            model_params=self.model_params, fold_name=fold_name
        )
        datamodule = KivaDataModule(
            train,
            valid,
            train_loader_params=self.train_loader_params,
            val_loader_params=self.val_loader_params,
        )
        early_stopping = callbacks.EarlyStopping(
            monitor=f"val_{fold_name}_loss",
            **self.early_stopping_params
        )
        lr_monitor = callbacks.LearningRateMonitor()
        os.makedirs(self.model_dir_save_path, exist_ok=True)
        # Keep only the single best checkpoint (lowest validation loss).
        loss_checkpoint = callbacks.ModelCheckpoint(
            dirpath=self.model_dir_save_path,
            filename=fold_name,
            monitor=f"val_{fold_name}_loss",
            save_top_k=1,
            mode="min",
            save_last=False,
        )
        # Attach the Lightning logger to the already-active MLflow run so all
        # folds log into one run.
        mlf_logger = MLFlowLogger()
        mlf_logger._run_id = mlflow.active_run().info.run_id
        trainer = pl.Trainer(
            logger=mlf_logger,
            callbacks=[lr_monitor, loss_checkpoint, early_stopping],
            **self.pl_trainer_params,
        )
        trainer.fit(model, datamodule=datamodule)
        mlflow.log_metric(f"epoch_{fold_name}", trainer.current_epoch)

    def run(self):
        """Run the full K-fold training loop and log artifacts to MLflow."""
        set_seed(self.seed)
        kf = KFold(n_splits=self.validation_num, shuffle=True, random_state=self.seed)
        for fold, (train_index, valid_index) in enumerate(kf.split(self.data_df["IMAGE_PATH"])):
            train = self.data_df.loc[train_index]
            valid = self.data_df.loc[valid_index]
            os.makedirs(self.validation_dataset_save_dir, exist_ok=True)
            # Persist the exact fold split for reproducibility/inference.
            train.to_csv(f"{self.validation_dataset_save_dir}train_fold_{fold}.csv", index=False)
            valid.to_csv(f"{self.validation_dataset_save_dir}valid_fold_{fold}.csv", index=False)
            self.__train(train=train, valid=valid, fold_name=f"fold_{fold}")
        params = {
            "validation_dataset_save_dir": self.validation_dataset_save_dir,
            "model_dir_save_path": self.model_dir_save_path,
            "seed": self.seed,
            "validation_num": self.validation_num,
            "model_params": self.model_params,
            "pl_trainer_params": self.pl_trainer_params,
            "early_stopping_params": self.early_stopping_params,
            "train_loader_params": self.train_loader_params,
            "val_loader_params": self.val_loader_params,
        }
        mlflow.log_params(params)
        mlflow.log_artifact(self.validation_dataset_save_dir)
        mlflow.log_artifact(self.model_dir_save_path)
def main():
    """CLI entry point: load a YAML config, start an MLflow run, and train."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c',
        '--config',
        type=str,
        default='config/image_predict/trainer/trainer001.yaml',
        help='config path')
    args = parser.parse_args()
    with open(args.config) as f:
        config = yaml.safe_load(f)
    # The tracking URI comes from the environment (.env); experiment settings
    # come from the config file itself.
    mlflow_module.start_experiment(tracking_uri=os.getenv("TRACKING_URI"), **config["experiment_setting"])
    mlflow.log_artifact(args.config)
    trainer = Trainer(**config)
    trainer.run()
    mlflow.end_run()


if __name__ == '__main__':
    main()
| 2.09375 | 2 |
cotidia/team/migrations/0003_auto_20170902_1700.py | guillaumepiot/cotidia-team | 0 | 12764649 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-02 17:00
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated by Django makemigrations -- avoid editing by hand.
class Migration(migrations.Migration):

    dependencies = [
        ('team', '0002_auto_20170901_1317'),
    ]

    operations = [
        # Relax several Member fields to be optional (blank/null allowed).
        migrations.AlterField(
            model_name='member',
            name='bio',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='member',
            name='email',
            field=models.EmailField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='member',
            name='phone',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='member',
            name='role',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        # Slug becomes nullable but must stay unique when set.
        migrations.AlterField(
            model_name='member',
            name='slug',
            field=models.SlugField(null=True, unique=True),
        ),
    ]
| 1.609375 | 2 |
plyxproto/helpers.py | sbconsulting/plyprotobuf | 0 | 12764650 | class LexHelper:
offset = 0
def get_max_linespan(self, p):
defSpan = [1e60, -1]
mSpan = [1e60, -1]
for sp in range(0, len(p)):
csp = p.linespan(sp)
if csp[0] == 0 and csp[1] == 0:
if hasattr(p[sp], "linespan"):
csp = p[sp].linespan
else:
continue
if csp is None or len(csp) != 2:
continue
if csp[0] == 0 and csp[1] == 0:
continue
if csp[0] < mSpan[0]:
mSpan[0] = csp[0]
if csp[1] > mSpan[1]:
mSpan[1] = csp[1]
if defSpan == mSpan:
return (0, 0)
return tuple([mSpan[0] - self.offset, mSpan[1] - self.offset])
def get_max_lexspan(self, p):
defSpan = [1e60, -1]
mSpan = [1e60, -1]
for sp in range(0, len(p)):
csp = p.lexspan(sp)
if csp[0] == 0 and csp[1] == 0:
if hasattr(p[sp], "lexspan"):
csp = p[sp].lexspan
else:
continue
if csp is None or len(csp) != 2:
continue
if csp[0] == 0 and csp[1] == 0:
continue
if csp[0] < mSpan[0]:
mSpan[0] = csp[0]
if csp[1] > mSpan[1]:
mSpan[1] = csp[1]
if defSpan == mSpan:
return (0, 0)
return tuple([mSpan[0] - self.offset, mSpan[1] - self.offset])
def set_parse_object(self, dst, p):
dst.setLexData(
linespan=self.get_max_linespan(p),
lexspan=self.get_max_lexspan(p))
dst.setLexObj(p)
class Base(object):
    """Common behaviour for parse-tree nodes: visiting and parent linking."""
    parent = None
    lexspan = None
    linespan = None

    def v(self, obj, visitor):
        """Dispatch *visitor* over *obj*: accept() nodes, recurse into lists."""
        if obj is None:
            return
        if hasattr(obj, "accept"):
            obj.accept(visitor)
            return
        if isinstance(obj, list):
            for item in obj:
                self.v(item, visitor)

    @staticmethod
    def p(obj, parent):
        """Recursively set the parent link on *obj* (and on list members)."""
        if isinstance(obj, list):
            for item in obj:
                Base.p(item, parent)
        if hasattr(obj, "parent"):
            obj.parent = parent
# Lexical unit - wraps one production symbol with its lexspan/linespan so
# later analysis can map values back to source positions.
class LU(Base):

    def __init__(self, p, idx):
        """Capture symbol *idx* of production *p* together with its spans."""
        self.p = p
        self.idx = idx
        self.pval = p[idx]
        self.lexspan = p.lexspan(idx)
        self.linespan = p.linespan(idx)
        # If string is in the value (raw value) and start and stop lexspan
        # is the same, widen the span by the string length so it covers the
        # whole token.
        if isinstance(self.pval, str) \
                and self.lexspan is not None \
                and self.lexspan[0] == self.lexspan[1] \
                and self.lexspan[0] != 0:
            self.lexspan = tuple(
                [self.lexspan[0], self.lexspan[0] + len(self.pval)])
        super(LU, self).__init__()

    @staticmethod
    def i(p, idx):
        """Wrap p[idx] in an LU when it is a bare string; pass through otherwise.

        NOTE: the original also special-cased ``isinstance(p[idx], LU)``,
        which was redundant -- any non-str value falls through to the same
        ``return p[idx]``.
        """
        if isinstance(p[idx], str):
            return LU(p, idx)
        return p[idx]

    def describe(self):
        return "LU(%s,%s)" % (self.pval, self.lexspan)

    def __str__(self):
        return self.pval

    def __repr__(self):
        return self.describe()

    def accept(self, visitor):
        self.v(self.pval, visitor)

    def __iter__(self):
        for x in self.pval:
            yield x
# Base node
class SourceElement(Base):
    '''
    Base class for every element parsed out of a Protocol Buffers file by
    plyproto.  Tracks line/lex spans plus the raw parser production, and
    provides value-based equality over the instance __dict__.
    '''

    def __init__(self, linespan=[], lexspan=[], p=None):
        super(SourceElement, self).__init__()
        self._fields = []  # attribute names shown by __repr__; subclasses append
        self.linespan = linespan
        self.lexspan = lexspan
        self.p = p

    def __repr__(self):
        parts = ", ".join(
            "{0}={1!r}".format(name, getattr(self, name)) for name in self._fields
        )
        return "{0}({1})".format(self.__class__.__name__, parts)

    def __eq__(self, other):
        try:
            return self.__dict__ == other.__dict__
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other

    def setLexData(self, linespan, lexspan):
        """Record the spans computed by the lexer helpers."""
        self.linespan = linespan
        self.lexspan = lexspan

    def setLexObj(self, p):
        """Keep a reference to the raw parser production object."""
        self.p = p

    def accept(self, visitor):
        """Visitor hook; concrete subclasses override this."""
        pass
class Visitor(object):
    """Default visitor whose missing visit_* hooks accept everything."""

    def __init__(self, verbose=False):
        self.verbose = verbose

    def __getattr__(self, name):
        # Only visit_* lookups are synthesized; anything else is a real miss.
        if not name.startswith('visit_'):
            raise AttributeError(
                'name must start with visit_ but was {}'.format(name))

        def accept_all(element):
            if self.verbose:
                msg = 'unimplemented call to {}; ignoring ({})'
                print(msg.format(name, element))
            return True
        return accept_all
# visitor.visit_PackageStatement(self)
# visitor.visit_ImportStatement(self)
# visitor.visit_OptionStatement(self)
# visitor.visit_FieldDirective(self)
# visitor.visit_FieldType(self)
# visitor.visit_FieldDefinition(self)
# visitor.visit_EnumFieldDefinition(self)
# visitor.visit_EnumDefinition(self)
# visitor.visit_MessageDefinition(self)
# visitor.visit_MessageExtension(self)
# visitor.visit_MethodDefinition(self)
# visitor.visit_ServiceDefinition(self)
# visitor.visit_ExtensionsDirective(self)
# visitor.visit_Literal(self)
# visitor.visit_Name(self)
# visitor.visit_Proto(self)
# visitor.visit_LU(self)
| 2.84375 | 3 |